Lines matching references to symbol: ctrl
151 static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
162 dev_warn_once(ctrl->dev,
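
These fragments appear to come from the Qualcomm QSPI controller driver in the Linux kernel (drivers/spi/spi-qcom-qspi.c). After each cluster of matches, a short reconstruction sketch follows for context; every sketch assumes the driver's own struct qcom_qspi / struct qspi_xfer definitions and register macros, and any name not present in the matched lines is an assumption, not the driver's verbatim code. This first helper maps a transfer's bus width (1, 2, or 4 data lines) to the controller's IO-mode encoding and warns once on anything else:

    /* Sketch only: the mode macro names and encodings are assumed. */
    static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
                                       unsigned int buswidth)
    {
        switch (buswidth) {
        case 1:
            return SDR_1BIT;
        case 2:
            return SDR_2BIT;
        case 4:
            return SDR_4BIT;
        default:
            dev_warn_once(ctrl->dev,
                          "Unexpected bus width: %u\n", buswidth);
            return SDR_1BIT;
        }
    }
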
168 static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
173 xfer = &ctrl->xfer;
174 pio_xfer_cfg = readl(ctrl->base + PIO_XFER_CFG);
182 pio_xfer_cfg |= qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
184 writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
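
The PIO configuration helper is a read-modify-write of PIO_XFER_CFG: clear the direction and IO-mode fields, then refill them from the pending transfer. A sketch, with the field-mask names assumed:

    static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
    {
        const struct qspi_xfer *xfer = &ctrl->xfer;
        u32 pio_xfer_cfg;

        pio_xfer_cfg = readl(ctrl->base + PIO_XFER_CFG);
        /* Mask names are assumed; only the RMW shape is in the matches. */
        pio_xfer_cfg &= ~(TRANSFER_DIRECTION | MULTI_IO_MODE_MSK);
        pio_xfer_cfg |= xfer->dir;
        pio_xfer_cfg |= qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
        writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
    }
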
187 static void qcom_qspi_pio_xfer_ctrl(struct qcom_qspi *ctrl)
191 pio_xfer_ctrl = readl(ctrl->base + PIO_XFER_CTRL);
193 pio_xfer_ctrl |= ctrl->xfer.rem_bytes;
194 writel(pio_xfer_ctrl, ctrl->base + PIO_XFER_CTRL);
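
The companion helper programs the byte count with the same RMW pattern against PIO_XFER_CTRL; the low field holds the number of bytes still to move. Sketch:

    static void qcom_qspi_pio_xfer_ctrl(struct qcom_qspi *ctrl)
    {
        u32 pio_xfer_ctrl;

        pio_xfer_ctrl = readl(ctrl->base + PIO_XFER_CTRL);
        pio_xfer_ctrl &= ~REQUEST_COUNT_MSK;    /* assumed mask name */
        pio_xfer_ctrl |= ctrl->xfer.rem_bytes;
        writel(pio_xfer_ctrl, ctrl->base + PIO_XFER_CTRL);
    }
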
197 static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
201 qcom_qspi_pio_xfer_cfg(ctrl);
204 writel(QSPI_ALL_IRQS, ctrl->base + MSTR_INT_STATUS);
207 if (ctrl->xfer.dir == QSPI_WRITE)
211 writel(ints, ctrl->base + MSTR_INT_EN);
214 qcom_qspi_pio_xfer_ctrl(ctrl);
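
The transfer kick-off ties those together: configure, ack any stale interrupts, arm the direction-appropriate IRQ set, then write the byte count (which starts the hardware). A sketch with the IRQ-mask names assumed:

    static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
    {
        u32 ints;

        qcom_qspi_pio_xfer_cfg(ctrl);

        /* Ack any stale interrupts before arming a new set. */
        writel(QSPI_ALL_IRQS, ctrl->base + MSTR_INT_STATUS);

        /* Direction decides which FIFO-side IRQs matter. */
        if (ctrl->xfer.dir == QSPI_WRITE)
            ints = QSPI_ERR_IRQS | WR_FIFO_EMPTY;
        else
            ints = QSPI_ERR_IRQS | RESP_FIFO_RDY;
        writel(ints, ctrl->base + MSTR_INT_EN);

        /* Writing the remaining byte count starts the transfer. */
        qcom_qspi_pio_xfer_ctrl(ctrl);
    }
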
220 struct qcom_qspi *ctrl = spi_master_get_devdata(master);
223 spin_lock_irqsave(&ctrl->lock, flags);
224 writel(0, ctrl->base + MSTR_INT_EN);
225 ctrl->xfer.rem_bytes = 0;
226 spin_unlock_irqrestore(&ctrl->lock, flags);
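
The enclosing function name is not in the matches, but the lock/mask/zero sequence is the classic SPI-core handle_err callback shape: mask all controller interrupts and declare the in-flight transfer finished so the IRQ path stops touching the buffer. A sketch assuming that callback:

    static void qcom_qspi_handle_err(struct spi_master *master,
                                     struct spi_message *msg)
    {
        struct qcom_qspi *ctrl = spi_master_get_devdata(master);
        unsigned long flags;

        spin_lock_irqsave(&ctrl->lock, flags);
        writel(0, ctrl->base + MSTR_INT_EN);    /* mask everything */
        ctrl->xfer.rem_bytes = 0;               /* abandon in-flight bytes */
        spin_unlock_irqrestore(&ctrl->lock, flags);
    }
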
229 static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
234 if (speed_hz == ctrl->last_speed)
238 ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
240 dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
249 ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
251 dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
256 ctrl->last_speed = speed_hz;
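
Two details in the speed path are worth calling out: the clock is requested at four times the SPI rate (the core clock runs at 4x the bit clock), and the interconnect vote scales with the negotiated speed, cached in last_speed to skip redundant reprogramming. Sketch (the bandwidth derivation is assumed; the matches only show the vote):

    static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
    {
        unsigned int avg_bw_cpu;
        int ret;

        if (speed_hz == ctrl->last_speed)
            return 0;

        /* The core clock runs at 4x the SPI bit clock. */
        ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
        if (ret) {
            dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
            return ret;
        }

        /* No distinct peak requirement, so peak == average. */
        avg_bw_cpu = Bps_to_icc(speed_hz);
        ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
        if (ret) {
            dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
                    __func__, ret);
            return ret;
        }

        ctrl->last_speed = speed_hz;
        return 0;
    }
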
265 struct qcom_qspi *ctrl = spi_master_get_devdata(master);
274 ret = qcom_qspi_set_speed(ctrl, speed_hz);
278 spin_lock_irqsave(&ctrl->lock, flags);
282 ctrl->xfer.dir = QSPI_READ;
283 ctrl->xfer.buswidth = xfer->rx_nbits;
284 ctrl->xfer.rx_buf = xfer->rx_buf;
286 ctrl->xfer.dir = QSPI_WRITE;
287 ctrl->xfer.buswidth = xfer->tx_nbits;
288 ctrl->xfer.tx_buf = xfer->tx_buf;
290 ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
292 ctrl->xfer.rem_bytes = xfer->len;
293 qcom_qspi_pio_xfer(ctrl);
295 spin_unlock_irqrestore(&ctrl->lock, flags);
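
transfer_one fills the driver's single in-flight descriptor under the lock: direction and buffer come from whichever side of the spi_transfer is populated (the bus is half-duplex), and returning 1 tells the SPI core the transfer completes asynchronously, from the IRQ handler. Sketch:

    static int qcom_qspi_transfer_one(struct spi_master *master,
                                      struct spi_device *slave,
                                      struct spi_transfer *xfer)
    {
        struct qcom_qspi *ctrl = spi_master_get_devdata(master);
        unsigned long speed_hz = slave->max_speed_hz;
        unsigned long flags;
        int ret;

        if (xfer->speed_hz)
            speed_hz = xfer->speed_hz;

        ret = qcom_qspi_set_speed(ctrl, speed_hz);
        if (ret)
            return ret;

        spin_lock_irqsave(&ctrl->lock, flags);

        /* Half-duplex: exactly one of rx_buf/tx_buf is set per transfer. */
        if (xfer->rx_buf) {
            ctrl->xfer.dir = QSPI_READ;
            ctrl->xfer.buswidth = xfer->rx_nbits;
            ctrl->xfer.rx_buf = xfer->rx_buf;
        } else {
            ctrl->xfer.dir = QSPI_WRITE;
            ctrl->xfer.buswidth = xfer->tx_nbits;
            ctrl->xfer.tx_buf = xfer->tx_buf;
        }
        ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
                                          &master->cur_msg->transfers);
        ctrl->xfer.rem_bytes = xfer->len;
        qcom_qspi_pio_xfer(ctrl);

        spin_unlock_irqrestore(&ctrl->lock, flags);

        /* Completion is signalled from the IRQ handler. */
        return 1;
    }
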
305 struct qcom_qspi *ctrl;
310 ctrl = spi_master_get_devdata(master);
311 spin_lock_irqsave(&ctrl->lock, flags);
313 mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
325 writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
326 spin_unlock_irqrestore(&ctrl->lock, flags);
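
The message-prepare hook does one locked read-modify-write of MSTR_CONFIG. Which mode bits it sets (feedback clock, pin policy, FIFO vs. DMA mode) is not visible in the matches, so the bit names below are assumptions:

    static int qcom_qspi_prepare_message(struct spi_master *master,
                                         struct spi_message *message)
    {
        struct qcom_qspi *ctrl;
        unsigned long flags;
        u32 mstr_cfg;

        ctrl = spi_master_get_devdata(master);
        spin_lock_irqsave(&ctrl->lock, flags);

        mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
        mstr_cfg |= FB_CLK_EN | PIN_WPN | PIN_HOLDN;    /* assumed bits */
        writel(mstr_cfg, ctrl->base + MSTR_CONFIG);

        spin_unlock_irqrestore(&ctrl->lock, flags);
        return 0;
    }
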
331 static irqreturn_t pio_read(struct qcom_qspi *ctrl)
342 rd_fifo_status = readl(ctrl->base + RD_FIFO_STATUS);
345 dev_dbg(ctrl->dev, "Spurious IRQ %#x\n", rd_fifo_status);
350 wr_cnts = min(wr_cnts, ctrl->xfer.rem_bytes);
356 word_buf = ctrl->xfer.rx_buf;
357 ctrl->xfer.rem_bytes -= words_to_read * QSPI_BYTES_PER_WORD;
358 ioread32_rep(ctrl->base + RD_FIFO, word_buf, words_to_read);
359 ctrl->xfer.rx_buf = word_buf + words_to_read;
363 byte_buf = ctrl->xfer.rx_buf;
364 rd_fifo = readl(ctrl->base + RD_FIFO);
365 ctrl->xfer.rem_bytes -= bytes_to_read;
368 ctrl->xfer.rx_buf = byte_buf;
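
pio_read drains the RX FIFO in two steps: bulk 32-bit reads via ioread32_rep() for whole words, then one final register read unpacked a byte at a time for the sub-word tail, clamping to rem_bytes so it never overruns the caller's buffer. Sketch (status-field macros assumed):

    static irqreturn_t pio_read(struct qcom_qspi *ctrl)
    {
        u32 rd_fifo_status, rd_fifo;
        unsigned int wr_cnts, words_to_read, bytes_to_read;
        unsigned int i;
        u32 *word_buf;
        u8 *byte_buf;

        rd_fifo_status = readl(ctrl->base + RD_FIFO_STATUS);
        if (!(rd_fifo_status & FIFO_RDY)) {
            dev_dbg(ctrl->dev, "Spurious IRQ %#x\n", rd_fifo_status);
            return IRQ_NONE;
        }

        /* Never read past the end of the caller's buffer. */
        wr_cnts = (rd_fifo_status & WR_CNTS_MSK) >> WR_CNTS_SHFT;
        wr_cnts = min(wr_cnts, ctrl->xfer.rem_bytes);

        words_to_read = wr_cnts / QSPI_BYTES_PER_WORD;
        bytes_to_read = wr_cnts % QSPI_BYTES_PER_WORD;

        if (words_to_read) {
            word_buf = ctrl->xfer.rx_buf;
            ctrl->xfer.rem_bytes -= words_to_read * QSPI_BYTES_PER_WORD;
            ioread32_rep(ctrl->base + RD_FIFO, word_buf, words_to_read);
            ctrl->xfer.rx_buf = word_buf + words_to_read;
        }

        if (bytes_to_read) {
            byte_buf = ctrl->xfer.rx_buf;
            rd_fifo = readl(ctrl->base + RD_FIFO);
            ctrl->xfer.rem_bytes -= bytes_to_read;
            for (i = 0; i < bytes_to_read; i++)
                *byte_buf++ = rd_fifo >> (i * BITS_PER_BYTE);
            ctrl->xfer.rx_buf = byte_buf;
        }

        return IRQ_HANDLED;
    }
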
374 static irqreturn_t pio_write(struct qcom_qspi *ctrl)
376 const void *xfer_buf = ctrl->xfer.tx_buf;
384 wr_fifo_bytes = readl(ctrl->base + PIO_XFER_STATUS);
387 if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
389 wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
390 ctrl->xfer.rem_bytes -= wr_size;
395 ctrl->base + PIO_DATAOUT_1B);
396 ctrl->xfer.tx_buf = byte_buf;
403 rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
407 ctrl->xfer.rem_bytes -= wr_size * QSPI_BYTES_PER_WORD;
410 iowrite32_rep(ctrl->base + PIO_DATAOUT_4B, word_buf, wr_size);
411 ctrl->xfer.tx_buf = word_buf + wr_size;
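
pio_write mirrors that: when fewer than four bytes remain it feeds the one-byte data register, otherwise it bursts whole words into PIO_DATAOUT_4B, bounded by the FIFO space reported in PIO_XFER_STATUS. Sketch (the status shift name is assumed):

    static irqreturn_t pio_write(struct qcom_qspi *ctrl)
    {
        const void *xfer_buf = ctrl->xfer.tx_buf;
        const u32 *word_buf;
        const u8 *byte_buf;
        unsigned int wr_fifo_bytes, wr_fifo_words, wr_size, rem_words;

        wr_fifo_bytes = readl(ctrl->base + PIO_XFER_STATUS);
        wr_fifo_bytes >>= WR_FIFO_BYTES_SHFT;

        if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
            /* Sub-word tail: push one byte at a time. */
            byte_buf = xfer_buf;
            wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
            ctrl->xfer.rem_bytes -= wr_size;
            while (wr_size--)
                writel(*byte_buf++,
                       ctrl->base + PIO_DATAOUT_1B);
            ctrl->xfer.tx_buf = byte_buf;
        } else {
            /* Whole words: burst through the 4-byte data register. */
            word_buf = xfer_buf;
            rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
            wr_fifo_words = wr_fifo_bytes / QSPI_BYTES_PER_WORD;
            wr_size = min(rem_words, wr_fifo_words);
            ctrl->xfer.rem_bytes -= wr_size * QSPI_BYTES_PER_WORD;
            iowrite32_rep(ctrl->base + PIO_DATAOUT_4B, word_buf, wr_size);
            ctrl->xfer.tx_buf = word_buf + wr_size;
        }

        return IRQ_HANDLED;
    }
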
421 struct qcom_qspi *ctrl = dev_id;
424 spin_lock(&ctrl->lock);
426 int_status = readl(ctrl->base + MSTR_INT_STATUS);
427 writel(int_status, ctrl->base + MSTR_INT_STATUS);
429 if (ctrl->xfer.dir == QSPI_WRITE) {
431 ret = pio_write(ctrl);
434 ret = pio_read(ctrl);
439 dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
441 dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
443 dev_err(ctrl->dev, "IRQ error: NOC response error\n");
447 if (!ctrl->xfer.rem_bytes) {
448 writel(0, ctrl->base + MSTR_INT_EN);
449 spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
452 spin_unlock(&ctrl->lock);
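
The IRQ handler reads and immediately writes back MSTR_INT_STATUS (write-one-to-clear acking), dispatches to pio_read()/pio_write() by the recorded direction, reports error bits, and finalizes the transfer once rem_bytes reaches zero. Sketch (IRQ-bit names assumed):

    static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
    {
        struct qcom_qspi *ctrl = dev_id;
        irqreturn_t ret = IRQ_NONE;
        u32 int_status;

        spin_lock(&ctrl->lock);

        /* Read and ack in one go so no event is lost. */
        int_status = readl(ctrl->base + MSTR_INT_STATUS);
        writel(int_status, ctrl->base + MSTR_INT_STATUS);

        if (ctrl->xfer.dir == QSPI_WRITE) {
            if (int_status & WR_FIFO_EMPTY)
                ret = pio_write(ctrl);
        } else {
            if (int_status & RESP_FIFO_RDY)
                ret = pio_read(ctrl);
        }

        if (int_status & QSPI_ERR_IRQS) {
            if (int_status & RESP_FIFO_UNDERRUN)
                dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
            if (int_status & WR_FIFO_OVERRUN)
                dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
            if (int_status & HRESP_FROM_NOC_ERR)
                dev_err(ctrl->dev, "IRQ error: NOC response error\n");
            ret = IRQ_HANDLED;
        }

        if (!ctrl->xfer.rem_bytes) {
            /* Transfer done: mask IRQs and wake the SPI core. */
            writel(0, ctrl->base + MSTR_INT_EN);
            spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
        }

        spin_unlock(&ctrl->lock);
        return ret;
    }
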
461 struct qcom_qspi *ctrl;
465 master = devm_spi_alloc_master(dev, sizeof(*ctrl));
471 ctrl = spi_master_get_devdata(master);
473 spin_lock_init(&ctrl->lock);
474 ctrl->dev = dev;
475 ctrl->base = devm_platform_ioremap_resource(pdev, 0);
476 if (IS_ERR(ctrl->base))
477 return PTR_ERR(ctrl->base);
479 ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
480 sizeof(*ctrl->clks), GFP_KERNEL);
481 if (!ctrl->clks)
484 ctrl->clks[QSPI_CLK_CORE].id = "core";
485 ctrl->clks[QSPI_CLK_IFACE].id = "iface";
486 ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
490 ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
491 if (IS_ERR(ctrl->icc_path_cpu_to_qspi))
492 return dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
496 ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
499 dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
504 ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
506 dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
515 IRQF_TRIGGER_HIGH, dev_name(dev), ctrl);
534 ctrl->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core");
535 if (IS_ERR(ctrl->opp_table))
536 return PTR_ERR(ctrl->opp_table);
556 dev_pm_opp_put_clkname(ctrl->opp_table);
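
Probe order, reconstructed from the matches: allocate the devres-managed master, map the registers, bulk-request the "core"/"iface" clocks, get the CPU-to-QSPI interconnect path, sanity-vote then drop a token bandwidth, request the level-high IRQ, and bind an OPP table to the "core" clock (released again on the error path at 556). A condensed sketch; callback wiring and message strings not present in the matches are assumed:

    static int qcom_qspi_probe(struct platform_device *pdev)
    {
        struct device *dev = &pdev->dev;
        struct spi_master *master;
        struct qcom_qspi *ctrl;
        int ret;

        master = devm_spi_alloc_master(dev, sizeof(*ctrl));
        if (!master)
            return -ENOMEM;

        ctrl = spi_master_get_devdata(master);
        spin_lock_init(&ctrl->lock);
        ctrl->dev = dev;

        ctrl->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctrl->base))
            return PTR_ERR(ctrl->base);

        ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS,
                                  sizeof(*ctrl->clks), GFP_KERNEL);
        if (!ctrl->clks)
            return -ENOMEM;
        ctrl->clks[QSPI_CLK_CORE].id = "core";
        ctrl->clks[QSPI_CLK_IFACE].id = "iface";
        ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
        if (ret)
            return ret;

        ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
        if (IS_ERR(ctrl->icc_path_cpu_to_qspi))
            return dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
                                 "Failed to get cpu path\n");

        /* Vote a token bandwidth to verify the path works, then drop it
         * until a real transfer installs a rate-derived vote. */
        ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
                         Bps_to_icc(1000));
        if (ret) {
            dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
                    __func__, ret);
            return ret;
        }
        ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
        if (ret) {
            dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
                    __func__, ret);
            return ret;
        }

        ret = platform_get_irq(pdev, 0);
        if (ret < 0)
            return ret;
        ret = devm_request_irq(dev, ret, qcom_qspi_irq,
                               IRQF_TRIGGER_HIGH, dev_name(dev), ctrl);
        if (ret)
            return ret;

        /* Callback wiring assumed from the matched function names. */
        master->transfer_one = qcom_qspi_transfer_one;
        master->handle_err = qcom_qspi_handle_err;
        master->prepare_message = qcom_qspi_prepare_message;
        master->auto_runtime_pm = true;

        ctrl->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core");
        if (IS_ERR(ctrl->opp_table))
            return PTR_ERR(ctrl->opp_table);

        pm_runtime_enable(dev);
        ret = spi_register_master(master);
        if (ret) {
            pm_runtime_disable(dev);
            dev_pm_opp_put_clkname(ctrl->opp_table);    /* the put at 556 */
        }
        return ret;
    }
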
564 struct qcom_qspi *ctrl = spi_master_get_devdata(master);
571 dev_pm_opp_put_clkname(ctrl->opp_table);
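
remove() keeps only the teardown devres cannot do for it. A sketch, assuming the usual unregister-before-pm_runtime_disable ordering:

    static int qcom_qspi_remove(struct platform_device *pdev)
    {
        struct spi_master *master = platform_get_drvdata(pdev);
        struct qcom_qspi *ctrl = spi_master_get_devdata(master);

        /* Unregister first so no new transfers race the teardown. */
        spi_unregister_master(master);
        pm_runtime_disable(&pdev->dev);
        dev_pm_opp_put_clkname(ctrl->opp_table);
        return 0;
    }
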
579 struct qcom_qspi *ctrl = spi_master_get_devdata(master);
584 clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);
586 ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
588 dev_err_ratelimited(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
599 struct qcom_qspi *ctrl = spi_master_get_devdata(master);
602 ret = icc_enable(ctrl->icc_path_cpu_to_qspi);
604 dev_err_ratelimited(ctrl->dev, "%s: ICC enable failed for cpu: %d\n",
609 ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
613 return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
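
The runtime-PM pair mirror each other: suspend gates the clocks and drops the interconnect vote, resume re-enables both and restores the last negotiated rate, again with the 4x core-clock factor. Sketch:

    static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
    {
        struct spi_master *master = dev_get_drvdata(dev);
        struct qcom_qspi *ctrl = spi_master_get_devdata(master);
        int ret;

        /* Drop the rate vote before gating the clocks (assumed step). */
        dev_pm_opp_set_rate(dev, 0);
        clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);

        ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
        if (ret) {
            dev_err_ratelimited(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
                                __func__, ret);
            return ret;
        }
        return 0;
    }

    static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
    {
        struct spi_master *master = dev_get_drvdata(dev);
        struct qcom_qspi *ctrl = spi_master_get_devdata(master);
        int ret;

        ret = icc_enable(ctrl->icc_path_cpu_to_qspi);
        if (ret) {
            dev_err_ratelimited(ctrl->dev, "%s: ICC enable failed for cpu: %d\n",
                                __func__, ret);
            return ret;
        }

        ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
        if (ret)
            return ret;

        /* Restore the last negotiated rate; core clock is 4x the SPI clock. */
        return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
    }
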