Lines matching refs:sdd, the struct s3c64xx_spi_driver_data pointer used throughout the spi-s3c64xx SPI driver
197 static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
199 void __iomem *regs = sdd->regs;
218 } while (TX_FIFO_LVL(val, sdd) && loops--);
221 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
227 if (RX_FIFO_LVL(val, sdd))
234 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
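s3c64xx_flush_fifo() (line 197) drains both FIFOs with a bounded busy-wait so a stuck controller produces a warning instead of a hang. A minimal sketch of the TX half of that pattern, assuming kernel context; msecs_to_loops() is the driver's own loop-calibration helper:

    /* Give the TX FIFO roughly 1 ms to drain, then warn (lines 218-221). */
    unsigned long loops = msecs_to_loops(1);
    u32 val;

    do {
            val = readl(regs + S3C64XX_SPI_STATUS);
    } while (TX_FIFO_LVL(val, sdd) && loops--);

    if (loops == 0)
            dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");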
247 struct s3c64xx_spi_driver_data *sdd;
252 sdd = container_of(data,
255 sdd = container_of(data,
258 spin_lock_irqsave(&sdd->lock, flags);
261 sdd->state &= ~RXBUSY;
262 if (!(sdd->state & TXBUSY))
263 complete(&sdd->xfer_completion);
265 sdd->state &= ~TXBUSY;
266 if (!(sdd->state & RXBUSY))
267 complete(&sdd->xfer_completion);
270 spin_unlock_irqrestore(&sdd->lock, flags);
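The DMA completion callback (line 247) receives only a pointer to the embedded rx_dma or tx_dma member and recovers the enclosing driver data with container_of() before clearing RXBUSY/TXBUSY under sdd->lock. A standalone, compilable illustration of that pointer arithmetic, with the structs abbreviated here:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct dma_data { int direction; };
    struct drvdata {                /* stands in for s3c64xx_spi_driver_data */
            int state;
            struct dma_data rx_dma;
            struct dma_data tx_dma;
    };

    int main(void)
    {
            struct drvdata d;
            struct dma_data *dma = &d.rx_dma;  /* what the callback is handed */
            struct drvdata *sdd = container_of(dma, struct drvdata, rx_dma);

            printf("recovered %p, expected %p\n", (void *)sdd, (void *)&d);
            return 0;
    }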
276 struct s3c64xx_spi_driver_data *sdd;
284 sdd = container_of((void *)dma,
287 config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
288 config.src_addr_width = sdd->cur_bpw / 8;
292 sdd = container_of((void *)dma,
295 config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
296 config.dst_addr_width = sdd->cur_bpw / 8;
304 dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
315 dev_err(&sdd->pdev->dev, "DMA submission failed");
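prepare_dma() (line 276) routes everything through the generic dmaengine API: a dma_slave_config keyed on direction, then prep, submit, and issue. A kernel-context sketch of the RX ordering; the burst size and flags shown are what this driver is believed to pass:

    struct dma_slave_config config = { };
    struct dma_async_tx_descriptor *desc;

    /* RX is device-to-memory, so the *source* is the SPI RX data register. */
    config.direction = dma->direction;            /* DMA_DEV_TO_MEM */
    config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
    config.src_addr_width = sdd->cur_bpw / 8;     /* 1, 2 or 4 bytes */
    config.src_maxburst = 1;
    dmaengine_slave_config(dma->ch, &config);

    desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
                                   dma->direction, DMA_PREP_INTERRUPT);
    if (!desc)
            return -ENOMEM;         /* "unable to prepare %s scatterlist" */

    desc->callback = s3c64xx_spi_dmacb;
    desc->callback_param = dma;

    dma->cookie = dmaengine_submit(desc);
    if (dma_submit_error(dma->cookie))
            return -EIO;            /* "DMA submission failed" */

    dma_async_issue_pending(dma->ch);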
325 struct s3c64xx_spi_driver_data *sdd =
328 if (sdd->cntrlr_info->no_cs)
332 if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
333 writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
335 u32 ssel = readl(sdd->regs + S3C64XX_SPI_CS_REG);
339 writel(ssel, sdd->regs + S3C64XX_SPI_CS_REG);
342 if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
344 sdd->regs + S3C64XX_SPI_CS_REG);
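s3c64xx_spi_set_cs() (line 325) only pokes the CS register by hand on ports without the CS_AUTO quirk; controllers with the quirk sequence the line themselves. A sketch of the manual path, assuming an active-low select (the enable flag here is illustrative):

    /* Manual chip select: 0 asserts the (active-low) line,
     * SIG_INACT parks it inactive again (lines 333 and 344). */
    if (enable)
            writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
    else
            writel(S3C64XX_SPI_CS_SIG_INACT, sdd->regs + S3C64XX_SPI_CS_REG);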
350 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
352 if (is_polling(sdd))
355 spi->dma_rx = sdd->rx_dma.ch;
356 spi->dma_tx = sdd->tx_dma.ch;
365 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
367 return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
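s3c64xx_spi_can_dma() (line 365) and several other sites derive the usable FIFO depth as (FIFO_LVL_MASK >> 1) + 1 and switch to DMA once a transfer will not fit. A standalone worked example, with a level mask of 0x7f assumed for the port:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int fifo_lvl_mask = 0x7f;                /* assumed */
            unsigned int fifo_len = (fifo_lvl_mask >> 1) + 1; /* 64 bytes */
            unsigned int xfer_len = 100;

            bool use_dma = xfer_len > fifo_len;               /* line 367 */
            printf("fifo_len=%u, use_dma=%d\n", fifo_len, use_dma);
            return 0;
    }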
370 static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
373 void __iomem *regs = sdd->regs;
391 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
397 sdd->state |= TXBUSY;
401 ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
403 switch (sdd->cur_bpw) {
421 sdd->state |= RXBUSY;
423 if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
424 && !(sdd->cur_mode & SPI_CPHA))
430 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
433 ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
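Inside s3c64xx_enable_datapath() (line 370), the value written with the packet-count enable at lines 391 and 430 is the transfer length converted from bytes into words of cur_bpw bits, truncated to the 16-bit count field. A standalone worked example with assumed sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned int len = 64;   /* transfer length in bytes, assumed */
            unsigned int bpw = 16;   /* bits per word */

            unsigned int pkts = (len * 8 / bpw) & 0xffff;  /* 32 words */
            printf("packet count = %u\n", pkts);
            return 0;
    }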
446 static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
449 void __iomem *regs = sdd->regs;
454 u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
461 } while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
464 return RX_FIFO_LVL(status, sdd);
467 static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
470 void __iomem *regs = sdd->regs;
476 ms = xfer->len * 8 * 1000 / sdd->cur_speed;
481 val = wait_for_completion_timeout(&sdd->xfer_completion, val);
495 while ((TX_FIFO_LVL(status, sdd)
496 || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
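s3c64xx_wait_for_dma() (line 467) sizes the completion timeout from the wire time at line 476, len bytes x 8 bits / bus clock in milliseconds; the driver pads this with a fixed tolerance that is not among the matched lines. A standalone worked example:

    #include <stdio.h>

    int main(void)
    {
            unsigned long len = 4096;       /* bytes, assumed */
            unsigned long hz = 10000000;    /* cur_speed, assumed */

            unsigned long ms = len * 8 * 1000 / hz;  /* ~3 ms on the wire */
            printf("base timeout = %lu ms\n", ms);
            return 0;
    }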
511 static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
514 void __iomem *regs = sdd->regs;
523 ms = xfer->len * 8 * 1000 / sdd->cur_speed;
529 } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
536 sdd->state &= ~TXBUSY;
548 loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
552 cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
555 switch (sdd->cur_bpw) {
572 sdd->state &= ~RXBUSY;
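For polled transfers, s3c64xx_wait_for_pio() (line 511) empties the RX FIFO in full-FIFO chunks; line 548 computes how many whole chunks the transfer spans, and s3c64xx_spi_wait_for_timeout() reports how many bytes each pass may copy. A standalone worked example of the chunk arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int len = 200;                    /* bytes, assumed */
            unsigned int fifo_len = (0x7f >> 1) + 1;   /* 64, mask assumed */

            unsigned int loops = len / fifo_len;       /* 3 full chunks */
            unsigned int tail = len % fifo_len;        /* 8 bytes left over */
            printf("loops=%u, tail=%u\n", loops, tail);
            return 0;
    }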
577 static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
579 void __iomem *regs = sdd->regs;
584 if (!sdd->port_conf->clk_from_cmu) {
596 if (sdd->cur_mode & SPI_CPOL)
599 if (sdd->cur_mode & SPI_CPHA)
609 switch (sdd->cur_bpw) {
626 if (sdd->port_conf->clk_from_cmu) {
628 ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
631 sdd->cur_speed = clk_get_rate(sdd->src_clk) / 2;
636 val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
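s3c64xx_spi_config() (line 577) sets the bus clock one of two ways: CMU-clocked ports delegate to the common clock framework (lines 628-631), while the others fold a prescaler into the clock-config register (line 636). A kernel-context sketch of the two branches; the 0xff field mask is an assumption:

    if (sdd->port_conf->clk_from_cmu) {
            /* The IP runs at src_clk / 2, so request twice the target
             * rate and record what the clock tree actually delivered. */
            ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
            if (ret)
                    return ret;
            sdd->cur_speed = clk_get_rate(sdd->src_clk) / 2;
    } else {
            /* PSR field: f_spi = src_clk / (2 * (PSR + 1)). */
            val |= (clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
                   & 0xff;
    }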
654 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
659 writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
675 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
676 const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
686 reinit_completion(&sdd->xfer_completion);
692 if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
693 sdd->cur_bpw = bpw;
694 sdd->cur_speed = speed;
695 sdd->cur_mode = spi->mode;
696 status = s3c64xx_spi_config(sdd);
701 if (!is_polling(sdd) && (xfer->len > fifo_len) &&
702 sdd->rx_dma.ch && sdd->tx_dma.ch) {
705 } else if (is_polling(sdd) && xfer->len > fifo_len) {
716 spin_lock_irqsave(&sdd->lock, flags);
719 sdd->state &= ~RXBUSY;
720 sdd->state &= ~TXBUSY;
725 status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
727 spin_unlock_irqrestore(&sdd->lock, flags);
735 status = s3c64xx_wait_for_dma(sdd, xfer);
737 status = s3c64xx_wait_for_pio(sdd, xfer);
743 (sdd->state & RXBUSY) ? 'f' : 'p',
744 (sdd->state & TXBUSY) ? 'f' : 'p',
750 if (xfer->tx_buf && (sdd->state & TXBUSY)) {
751 dmaengine_pause(sdd->tx_dma.ch);
752 dmaengine_tx_status(sdd->tx_dma.ch, sdd->tx_dma.cookie, &s);
753 dmaengine_terminate_all(sdd->tx_dma.ch);
757 if (xfer->rx_buf && (sdd->state & RXBUSY)) {
758 dmaengine_pause(sdd->rx_dma.ch);
759 dmaengine_tx_status(sdd->rx_dma.ch, sdd->rx_dma.cookie, &s);
760 dmaengine_terminate_all(sdd->rx_dma.ch);
765 s3c64xx_flush_fifo(sdd);
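When transfer_one (line 675) times out with DMA still active (lines 750-760), each busy channel is torn down with a pause / query / terminate sequence so the residue can still be sampled before the descriptor is discarded, and the FIFOs are flushed last (line 765). Kernel-context sketch of the TX side:

    struct dma_tx_state s;

    if (xfer->tx_buf && (sdd->state & TXBUSY)) {
            dmaengine_pause(sdd->tx_dma.ch);            /* freeze the channel */
            dmaengine_tx_status(sdd->tx_dma.ch,
                                sdd->tx_dma.cookie, &s); /* sample residue */
            dmaengine_terminate_all(sdd->tx_dma.ch);
            dev_err(&sdd->pdev->dev, "TX residue: %d\n", s.residue);
    }
    s3c64xx_flush_fifo(sdd);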
833 struct s3c64xx_spi_driver_data *sdd;
836 sdd = spi_master_get_devdata(spi->master);
869 pm_runtime_get_sync(&sdd->pdev->dev);
872 if (!sdd->port_conf->clk_from_cmu) {
876 speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
881 psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
886 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
896 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
907 pm_runtime_mark_last_busy(&sdd->pdev->dev);
908 pm_runtime_put_autosuspend(&sdd->pdev->dev);
914 pm_runtime_mark_last_busy(&sdd->pdev->dev);
915 pm_runtime_put_autosuspend(&sdd->pdev->dev);
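s3c64xx_spi_setup() (line 833) picks the smallest prescaler that honors max_speed_hz (line 881); because the integer division can round the achieved rate above the request, it recomputes the real speed (lines 886 and 896) and backs off one PSR step when needed. A standalone worked example with assumed rates:

    #include <stdio.h>

    int main(void)
    {
            unsigned long src = 133000000;  /* src_clk rate, assumed */
            unsigned long max = 10000000;   /* spi->max_speed_hz, assumed */

            unsigned long psr = src / 2 / max - 1;      /* line 881: psr = 5 */
            unsigned long speed = src / 2 / (psr + 1);  /* 11083333, too fast */
            if (speed > max) {
                    psr++;                              /* back off one step */
                    speed = src / 2 / (psr + 1);        /* 9500000 Hz */
            }
            printf("psr=%lu, speed=%lu Hz\n", psr, speed);
            return 0;
    }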
953 struct s3c64xx_spi_driver_data *sdd = data;
954 struct spi_master *spi = sdd->master;
957 val = readl(sdd->regs + S3C64XX_SPI_STATUS);
977 writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
978 writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
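The interrupt handler (line 953) decodes error bits out of STATUS and acknowledges them with a two-step write to PENDING_CLR (lines 977-978): raise the clear bits, then drop them back to zero. A kernel-context sketch; the bit names are believed to match the driver's defines:

    u32 val = readl(sdd->regs + S3C64XX_SPI_STATUS);
    u32 clr = 0;

    if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
            clr |= S3C64XX_SPI_PND_RX_OVERRUN_CLR;
            dev_err(&spi->dev, "RX overrun\n");
    }
    /* ...same pattern for RX underrun and the TX error bits... */

    /* Ack by setting the pending-clear bits, then clearing them again. */
    writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
    writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
    return IRQ_HANDLED;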
983 static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
985 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
986 void __iomem *regs = sdd->regs;
989 sdd->cur_speed = 0;
992 writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
993 else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
994 writel(S3C64XX_SPI_CS_SIG_INACT, sdd->regs + S3C64XX_SPI_CS_REG);
999 if (!sdd->port_conf->clk_from_cmu)
1021 s3c64xx_flush_fifo(sdd);
1078 struct s3c64xx_spi_driver_data *sdd;
1116 sdd = spi_master_get_devdata(master);
1117 sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
1118 sdd->master = master;
1119 sdd->cntrlr_info = sci;
1120 sdd->pdev = pdev;
1121 sdd->sfr_start = mem_res->start;
1129 sdd->port_id = ret;
1131 sdd->port_id = pdev->id;
1134 sdd->cur_bpw = 8;
1136 sdd->tx_dma.direction = DMA_MEM_TO_DEV;
1137 sdd->rx_dma.direction = DMA_DEV_TO_MEM;
1140 master->bus_num = sdd->port_id;
1154 if (!is_polling(sdd))
1157 sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
1158 if (IS_ERR(sdd->regs)) {
1159 ret = PTR_ERR(sdd->regs);
1170 sdd->clk = devm_clk_get(&pdev->dev, "spi");
1171 if (IS_ERR(sdd->clk)) {
1173 ret = PTR_ERR(sdd->clk);
1177 ret = clk_prepare_enable(sdd->clk);
1184 sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
1185 if (IS_ERR(sdd->src_clk)) {
1188 ret = PTR_ERR(sdd->src_clk);
1192 ret = clk_prepare_enable(sdd->src_clk);
1198 if (sdd->port_conf->clk_ioclk) {
1199 sdd->ioclk = devm_clk_get(&pdev->dev, "spi_ioclk");
1200 if (IS_ERR(sdd->ioclk)) {
1202 ret = PTR_ERR(sdd->ioclk);
1206 ret = clk_prepare_enable(sdd->ioclk);
1213 if (!is_polling(sdd)) {
1215 sdd->rx_dma.ch = dma_request_chan(&pdev->dev, "rx");
1216 if (IS_ERR(sdd->rx_dma.ch)) {
1218 ret = PTR_ERR(sdd->rx_dma.ch);
1221 sdd->tx_dma.ch = dma_request_chan(&pdev->dev, "tx");
1222 if (IS_ERR(sdd->tx_dma.ch)) {
1224 ret = PTR_ERR(sdd->tx_dma.ch);
1236 s3c64xx_spi_hwinit(sdd);
1238 spin_lock_init(&sdd->lock);
1239 init_completion(&sdd->xfer_completion);
1242 "spi-s3c64xx", sdd);
1251 sdd->regs + S3C64XX_SPI_INT_EN);
1260 sdd->port_id, master->num_chipselect);
1262 mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
1274 if (!is_polling(sdd))
1275 dma_release_channel(sdd->tx_dma.ch);
1277 if (!is_polling(sdd))
1278 dma_release_channel(sdd->rx_dma.ch);
1280 clk_disable_unprepare(sdd->ioclk);
1282 clk_disable_unprepare(sdd->src_clk);
1284 clk_disable_unprepare(sdd->clk);
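The probe path (line 1078) acquires regs, clk, src_clk, the optional ioclk, and (on non-polling ports) the RX and TX DMA channels; lines 1274-1284 show the error unwind releasing them strictly in reverse. A kernel-context sketch of the cascading-label idiom, label names illustrative:

    err_release_tx_dma:
            if (!is_polling(sdd))
                    dma_release_channel(sdd->tx_dma.ch);
    err_release_rx_dma:
            if (!is_polling(sdd))
                    dma_release_channel(sdd->rx_dma.ch);
    err_disable_io_clk:
            clk_disable_unprepare(sdd->ioclk);
    err_disable_src_clk:
            clk_disable_unprepare(sdd->src_clk);
    err_disable_clk:
            clk_disable_unprepare(sdd->clk);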
1294 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1298 writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
1300 if (!is_polling(sdd)) {
1301 dma_release_channel(sdd->rx_dma.ch);
1302 dma_release_channel(sdd->tx_dma.ch);
1305 clk_disable_unprepare(sdd->ioclk);
1307 clk_disable_unprepare(sdd->src_clk);
1309 clk_disable_unprepare(sdd->clk);
1322 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1332 sdd->cur_speed = 0; /* Output Clock is stopped */
1340 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1341 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1359 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1361 clk_disable_unprepare(sdd->clk);
1362 clk_disable_unprepare(sdd->src_clk);
1363 clk_disable_unprepare(sdd->ioclk);
1371 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1374 if (sdd->port_conf->clk_ioclk) {
1375 ret = clk_prepare_enable(sdd->ioclk);
1380 ret = clk_prepare_enable(sdd->src_clk);
1384 ret = clk_prepare_enable(sdd->clk);
1388 s3c64xx_spi_hwinit(sdd);
1392 sdd->regs + S3C64XX_SPI_INT_EN);
1397 clk_disable_unprepare(sdd->src_clk);
1399 clk_disable_unprepare(sdd->ioclk);
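Runtime PM is symmetric: runtime_suspend (line 1359) gates clk, src_clk, then ioclk, and runtime_resume (line 1371) re-enables them in the opposite order, unwinding the ones already enabled if a later step fails, then re-runs hwinit because the block may have lost state while powered down. A kernel-context sketch of the resume ordering, label names illustrative:

    if (sdd->port_conf->clk_ioclk) {
            ret = clk_prepare_enable(sdd->ioclk);
            if (ret)
                    return ret;
    }
    ret = clk_prepare_enable(sdd->src_clk);
    if (ret)
            goto err_disable_ioclk;
    ret = clk_prepare_enable(sdd->clk);
    if (ret)
            goto err_disable_src_clk;

    s3c64xx_spi_hwinit(sdd);    /* reprogram the block after power gating */
    return 0;

    err_disable_src_clk:
            clk_disable_unprepare(sdd->src_clk);
    err_disable_ioclk:
            clk_disable_unprepare(sdd->ioclk);
            return ret;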