// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#define DW_SPI_RX_BUSY		0
#define DW_SPI_RX_BURST_LEVEL	16
#define DW_SPI_TX_BUSY		1
#define DW_SPI_TX_BURST_LEVEL	16

static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

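/*
 * Pick the Rx and Tx burst lengths from the DMA channel capabilities
 * (falling back to the 16-entry defaults above), cap them at half the SPI
 * FIFO depth and program the controller's DMA request trigger levels
 * accordingly.
 */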
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = DW_SPI_RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = DW_SPI_TX_BURST_LEVEL;

	/*
	 * Having a Rx DMA channel serviced with higher priority than a Tx DMA
	 * channel might not be enough to provide a well balanced DMA-based
	 * SPI transfer interface. There might still be moments when the Tx DMA
	 * channel is occasionally handled faster than the Rx DMA channel.
	 * That in its turn will eventually cause the SPI Rx FIFO to overflow
	 * if the SPI bus speed is high enough to fill the SPI Rx FIFO before
	 * it's cleared by the Rx DMA channel. In order to fix the problem the
	 * Tx DMA activity is intentionally slowed down by limiting the
	 * effective SPI Tx FIFO depth to twice the Tx burst length.
	 */
	dws->txburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}

static int dw_spi_dma_caps_init(struct dw_spi *dws)
{
	struct dma_slave_caps tx, rx;
	int ret;

	ret = dma_get_slave_caps(dws->txchan, &tx);
	if (ret)
		return ret;

	ret = dma_get_slave_caps(dws->rxchan, &rx);
	if (ret)
		return ret;

	if (!(tx.directions & BIT(DMA_MEM_TO_DEV) &&
	      rx.directions & BIT(DMA_DEV_TO_MEM)))
		return -ENXIO;

	if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
		dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
	else if (tx.max_sg_burst > 0)
		dws->dma_sg_burst = tx.max_sg_burst;
	else if (rx.max_sg_burst > 0)
		dws->dma_sg_burst = rx.max_sg_burst;
	else
		dws->dma_sg_burst = 0;

	/*
	 * Assuming both channels belong to the same DMA controller, the
	 * peripheral-side address width capabilities most likely would be
	 * the same.
	 */
	dws->dma_addr_widths = tx.dst_addr_widths & rx.src_addr_widths;

	return 0;
}

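/*
 * Acquire the Rx/Tx channels of the Intel Medfield DMA controller by matching
 * its PCI device in dw_spi_dma_chan_filter(), then probe the generic DMA
 * capabilities and burst levels.
 */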
static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;
	int ret = -EBUSY;

	/*
	 * Get the PCI device of the DMA controller. Currently it can only
	 * be the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->host->dma_rx = dws->rxchan;
	dws->host->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	ret = dw_spi_dma_caps_init(dws);
	if (ret)
		goto free_txchan;

	dw_spi_dma_maxburst_init(dws);

	pci_dev_put(dma_dev);

	return 0;

free_txchan:
	dma_release_channel(dws->txchan);
	dws->txchan = NULL;
free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	pci_dev_put(dma_dev);
	return ret;
}

static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	int ret;

	dws->rxchan = dma_request_chan(dev, "rx");
	if (IS_ERR(dws->rxchan)) {
		ret = PTR_ERR(dws->rxchan);
		dws->rxchan = NULL;
		goto err_exit;
	}

	dws->txchan = dma_request_chan(dev, "tx");
	if (IS_ERR(dws->txchan)) {
		ret = PTR_ERR(dws->txchan);
		dws->txchan = NULL;
		goto free_rxchan;
	}

	dws->host->dma_rx = dws->rxchan;
	dws->host->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	ret = dw_spi_dma_caps_init(dws);
	if (ret)
		goto free_txchan;

	dw_spi_dma_maxburst_init(dws);

	return 0;

free_txchan:
	dma_release_channel(dws->txchan);
	dws->txchan = NULL;
free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return ret;
}

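/* Terminate any in-flight descriptors and release both DMA channels. */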
static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}
}

static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	dw_spi_check_status(dws, false);

	complete(&dws->dma_completion);

	return IRQ_HANDLED;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	switch (n_bytes) {
	case 1:
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	case 2:
		return DMA_SLAVE_BUSWIDTH_2_BYTES;
	case 4:
		return DMA_SLAVE_BUSWIDTH_4_BYTES;
	default:
		return DMA_SLAVE_BUSWIDTH_UNDEFINED;
	}
}

static bool dw_spi_can_dma(struct spi_controller *host,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(host);
	enum dma_slave_buswidth dma_bus_width;

	if (xfer->len <= dws->fifo_len)
		return false;

	dma_bus_width = dw_spi_dma_convert_width(dws->n_bytes);

	return dws->dma_addr_widths & BIT(dma_bus_width);
}

static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
	unsigned long long ms;

	ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, speed);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->host->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

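/*
 * The DMA completion only indicates that the descriptors have been executed,
 * so the helpers below additionally poll the controller status until the Tx
 * FIFO has actually been drained onto the bus.
 */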
static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_EMPT);
}

static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->host->dev, "Tx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the dma transfer starts; the callback for
 * the tx channel will clear the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	return dmaengine_slave_config(dws->txchan, &txconf);
}

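/*
 * Prepare and submit a slave SG descriptor for the Tx channel. On success the
 * Tx channel is flagged busy so the DMA completion is only signalled once the
 * Rx channel (if any) has finished as well.
 */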
static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *txdesc;
	dma_cookie_t cookie;
	int ret;

	txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return -ENOMEM;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	cookie = dmaengine_submit(txdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->txchan);
		return ret;
	}

	set_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);

	return 0;
}

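/*
 * Rx counterparts of the Tx FIFO drain helpers above: report whether the Rx
 * FIFO still holds data and give the DMA engine a bounded amount of time to
 * fetch the remaining entries.
 */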
static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still doing the data fetching,
	 * but if it is, let's give it some reasonable time. The timeout
	 * calculation is based on the synchronous APB/SSI reference clock
	 * rate, the number of data entries left in the Rx FIFO, times the
	 * number of clock periods normally needed for a single APB read/write
	 * transaction without the PREADY signal utilized (which is true for
	 * the DW APB SSI controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->host->dev, "Rx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the dma transfer starts; the callback for
 * the rx channel will clear the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	return dmaengine_slave_config(dws->rxchan, &rxconf);
}

static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *rxdesc;
	dma_cookie_t cookie;
	int ret;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return -ENOMEM;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	cookie = dmaengine_submit(rxdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->rxchan);
		return ret;
	}

	set_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);

	return 0;
}

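/*
 * Configure the DMA channels for the upcoming transfer (Tx is mandatory, Rx
 * only if a receive buffer is supplied), enable the controller's DMA
 * handshaking interface and unmask the FIFO overflow/underflow interrupts
 * handled by dw_spi_dma_transfer_handler().
 */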
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr, dma_ctrl;
	int ret;

	if (!xfer->tx_buf)
		return -EINVAL;

	/* Setup DMA channels */
	ret = dw_spi_dma_config_tx(dws);
	if (ret)
		return ret;

	if (xfer->rx_buf) {
		ret = dw_spi_dma_config_rx(dws);
		if (ret)
			return ret;
	}

	/* Set the DMA handshaking interface */
	dma_ctrl = DW_SPI_DMACR_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= DW_SPI_DMACR_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	imr = DW_SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI;
	dw_spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}

static int dw_spi_dma_transfer_all(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int ret;

	/* Submit the DMA Tx transfer */
	ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
	if (ret)
		goto err_clear_dmac;

	/* Submit the DMA Rx transfer if required */
	if (xfer->rx_buf) {
		ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
					   xfer->rx_sg.nents);
		if (ret)
			goto err_clear_dmac;

		/* rx must be started before tx due to spi instinct */
		dma_async_issue_pending(dws->rxchan);
	}

	dma_async_issue_pending(dws->txchan);

	ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

/*
 * In case at least one of the requested DMA channels doesn't support the
 * hardware accelerated SG list entries traverse, the DMA driver will most
 * likely work that around by performing the IRQ-based SG list entries
 * resubmission. That might and will cause a problem if the DMA Tx channel is
 * recharged and re-executed before the Rx DMA channel. Due to
 * non-deterministic IRQ-handler execution latency the DMA Tx channel will
 * start pushing data to the SPI bus before the Rx DMA channel is even
 * reinitialized with the next inbound SG list entry. By doing so the DMA Tx
 * channel will implicitly start filling the DW APB SSI Rx FIFO up, which,
 * while the DMA Rx channel is being recharged and re-executed, will
 * eventually overflow.
 *
 * In order to solve the problem we have to feed the DMA engine with SG list
 * entries one-by-one. It shall keep the DW APB SSI Tx and Rx FIFOs
 * synchronized and prevent the Rx FIFO overflow. Since in general the tx_sg
 * and rx_sg lists may have different numbers of entries of different lengths
 * (though the total length should match) let's virtually split the SG lists
 * into the set of DMA transfers, whose length is the minimum of the ordered
 * SG entries' lengths. An ASCII sketch of the implemented algorithm follows:
 *                  xfer->len
 *                |___________|
 * tx_sg list:    |___|____|__|
 * rx_sg list:    |_|____|____|
 * DMA transfers: |_|_|__|_|__|
 *
 * Note that in order for this workaround to solve the denoted problem the DMA
 * engine driver should properly initialize the max_sg_burst capability and
 * set the DMA device max segment size parameter with the maximum data block
 * size the DMA engine supports.
 */
static int dw_spi_dma_transfer_one(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
	unsigned int tx_len = 0, rx_len = 0;
	unsigned int base, len;
	int ret;

	sg_init_table(&tx_tmp, 1);
	sg_init_table(&rx_tmp, 1);

	for (base = 0, len = 0; base < xfer->len; base += len) {
		/* Fetch next Tx DMA data chunk */
		if (!tx_len) {
			tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
			sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
			tx_len = sg_dma_len(tx_sg);
		}

		/* Fetch next Rx DMA data chunk */
		if (!rx_len) {
			rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
			sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
			rx_len = sg_dma_len(rx_sg);
		}

		len = min(tx_len, rx_len);

		sg_dma_len(&tx_tmp) = len;
		sg_dma_len(&rx_tmp) = len;

		/* Submit DMA Tx transfer */
		ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
		if (ret)
			break;

		/* Submit DMA Rx transfer */
		ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
		if (ret)
			break;

		/* Rx must be started before Tx due to SPI instinct */
		dma_async_issue_pending(dws->rxchan);

		dma_async_issue_pending(dws->txchan);

		/*
		 * Here we only need to wait for the DMA transfer to be
		 * finished since the SPI controller is kept enabled during the
		 * procedure this loop implements and there is no risk of
		 * losing data left in the Tx/Rx FIFOs.
		 */
		ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
		if (ret)
			break;

		reinit_completion(&dws->dma_completion);

		sg_dma_address(&tx_tmp) += len;
		sg_dma_address(&rx_tmp) += len;
		tx_len -= len;
		rx_len -= len;
	}

	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

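/*
 * Top-level DMA transfer method: pick the one-shot or the per-segment
 * submission path based on the channels' max_sg_burst capability, then wait
 * for the controller FIFOs to drain before returning to the SPI core.
 */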
static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned int nents;
	int ret;

	nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);

	/*
	 * Execute the normal DMA-based transfer (which submits the Rx and Tx
	 * SG lists directly to the DMA engine at once) if either the fully
	 * hardware accelerated SG list traversal is supported by both
	 * channels, or a Tx-only SPI transfer is requested, or the DMA engine
	 * is capable of handling both SG lists on a hardware accelerated
	 * basis.
	 */
	if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
		ret = dw_spi_dma_transfer_all(dws, xfer);
	else
		ret = dw_spi_dma_transfer_one(dws, xfer);
	if (ret)
		return ret;

	if (dws->host->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (xfer->rx_buf && dws->host->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
	}
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init	= dw_spi_dma_init_mfld,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_mfld, SPI_DW_CORE);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init	= dw_spi_dma_init_generic,
	.dma_exit	= dw_spi_dma_exit,
	.dma_setup	= dw_spi_dma_setup,
	.can_dma	= dw_spi_can_dma,
	.dma_transfer	= dw_spi_dma_transfer,
	.dma_stop	= dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_generic, SPI_DW_CORE);