Lines matching refs:bs in drivers/spi/spi-bcm2835.c

163 static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
174 bs->debugfs_dir = dir;
178 &bs->count_transfer_polling);
180 &bs->count_transfer_irq);
182 &bs->count_transfer_irq_after_polling);
184 &bs->count_transfer_dma);
187 static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
189 debugfs_remove_recursive(bs->debugfs_dir);
190 bs->debugfs_dir = NULL;
193 static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
198 static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
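The matches at 163-190 are the debugfs statistics hooks; 193-198 are the empty stubs compiled when CONFIG_DEBUG_FS is off. A minimal sketch of the counter registration, assuming the standard debugfs API (the directory naming is an assumption, not the driver's exact code):

	struct dentry *dir = debugfs_create_dir(name, NULL);

	bs->debugfs_dir = dir;
	debugfs_create_u64("count_transfer_polling", 0444, dir,
			   &bs->count_transfer_polling);
	debugfs_create_u64("count_transfer_irq", 0444, dir,
			   &bs->count_transfer_irq);
	debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
			   &bs->count_transfer_irq_after_polling);
	debugfs_create_u64("count_transfer_dma", 0444, dir,
			   &bs->count_transfer_dma);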
203 static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned int reg)
205 return readl(bs->regs + reg);
208 static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned int reg, u32 val)
210 writel(val, bs->regs + reg);
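Lines 203-210 give the two MMIO accessors in full; every helper below is built on them. Assembled verbatim from the matches, with only the braces added:

static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned int reg)
{
	return readl(bs->regs + reg);
}

static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned int reg, u32 val)
{
	writel(val, bs->regs + reg);
}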
213 static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
217 while ((bs->rx_len) &&
218 (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
219 byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
220 if (bs->rx_buf)
221 *bs->rx_buf++ = byte;
222 bs->rx_len--;
226 static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
230 while ((bs->tx_len) &&
231 (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
232 byte = bs->tx_buf ? *bs->tx_buf++ : 0;
233 bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
234 bs->tx_len--;
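Lines 213-234 are the byte-wise FIFO helpers used by the interrupt and polling paths. A sketch assembled from the matches, with only the local declarations and braces filled in (rx_buf/tx_buf may be NULL on half-duplex transfers, hence the guards):

static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
{
	u8 byte;

	/* drain RX for as long as the FIFO flags data as available */
	while ((bs->rx_len) &&
	       (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
		byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
		if (bs->rx_buf)
			*bs->rx_buf++ = byte;
		bs->rx_len--;
	}
}

static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
{
	u8 byte;

	/* feed TX with data, or with zero padding on RX-only transfers */
	while ((bs->tx_len) &&
	       (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
		byte = bs->tx_buf ? *bs->tx_buf++ : 0;
		bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
		bs->tx_len--;
	}
}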
240 * @bs: BCM2835 SPI controller
243 * The caller must ensure that @bs->rx_len is greater than or equal to @count,
246 * 32-bit instead of just 8-bit). Moreover @bs->rx_buf must not be %NULL.
248 static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
253 bs->rx_len -= count;
256 val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
258 memcpy(bs->rx_buf, &val, len);
259 bs->rx_buf += len;
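The kernel-doc at 240-246 states the contract: with the DMA Enable flag set in CS, a FIFO read returns up to 4 bytes at once, so the helper can copy word-sized chunks. A sketch of the body; the do/while structure and the min() chunking are assumptions filled in around the matched lines:

static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
{
	u32 val;
	int len;

	bs->rx_len -= count;

	do {
		/* with CS.DMAEN set, one FIFO read yields up to 4 bytes */
		val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
		len = min(count, 4);
		memcpy(bs->rx_buf, &val, len);
		bs->rx_buf += len;
		count -= 4;
	} while (count > 0);
}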
266 * @bs: BCM2835 SPI controller
269 * The caller must ensure that @bs->tx_len is greater than or equal to @count,
274 static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
279 bs->tx_len -= count;
282 if (bs->tx_buf) {
284 memcpy(&val, bs->tx_buf, len);
285 bs->tx_buf += len;
289 bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
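The write-side counterpart at 266-289 mirrors this, padding with zeros when there is no TX buffer. Sketch under the same assumptions:

static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
{
	u32 val;
	int len;

	bs->tx_len -= count;

	do {
		if (bs->tx_buf) {
			len = min(count, 4);
			memcpy(&val, bs->tx_buf, len);
			bs->tx_buf += len;
		} else {
			val = 0;
		}
		bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
		count -= 4;
	} while (count > 0);
}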
296 * @bs: BCM2835 SPI controller
302 static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
304 while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
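Line 304 is the whole wait: the hardware sets DONE once TA is active and the TX FIFO has fully drained. Only the cpu_relax() in the loop body is an assumption:

static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
{
	while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
		cpu_relax();
}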
310 * @bs: BCM2835 SPI controller
313 static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
317 count = min(count, bs->rx_len);
318 bs->rx_len -= count;
321 val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
322 if (bs->rx_buf)
323 *bs->rx_buf++ = val;
329 * @bs: BCM2835 SPI controller
332 static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
336 count = min(count, bs->tx_len);
337 bs->tx_len -= count;
340 val = bs->tx_buf ? *bs->tx_buf++ : 0;
341 bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
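The "blind" variants at 310-341 skip the per-byte RXD/TXD polling and trust the caller's knowledge of the FIFO fill level, clamping only against the remaining transfer length. Sketch with the do/while loops filled in as assumptions (callers are assumed never to pass a count that clamps to zero):

static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
{
	u8 val;

	count = min(count, bs->rx_len);
	bs->rx_len -= count;

	do {
		val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
		if (bs->rx_buf)
			*bs->rx_buf++ = val;
	} while (--count);
}

static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
{
	u8 val;

	count = min(count, bs->tx_len);
	bs->tx_len -= count;

	do {
		val = bs->tx_buf ? *bs->tx_buf++ : 0;
		bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
	} while (--count);
}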
345 static void bcm2835_spi_reset_hw(struct bcm2835_spi *bs)
347 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
365 bcm2835_wr(bs, BCM2835_SPI_CS, cs);
367 bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
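Lines 345-367 reset the hardware between transfers: read CS, strip the run/IRQ/DMA enables, flush both FIFOs, write CS back, and zero DLEN. The exact mask in the middle is an assumption reconstructed from the driver's CS bit names:

static void bcm2835_spi_reset_hw(struct bcm2835_spi *bs)
{
	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);

	/* stop the transfer and quiesce interrupt/DMA generation */
	cs &= ~(BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD |
		BCM2835_SPI_CS_DMAEN | BCM2835_SPI_CS_TA);
	/* and flush both FIFOs */
	cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;

	bcm2835_wr(bs, BCM2835_SPI_CS, cs);
	/* reset DLEN so the next transfer starts from a clean slate */
	bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
}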
372 struct bcm2835_spi *bs = dev_id;
373 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
384 bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
386 bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE_3_4);
388 if (bs->tx_len && cs & BCM2835_SPI_CS_DONE)
389 bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
392 bcm2835_rd_fifo(bs);
394 bcm2835_wr_fifo(bs);
396 if (!bs->rx_len) {
398 bcm2835_spi_reset_hw(bs);
400 spi_finalize_current_transfer(bs->ctlr);
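The interrupt handler at 372-400 drains and refills the FIFOs and completes the transfer once nothing remains to receive. A sketch of the control flow; the shared-IRQ early-out and the RXF/RXR conditions are assumptions consistent with the two blind-read sizes in the matches:

static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
{
	struct bcm2835_spi *bs = dev_id;
	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);

	/* shared IRQ line: bail out if our interrupts are not enabled */
	if (!(cs & BCM2835_SPI_CS_INTR))
		return IRQ_NONE;

	/* RX FIFO full: drain it all; RX FIFO >= 3/4 full: drain 3/4 */
	if (cs & BCM2835_SPI_CS_RXF)
		bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
	else if (cs & BCM2835_SPI_CS_RXR)
		bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE_3_4);

	/* DONE means TX ran empty: refill the whole FIFO blind */
	if (bs->tx_len && cs & BCM2835_SPI_CS_DONE)
		bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);

	/* mop up whatever RXD/TXD still flag as pending */
	bcm2835_rd_fifo(bs);
	bcm2835_wr_fifo(bs);

	if (!bs->rx_len) {
		/* all bytes in and out: stop the HW and complete */
		bcm2835_spi_reset_hw(bs);
		spi_finalize_current_transfer(bs->ctlr);
	}

	return IRQ_HANDLED;
}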
411 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
414 bs->count_transfer_irq++;
420 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
424 bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
425 bcm2835_wr_fifo(bs);
429 bcm2835_wr(bs, BCM2835_SPI_CS, cs);
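Lines 411-429 start an interrupt-mode transfer: enable TA without the interrupt bits first, pre-fill the FIFO, then switch the interrupt enables on. Sketch; the parameter list and the INTR/INTD bits in the final write are assumptions consistent with the matches:

static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
					struct spi_device *spi,
					struct spi_transfer *tfr,
					u32 cs, bool fifo_empty)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

	bs->count_transfer_irq++;

	/*
	 * Enable the HW block without the interrupt bits: writing them
	 * together would fire an interrupt before the FIFO is filled.
	 */
	bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);

	/* pre-fill TX while no interrupt can preempt the setup */
	if (fifo_empty)
		bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
	bcm2835_wr_fifo(bs);

	/* now arm the DONE and RXR interrupts; the handler does the rest */
	cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
	bcm2835_wr(bs, BCM2835_SPI_CS, cs);

	/* signal that this transfer completes asynchronously */
	return 1;
}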
439 * @bs: BCM2835 SPI controller
483 struct bcm2835_spi *bs,
488 bs->tfr = tfr;
489 bs->tx_prologue = 0;
490 bs->rx_prologue = 0;
491 bs->tx_spillover = false;
493 if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0]))
494 bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;
496 if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) {
497 bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;
499 if (bs->rx_prologue > bs->tx_prologue) {
500 if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) {
501 bs->tx_prologue = bs->rx_prologue;
503 bs->tx_prologue += 4;
504 bs->tx_spillover =
511 if (!bs->tx_prologue)
515 if (bs->rx_prologue) {
516 bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
517 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
519 bcm2835_wr_fifo_count(bs, bs->rx_prologue);
520 bcm2835_wait_tx_fifo_empty(bs);
521 bcm2835_rd_fifo_count(bs, bs->rx_prologue);
522 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_RX
528 bs->rx_prologue, DMA_FROM_DEVICE);
530 sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
531 sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
534 if (!bs->tx_buf)
541 tx_remaining = bs->tx_prologue - bs->rx_prologue;
543 bcm2835_wr(bs, BCM2835_SPI_DLEN, tx_remaining);
544 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
546 bcm2835_wr_fifo_count(bs, tx_remaining);
547 bcm2835_wait_tx_fifo_empty(bs);
548 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX
552 if (likely(!bs->tx_spillover)) {
553 sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
554 sg_dma_len(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
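The block at 483-554 handles the DMA "prologue": the DMA engine moves data in 4-byte units, so when the first scatterlist entry's length is not a multiple of 4, its 1-3 leading bytes are transferred by the CPU first and the entry's address/length are advanced past them. The core of the idea, assembled from the matches (the CS flag details and the spillover corner case are omitted here):

	/* low two bits of the first sg entry = bytes the CPU must handle */
	bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;
	bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;

	if (bs->rx_prologue) {
		/* clock the prologue bytes out and back in via the FIFO */
		bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
		bcm2835_wr_fifo_count(bs, bs->rx_prologue);
		bcm2835_wait_tx_fifo_empty(bs);
		bcm2835_rd_fifo_count(bs, bs->rx_prologue);

		/* then step the sg entry past the bytes already done */
		sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
		sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
	}

bcm2835_spi_undo_prologue() at 570-594 reverses exactly these address/length adjustments once the DMA completes or is torn down.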
564 * @bs: BCM2835 SPI controller
570 static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
572 struct spi_transfer *tfr = bs->tfr;
574 if (!bs->tx_prologue)
577 if (bs->rx_prologue) {
578 sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
579 sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
582 if (!bs->tx_buf)
585 if (likely(!bs->tx_spillover)) {
586 sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
587 sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
589 sg_dma_len(&tfr->tx_sg.sgl[0]) = bs->tx_prologue - 4;
594 bs->tx_prologue = 0;
606 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
614 bs->tx_dma_active = false;
615 bs->rx_dma_active = false;
616 bcm2835_spi_undo_prologue(bs);
619 bcm2835_spi_reset_hw(bs);
634 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
637 while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
638 bcm2835_wr(bs, BCM2835_SPI_CS, bs->target->clear_rx_cs);
640 bs->tx_dma_active = false;
648 if (cmpxchg(&bs->rx_dma_active, true, false))
651 bcm2835_spi_undo_prologue(bs);
652 bcm2835_spi_reset_hw(bs);
660 * @bs: BCM2835 SPI controller
669 struct bcm2835_spi *bs,
710 bs->target = target;
771 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
776 bs->count_transfer_dma++;
782 bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);
785 if (bs->tx_buf) {
786 ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, target, true);
788 cookie = dmaengine_submit(bs->fill_tx_desc);
795 bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);
798 bcm2835_wr(bs, BCM2835_SPI_CS,
801 bs->tx_dma_active = true;
811 if (bs->rx_buf) {
812 ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, target, false);
820 bs->tx_dma_active = false;
826 bs->rx_dma_active = true;
833 if (!bs->rx_buf && !bs->tx_dma_active &&
834 cmpxchg(&bs->rx_dma_active, true, false)) {
836 bcm2835_spi_reset_hw(bs);
843 bcm2835_spi_reset_hw(bs);
844 bcm2835_spi_undo_prologue(bs);
861 struct bcm2835_spi *bs)
866 if (bs->fill_tx_desc)
867 dmaengine_desc_free(bs->fill_tx_desc);
869 if (bs->fill_tx_addr)
871 bs->fill_tx_addr, sizeof(u32),
887 struct bcm2835_spi *bs)
931 bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev,
935 if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
937 bs->fill_tx_addr = 0;
942 bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx,
943 bs->fill_tx_addr,
946 if (!bs->fill_tx_desc) {
952 ret = dmaengine_desc_set_reuse(bs->fill_tx_desc);
981 bcm2835_dma_release(ctlr, bs);
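Lines 931-952 set up the TX side for RX-only DMA transfers: instead of allocating a bounce buffer, the driver maps the kernel zero page once and prepares a reusable cyclic descriptor over a single 32-bit word, which then feeds zeros to the TX FIFO indefinitely. Sketch; the mapping flags and error values are assumptions:

	bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev,
					      ZERO_PAGE(0), 0, sizeof(u32),
					      DMA_TO_DEVICE,
					      DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
		bs->fill_tx_addr = 0;
		return -ENOMEM;
	}

	/* one cyclic period of 4 zero bytes, replayed forever */
	bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx,
						     bs->fill_tx_addr,
						     sizeof(u32), 0,
						     DMA_MEM_TO_DEV, 0);
	if (!bs->fill_tx_desc)
		return -ENOMEM;

	/* mark it reusable so one descriptor serves every transfer */
	return dmaengine_desc_set_reuse(bs->fill_tx_desc);

bcm2835_dma_release() at 861-871 frees the descriptor and unmaps the zero page again.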
998 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
1002 bs->count_transfer_polling++;
1005 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
1011 bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
1017 while (bs->rx_len) {
1019 bcm2835_wr_fifo(bs);
1022 bcm2835_rd_fifo(bs);
1027 if (bs->rx_len && time_after(jiffies, timeout)) {
1031 bs->tx_len, bs->rx_len);
1035 bs->count_transfer_irq_after_polling++;
1043 bcm2835_spi_reset_hw(bs);
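Lines 998-1043 implement the polling path used for short transfers: start the HW, blind-fill the whole FIFO, then ping-pong write/read until done, falling back to interrupt mode if a deadline passes. Sketch; the timeout computation is simplified (the driver derives it from a configurable polling budget) and the ratelimited debug message at 1027-1031 is elided:

static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
					 struct spi_device *spi,
					 struct spi_transfer *tfr,
					 u32 cs)
{
	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
	unsigned long timeout = jiffies + 2;	/* simplified deadline */

	bs->count_transfer_polling++;

	/* enable the HW block without interrupts */
	bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);

	/* fill the entire FIFO in one blind burst */
	bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);

	while (bs->rx_len) {
		bcm2835_wr_fifo(bs);	/* top up TX */
		bcm2835_rd_fifo(bs);	/* drain RX */

		if (bs->rx_len && time_after(jiffies, timeout)) {
			/* taking too long: hand over to interrupt mode */
			bs->count_transfer_irq_after_polling++;
			return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr,
							    cs, false);
		}
	}

	/* transfer complete: reset the HW and return synchronously */
	bcm2835_spi_reset_hw(bs);
	return 0;
}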
1052 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
1061 if (spi_hz >= bs->clk_hz / 2) {
1065 cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
1073 tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
1074 bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
1081 bs->tx_buf = tfr->tx_buf;
1082 bs->rx_buf = tfr->rx_buf;
1083 bs->tx_len = tfr->len;
1084 bs->rx_len = tfr->len;
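Lines 1052-1084 configure the clock divider and latch the transfer state. CDIV is rounded up so the bus never runs faster than requested, forced even (the controller takes even dividers), and 0 selects the hardware maximum of 65536, which is why the effective rate at 1073 falls back to clk_hz / 65536. A standalone model of the arithmetic (the clock values are made-up examples):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t clk_hz = 200000000;	/* example core clock */
	uint32_t spi_hz = 9000000;	/* example requested rate */
	uint32_t cdiv;

	if (spi_hz >= clk_hz / 2) {
		cdiv = 2;				/* fastest possible */
	} else if (spi_hz) {
		cdiv = (clk_hz + spi_hz - 1) / spi_hz;	/* DIV_ROUND_UP */
		cdiv += (cdiv % 2);			/* CDIV must be even */
		if (cdiv >= 65536)
			cdiv = 0;			/* slowest possible */
	} else {
		cdiv = 0;
	}

	printf("cdiv=%u effective=%u Hz\n",
	       cdiv, cdiv ? clk_hz / cdiv : clk_hz / 65536);
	return 0;
}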
1114 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
1134 bcm2835_wr(bs, BCM2835_SPI_CS, target->prepare_cs);
1142 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
1147 bs->tx_dma_active = false;
1151 bs->rx_dma_active = false;
1153 bcm2835_spi_undo_prologue(bs);
1156 bcm2835_spi_reset_hw(bs);
1183 struct bcm2835_spi *bs,
1222 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
1236 ret = bcm2835_spi_setup_dma(ctlr, spi, bs, target);
1328 struct bcm2835_spi *bs;
1331 ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*bs));
1348 bs = spi_controller_get_devdata(ctlr);
1349 bs->ctlr = ctlr;
1351 bs->regs = devm_platform_ioremap_resource(pdev, 0);
1352 if (IS_ERR(bs->regs))
1353 return PTR_ERR(bs->regs);
1355 bs->clk = devm_clk_get(&pdev->dev, NULL);
1356 if (IS_ERR(bs->clk))
1357 return dev_err_probe(&pdev->dev, PTR_ERR(bs->clk),
1360 ctlr->max_speed_hz = clk_get_rate(bs->clk) / 2;
1362 bs->irq = platform_get_irq(pdev, 0);
1363 if (bs->irq < 0)
1364 return bs->irq;
1366 err = clk_prepare_enable(bs->clk);
1369 bs->clk_hz = clk_get_rate(bs->clk);
1371 err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
1376 bcm2835_wr(bs, BCM2835_SPI_CS,
1379 err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt,
1380 IRQF_SHARED, dev_name(&pdev->dev), bs);
1393 bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
1398 bcm2835_dma_release(ctlr, bs);
1400 clk_disable_unprepare(bs->clk);
1407 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
1409 bcm2835_debugfs_remove(bs);
1413 bcm2835_dma_release(ctlr, bs);
1416 bcm2835_wr(bs, BCM2835_SPI_CS,
1419 clk_disable_unprepare(bs->clk);