Lines matching refs: ctlr — references to the struct spi_controller handle in the SPI core, listed by source line number.
148 struct spi_controller *ctlr = container_of(dev, \
150 return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
315 struct spi_controller *ctlr)
332 (xfer->tx_buf != ctlr->dummy_tx))
335 (xfer->rx_buf != ctlr->dummy_rx))
552 * @ctlr: Controller to which device is connected
567 struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
571 if (!spi_controller_get(ctlr))
576 spi_controller_put(ctlr);
583 spi_controller_put(ctlr);
587 spi->master = spi->controller = ctlr;
588 spi->dev.parent = &ctlr->dev;
591 spi->mode = ctlr->buswidth_override_bits;
630 struct spi_controller *ctlr = spi->controller;
631 struct device *dev = ctlr->dev.parent;
635 if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
637 ctlr->num_chipselect);
658 !device_is_registered(&ctlr->dev)) {
662 if (ctlr->cs_gpiods)
663 spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);
701 struct spi_controller *ctlr = spi->controller;
704 mutex_lock(&ctlr->add_lock);
706 mutex_unlock(&ctlr->add_lock);
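For context on the spi_alloc_device()/spi_add_device() pairing referenced between lines 567 and 706 above, here is a minimal sketch of how a caller might create and register a child device on a controller. The helper name, modalias string, chip-select number and speed are made up for illustration.

    #include <linux/err.h>
    #include <linux/spi/spi.h>
    #include <linux/string.h>

    /* Hypothetical helper: create one child device on @ctlr. */
    static struct spi_device *example_add_child(struct spi_controller *ctlr)
    {
            struct spi_device *spi;
            int ret;

            spi = spi_alloc_device(ctlr);           /* takes a reference on the controller */
            if (!spi)
                    return ERR_PTR(-ENOMEM);

            strscpy(spi->modalias, "example-dev", sizeof(spi->modalias));
            spi_set_chipselect(spi, 0, 0);          /* must be below ctlr->num_chipselect */
            spi->max_speed_hz = 1000000;
            spi->mode = SPI_MODE_0;

            ret = spi_add_device(spi);              /* validates the CS and registers the device */
            if (ret) {
                    spi_dev_put(spi);               /* drops the device and controller references */
                    return ERR_PTR(ret);
            }
            return spi;
    }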
713 * @ctlr: Controller to which device is connected
725 struct spi_device *spi_new_device(struct spi_controller *ctlr,
739 proxy = spi_alloc_device(ctlr);
757 dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
801 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
806 if (ctlr->bus_num != bi->bus_num)
809 dev = spi_new_device(ctlr, bi);
811 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
849 struct spi_controller *ctlr;
855 list_for_each_entry(ctlr, &spi_controller_list, list)
856 spi_match_controller_to_boardinfo(ctlr,
927 * @ctlr: the @spi_controller
930 static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
936 res->release(ctlr, message, res->data);
1005 static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
1030 desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
1082 int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
1086 return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
1089 static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
1102 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
1105 spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
1108 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1114 if (!ctlr->can_dma)
1117 if (ctlr->dma_tx)
1118 tx_dev = ctlr->dma_tx->device->dev;
1119 else if (ctlr->dma_map_dev)
1120 tx_dev = ctlr->dma_map_dev;
1122 tx_dev = ctlr->dev.parent;
1124 if (ctlr->dma_rx)
1125 rx_dev = ctlr->dma_rx->device->dev;
1126 else if (ctlr->dma_map_dev)
1127 rx_dev = ctlr->dma_map_dev;
1129 rx_dev = ctlr->dev.parent;
1135 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1139 ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1148 ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1152 spi_unmap_buf_attrs(ctlr, tx_dev,
1161 ctlr->cur_rx_dma_dev = rx_dev;
1162 ctlr->cur_tx_dma_dev = tx_dev;
1163 ctlr->cur_msg_mapped = true;
1168 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
1170 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1171 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1174 if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
1181 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1184 spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1186 spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1190 ctlr->cur_msg_mapped = false;
1195 static void spi_dma_sync_for_device(struct spi_controller *ctlr,
1198 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1199 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1201 if (!ctlr->cur_msg_mapped)
1210 static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
1213 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1214 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1216 if (!ctlr->cur_msg_mapped)
1225 static inline int __spi_map_msg(struct spi_controller *ctlr,
1231 static inline int __spi_unmap_msg(struct spi_controller *ctlr,
1248 static inline int spi_unmap_msg(struct spi_controller *ctlr,
1258 if (xfer->tx_buf == ctlr->dummy_tx)
1260 if (xfer->rx_buf == ctlr->dummy_rx)
1264 return __spi_unmap_msg(ctlr, msg);
1267 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1273 if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1279 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1282 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1288 tmp = krealloc(ctlr->dummy_tx, max_tx,
1292 ctlr->dummy_tx = tmp;
1296 tmp = krealloc(ctlr->dummy_rx, max_rx,
1300 ctlr->dummy_rx = tmp;
1309 xfer->tx_buf = ctlr->dummy_tx;
1311 xfer->rx_buf = ctlr->dummy_rx;
1316 return __spi_map_msg(ctlr, msg);
1319 static int spi_transfer_wait(struct spi_controller *ctlr,
1323 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1328 if (spi_controller_is_slave(ctlr)) {
1329 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1354 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1481 static int spi_transfer_one_message(struct spi_controller *ctlr,
1487 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1499 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1500 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1502 if (!ctlr->ptp_sts_supported) {
1508 reinit_completion(&ctlr->xfer_completion);
1511 spi_dma_sync_for_device(ctlr, xfer);
1512 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1514 spi_dma_sync_for_cpu(ctlr, xfer);
1516 if (ctlr->cur_msg_mapped &&
1518 __spi_unmap_msg(ctlr, msg);
1519 ctlr->fallback = true;
1534 ret = spi_transfer_wait(ctlr, msg, xfer);
1539 spi_dma_sync_for_cpu(ctlr, xfer);
1547 if (!ctlr->ptp_sts_supported) {
1585 if (msg->status && ctlr->handle_err)
1586 ctlr->handle_err(ctlr, msg);
1588 spi_finalize_current_message(ctlr);
1595 * @ctlr: the controller reporting completion
1601 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1603 complete(&ctlr->xfer_completion);
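spi_finalize_current_transfer() (line 1601 above) is normally paired with a ->transfer_one() that returns a positive value to signal the transfer is still in flight; the completion interrupt then finishes it. A sketch of that split, where foo_priv and foo_hw_start_xfer() are placeholders for driver specifics:

    #include <linux/interrupt.h>
    #include <linux/spi/spi.h>

    struct foo_priv;                                /* driver state, details omitted */
    void foo_hw_start_xfer(struct foo_priv *priv, struct spi_transfer *xfer);

    static int foo_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
                                struct spi_transfer *xfer)
    {
            struct foo_priv *priv = spi_controller_get_devdata(ctlr);

            foo_hw_start_xfer(priv, xfer);          /* kick off the FIFO/DMA transfer */
            return 1;                               /* positive: transfer still in flight */
    }

    static irqreturn_t foo_irq(int irq, void *dev_id)
    {
            struct spi_controller *ctlr = dev_id;

            /* ...ack the interrupt and drain the receive FIFO... */
            spi_finalize_current_transfer(ctlr);    /* completes ctlr->xfer_completion */
            return IRQ_HANDLED;
    }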
1607 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1609 if (ctlr->auto_runtime_pm) {
1610 pm_runtime_mark_last_busy(ctlr->dev.parent);
1611 pm_runtime_put_autosuspend(ctlr->dev.parent);
1615 static int __spi_pump_transfer_message(struct spi_controller *ctlr,
1621 if (!was_busy && ctlr->auto_runtime_pm) {
1622 ret = pm_runtime_get_sync(ctlr->dev.parent);
1624 pm_runtime_put_noidle(ctlr->dev.parent);
1625 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1629 spi_finalize_current_message(ctlr);
1636 trace_spi_controller_busy(ctlr);
1638 if (!was_busy && ctlr->prepare_transfer_hardware) {
1639 ret = ctlr->prepare_transfer_hardware(ctlr);
1641 dev_err(&ctlr->dev,
1645 if (ctlr->auto_runtime_pm)
1646 pm_runtime_put(ctlr->dev.parent);
1649 spi_finalize_current_message(ctlr);
1657 ret = spi_split_transfers_maxsize(ctlr, msg,
1662 spi_finalize_current_message(ctlr);
1666 if (ctlr->prepare_message) {
1667 ret = ctlr->prepare_message(ctlr, msg);
1669 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1672 spi_finalize_current_message(ctlr);
1678 ret = spi_map_msg(ctlr, msg);
1681 spi_finalize_current_message(ctlr);
1685 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1698 * ctlr->cur_msg.
1705 WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1706 WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1707 reinit_completion(&ctlr->cur_msg_completion);
1710 ret = ctlr->transfer_one_message(ctlr, msg);
1712 dev_err(&ctlr->dev,
1717 WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1719 if (READ_ONCE(ctlr->cur_msg_incomplete))
1720 wait_for_completion(&ctlr->cur_msg_completion);
1727 * @ctlr: controller to process queue for
1738 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1746 mutex_lock(&ctlr->io_mutex);
1749 spin_lock_irqsave(&ctlr->queue_lock, flags);
1752 if (ctlr->cur_msg)
1756 if (list_empty(&ctlr->queue) || !ctlr->running) {
1757 if (!ctlr->busy)
1762 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1763 !ctlr->unprepare_transfer_hardware) {
1764 spi_idle_runtime_pm(ctlr);
1765 ctlr->busy = false;
1766 ctlr->queue_empty = true;
1767 trace_spi_controller_idle(ctlr);
1769 kthread_queue_work(ctlr->kworker,
1770 &ctlr->pump_messages);
1775 ctlr->busy = false;
1776 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1778 kfree(ctlr->dummy_rx);
1779 ctlr->dummy_rx = NULL;
1780 kfree(ctlr->dummy_tx);
1781 ctlr->dummy_tx = NULL;
1782 if (ctlr->unprepare_transfer_hardware &&
1783 ctlr->unprepare_transfer_hardware(ctlr))
1784 dev_err(&ctlr->dev,
1786 spi_idle_runtime_pm(ctlr);
1787 trace_spi_controller_idle(ctlr);
1789 spin_lock_irqsave(&ctlr->queue_lock, flags);
1790 ctlr->queue_empty = true;
1795 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1796 ctlr->cur_msg = msg;
1799 if (ctlr->busy)
1802 ctlr->busy = true;
1803 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1805 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1806 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1808 ctlr->cur_msg = NULL;
1809 ctlr->fallback = false;
1811 mutex_unlock(&ctlr->io_mutex);
1819 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1820 mutex_unlock(&ctlr->io_mutex);
1829 struct spi_controller *ctlr =
1832 __spi_pump_messages(ctlr, true);
1837 * @ctlr: Pointer to the spi_controller structure of the driver
1855 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1872 local_irq_save(ctlr->irq_flags);
1882 * @ctlr: Pointer to the spi_controller structure of the driver
1892 void spi_take_timestamp_post(struct spi_controller *ctlr,
1908 local_irq_restore(ctlr->irq_flags);
1921 * @ctlr: controller to boost priority of
1934 static void spi_set_thread_rt(struct spi_controller *ctlr)
1936 dev_info(&ctlr->dev,
1938 sched_set_fifo(ctlr->kworker->task);
1941 static int spi_init_queue(struct spi_controller *ctlr)
1943 ctlr->running = false;
1944 ctlr->busy = false;
1945 ctlr->queue_empty = true;
1947 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1948 if (IS_ERR(ctlr->kworker)) {
1949 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1950 return PTR_ERR(ctlr->kworker);
1953 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1962 if (ctlr->rt)
1963 spi_set_thread_rt(ctlr);
1971 * @ctlr: the controller to check for queued messages
1978 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1984 spin_lock_irqsave(&ctlr->queue_lock, flags);
1985 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1987 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1995 * @ctlr: the controller to return the message to
2000 void spi_finalize_current_message(struct spi_controller *ctlr)
2006 mesg = ctlr->cur_msg;
2008 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2015 if (unlikely(ctlr->ptp_sts_supported))
2019 spi_unmap_msg(ctlr, mesg);
2028 spi_res_release(ctlr, mesg);
2030 if (mesg->prepared && ctlr->unprepare_message) {
2031 ret = ctlr->unprepare_message(ctlr, mesg);
2033 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2040 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2042 if (READ_ONCE(ctlr->cur_msg_need_completion))
2043 complete(&ctlr->cur_msg_completion);
2053 static int spi_start_queue(struct spi_controller *ctlr)
2057 spin_lock_irqsave(&ctlr->queue_lock, flags);
2059 if (ctlr->running || ctlr->busy) {
2060 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2064 ctlr->running = true;
2065 ctlr->cur_msg = NULL;
2066 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2068 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2073 static int spi_stop_queue(struct spi_controller *ctlr)
2079 spin_lock_irqsave(&ctlr->queue_lock, flags);
2083 * A wait_queue on the ctlr->busy could be used, but then the common
2087 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2088 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2090 spin_lock_irqsave(&ctlr->queue_lock, flags);
2093 if (!list_empty(&ctlr->queue) || ctlr->busy)
2096 ctlr->running = false;
2098 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2101 dev_warn(&ctlr->dev, "could not stop message queue\n");
2107 static int spi_destroy_queue(struct spi_controller *ctlr)
2111 ret = spi_stop_queue(ctlr);
2120 dev_err(&ctlr->dev, "problem destroying queue\n");
2124 kthread_destroy_worker(ctlr->kworker);
2133 struct spi_controller *ctlr = spi->controller;
2136 spin_lock_irqsave(&ctlr->queue_lock, flags);
2138 if (!ctlr->running) {
2139 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2145 list_add_tail(&msg->queue, &ctlr->queue);
2146 ctlr->queue_empty = false;
2147 if (!ctlr->busy && need_pump)
2148 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2150 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2166 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2170 ctlr->transfer = spi_queued_transfer;
2171 if (!ctlr->transfer_one_message)
2172 ctlr->transfer_one_message = spi_transfer_one_message;
2175 ret = spi_init_queue(ctlr);
2177 dev_err(&ctlr->dev, "problem initializing queue\n");
2180 ctlr->queued = true;
2181 ret = spi_start_queue(ctlr);
2183 dev_err(&ctlr->dev, "problem starting queue\n");
2190 spi_destroy_queue(ctlr);
2198 * @ctlr: controller to process queue for
2205 void spi_flush_queue(struct spi_controller *ctlr)
2207 if (ctlr->transfer == spi_queued_transfer)
2208 __spi_pump_messages(ctlr, false);
2230 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2266 dev_warn(&ctlr->dev,
2290 dev_warn(&ctlr->dev,
2297 if (spi_controller_is_slave(ctlr)) {
2299 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2309 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2328 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2334 spi = spi_alloc_device(ctlr);
2336 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2345 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2349 rc = of_spi_parse_dt(ctlr, spi, nc);
2361 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2376 * @ctlr: Pointer to spi_controller device
2381 static void of_register_spi_devices(struct spi_controller *ctlr)
2386 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2389 spi = of_register_spi_device(ctlr, nc);
2391 dev_warn(&ctlr->dev,
2398 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2416 struct spi_controller *ctlr = spi->controller;
2421 ancillary = spi_alloc_device(ctlr);
2436 WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2455 struct spi_controller *ctlr;
2539 struct spi_controller *ctlr = lookup->ctlr;
2559 if (ctlr) {
2560 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2569 ctlr = acpi_spi_find_controller_by_adev(adev);
2570 if (!ctlr)
2573 lookup->ctlr = ctlr;
2583 if (ctlr->fw_translate_cs) {
2584 int cs = ctlr->fw_translate_cs(ctlr,
2616 * @ctlr: controller to which the spi device belongs
2623 * If ctlr is set to NULL, the Controller for the SPI device will be looked up
2626 * Note: If index is -1, ctlr must be set.
2630 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2640 if (!ctlr && index == -1)
2643 lookup.ctlr = ctlr;
2659 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2667 spi = spi_alloc_device(lookup.ctlr);
2669 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2685 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2694 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2713 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2725 struct spi_controller *ctlr = data;
2730 return acpi_register_spi_device(ctlr, adev);
2735 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2740 handle = ACPI_HANDLE(ctlr->dev.parent);
2746 acpi_spi_add_device, NULL, ctlr, NULL);
2748 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2751 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2756 struct spi_controller *ctlr;
2758 ctlr = container_of(dev, struct spi_controller, dev);
2759 kfree(ctlr);
2776 struct spi_controller *ctlr = spi->controller;
2778 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2779 return ctlr->slave_abort(ctlr);
2787 struct spi_controller *ctlr = spi->controller;
2789 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2790 return ctlr->target_abort(ctlr);
2799 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2803 child = device_find_any_child(&ctlr->dev);
2810 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2821 child = device_find_any_child(&ctlr->dev);
2830 spi = spi_alloc_device(ctlr);
2900 struct spi_controller *ctlr;
2901 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2906 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2907 if (!ctlr)
2910 device_initialize(&ctlr->dev);
2911 INIT_LIST_HEAD(&ctlr->queue);
2912 spin_lock_init(&ctlr->queue_lock);
2913 spin_lock_init(&ctlr->bus_lock_spinlock);
2914 mutex_init(&ctlr->bus_lock_mutex);
2915 mutex_init(&ctlr->io_mutex);
2916 mutex_init(&ctlr->add_lock);
2917 ctlr->bus_num = -1;
2918 ctlr->num_chipselect = 1;
2919 ctlr->slave = slave;
2921 ctlr->dev.class = &spi_slave_class;
2923 ctlr->dev.class = &spi_master_class;
2924 ctlr->dev.parent = dev;
2925 pm_suspend_ignore_children(&ctlr->dev, true);
2926 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2928 return ctlr;
2932 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2934 spi_controller_put(*(struct spi_controller **)ctlr);
2956 struct spi_controller **ptr, *ctlr;
2963 ctlr = __spi_alloc_controller(dev, size, slave);
2964 if (ctlr) {
2965 ctlr->devm_allocated = true;
2966 *ptr = ctlr;
2972 return ctlr;
2978 * @ctlr: The SPI master to grab GPIO descriptors for
2980 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2984 struct device *dev = &ctlr->dev;
2996 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2998 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3002 ctlr->cs_gpiods = cs;
3033 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3040 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3042 if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3043 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3051 static int spi_controller_check_ops(struct spi_controller *ctlr)
3060 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3061 if (!ctlr->transfer && !ctlr->transfer_one &&
3062 !ctlr->transfer_one_message) {
3071 static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3076 id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
3080 ctlr->bus_num = id;
3086 * @ctlr: initialized master, originally from spi_alloc_master() or
3107 int spi_register_controller(struct spi_controller *ctlr)
3109 struct device *dev = ctlr->dev.parent;
3121 status = spi_controller_check_ops(ctlr);
3125 if (ctlr->bus_num < 0)
3126 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3127 if (ctlr->bus_num >= 0) {
3129 status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3133 if (ctlr->bus_num < 0) {
3140 status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3144 ctlr->bus_lock_flag = 0;
3145 init_completion(&ctlr->xfer_completion);
3146 init_completion(&ctlr->cur_msg_completion);
3147 if (!ctlr->max_dma_len)
3148 ctlr->max_dma_len = INT_MAX;
3154 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3156 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3157 status = spi_get_gpio_descs(ctlr);
3164 ctlr->mode_bits |= SPI_CS_HIGH;
3171 if (!ctlr->num_chipselect) {
3177 ctlr->last_cs = -1;
3179 status = device_add(&ctlr->dev);
3183 spi_controller_is_slave(ctlr) ? "slave" : "master",
3184 dev_name(&ctlr->dev));
3191 if (ctlr->transfer) {
3193 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3194 status = spi_controller_initialize_queue(ctlr);
3196 device_del(&ctlr->dev);
3201 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3202 if (!ctlr->pcpu_statistics) {
3209 list_add_tail(&ctlr->list, &spi_controller_list);
3211 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3215 of_register_spi_devices(ctlr);
3216 acpi_register_spi_devices(ctlr);
3220 spi_destroy_queue(ctlr);
3223 idr_remove(&spi_master_idr, ctlr->bus_num);
3238 * @ctlr: initialized controller, originally from spi_alloc_master() or
3248 struct spi_controller *ctlr)
3257 ret = spi_register_controller(ctlr);
3259 *ptr = ctlr;
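The allocation and registration entry points referenced above (the controller allocation around line 2900, spi_register_controller at line 3107, and the devm wrapper at line 3248) usually come together in a host driver's probe() roughly as below. All foo_* names and the numeric limits are illustrative, and foo_transfer_one() is the handler sketched earlier.

    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    struct foo_priv {
            void __iomem *regs;                     /* placeholder driver state */
    };

    /* foo_transfer_one() as sketched earlier. */
    static int foo_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
                                struct spi_transfer *xfer);

    static int foo_spi_probe(struct platform_device *pdev)
    {
            struct spi_controller *ctlr;

            /* The devm variant drops the controller reference on driver detach. */
            ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
            if (!ctlr)
                    return -ENOMEM;

            platform_set_drvdata(pdev, ctlr);
            ctlr->dev.of_node = pdev->dev.of_node;
            ctlr->bus_num = -1;                     /* let the core pick a dynamic bus number */
            ctlr->num_chipselect = 4;
            ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
            ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
            ctlr->transfer_one = foo_transfer_one;  /* the core sets up the message queue */

            return devm_spi_register_controller(&pdev->dev, ctlr);
    }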
3277 * @ctlr: the controller being unregistered
3287 void spi_unregister_controller(struct spi_controller *ctlr)
3290 int id = ctlr->bus_num;
3294 mutex_lock(&ctlr->add_lock);
3296 device_for_each_child(&ctlr->dev, NULL, __unregister);
3302 if (ctlr->queued) {
3303 if (spi_destroy_queue(ctlr))
3304 dev_err(&ctlr->dev, "queue remove failed\n");
3307 list_del(&ctlr->list);
3310 device_del(&ctlr->dev);
3314 if (found == ctlr)
3319 mutex_unlock(&ctlr->add_lock);
3325 if (!ctlr->devm_allocated)
3326 put_device(&ctlr->dev);
3330 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3332 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3335 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3337 mutex_lock(&ctlr->bus_lock_mutex);
3338 ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3339 mutex_unlock(&ctlr->bus_lock_mutex);
3342 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3344 mutex_lock(&ctlr->bus_lock_mutex);
3345 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3346 mutex_unlock(&ctlr->bus_lock_mutex);
3349 int spi_controller_suspend(struct spi_controller *ctlr)
3354 if (ctlr->queued) {
3355 ret = spi_stop_queue(ctlr);
3357 dev_err(&ctlr->dev, "queue stop failed\n");
3360 __spi_mark_suspended(ctlr);
3365 int spi_controller_resume(struct spi_controller *ctlr)
3369 __spi_mark_resumed(ctlr);
3371 if (ctlr->queued) {
3372 ret = spi_start_queue(ctlr);
3374 dev_err(&ctlr->dev, "queue restart failed\n");
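spi_controller_suspend() and spi_controller_resume() (lines 3349 and 3365 above) are meant to be called from the host driver's own PM callbacks. A minimal sketch, assuming the probe routine stored the controller with platform_set_drvdata() as in the earlier probe example:

    #include <linux/pm.h>
    #include <linux/spi/spi.h>

    static int foo_spi_suspend(struct device *dev)
    {
            struct spi_controller *ctlr = dev_get_drvdata(dev);

            /* Stops the message queue; new messages fail with -ESHUTDOWN
             * (SPI_CONTROLLER_SUSPENDED) until the controller is resumed. */
            return spi_controller_suspend(ctlr);
    }

    static int foo_spi_resume(struct device *dev)
    {
            struct spi_controller *ctlr = dev_get_drvdata(dev);

            return spi_controller_resume(ctlr);     /* restarts the queue */
    }

    static DEFINE_SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);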
3384 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3393 rxfer->release(ctlr, msg, res);
3515 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3575 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3587 * @ctlr: the @spi_controller for this transfer
3594 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3611 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3627 * @ctlr: the @spi_controller for this transfer
3634 int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3654 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
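spi_split_transfers_maxsize() (line 3594) and spi_split_transfers_maxwords() (line 3634) are intended for controllers with a hard transfer-length limit, typically called from their ->prepare_message() hook. A sketch with a made-up 64 KiB limit; the gfp_t argument matches the signature used in this version of the core:

    #include <linux/sizes.h>
    #include <linux/spi/spi.h>

    #define FOO_MAX_XFER_LEN        SZ_64K          /* illustrative hardware limit */

    static int foo_prepare_message(struct spi_controller *ctlr,
                                   struct spi_message *msg)
    {
            /* Replace any transfer longer than FOO_MAX_XFER_LEN with a chain of
             * shorter ones; the split is undone via spi_res when the message is
             * finalized (see spi_res_release at line 930 above). */
            return spi_split_transfers_maxsize(ctlr, msg, FOO_MAX_XFER_LEN,
                                               GFP_KERNEL);
    }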
3672 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3675 if (ctlr->bits_per_word_mask) {
3679 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3887 struct spi_controller *ctlr = spi->controller;
3901 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3909 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3928 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3930 unsigned flags = ctlr->flags;
3960 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3961 xfer->speed_hz = ctlr->max_speed_hz;
3963 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3981 if (xfer->speed_hz && ctlr->min_speed_hz &&
3982 xfer->speed_hz < ctlr->min_speed_hz)
4035 struct spi_controller *ctlr = spi->controller;
4042 if (!ctlr->transfer)
4047 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4052 if (!ctlr->ptp_sts_supported) {
4059 return ctlr->transfer(spi, message);
4095 struct spi_controller *ctlr = spi->controller;
4103 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4105 if (ctlr->bus_lock_flag)
4110 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4149 struct spi_controller *ctlr = spi->controller;
4157 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4161 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4167 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4172 mutex_lock(&ctlr->io_mutex);
4174 was_busy = ctlr->busy;
4176 ctlr->cur_msg = msg;
4177 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4179 dev_err(&ctlr->dev, "noqueue transfer failed\n");
4180 ctlr->cur_msg = NULL;
4181 ctlr->fallback = false;
4184 kfree(ctlr->dummy_rx);
4185 ctlr->dummy_rx = NULL;
4186 kfree(ctlr->dummy_tx);
4187 ctlr->dummy_tx = NULL;
4188 if (ctlr->unprepare_transfer_hardware &&
4189 ctlr->unprepare_transfer_hardware(ctlr))
4190 dev_err(&ctlr->dev,
4192 spi_idle_runtime_pm(ctlr);
4195 mutex_unlock(&ctlr->io_mutex);
4215 struct spi_controller *ctlr = spi->controller;
4217 if (__spi_check_suspended(ctlr)) {
4228 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4237 if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4243 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4246 __spi_transfer_message_noqueue(ctlr, message);
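The synchronous path above (lines 4215 through 4246) is what protocol drivers reach through spi_sync(). A sketch of a typical caller; example_write_then_read() is a made-up name, and for small buffers the core already provides spi_write_then_read():

    #include <linux/spi/spi.h>

    static int example_write_then_read(struct spi_device *spi,
                                       const void *tx, size_t tx_len,
                                       void *rx, size_t rx_len)
    {
            struct spi_transfer xfers[] = {
                    { .tx_buf = tx, .len = tx_len },
                    { .rx_buf = rx, .len = rx_len },
            };
            struct spi_message msg;

            spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
            return spi_sync(spi, &msg);     /* may sleep; bypasses the kworker when the queue is empty */
    }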
4326 * @ctlr: SPI bus master that should be locked for exclusive bus access
4339 int spi_bus_lock(struct spi_controller *ctlr)
4343 mutex_lock(&ctlr->bus_lock_mutex);
4345 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4346 ctlr->bus_lock_flag = 1;
4347 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4357 * @ctlr: SPI bus master that was locked for exclusive bus access
4368 int spi_bus_unlock(struct spi_controller *ctlr)
4370 ctlr->bus_lock_flag = 0;
4372 mutex_unlock(&ctlr->bus_lock_mutex);
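spi_bus_lock() and spi_bus_unlock() (lines 4339 and 4368 above) let one client keep other devices' messages off the bus across a sequence of transfers, paired with spi_sync_locked(). A minimal sketch:

    #include <linux/spi/spi.h>

    static int example_locked_sequence(struct spi_device *spi,
                                       struct spi_message *first,
                                       struct spi_message *second)
    {
            struct spi_controller *ctlr = spi->controller;
            int ret;

            spi_bus_lock(ctlr);             /* other clients now block in spi_sync() */

            ret = spi_sync_locked(spi, first);
            if (!ret)
                    ret = spi_sync_locked(spi, second);

            spi_bus_unlock(ctlr);
            return ret;
    }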
4488 struct spi_controller *ctlr;
4493 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4494 if (ctlr == NULL)
4498 put_device(&ctlr->dev);
4507 spi = of_register_spi_device(ctlr, rd->dn);
4508 put_device(&ctlr->dev);
4579 struct spi_controller *ctlr;
4584 ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4585 if (!ctlr)
4588 acpi_register_spi_device(ctlr, adev);
4589 put_device(&ctlr->dev);