Lines matching refs:ctlr in drivers/spi/spi.c:

119 	struct spi_controller *ctlr = container_of(dev,			\
121 return spi_statistics_##field##_show(&ctlr->statistics, buf); \
290 struct spi_controller *ctlr)
305 (xfer->tx_buf != ctlr->dummy_tx))
308 (xfer->rx_buf != ctlr->dummy_rx))
477 * @ctlr: Controller to which device is connected
492 struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
496 if (!spi_controller_get(ctlr))
501 spi_controller_put(ctlr);
505 spi->master = spi->controller = ctlr;
506 spi->dev.parent = &ctlr->dev;
510 spi->mode = ctlr->buswidth_override_bits;
560 struct spi_controller *ctlr = spi->controller;
561 struct device *dev = ctlr->dev.parent;
565 if (spi->chip_select >= ctlr->num_chipselect) {
567 ctlr->num_chipselect);
578 mutex_lock(&ctlr->add_lock);
589 !device_is_registered(&ctlr->dev)) {
595 if (ctlr->cs_gpiods)
596 spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
597 else if (ctlr->cs_gpios)
598 spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
622 mutex_unlock(&ctlr->add_lock);
629 * @ctlr: Controller to which device is connected
641 struct spi_device *spi_new_device(struct spi_controller *ctlr,
654 proxy = spi_alloc_device(ctlr);
672 dev_err(&ctlr->dev,
718 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
723 if (ctlr->bus_num != bi->bus_num)
726 dev = spi_new_device(ctlr, bi);
728 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
767 struct spi_controller *ctlr;
779 list_for_each_entry(ctlr, &spi_controller_list, list)
780 spi_match_controller_to_boardinfo(ctlr,
856 int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
881 desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
937 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
948 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
954 if (!ctlr->can_dma)
957 if (ctlr->dma_tx)
958 tx_dev = ctlr->dma_tx->device->dev;
960 tx_dev = ctlr->dev.parent;
962 if (ctlr->dma_rx)
963 rx_dev = ctlr->dma_rx->device->dev;
965 rx_dev = ctlr->dev.parent;
968 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
972 ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
980 ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
984 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
991 ctlr->cur_msg_mapped = true;
996 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
1001 if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
1004 if (ctlr->dma_tx)
1005 tx_dev = ctlr->dma_tx->device->dev;
1007 tx_dev = ctlr->dev.parent;
1009 if (ctlr->dma_rx)
1010 rx_dev = ctlr->dma_rx->device->dev;
1012 rx_dev = ctlr->dev.parent;
1015 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1018 spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1019 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1022 ctlr->cur_msg_mapped = false;
1027 static inline int __spi_map_msg(struct spi_controller *ctlr,
1033 static inline int __spi_unmap_msg(struct spi_controller *ctlr,
1040 static inline int spi_unmap_msg(struct spi_controller *ctlr,
1050 if (xfer->tx_buf == ctlr->dummy_tx)
1052 if (xfer->rx_buf == ctlr->dummy_rx)
1056 return __spi_unmap_msg(ctlr, msg);
1059 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1065 if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1071 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1074 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1080 tmp = krealloc(ctlr->dummy_tx, max_tx,
1084 ctlr->dummy_tx = tmp;
1089 tmp = krealloc(ctlr->dummy_rx, max_rx,
1093 ctlr->dummy_rx = tmp;
1102 xfer->tx_buf = ctlr->dummy_tx;
1104 xfer->rx_buf = ctlr->dummy_rx;
1109 return __spi_map_msg(ctlr, msg);
1112 static int spi_transfer_wait(struct spi_controller *ctlr,
1116 struct spi_statistics *statm = &ctlr->statistics;
1121 if (spi_controller_is_slave(ctlr)) {
1122 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1137 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1252 static int spi_transfer_one_message(struct spi_controller *ctlr,
1258 struct spi_statistics *statm = &ctlr->statistics;
1269 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1270 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1272 if (!ctlr->ptp_sts_supported) {
1278 reinit_completion(&ctlr->xfer_completion);
1281 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1283 if (ctlr->cur_msg_mapped &&
1285 __spi_unmap_msg(ctlr, msg);
1286 ctlr->fallback = true;
1301 ret = spi_transfer_wait(ctlr, msg, xfer);
1312 if (!ctlr->ptp_sts_supported) {
1345 if (msg->status && ctlr->handle_err)
1346 ctlr->handle_err(ctlr, msg);
1348 spi_finalize_current_message(ctlr);
1355 * @ctlr: the controller reporting completion
1361 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1363 complete(&ctlr->xfer_completion);
1367 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1369 if (ctlr->auto_runtime_pm) {
1370 pm_runtime_mark_last_busy(ctlr->dev.parent);
1371 pm_runtime_put_autosuspend(ctlr->dev.parent);
1377 * @ctlr: controller to process queue for
1388 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1397 spin_lock_irqsave(&ctlr->queue_lock, flags);
1400 if (ctlr->cur_msg) {
1401 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1406 if (ctlr->idling) {
1407 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1408 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1413 if (list_empty(&ctlr->queue) || !ctlr->running) {
1414 if (!ctlr->busy) {
1415 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1421 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1422 !ctlr->unprepare_transfer_hardware) {
1423 spi_idle_runtime_pm(ctlr);
1424 ctlr->busy = false;
1425 trace_spi_controller_idle(ctlr);
1427 kthread_queue_work(ctlr->kworker,
1428 &ctlr->pump_messages);
1430 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1434 ctlr->busy = false;
1435 ctlr->idling = true;
1436 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1438 kfree(ctlr->dummy_rx);
1439 ctlr->dummy_rx = NULL;
1440 kfree(ctlr->dummy_tx);
1441 ctlr->dummy_tx = NULL;
1442 if (ctlr->unprepare_transfer_hardware &&
1443 ctlr->unprepare_transfer_hardware(ctlr))
1444 dev_err(&ctlr->dev,
1446 spi_idle_runtime_pm(ctlr);
1447 trace_spi_controller_idle(ctlr);
1449 spin_lock_irqsave(&ctlr->queue_lock, flags);
1450 ctlr->idling = false;
1451 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1456 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1457 ctlr->cur_msg = msg;
1460 if (ctlr->busy)
1463 ctlr->busy = true;
1464 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1466 mutex_lock(&ctlr->io_mutex);
1468 if (!was_busy && ctlr->auto_runtime_pm) {
1469 ret = pm_runtime_get_sync(ctlr->dev.parent);
1471 pm_runtime_put_noidle(ctlr->dev.parent);
1472 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1474 mutex_unlock(&ctlr->io_mutex);
1480 trace_spi_controller_busy(ctlr);
1482 if (!was_busy && ctlr->prepare_transfer_hardware) {
1483 ret = ctlr->prepare_transfer_hardware(ctlr);
1485 dev_err(&ctlr->dev,
1489 if (ctlr->auto_runtime_pm)
1490 pm_runtime_put(ctlr->dev.parent);
1493 spi_finalize_current_message(ctlr);
1495 mutex_unlock(&ctlr->io_mutex);
1502 if (ctlr->prepare_message) {
1503 ret = ctlr->prepare_message(ctlr, msg);
1505 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1508 spi_finalize_current_message(ctlr);
1511 ctlr->cur_msg_prepared = true;
1514 ret = spi_map_msg(ctlr, msg);
1517 spi_finalize_current_message(ctlr);
1521 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1528 ret = ctlr->transfer_one_message(ctlr, msg);
1530 dev_err(&ctlr->dev,
1536 mutex_unlock(&ctlr->io_mutex);
1549 struct spi_controller *ctlr =
1552 __spi_pump_messages(ctlr, true);
1566 * @ctlr: Pointer to the spi_controller structure of the driver
1576 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1593 local_irq_save(ctlr->irq_flags);
1608 * @ctlr: Pointer to the spi_controller structure of the driver
1613 void spi_take_timestamp_post(struct spi_controller *ctlr,
1629 local_irq_restore(ctlr->irq_flags);
1642 * @ctlr: controller to boost priority of
1655 static void spi_set_thread_rt(struct spi_controller *ctlr)
1657 dev_info(&ctlr->dev,
1659 sched_set_fifo(ctlr->kworker->task);
1662 static int spi_init_queue(struct spi_controller *ctlr)
1664 ctlr->running = false;
1665 ctlr->busy = false;
1667 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1668 if (IS_ERR(ctlr->kworker)) {
1669 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1670 return PTR_ERR(ctlr->kworker);
1673 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1682 if (ctlr->rt)
1683 spi_set_thread_rt(ctlr);
1691 * @ctlr: the controller to check for queued messages
1698 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1704 spin_lock_irqsave(&ctlr->queue_lock, flags);
1705 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1707 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1715 * @ctlr: the controller to return the message to
1720 void spi_finalize_current_message(struct spi_controller *ctlr)
1727 spin_lock_irqsave(&ctlr->queue_lock, flags);
1728 mesg = ctlr->cur_msg;
1729 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1731 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1738 if (unlikely(ctlr->ptp_sts_supported))
1742 spi_unmap_msg(ctlr, mesg);
1749 spi_res_release(ctlr, mesg);
1751 if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1752 ret = ctlr->unprepare_message(ctlr, mesg);
1754 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1759 spin_lock_irqsave(&ctlr->queue_lock, flags);
1760 ctlr->cur_msg = NULL;
1761 ctlr->cur_msg_prepared = false;
1762 ctlr->fallback = false;
1763 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1764 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1774 static int spi_start_queue(struct spi_controller *ctlr)
1778 spin_lock_irqsave(&ctlr->queue_lock, flags);
1780 if (ctlr->running || ctlr->busy) {
1781 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1785 ctlr->running = true;
1786 ctlr->cur_msg = NULL;
1787 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1789 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1794 static int spi_stop_queue(struct spi_controller *ctlr)
1800 spin_lock_irqsave(&ctlr->queue_lock, flags);
1804 * A wait_queue on the ctlr->busy could be used, but then the common
1808 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1809 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1811 spin_lock_irqsave(&ctlr->queue_lock, flags);
1814 if (!list_empty(&ctlr->queue) || ctlr->busy)
1817 ctlr->running = false;
1819 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1822 dev_warn(&ctlr->dev, "could not stop message queue\n");
1828 static int spi_destroy_queue(struct spi_controller *ctlr)
1832 ret = spi_stop_queue(ctlr);
1841 dev_err(&ctlr->dev, "problem destroying queue\n");
1845 kthread_destroy_worker(ctlr->kworker);
1854 struct spi_controller *ctlr = spi->controller;
1857 spin_lock_irqsave(&ctlr->queue_lock, flags);
1859 if (!ctlr->running) {
1860 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1866 list_add_tail(&msg->queue, &ctlr->queue);
1867 if (!ctlr->busy && need_pump)
1868 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1870 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1886 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1890 ctlr->transfer = spi_queued_transfer;
1891 if (!ctlr->transfer_one_message)
1892 ctlr->transfer_one_message = spi_transfer_one_message;
1895 ret = spi_init_queue(ctlr);
1897 dev_err(&ctlr->dev, "problem initializing queue\n");
1900 ctlr->queued = true;
1901 ret = spi_start_queue(ctlr);
1903 dev_err(&ctlr->dev, "problem starting queue\n");
1910 spi_destroy_queue(ctlr);
1918 * @ctlr: controller to process queue for
1925 void spi_flush_queue(struct spi_controller *ctlr)
1927 if (ctlr->transfer == spi_queued_transfer)
1928 __spi_pump_messages(ctlr, false);
1934 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1967 dev_warn(&ctlr->dev,
1988 dev_warn(&ctlr->dev,
1995 if (spi_controller_is_slave(ctlr)) {
1997 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2007 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2021 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2027 spi = spi_alloc_device(ctlr);
2029 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2038 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2042 rc = of_spi_parse_dt(ctlr, spi, nc);
2054 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2069 * @ctlr: Pointer to spi_controller device
2074 static void of_register_spi_devices(struct spi_controller *ctlr)
2079 if (!ctlr->dev.of_node)
2082 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2085 spi = of_register_spi_device(ctlr, nc);
2087 dev_warn(&ctlr->dev,
2094 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2099 struct spi_controller *ctlr;
2139 struct spi_controller *ctlr = lookup->ctlr;
2154 ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2164 if (ctlr->fw_translate_cs) {
2165 int cs = ctlr->fw_translate_cs(ctlr,
2195 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2208 lookup.ctlr = ctlr;
2222 ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
2230 spi = spi_alloc_device(ctlr);
2232 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
2256 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2267 struct spi_controller *ctlr = data;
2273 return acpi_register_spi_device(ctlr, adev);
2278 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2283 handle = ACPI_HANDLE(ctlr->dev.parent);
2289 acpi_spi_add_device, NULL, ctlr, NULL);
2291 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2294 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2299 struct spi_controller *ctlr;
2301 ctlr = container_of(dev, struct spi_controller, dev);
2302 kfree(ctlr);
2320 struct spi_controller *ctlr = spi->controller;
2322 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2323 return ctlr->slave_abort(ctlr);
2337 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2341 child = device_find_child(&ctlr->dev, NULL, match_true);
2349 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2360 child = device_find_child(&ctlr->dev, NULL, match_true);
2369 spi = spi_alloc_device(ctlr);
2440 struct spi_controller *ctlr;
2441 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2446 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2447 if (!ctlr)
2450 device_initialize(&ctlr->dev);
2451 ctlr->bus_num = -1;
2452 ctlr->num_chipselect = 1;
2453 ctlr->slave = slave;
2455 ctlr->dev.class = &spi_slave_class;
2457 ctlr->dev.class = &spi_master_class;
2458 ctlr->dev.parent = dev;
2459 pm_suspend_ignore_children(&ctlr->dev, true);
2460 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2462 return ctlr;
2466 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2468 spi_controller_put(*(struct spi_controller **)ctlr);
2490 struct spi_controller **ptr, *ctlr;
2497 ctlr = __spi_alloc_controller(dev, size, slave);
2498 if (ctlr) {
2499 ctlr->devm_allocated = true;
2500 *ptr = ctlr;
2506 return ctlr;
2511 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2514 struct device_node *np = ctlr->dev.of_node;
2520 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2528 cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
2530 ctlr->cs_gpios = cs;
2532 if (!ctlr->cs_gpios)
2535 for (i = 0; i < ctlr->num_chipselect; i++)
2544 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
2552 * @ctlr: The SPI master to grab GPIO descriptors for
2554 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2558 struct device *dev = &ctlr->dev;
2563 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2571 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2575 ctlr->cs_gpiods = cs;
2606 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
2613 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
2615 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
2616 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
2624 static int spi_controller_check_ops(struct spi_controller *ctlr)
2633 if (ctlr->mem_ops) {
2634 if (!ctlr->mem_ops->exec_op)
2636 } else if (!ctlr->transfer && !ctlr->transfer_one &&
2637 !ctlr->transfer_one_message) {
2646 * @ctlr: initialized master, originally from spi_alloc_master() or
2667 int spi_register_controller(struct spi_controller *ctlr)
2669 struct device *dev = ctlr->dev.parent;
2681 status = spi_controller_check_ops(ctlr);
2685 if (ctlr->bus_num >= 0) {
2688 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2689 ctlr->bus_num + 1, GFP_KERNEL);
2693 ctlr->bus_num = id;
2694 } else if (ctlr->dev.of_node) {
2696 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2698 ctlr->bus_num = id;
2700 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2701 ctlr->bus_num + 1, GFP_KERNEL);
2707 if (ctlr->bus_num < 0) {
2715 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2720 ctlr->bus_num = id;
2722 INIT_LIST_HEAD(&ctlr->queue);
2723 spin_lock_init(&ctlr->queue_lock);
2724 spin_lock_init(&ctlr->bus_lock_spinlock);
2725 mutex_init(&ctlr->bus_lock_mutex);
2726 mutex_init(&ctlr->io_mutex);
2727 mutex_init(&ctlr->add_lock);
2728 ctlr->bus_lock_flag = 0;
2729 init_completion(&ctlr->xfer_completion);
2730 if (!ctlr->max_dma_len)
2731 ctlr->max_dma_len = INT_MAX;
2736 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2738 if (!spi_controller_is_slave(ctlr)) {
2739 if (ctlr->use_gpio_descriptors) {
2740 status = spi_get_gpio_descs(ctlr);
2747 ctlr->mode_bits |= SPI_CS_HIGH;
2750 status = of_spi_get_gpio_numbers(ctlr);
2760 if (!ctlr->num_chipselect) {
2765 status = device_add(&ctlr->dev);
2769 spi_controller_is_slave(ctlr) ? "slave" : "master",
2770 dev_name(&ctlr->dev));
2777 if (ctlr->transfer) {
2779 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
2780 status = spi_controller_initialize_queue(ctlr);
2782 device_del(&ctlr->dev);
2787 spin_lock_init(&ctlr->statistics.lock);
2790 list_add_tail(&ctlr->list, &spi_controller_list);
2792 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2796 of_register_spi_devices(ctlr);
2797 acpi_register_spi_devices(ctlr);
2802 idr_remove(&spi_master_idr, ctlr->bus_num);
2817 * @ctlr: initialized controller, originally from spi_alloc_master() or
2827 struct spi_controller *ctlr)
2836 ret = spi_register_controller(ctlr);
2838 *ptr = ctlr;
2856 * @ctlr: the controller being unregistered
2866 void spi_unregister_controller(struct spi_controller *ctlr)
2869 int id = ctlr->bus_num;
2873 mutex_lock(&ctlr->add_lock);
2875 device_for_each_child(&ctlr->dev, NULL, __unregister);
2881 if (ctlr->queued) {
2882 if (spi_destroy_queue(ctlr))
2883 dev_err(&ctlr->dev, "queue remove failed\n");
2886 list_del(&ctlr->list);
2889 device_del(&ctlr->dev);
2894 if (!ctlr->devm_allocated)
2895 put_device(&ctlr->dev);
2899 if (found == ctlr)
2904 mutex_unlock(&ctlr->add_lock);
2908 int spi_controller_suspend(struct spi_controller *ctlr)
2913 if (!ctlr->queued)
2916 ret = spi_stop_queue(ctlr);
2918 dev_err(&ctlr->dev, "queue stop failed\n");
2924 int spi_controller_resume(struct spi_controller *ctlr)
2928 if (!ctlr->queued)
2931 ret = spi_start_queue(ctlr);
2933 dev_err(&ctlr->dev, "queue restart failed\n");
2941 struct spi_controller *ctlr;
2944 ctlr = container_of(dev, struct spi_controller, dev);
2945 return ctlr->bus_num == *bus_num;
2963 struct spi_controller *ctlr = NULL;
2968 ctlr = container_of(dev, struct spi_controller, dev);
2970 return ctlr;
3042 * @ctlr: the @spi_controller
3045 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
3051 res->release(ctlr, message, res->data);
3064 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3073 rxfer->release(ctlr, msg, res);
3193 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3250 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3262 * @ctlr: the @spi_controller for this transfer
3269 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3285 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3302 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3305 if (ctlr->bits_per_word_mask) {
3309 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3513 struct spi_controller *ctlr = spi->controller;
3526 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3537 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3555 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3557 unsigned flags = ctlr->flags;
3587 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3588 xfer->speed_hz = ctlr->max_speed_hz;
3590 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3608 if (xfer->speed_hz && ctlr->min_speed_hz &&
3609 xfer->speed_hz < ctlr->min_speed_hz)
3657 struct spi_controller *ctlr = spi->controller;
3664 if (!ctlr->transfer)
3669 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3674 if (!ctlr->ptp_sts_supported) {
3681 return ctlr->transfer(spi, message);
3717 struct spi_controller *ctlr = spi->controller;
3725 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3727 if (ctlr->bus_lock_flag)
3732 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3771 struct spi_controller *ctlr = spi->controller;
3779 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3783 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3806 struct spi_controller *ctlr = spi->controller;
3817 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3825 if (ctlr->transfer == spi_queued_transfer) {
3826 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3832 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3841 if (ctlr->transfer == spi_queued_transfer) {
3842 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3846 __spi_pump_messages(ctlr, false);
3913 * @ctlr: SPI bus master that should be locked for exclusive bus access
3926 int spi_bus_lock(struct spi_controller *ctlr)
3930 mutex_lock(&ctlr->bus_lock_mutex);
3932 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3933 ctlr->bus_lock_flag = 1;
3934 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3944 * @ctlr: SPI bus master that was locked for exclusive bus access
3955 int spi_bus_unlock(struct spi_controller *ctlr)
3957 ctlr->bus_lock_flag = 0;
3959 mutex_unlock(&ctlr->bus_lock_mutex);
4077 struct spi_controller *ctlr;
4082 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4083 if (ctlr == NULL)
4087 put_device(&ctlr->dev);
4091 spi = of_register_spi_device(ctlr, rd->dn);
4092 put_device(&ctlr->dev);
4163 struct spi_controller *ctlr;
4168 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
4169 if (!ctlr)
4172 acpi_register_spi_device(ctlr, adev);
4173 put_device(&ctlr->dev);
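
A pattern that recurs throughout this listing (e.g. at file lines 119, 2301,
2337, and 2944) is recovering the enclosing struct spi_controller from its
embedded struct device via container_of(). Below is a minimal sketch of that
pattern; the to_spi_ctlr() helper name is an illustrative assumption (spi.c
open-codes the container_of() call at each site), and the match callback
mirrors the bus-number comparison shown at file lines 2941-2945.

#include <linux/device.h>
#include <linux/spi/spi.h>

/*
 * Hypothetical helper: map the embedded struct device back to its
 * containing spi_controller, as spi.c does inline at several sites.
 */
static inline struct spi_controller *to_spi_ctlr(struct device *dev)
{
	return container_of(dev, struct spi_controller, dev);
}

/*
 * Usage sketch: a match callback comparing controller bus numbers,
 * modelled on the code around file line 2944.
 */
static int match_bus_num(struct device *dev, const void *data)
{
	const u16 *bus_num = data;

	return to_spi_ctlr(dev)->bus_num == *bus_num;
}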