xref: /kernel/linux/linux-5.10/drivers/base/dd.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * drivers/base/dd.c - The core device/driver interactions.
4 *
5 * This file contains the (sometimes tricky) code that controls the
6 * interactions between devices and drivers, which primarily includes
7 * driver binding and unbinding.
8 *
9 * All of this code used to exist in drivers/base/bus.c, but was
10 * relocated to here in the name of compartmentalization (since it wasn't
11 * strictly code just for the 'struct bus_type').
12 *
13 * Copyright (c) 2002-5 Patrick Mochel
14 * Copyright (c) 2002-3 Open Source Development Labs
15 * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
16 * Copyright (c) 2007-2009 Novell Inc.
17 */
18
19#include <linux/debugfs.h>
20#include <linux/device.h>
21#include <linux/delay.h>
22#include <linux/dma-map-ops.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/kthread.h>
26#include <linux/wait.h>
27#include <linux/async.h>
28#include <linux/pm_runtime.h>
29#include <linux/pinctrl/devinfo.h>
30#include <linux/slab.h>
31
32#include "base.h"
33#include "power/power.h"
34
35/*
36 * Deferred Probe infrastructure.
37 *
38 * Sometimes driver probe order matters, but the kernel doesn't always have
39 * dependency information which means some drivers will get probed before a
40 * resource it depends on is available.  For example, an SDHCI driver may
41 * first need a GPIO line from an i2c GPIO controller before it can be
42 * initialized.  If a required resource is not available yet, a driver can
43 * request probing to be deferred by returning -EPROBE_DEFER from its probe hook
44 *
45 * Deferred probe maintains two lists of devices, a pending list and an active
46 * list.  A driver returning -EPROBE_DEFER causes the device to be added to the
47 * pending list.  A successful driver probe will trigger moving all devices
48 * from the pending to the active list so that the workqueue will eventually
49 * retry them.
50 *
51 * The deferred_probe_mutex must be held any time the deferred_probe_*_list
52 * of the (struct device*)->p->deferred_probe pointers are manipulated
53 */
/* Serializes all access to the deferred lists and to p->deferred_probe. */
static DEFINE_MUTEX(deferred_probe_mutex);
/* Devices whose probe returned -EPROBE_DEFER, waiting for a trigger. */
static LIST_HEAD(deferred_probe_pending_list);
/* Devices moved off the pending list, due to be retried by the workqueue. */
static LIST_HEAD(deferred_probe_active_list);
/* Bumped on every trigger; lets a probe detect a trigger that raced with it. */
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/* debugfs "devices_deferred" file, created by deferred_probe_initcall(). */
static struct dentry *deferred_devices;
/* Set once late_initcall processing has finished. */
static bool initcalls_done;

/* Save the async probe drivers' name from kernel cmdline */
#define ASYNC_DRV_NAMES_MAX_LEN	256
static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
64
65/*
66 * In some cases, like suspend to RAM or hibernation, it might be reasonable
67 * to prohibit probing of devices as it could be unsafe.
68 * Once defer_all_probes is true all drivers probes will be forcibly deferred.
69 */
70static bool defer_all_probes;
71
/*
 * deferred_probe_work_func() - Retry probing devices in the active list.
 */
static void deferred_probe_work_func(struct work_struct *work)
{
	struct device *dev;
	struct device_private *private;
	/*
	 * This block processes every device in the deferred 'active' list.
	 * Each device is removed from the active list and passed to
	 * bus_probe_device() to re-attempt the probe.  The loop continues
	 * until every device in the active list is removed and retried.
	 *
	 * Note: Once the device is removed from the list and the mutex is
	 * released, it is possible for the device to get freed by another
	 * thread and cause an illegal pointer dereference.  This code uses
	 * get/put_device() to ensure the device structure cannot disappear
	 * from under our feet.
	 */
	mutex_lock(&deferred_probe_mutex);
	while (!list_empty(&deferred_probe_active_list)) {
		private = list_first_entry(&deferred_probe_active_list,
					typeof(*dev->p), deferred_probe);
		dev = private->device;
		list_del_init(&private->deferred_probe);

		get_device(dev);

		/* Discard any stale deferral reason before the retry. */
		kfree(dev->p->deferred_probe_reason);
		dev->p->deferred_probe_reason = NULL;

		/*
		 * Drop the mutex while probing each device; the probe path may
		 * manipulate the deferred list
		 */
		mutex_unlock(&deferred_probe_mutex);

		/*
		 * Force the device to the end of the dpm_list since
		 * the PM code assumes that the order we add things to
		 * the list is a good order for suspend but deferred
		 * probe makes that very unsafe.
		 */
		device_pm_move_to_tail(dev);

		dev_dbg(dev, "Retrying from deferred list\n");
		bus_probe_device(dev);
		/* Re-acquire before touching the list and dropping our ref. */
		mutex_lock(&deferred_probe_mutex);

		put_device(dev);
	}
	mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
126
/**
 * driver_deferred_probe_add() - Queue a device on the deferred pending list
 * @dev: device whose probe should be retried later
 *
 * Adds @dev to the pending list unless it is already queued on one of the
 * deferred lists (list_empty() on the node is only true when it was
 * initialized or removed with list_del_init()).
 */
void driver_deferred_probe_add(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
	if (list_empty(&dev->p->deferred_probe)) {
		dev_dbg(dev, "Added to deferred list\n");
		list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
	}
	mutex_unlock(&deferred_probe_mutex);
}
136
/**
 * driver_deferred_probe_del() - Remove a device from the deferred lists
 * @dev: device to take off the deferred machinery
 *
 * Unlinks @dev from whichever deferred list it is on (if any) and frees the
 * recorded deferral reason string, if one was set.
 */
void driver_deferred_probe_del(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
	if (!list_empty(&dev->p->deferred_probe)) {
		dev_dbg(dev, "Removed from deferred list\n");
		list_del_init(&dev->p->deferred_probe);
		kfree(dev->p->deferred_probe_reason);
		dev->p->deferred_probe_reason = NULL;
	}
	mutex_unlock(&deferred_probe_mutex);
}
148
149static bool driver_deferred_probe_enable = false;
/**
 * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
 *
 * This functions moves all devices from the pending list to the active
 * list and schedules the deferred probe workqueue to process them.  It
 * should be called anytime a driver is successfully bound to a device.
 *
 * Note, there is a race condition in multi-threaded probe. In the case where
 * more than one device is probing at the same time, it is possible for one
 * probe to complete successfully while another is about to defer. If the second
 * depends on the first, then it will get put on the pending list after the
 * trigger event has already occurred and will be stuck there.
 *
 * The atomic 'deferred_trigger_count' is used to determine if a successful
 * trigger has occurred in the midst of probing a driver. If the trigger count
 * changes in the midst of a probe, then deferred processing should be triggered
 * again.
 */
static void driver_deferred_probe_trigger(void)
{
	/* No-op until deferred_probe_initcall() enables deferred probing. */
	if (!driver_deferred_probe_enable)
		return;

	/*
	 * A successful probe means that all the devices in the pending list
	 * should be triggered to be reprobed.  Move all the deferred devices
	 * into the active list so they can be retried by the workqueue
	 */
	mutex_lock(&deferred_probe_mutex);
	atomic_inc(&deferred_trigger_count);
	list_splice_tail_init(&deferred_probe_pending_list,
			      &deferred_probe_active_list);
	mutex_unlock(&deferred_probe_mutex);

	/*
	 * Kick the re-probe thread.  It may already be scheduled, but it is
	 * safe to kick it again.
	 */
	schedule_work(&deferred_probe_work);
}
190
/**
 * device_block_probing() - Block/defer device's probes
 *
 *	It will disable probing of devices and defer their probes instead.
 */
void device_block_probing(void)
{
	/* While this is set, really_probe() turns every probe into a deferral. */
	defer_all_probes = true;
	/* sync with probes to avoid races. */
	wait_for_device_probe();
}
202
/**
 * device_unblock_probing() - Unblock/enable device's probes
 *
 *	It will restore normal behavior and trigger re-probing of deferred
 * devices.
 */
void device_unblock_probing(void)
{
	defer_all_probes = false;
	/* Retry everything that was deferred while probing was blocked. */
	driver_deferred_probe_trigger();
}
214
/**
 * device_set_deferred_probe_reason() - Set defer probe reason message for device
 * @dev: the pointer to the struct device
 * @vaf: the pointer to va_format structure with message
 */
void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf)
{
	const char *drv = dev_driver_string(dev);

	mutex_lock(&deferred_probe_mutex);

	/* Replace any earlier reason; if kasprintf() fails, none is recorded. */
	kfree(dev->p->deferred_probe_reason);
	dev->p->deferred_probe_reason = kasprintf(GFP_KERNEL, "%s: %pV", drv, vaf);

	mutex_unlock(&deferred_probe_mutex);
}
231
/*
 * deferred_devs_show() - Show the devices in the deferred probe pending list.
 */
static int deferred_devs_show(struct seq_file *s, void *data)
{
	struct device_private *curr;

	mutex_lock(&deferred_probe_mutex);

	/*
	 * One entry per pending device: name, tab, then the recorded reason.
	 * With no reason recorded, the "?:" fallback emits just a newline.
	 * NOTE(review): a recorded reason is printed verbatim and nothing here
	 * guarantees it ends in '\n' — verify against how callers format it.
	 */
	list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
		seq_printf(s, "%s\t%s", dev_name(curr->device),
			   curr->device->p->deferred_probe_reason ?: "\n");

	mutex_unlock(&deferred_probe_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(deferred_devs);
250
/* Timeout (in seconds) after which deferring stops; 0 means no timeout. */
int driver_deferred_probe_timeout;
EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);

/*
 * Parse the "deferred_probe_timeout=" kernel command line option.
 * Unparseable values are silently ignored, leaving the default in place.
 */
static int __init deferred_probe_timeout_setup(char *str)
{
	int timeout;

	if (!kstrtoint(str, 10, &timeout))
		driver_deferred_probe_timeout = timeout;
	return 1;
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
263
264/**
265 * driver_deferred_probe_check_state() - Check deferred probe state
266 * @dev: device to check
267 *
268 * Return:
269 * -ENODEV if initcalls have completed and modules are disabled.
270 * -ETIMEDOUT if the deferred probe timeout was set and has expired
271 *  and modules are enabled.
272 * -EPROBE_DEFER in other cases.
273 *
274 * Drivers or subsystems can opt-in to calling this function instead of directly
275 * returning -EPROBE_DEFER.
276 */
277int driver_deferred_probe_check_state(struct device *dev)
278{
279	if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
280		dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
281		return -ENODEV;
282	}
283
284	if (!driver_deferred_probe_timeout && initcalls_done) {
285		dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
286		return -ETIMEDOUT;
287	}
288
289	return -EPROBE_DEFER;
290}
291
/*
 * Runs when the deferred probe timeout expires: clear the timeout (so
 * driver_deferred_probe_check_state() starts returning -ETIMEDOUT), run one
 * final re-probe pass, then log every device still left pending.
 */
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
	struct device_private *p;

	driver_deferred_probe_timeout = 0;
	driver_deferred_probe_trigger();
	flush_work(&deferred_probe_work);

	mutex_lock(&deferred_probe_mutex);
	list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
		dev_info(p->device, "deferred probe pending\n");
	mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
306
/**
 * deferred_probe_initcall() - Enable probing of deferred devices
 *
 * We don't want to get in the way when the bulk of drivers are getting probed.
 * Instead, this initcall makes sure that deferred probing is delayed until
 * late_initcall time.
 */
static int deferred_probe_initcall(void)
{
	/* Expose the pending list as the debugfs "devices_deferred" file. */
	deferred_devices = debugfs_create_file("devices_deferred", 0444, NULL,
					       NULL, &deferred_devs_fops);

	driver_deferred_probe_enable = true;
	driver_deferred_probe_trigger();
	/* Sort as many dependencies as possible before exiting initcalls */
	flush_work(&deferred_probe_work);
	initcalls_done = true;

	/*
	 * Trigger deferred probe again, this time we won't defer anything
	 * that is optional
	 */
	driver_deferred_probe_trigger();
	flush_work(&deferred_probe_work);

	/* Arm the one-shot timeout if requested on the command line. */
	if (driver_deferred_probe_timeout > 0) {
		schedule_delayed_work(&deferred_probe_timeout_work,
			driver_deferred_probe_timeout * HZ);
	}
	return 0;
}
late_initcall(deferred_probe_initcall);
339
/* Tear down the debugfs "devices_deferred" file on exit. */
static void __exit deferred_probe_exit(void)
{
	debugfs_remove_recursive(deferred_devices);
}
__exitcall(deferred_probe_exit);
345
346/**
347 * device_is_bound() - Check if device is bound to a driver
348 * @dev: device to check
349 *
350 * Returns true if passed device has already finished probing successfully
351 * against a driver.
352 *
353 * This function must be called with the device lock held.
354 */
355bool device_is_bound(struct device *dev)
356{
357	return dev->p && klist_node_attached(&dev->p->knode_driver);
358}
359
/*
 * driver_bound - finalize a successful probe.
 *
 * Links @dev onto its driver's device klist, completes device links, updates
 * PM callback bookkeeping, removes the device from the deferred lists while
 * triggering a re-probe pass for the remaining pending devices, and announces
 * the binding via the bus notifier chain and a KOBJ_BIND uevent.
 */
static void driver_bound(struct device *dev)
{
	/* Binding twice would corrupt the klist; warn and bail out. */
	if (device_is_bound(dev)) {
		pr_warn("%s: device %s already bound\n",
			__func__, kobject_name(&dev->kobj));
		return;
	}

	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
		 __func__, dev_name(dev));

	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
	device_links_driver_bound(dev);

	device_pm_check_callbacks(dev);

	/*
	 * Make sure the device is no longer in one of the deferred lists and
	 * kick off retrying all pending devices
	 */
	driver_deferred_probe_del(dev);
	driver_deferred_probe_trigger();

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BOUND_DRIVER, dev);

	kobject_uevent(&dev->kobj, KOBJ_BIND);
}
389
/* sysfs "coredump" attribute: any write invokes the driver's coredump hook. */
static ssize_t coredump_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	/* driver_sysfs_add() only creates this file when ->coredump is set. */
	device_lock(dev);
	dev->driver->coredump(dev);
	device_unlock(dev);

	return count;
}
static DEVICE_ATTR_WO(coredump);
400
/*
 * driver_sysfs_add - create the sysfs cross-links between driver and device.
 *
 * Fires BUS_NOTIFY_BIND_DRIVER, then creates the driver -> device link
 * (named after the device), the device -> driver "driver" link, and
 * optionally the "coredump" attribute.  Returns 0 on success.
 */
static int driver_sysfs_add(struct device *dev)
{
	int ret;

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BIND_DRIVER, dev);

	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
				kobject_name(&dev->kobj));
	if (ret)
		goto fail;

	ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
				"driver");
	if (ret)
		goto rm_dev;

	/*
	 * Done if coredump support is compiled out, the driver has no
	 * coredump hook, or the attribute was created (device_create_file()
	 * returns 0 on success).
	 */
	if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump ||
	    !device_create_file(dev, &dev_attr_coredump))
		return 0;

	/*
	 * NOTE(review): on device_create_file() failure we fall through and
	 * tear down both links, yet 'ret' is still 0 here, so the caller sees
	 * success with the links removed — confirm this is intended.
	 */
	sysfs_remove_link(&dev->kobj, "driver");

rm_dev:
	sysfs_remove_link(&dev->driver->p->kobj,
			  kobject_name(&dev->kobj));

fail:
	return ret;
}
432
433static void driver_sysfs_remove(struct device *dev)
434{
435	struct device_driver *drv = dev->driver;
436
437	if (drv) {
438		if (drv->coredump)
439			device_remove_file(dev, &dev_attr_coredump);
440		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
441		sysfs_remove_link(&dev->kobj, "driver");
442	}
443}
444
/**
 * device_bind_driver - bind a driver to one device.
 * @dev: device.
 *
 * Allow manual attachment of a driver to a device.
 * Caller must have already set @dev->driver.
 *
 * Note that this does not modify the bus reference count.
 * Please verify that is accounted for before calling this.
 * (It is ok to call with no other effort from a driver's probe() method.)
 *
 * This function must be called with the device lock held.
 */
int device_bind_driver(struct device *dev)
{
	int ret;

	ret = driver_sysfs_add(dev);
	if (!ret)
		driver_bound(dev);
	else if (dev->bus)
		/* Tell bus listeners the manual bind attempt failed. */
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
	return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);
471
/* Probes currently in flight; wait_for_device_probe() waits for it to hit 0. */
static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
474
/*
 * Queue @dev for re-probing and, if a trigger fired while this probe was
 * running (the trigger count moved on from @local_trigger_count), fire
 * another trigger so the device is not stranded on the pending list.
 */
static void driver_deferred_probe_add_trigger(struct device *dev,
					      int local_trigger_count)
{
	driver_deferred_probe_add(dev);
	/* Did a trigger occur while probing? Need to re-trigger if yes */
	if (local_trigger_count != atomic_read(&deferred_trigger_count))
		driver_deferred_probe_trigger();
}
483
484static ssize_t state_synced_show(struct device *dev,
485				 struct device_attribute *attr, char *buf)
486{
487	bool val;
488
489	device_lock(dev);
490	val = dev->state_synced;
491	device_unlock(dev);
492
493	return sysfs_emit(buf, "%u\n", val);
494}
495static DEVICE_ATTR_RO(state_synced);
496
/*
 * really_probe - perform the probe hand-shake for one driver/device pairing.
 *
 * Returns 1 if @dev was bound to @drv; 0 if the probe failed in a way that
 * should not stop other drivers from being tried (the error is logged and
 * swallowed); or a negative errno for "stop now" conditions (forced deferral,
 * a failed supplier check, or stale devres resources).
 */
static int really_probe(struct device *dev, struct device_driver *drv)
{
	int ret = -EPROBE_DEFER;
	int local_trigger_count = atomic_read(&deferred_trigger_count);
	/* Debug option: probe, tear everything down once, then probe again. */
	bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
			   !drv->suppress_bind_attrs;

	if (defer_all_probes) {
		/*
		 * Value of defer_all_probes can be set only by
		 * device_block_probing() which, in turn, will call
		 * wait_for_device_probe() right after that to avoid any races.
		 */
		dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
		driver_deferred_probe_add(dev);
		return ret;
	}

	ret = device_links_check_suppliers(dev);
	if (ret == -EPROBE_DEFER)
		driver_deferred_probe_add_trigger(dev, local_trigger_count);
	if (ret)
		return ret;

	atomic_inc(&probe_count);
	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
		 drv->bus->name, __func__, drv->name, dev_name(dev));
	/* A non-empty devres list here means a previous unbind leaked. */
	if (!list_empty(&dev->devres_head)) {
		dev_crit(dev, "Resources present before probing\n");
		ret = -EBUSY;
		goto done;
	}

re_probe:
	dev->driver = drv;

	/* If using pinctrl, bind pins now before probing */
	ret = pinctrl_bind_pins(dev);
	if (ret)
		goto pinctrl_bind_failed;

	if (dev->bus->dma_configure) {
		ret = dev->bus->dma_configure(dev);
		if (ret)
			goto probe_failed;
	}

	ret = driver_sysfs_add(dev);
	if (ret) {
		pr_err("%s: driver_sysfs_add(%s) failed\n",
		       __func__, dev_name(dev));
		goto probe_failed;
	}

	if (dev->pm_domain && dev->pm_domain->activate) {
		ret = dev->pm_domain->activate(dev);
		if (ret)
			goto probe_failed;
	}

	/* The bus' probe callback, when present, takes precedence. */
	if (dev->bus->probe) {
		ret = dev->bus->probe(dev);
		if (ret)
			goto probe_failed;
	} else if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto probe_failed;
	}

	ret = device_add_groups(dev, drv->dev_groups);
	if (ret) {
		dev_err(dev, "device_add_groups() failed\n");
		goto dev_groups_failed;
	}

	if (dev_has_sync_state(dev)) {
		ret = device_create_file(dev, &dev_attr_state_synced);
		if (ret) {
			dev_err(dev, "state_synced sysfs add failed\n");
			goto dev_sysfs_state_synced_failed;
		}
	}

	if (test_remove) {
		/* One-shot: unbind completely, then take the re_probe path. */
		test_remove = false;

		device_remove_file(dev, &dev_attr_state_synced);
		device_remove_groups(dev, drv->dev_groups);

		if (dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);

		devres_release_all(dev);
		arch_teardown_dma_ops(dev);
		kfree(dev->dma_range_map);
		dev->dma_range_map = NULL;
		driver_sysfs_remove(dev);
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);
		pm_runtime_reinit(dev);

		goto re_probe;
	}

	pinctrl_init_done(dev);

	if (dev->pm_domain && dev->pm_domain->sync)
		dev->pm_domain->sync(dev);

	driver_bound(dev);
	ret = 1;
	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);
	goto done;

/* Error unwinding: each label tears down what was set up above it. */
dev_sysfs_state_synced_failed:
	device_remove_groups(dev, drv->dev_groups);
dev_groups_failed:
	if (dev->bus->remove)
		dev->bus->remove(dev);
	else if (drv->remove)
		drv->remove(dev);
probe_failed:
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
	device_links_no_driver(dev);
	devres_release_all(dev);
	arch_teardown_dma_ops(dev);
	kfree(dev->dma_range_map);
	dev->dma_range_map = NULL;
	driver_sysfs_remove(dev);
	dev->driver = NULL;
	dev_set_drvdata(dev, NULL);
	if (dev->pm_domain && dev->pm_domain->dismiss)
		dev->pm_domain->dismiss(dev);
	pm_runtime_reinit(dev);
	dev_pm_set_driver_flags(dev, 0);

	switch (ret) {
	case -EPROBE_DEFER:
		/* Driver requested deferred probing */
		dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
		driver_deferred_probe_add_trigger(dev, local_trigger_count);
		break;
	case -ENODEV:
	case -ENXIO:
		pr_debug("%s: probe of %s rejects match %d\n",
			 drv->name, dev_name(dev), ret);
		break;
	default:
		/* driver matched but the probe failed */
		pr_warn("%s: probe of %s failed with error %d\n",
			drv->name, dev_name(dev), ret);
	}
	/*
	 * Ignore errors returned by ->probe so that the next driver can try
	 * its luck.
	 */
	ret = 0;
done:
	atomic_dec(&probe_count);
	wake_up_all(&probe_waitqueue);
	return ret;
}
668
669/*
670 * For initcall_debug, show the driver probe time.
671 */
672static int really_probe_debug(struct device *dev, struct device_driver *drv)
673{
674	ktime_t calltime, rettime;
675	int ret;
676
677	calltime = ktime_get();
678	ret = really_probe(dev, drv);
679	rettime = ktime_get();
680	/*
681	 * Don't change this to pr_debug() because that requires
682	 * CONFIG_DYNAMIC_DEBUG and we want a simple 'initcall_debug' on the
683	 * kernel commandline to print this all the time at the debug level.
684	 */
685	printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
686		 dev_name(dev), ret, ktime_us_delta(rettime, calltime));
687	return ret;
688}
689
690/**
691 * driver_probe_done
692 * Determine if the probe sequence is finished or not.
693 *
694 * Should somehow figure out how to use a semaphore, not an atomic variable...
695 */
696int driver_probe_done(void)
697{
698	int local_probe_count = atomic_read(&probe_count);
699
700	pr_debug("%s: probe_count = %d\n", __func__, local_probe_count);
701	if (local_probe_count)
702		return -EBUSY;
703	return 0;
704}
705
/**
 * wait_for_device_probe
 * Wait for device probing to be completed.
 */
void wait_for_device_probe(void)
{
	/* wait for the deferred probe workqueue to finish */
	flush_work(&deferred_probe_work);

	/* wait for the known devices to complete their probing */
	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
	/* and for asynchronously scheduled probes to drain */
	async_synchronize_full();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
720
/**
 * driver_probe_device - attempt to bind device & driver together
 * @drv: driver to bind a device to
 * @dev: device to try to bind to the driver
 *
 * This function returns -ENODEV if the device is not registered,
 * 1 if the device is bound successfully and 0 otherwise.
 *
 * This function must be called with @dev lock held.  When called for a
 * USB interface, @dev->parent lock must be held as well.
 *
 * If the device has a parent, runtime-resume the parent before driver probing.
 */
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
	int ret = 0;

	if (!device_is_registered(dev))
		return -ENODEV;

	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);

	/* Keep suppliers (and the parent, if any) resumed while we probe. */
	pm_runtime_get_suppliers(dev);
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	/* Flush any pending runtime-PM transitions before probing. */
	pm_runtime_barrier(dev);
	if (initcall_debug)
		ret = really_probe_debug(dev, drv);
	else
		ret = really_probe(dev, drv);
	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	pm_runtime_put_suppliers(dev);
	return ret;
}
761
/* Was this driver named in the "driver_async_probe=" command line list? */
static inline bool cmdline_requested_async_probing(const char *drv_name)
{
	return parse_option_str(async_probe_drv_names, drv_name);
}
766
/* The option format is "driver_async_probe=drv_name1,drv_name2,..." */
static int __init save_async_options(char *buf)
{
	/* Overlong lists are truncated by the strlcpy() below; warn but keep going. */
	if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
		pr_warn("Too long list of driver names for 'driver_async_probe'!\n");

	strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
	return 1;
}
__setup("driver_async_probe=", save_async_options);
777
778bool driver_allows_async_probing(struct device_driver *drv)
779{
780	switch (drv->probe_type) {
781	case PROBE_PREFER_ASYNCHRONOUS:
782		return true;
783
784	case PROBE_FORCE_SYNCHRONOUS:
785		return false;
786
787	default:
788		if (cmdline_requested_async_probing(drv->name))
789			return true;
790
791		if (module_requested_async_probing(drv->owner))
792			return true;
793
794		return false;
795	}
796}
797
/* State shared between __device_attach() and its per-driver callback. */
struct device_attach_data {
	struct device *dev;

	/*
	 * Indicates whether we are considering asynchronous probing or
	 * not. Only initial binding after device or driver registration
	 * (including deferral processing) may be done asynchronously, the
	 * rest is always synchronous, as we expect it is being done by
	 * request from userspace.
	 */
	bool check_async;

	/*
	 * Indicates if we are binding synchronous or asynchronous drivers.
	 * When asynchronous probing is enabled we'll execute 2 passes
	 * over drivers: first pass doing synchronous probing and second
	 * doing asynchronous probing (if synchronous did not succeed -
	 * most likely because there was no driver requiring synchronous
	 * probing - and we found asynchronous driver during first pass).
	 * The 2 passes are done because we can't shoot asynchronous
	 * probe for given device and driver from bus_for_each_drv() since
	 * driver pointer is not guaranteed to stay valid once
	 * bus_for_each_drv() iterates to the next driver on the bus.
	 */
	bool want_async;

	/*
	 * We'll set have_async to 'true' if, while scanning for matching
	 * driver, we'll encounter one that requests asynchronous probing.
	 */
	bool have_async;
};
830
/*
 * __device_attach_driver - bus_for_each_drv() callback for __device_attach().
 *
 * Returns 0 to keep iterating (no match, or the driver belongs to the other
 * sync/async pass), a negative error to stop the walk, or the result of
 * driver_probe_device() for a positive match.
 */
static int __device_attach_driver(struct device_driver *drv, void *_data)
{
	struct device_attach_data *data = _data;
	struct device *dev = data->dev;
	bool async_allowed;
	int ret;

	ret = driver_match_device(drv, dev);
	if (ret == 0) {
		/* no match */
		return 0;
	} else if (ret == -EPROBE_DEFER) {
		dev_dbg(dev, "Device match requests probe deferral\n");
		driver_deferred_probe_add(dev);
		/*
		 * Device can't match with a driver right now, so don't attempt
		 * to match or bind with other drivers on the bus.
		 */
		return ret;
	} else if (ret < 0) {
		dev_dbg(dev, "Bus failed to match device: %d\n", ret);
		return ret;
	} /* ret > 0 means positive match */

	async_allowed = driver_allows_async_probing(drv);

	if (async_allowed)
		data->have_async = true;

	/* Skip drivers that belong to the other (sync vs async) pass. */
	if (data->check_async && async_allowed != data->want_async)
		return 0;

	return driver_probe_device(drv, dev);
}
865
/*
 * Async pass of __device_attach(): runs the driver walk with want_async set.
 * The final put_device() pairs with the get_device() taken by
 * __device_attach() when it scheduled this helper.
 */
static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
{
	struct device *dev = _dev;
	struct device_attach_data data = {
		.dev		= dev,
		.check_async	= true,
		.want_async	= true,
	};

	device_lock(dev);

	/*
	 * Check if device has already been removed or claimed. This may
	 * happen with driver loading, device discovery/registration,
	 * and deferred probe processing happens all at once with
	 * multiple threads.
	 */
	if (dev->p->dead || dev->driver)
		goto out_unlock;

	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
	dev_dbg(dev, "async probe completed\n");

	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);
out_unlock:
	device_unlock(dev);

	put_device(dev);
}
901
/*
 * __device_attach - try to bind @dev to some driver on its bus.
 * @allow_async: when true, drivers that prefer asynchronous probing are
 * handled by scheduling __device_attach_async_helper() rather than inline.
 *
 * Returns 1 if the device is (or already was) bound, 0 if no driver matched,
 * or a negative error propagated from the driver walk.
 */
static int __device_attach(struct device *dev, bool allow_async)
{
	int ret = 0;
	bool async = false;

	device_lock(dev);
	if (dev->p->dead) {
		goto out_unlock;
	} else if (dev->driver) {
		/* A driver was pre-set; just complete or redo the binding. */
		if (device_is_bound(dev)) {
			ret = 1;
			goto out_unlock;
		}
		ret = device_bind_driver(dev);
		if (ret == 0)
			ret = 1;
		else {
			dev->driver = NULL;
			ret = 0;
		}
	} else {
		struct device_attach_data data = {
			.dev = dev,
			.check_async = allow_async,
			.want_async = false,
		};

		if (dev->parent)
			pm_runtime_get_sync(dev->parent);

		ret = bus_for_each_drv(dev->bus, NULL, &data,
					__device_attach_driver);
		if (!ret && allow_async && data.have_async) {
			/*
			 * If we could not find appropriate driver
			 * synchronously and we are allowed to do
			 * async probes and there are drivers that
			 * want to probe asynchronously, we'll
			 * try them.
			 */
			dev_dbg(dev, "scheduling asynchronous probe\n");
			/* Reference is dropped by the async helper. */
			get_device(dev);
			async = true;
		} else {
			pm_request_idle(dev);
		}

		if (dev->parent)
			pm_runtime_put(dev->parent);
	}
out_unlock:
	device_unlock(dev);
	/* Schedule outside the device lock. */
	if (async)
		async_schedule_dev(__device_attach_async_helper, dev);
	return ret;
}
958
/**
 * device_attach - try to attach device to a driver.
 * @dev: device.
 *
 * Walk the list of drivers that the bus has and call
 * driver_probe_device() for each pair. If a compatible
 * pair is found, break out and return.
 *
 * Returns 1 if the device was bound to a driver;
 * 0 if no matching driver was found;
 * -ENODEV if the device is not registered.
 *
 * When called for a USB interface, @dev->parent lock must be held.
 */
int device_attach(struct device *dev)
{
	/* Userspace-initiated attach: always probe synchronously. */
	return __device_attach(dev, false);
}
EXPORT_SYMBOL_GPL(device_attach);
978
/* First attach after device registration; asynchronous probing is allowed. */
void device_initial_probe(struct device *dev)
{
	__device_attach(dev, true);
}
983
/*
 * __device_driver_lock - acquire locks needed to manipulate dev->drv
 * @dev: Device we will update driver info for
 * @parent: Parent device. Needed if the bus requires parent lock
 *
 * This function will take the required locks for manipulating dev->drv.
 * Normally this will just be the @dev lock, but when called for a USB
 * interface, @parent lock will be held as well.
 */
static void __device_driver_lock(struct device *dev, struct device *parent)
{
	/* Parent first, then the device itself, to keep lock order consistent. */
	if (parent && dev->bus->need_parent_lock)
		device_lock(parent);
	device_lock(dev);
}
999
/*
 * __device_driver_unlock - release locks needed to manipulate dev->drv
 * @dev: Device we will update driver info for
 * @parent: Parent device. Needed if the bus requires parent lock
 *
 * This function will release the required locks for manipulating dev->drv.
 * Normally this will just be the @dev lock, but when called for a
 * USB interface, @parent lock will be released as well.
 */
static void __device_driver_unlock(struct device *dev, struct device *parent)
{
	/* Reverse order of __device_driver_lock(). */
	device_unlock(dev);
	if (parent && dev->bus->need_parent_lock)
		device_unlock(parent);
}
1015
/**
 * device_driver_attach - attach a specific driver to a specific device
 * @drv: Driver to attach
 * @dev: Device to attach it to
 *
 * Manually attach driver to a device. Will acquire both @dev lock and
 * @dev->parent lock if needed.
 *
 * Returns the driver_probe_device() result, or 0 when the probe is skipped
 * because the device is dead or already has a driver.
 */
int device_driver_attach(struct device_driver *drv, struct device *dev)
{
	int ret = 0;

	__device_driver_lock(dev, dev->parent);

	/*
	 * If device has been removed or someone has already successfully
	 * bound a driver before us just skip the driver probe call.
	 */
	if (!dev->p->dead && !dev->driver)
		ret = driver_probe_device(drv, dev);

	__device_driver_unlock(dev, dev->parent);

	return ret;
}
1041
/*
 * Asynchronous probe for a driver<->device match found by __driver_attach().
 * The driver is read back from dev->p->async_driver — presumably stashed
 * there before this helper was scheduled (scheduling code is elsewhere;
 * confirm against __driver_attach()).
 */
static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
{
	struct device *dev = _dev;
	struct device_driver *drv;
	int ret = 0;

	__device_driver_lock(dev, dev->parent);

	drv = dev->p->async_driver;

	/*
	 * If device has been removed or someone has already successfully
	 * bound a driver before us just skip the driver probe call.
	 */
	if (!dev->p->dead && !dev->driver)
		ret = driver_probe_device(drv, dev);

	__device_driver_unlock(dev, dev->parent);

	dev_dbg(dev, "driver %s async attach completed: %d\n", drv->name, ret);

	/* Drops the reference taken when the async attach was scheduled. */
	put_device(dev);
}
1065
/*
 * bus_for_each_dev() callback: try to bind @data (a struct device_driver)
 * to @dev.  Always returns 0 so the bus walk visits every device, even
 * when this device doesn't match or fails to probe.
 */
static int __driver_attach(struct device *dev, void *data)
{
	struct device_driver *drv = data;
	bool async = false;
	int ret;

	/*
	 * Lock device and try to bind to it. We drop the error
	 * here and always return 0, because we need to keep trying
	 * to bind to devices and some drivers will return an error
	 * simply if it didn't support the device.
	 *
	 * driver_probe_device() will spit a warning if there
	 * is an error.
	 */

	ret = driver_match_device(drv, dev);
	if (ret == 0) {
		/* no match */
		return 0;
	} else if (ret == -EPROBE_DEFER) {
		dev_dbg(dev, "Device match requests probe deferral\n");
		/* Queue the device so the match is retried later. */
		driver_deferred_probe_add(dev);
		/*
		 * Driver could not match with device, but may match with
		 * another device on the bus.
		 */
		return 0;
	} else if (ret < 0) {
		dev_dbg(dev, "Bus failed to match device: %d\n", ret);
		/*
		 * Driver could not match with device, but may match with
		 * another device on the bus.
		 */
		return 0;
	} /* ret > 0 means positive match */

	if (driver_allows_async_probing(drv)) {
		/*
		 * Instead of probing the device synchronously we will
		 * probe it asynchronously to allow for more parallelism.
		 *
		 * We only take the device lock here in order to guarantee
		 * that the dev->driver and async_driver fields are protected
		 */
		dev_dbg(dev, "probing driver %s asynchronously\n", drv->name);
		device_lock(dev);
		if (!dev->driver) {
			/*
			 * Reference is dropped by
			 * __driver_attach_async_helper() once the async
			 * probe has run.
			 */
			get_device(dev);
			dev->p->async_driver = drv;
			async = true;
		}
		device_unlock(dev);
		if (async)
			async_schedule_dev(__driver_attach_async_helper, dev);
		return 0;
	}

	/* Synchronous path: takes the device (and parent) locks itself. */
	device_driver_attach(drv, dev);

	return 0;
}
1128
1129/**
1130 * driver_attach - try to bind driver to devices.
1131 * @drv: driver.
1132 *
1133 * Walk the list of devices that the bus has on it and try to
1134 * match the driver with each one.  If driver_probe_device()
1135 * returns 0 and the @dev->driver is set, we've found a
1136 * compatible pair.
1137 */
1138int driver_attach(struct device_driver *drv)
1139{
1140	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
1141}
1142EXPORT_SYMBOL_GPL(driver_attach);
1143
1144/*
1145 * __device_release_driver() must be called with @dev lock held.
1146 * When called for a USB interface, @dev->parent lock must be held as well.
1147 */
static void __device_release_driver(struct device *dev, struct device *parent)
{
	struct device_driver *drv;

	drv = dev->driver;
	if (drv) {
		/* Keep the device powered while we tear the binding down. */
		pm_runtime_get_sync(dev);

		while (device_links_busy(dev)) {
			/*
			 * Drop our locks before unbinding consumers: their
			 * unbind paths take their own device locks and
			 * holding ours here could deadlock.
			 */
			__device_driver_unlock(dev, parent);

			device_links_unbind_consumers(dev);

			__device_driver_lock(dev, parent);
			/*
			 * A concurrent invocation of the same function might
			 * have released the driver successfully while this one
			 * was waiting, so check for that.
			 */
			if (dev->driver != drv) {
				/* Drop the usage count taken above. */
				pm_runtime_put(dev);
				return;
			}
		}

		driver_sysfs_remove(dev);

		/* Tell bus listeners the unbind is about to happen. */
		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBIND_DRIVER,
						     dev);

		pm_runtime_put_sync(dev);

		device_remove_file(dev, &dev_attr_state_synced);
		device_remove_groups(dev, drv->dev_groups);

		/* Bus-level remove takes precedence over the driver's own. */
		if (dev->bus && dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);

		/* Release managed resources only after ->remove() ran. */
		devres_release_all(dev);
		arch_teardown_dma_ops(dev);
		kfree(dev->dma_range_map);
		dev->dma_range_map = NULL;
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);
		pm_runtime_reinit(dev);
		dev_pm_set_driver_flags(dev, 0);

		device_links_driver_cleanup(dev);

		/* Take the device off the driver's list of bound devices. */
		klist_remove(&dev->p->knode_driver);
		device_pm_check_callbacks(dev);
		/* Notify listeners that the unbind has completed. */
		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBOUND_DRIVER,
						     dev);

		kobject_uevent(&dev->kobj, KOBJ_UNBIND);
	}
}
1213
1214void device_release_driver_internal(struct device *dev,
1215				    struct device_driver *drv,
1216				    struct device *parent)
1217{
1218	__device_driver_lock(dev, parent);
1219
1220	if (!drv || drv == dev->driver)
1221		__device_release_driver(dev, parent);
1222
1223	__device_driver_unlock(dev, parent);
1224}
1225
1226/**
1227 * device_release_driver - manually detach device from driver.
1228 * @dev: device.
1229 *
1230 * Manually detach device from driver.
1231 * When called for a USB interface, @dev->parent lock must be held.
1232 *
1233 * If this function is to be called with @dev->parent lock held, ensure that
1234 * the device's consumers are unbound in advance or that their locks can be
1235 * acquired under the @dev->parent lock.
1236 */
void device_release_driver(struct device *dev)
{
	/*
	 * No parent lock is taken here.  Beware: calling this recursively
	 * from a driver's own ->remove callback for the same device will
	 * deadlock on the device lock.
	 */
	device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
1247
1248/**
1249 * device_driver_detach - detach driver from a specific device
1250 * @dev: device to detach driver from
1251 *
1252 * Detach driver from device. Will acquire both @dev lock and @dev->parent
1253 * lock if needed.
1254 */
1255void device_driver_detach(struct device *dev)
1256{
1257	device_release_driver_internal(dev, NULL, dev->parent);
1258}
1259
1260/**
1261 * driver_detach - detach driver from all devices it controls.
1262 * @drv: driver.
1263 */
void driver_detach(struct device_driver *drv)
{
	struct device_private *dev_prv;
	struct device *dev;

	/* Make sure no async probe for this driver is still in flight. */
	if (driver_allows_async_probing(drv))
		async_synchronize_full();

	/*
	 * Repeatedly pop the last device off the driver's klist under the
	 * klist spinlock, then release it with the lock dropped (the
	 * release path sleeps and takes the device lock).
	 */
	for (;;) {
		spin_lock(&drv->p->klist_devices.k_lock);
		if (list_empty(&drv->p->klist_devices.k_list)) {
			spin_unlock(&drv->p->klist_devices.k_lock);
			break;
		}
		dev_prv = list_last_entry(&drv->p->klist_devices.k_list,
				     struct device_private,
				     knode_driver.n_node);
		dev = dev_prv->device;
		/* Hold a reference so the device survives the unbind. */
		get_device(dev);
		spin_unlock(&drv->p->klist_devices.k_lock);
		device_release_driver_internal(dev, drv, dev->parent);
		put_device(dev);
	}
}
1288