1// SPDX-License-Identifier: GPL-2.0
2/*
3 * drivers/base/power/domain.c - Common code related to device power domains.
4 *
5 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
6 */
7#define pr_fmt(fmt) "PM: " fmt
8
9#include <linux/delay.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/platform_device.h>
13#include <linux/pm_opp.h>
14#include <linux/pm_runtime.h>
15#include <linux/pm_domain.h>
16#include <linux/pm_qos.h>
17#include <linux/pm_clock.h>
18#include <linux/slab.h>
19#include <linux/err.h>
20#include <linux/sched.h>
21#include <linux/suspend.h>
22#include <linux/export.h>
23#include <linux/cpu.h>
24#include <linux/debugfs.h>
25
26#include "power.h"
27
28#define GENPD_RETRY_MAX_MS	250		/* Approximate */
29
30#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
31({								\
32	type (*__routine)(struct device *__d); 			\
33	type __ret = (type)0;					\
34								\
35	__routine = genpd->dev_ops.callback; 			\
36	if (__routine) {					\
37		__ret = __routine(dev); 			\
38	}							\
39	__ret;							\
40})
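/*
 * Illustrative sketch only: for the "stop" callback, the invocation
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev) expands roughly to:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *	__ret;
 *
 * i.e. the callback is optional and a missing one counts as success.
 */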
41
42static LIST_HEAD(gpd_list);
43static DEFINE_MUTEX(gpd_list_lock);
44
45struct genpd_lock_ops {
46	void (*lock)(struct generic_pm_domain *genpd);
47	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
48	int (*lock_interruptible)(struct generic_pm_domain *genpd);
49	void (*unlock)(struct generic_pm_domain *genpd);
50};
51
52static void genpd_lock_mtx(struct generic_pm_domain *genpd)
53{
54	mutex_lock(&genpd->mlock);
55}
56
57static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
58					int depth)
59{
60	mutex_lock_nested(&genpd->mlock, depth);
61}
62
63static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
64{
65	return mutex_lock_interruptible(&genpd->mlock);
66}
67
68static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
69{
	mutex_unlock(&genpd->mlock);
71}
72
73static const struct genpd_lock_ops genpd_mtx_ops = {
74	.lock = genpd_lock_mtx,
75	.lock_nested = genpd_lock_nested_mtx,
76	.lock_interruptible = genpd_lock_interruptible_mtx,
77	.unlock = genpd_unlock_mtx,
78};
79
80static void genpd_lock_spin(struct generic_pm_domain *genpd)
81	__acquires(&genpd->slock)
82{
83	unsigned long flags;
84
85	spin_lock_irqsave(&genpd->slock, flags);
86	genpd->lock_flags = flags;
87}
88
89static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
90					int depth)
91	__acquires(&genpd->slock)
92{
93	unsigned long flags;
94
95	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
96	genpd->lock_flags = flags;
97}
98
99static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
100	__acquires(&genpd->slock)
101{
102	unsigned long flags;
103
104	spin_lock_irqsave(&genpd->slock, flags);
105	genpd->lock_flags = flags;
106	return 0;
107}
108
109static void genpd_unlock_spin(struct generic_pm_domain *genpd)
110	__releases(&genpd->slock)
111{
112	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
113}
114
115static const struct genpd_lock_ops genpd_spin_ops = {
116	.lock = genpd_lock_spin,
117	.lock_nested = genpd_lock_nested_spin,
118	.lock_interruptible = genpd_lock_interruptible_spin,
119	.unlock = genpd_unlock_spin,
120};
121
122#define genpd_lock(p)			p->lock_ops->lock(p)
123#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
124#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
125#define genpd_unlock(p)			p->lock_ops->unlock(p)
126
127#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
128#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
129#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
130#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
131#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
132#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
133
134static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
135		const struct generic_pm_domain *genpd)
136{
137	bool ret;
138
139	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
140
141	/*
	 * Warn once if an IRQ-safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always-on domain.
145	 */
146	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
147		return ret;
148
149	if (ret)
150		dev_warn_once(dev, "PM domain %s will not be powered off\n",
151				genpd->name);
152
153	return ret;
154}
155
156static int genpd_runtime_suspend(struct device *dev);
157
158/*
159 * Get the generic PM domain for a particular struct device.
160 * This validates the struct device pointer, the PM domain pointer,
161 * and checks that the PM domain pointer is a real generic PM domain.
162 * Any failure results in NULL being returned.
163 */
164static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
165{
166	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
167		return NULL;
168
	/* A genpd always has its ->runtime_suspend() callback assigned. */
170	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
171		return pd_to_genpd(dev->pm_domain);
172
173	return NULL;
174}
175
176/*
177 * This should only be used where we are certain that the pm_domain
178 * attached to the device is a genpd domain.
179 */
180static struct generic_pm_domain *dev_to_genpd(struct device *dev)
181{
182	if (IS_ERR_OR_NULL(dev->pm_domain))
183		return ERR_PTR(-EINVAL);
184
185	return pd_to_genpd(dev->pm_domain);
186}
187
188static int genpd_stop_dev(const struct generic_pm_domain *genpd,
189			  struct device *dev)
190{
191	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
192}
193
194static int genpd_start_dev(const struct generic_pm_domain *genpd,
195			   struct device *dev)
196{
197	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
198}
199
200static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
201{
202	bool ret = false;
203
204	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
205		ret = !!atomic_dec_and_test(&genpd->sd_count);
206
207	return ret;
208}
209
210static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
211{
212	atomic_inc(&genpd->sd_count);
213	smp_mb__after_atomic();
214}
215
216#ifdef CONFIG_DEBUG_FS
217static struct dentry *genpd_debugfs_dir;
218
219static void genpd_debug_add(struct generic_pm_domain *genpd);
220
221static void genpd_debug_remove(struct generic_pm_domain *genpd)
222{
223	if (!genpd_debugfs_dir)
224		return;
225
226	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
227}
228
229static void genpd_update_accounting(struct generic_pm_domain *genpd)
230{
231	u64 delta, now;
232
233	now = ktime_get_mono_fast_ns();
234	if (now <= genpd->accounting_time)
235		return;
236
237	delta = now - genpd->accounting_time;
238
239	/*
	 * If genpd->status is ON, the domain has just come out of the off
	 * state, so the time that elapsed since the last update was spent
	 * idle. Otherwise the domain has just been powered off, so that time
	 * was spent on. Update the corresponding counter.
243	 */
244	if (genpd->status == GENPD_STATE_ON)
245		genpd->states[genpd->state_idx].idle_time += delta;
246	else
247		genpd->on_time += delta;
248
249	genpd->accounting_time = now;
250}
251#else
252static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
253static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
254static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
255#endif
256
257static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
258					   unsigned int state)
259{
260	struct generic_pm_domain_data *pd_data;
261	struct pm_domain_data *pdd;
262	struct gpd_link *link;
263
264	/* New requested state is same as Max requested state */
265	if (state == genpd->performance_state)
266		return state;
267
268	/* New requested state is higher than Max requested state */
269	if (state > genpd->performance_state)
270		return state;
271
272	/* Traverse all devices within the domain */
273	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
274		pd_data = to_gpd_data(pdd);
275
276		if (pd_data->performance_state > state)
277			state = pd_data->performance_state;
278	}
279
280	/*
281	 * Traverse all sub-domains within the domain. This can be
282	 * done without any additional locking as the link->performance_state
283	 * field is protected by the parent genpd->lock, which is already taken.
284	 *
285	 * Also note that link->performance_state (subdomain's performance state
286	 * requirement to parent domain) is different from
287	 * link->child->performance_state (current performance state requirement
288	 * of the devices/sub-domains of the subdomain) and so can have a
289	 * different value.
290	 *
	 * Note that we also take the votes from powered-off sub-domains into
	 * account, as the same is done for devices right now.
293	 */
294	list_for_each_entry(link, &genpd->parent_links, parent_node) {
295		if (link->performance_state > state)
296			state = link->performance_state;
297	}
298
299	return state;
300}
301
302static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
303					 struct generic_pm_domain *parent,
304					 unsigned int pstate)
305{
306	if (!parent->set_performance_state)
307		return pstate;
308
309	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
310						  parent->opp_table,
311						  pstate);
312}
313
314static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
315					unsigned int state, int depth)
316{
317	struct generic_pm_domain *parent;
318	struct gpd_link *link;
319	int parent_state, ret;
320
321	if (state == genpd->performance_state)
322		return 0;
323
324	/* Propagate to parents of genpd */
325	list_for_each_entry(link, &genpd->child_links, child_node) {
326		parent = link->parent;
327
328		/* Find parent's performance state */
329		ret = genpd_xlate_performance_state(genpd, parent, state);
330		if (unlikely(ret < 0))
331			goto err;
332
333		parent_state = ret;
334
335		genpd_lock_nested(parent, depth + 1);
336
337		link->prev_performance_state = link->performance_state;
338		link->performance_state = parent_state;
339		parent_state = _genpd_reeval_performance_state(parent,
340						parent_state);
341		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
342		if (ret)
343			link->performance_state = link->prev_performance_state;
344
345		genpd_unlock(parent);
346
347		if (ret)
348			goto err;
349	}
350
351	if (genpd->set_performance_state) {
352		ret = genpd->set_performance_state(genpd, state);
353		if (ret)
354			goto err;
355	}
356
357	genpd->performance_state = state;
358	return 0;
359
360err:
	/* Encountered an error, let's roll back. */
362	list_for_each_entry_continue_reverse(link, &genpd->child_links,
363					     child_node) {
364		parent = link->parent;
365
366		genpd_lock_nested(parent, depth + 1);
367
368		parent_state = link->prev_performance_state;
369		link->performance_state = parent_state;
370
371		parent_state = _genpd_reeval_performance_state(parent,
372						parent_state);
373		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
374			pr_err("%s: Failed to roll back to %d performance state\n",
375			       parent->name, parent_state);
376		}
377
378		genpd_unlock(parent);
379	}
380
381	return ret;
382}
383
384static int genpd_set_performance_state(struct device *dev, unsigned int state)
385{
386	struct generic_pm_domain *genpd = dev_to_genpd(dev);
387	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
388	unsigned int prev_state;
389	int ret;
390
391	prev_state = gpd_data->performance_state;
392	if (prev_state == state)
393		return 0;
394
395	gpd_data->performance_state = state;
396	state = _genpd_reeval_performance_state(genpd, state);
397
398	ret = _genpd_set_performance_state(genpd, state, 0);
399	if (ret)
400		gpd_data->performance_state = prev_state;
401
402	return ret;
403}
404
405static int genpd_drop_performance_state(struct device *dev)
406{
407	unsigned int prev_state = dev_gpd_data(dev)->performance_state;
408
409	if (!genpd_set_performance_state(dev, 0))
410		return prev_state;
411
412	return 0;
413}
414
415static void genpd_restore_performance_state(struct device *dev,
416					    unsigned int state)
417{
418	if (state)
419		genpd_set_performance_state(dev, state);
420}
421
422/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
424 * domain.
425 *
426 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set to 0 when
 *	   the device doesn't have any performance state constraints left (and
 *	   the device then no longer takes part in determining the target
 *	   performance state of the genpd).
431 *
432 * It is assumed that the users guarantee that the genpd wouldn't be detached
433 * while this routine is getting called.
434 *
435 * Returns 0 on success and negative error values on failures.
436 */
437int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
438{
439	struct generic_pm_domain *genpd;
440	int ret = 0;
441
442	genpd = dev_to_genpd_safe(dev);
443	if (!genpd)
444		return -ENODEV;
445
446	if (WARN_ON(!dev->power.subsys_data ||
447		     !dev->power.subsys_data->domain_data))
448		return -EINVAL;
449
450	genpd_lock(genpd);
451	if (pm_runtime_suspended(dev)) {
452		dev_gpd_data(dev)->rpm_pstate = state;
453	} else {
454		ret = genpd_set_performance_state(dev, state);
455		if (!ret)
456			dev_gpd_data(dev)->rpm_pstate = 0;
457	}
458	genpd_unlock(genpd);
459
460	return ret;
461}
462EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
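/*
 * Illustrative sketch of a hypothetical consumer driver (not part of this
 * file): request a performance level while active and drop the constraint
 * again when done.
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 2);
 *	if (ret)
 *		return ret;
 *
 *	... perform the work that needs the higher performance level ...
 *
 *	dev_pm_genpd_set_performance_state(dev, 0);
 */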
463
464/**
465 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
466 *
467 * @dev: Device to handle
468 * @next: impending interrupt/wakeup for the device
 *
471 * Allow devices to inform of the next wakeup. It's assumed that the users
472 * guarantee that the genpd wouldn't be detached while this routine is getting
473 * called. Additionally, it's also assumed that @dev isn't runtime suspended
 * (RPM_SUSPENDED).
475 * Although devices are expected to update the next_wakeup after the end of
476 * their usecase as well, it is possible the devices themselves may not know
477 * about that, so stale @next will be ignored when powering off the domain.
478 */
479void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
480{
481	struct generic_pm_domain *genpd;
482	struct gpd_timing_data *td;
483
484	genpd = dev_to_genpd_safe(dev);
485	if (!genpd)
486		return;
487
488	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
489	if (td)
490		td->next_wakeup = next;
491}
492EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
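/*
 * Illustrative sketch (hypothetical consumer): tell the governor that the
 * next wakeup is expected in roughly 10 ms, so it can pick a suitable
 * domain idle state.
 *
 *	ktime_t next = ktime_add_ms(ktime_get(), 10);
 *
 *	dev_pm_genpd_set_next_wakeup(dev, next);
 */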
493
494/**
495 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
496 * @dev: A device that is attached to the genpd.
497 *
 * This routine should typically be called for a device, at the point when a
499 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
500 *
501 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
 * valid value has been set.
503 */
504ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
505{
506	struct generic_pm_domain *genpd;
507
508	genpd = dev_to_genpd_safe(dev);
509	if (!genpd)
510		return KTIME_MAX;
511
512	if (genpd->gd)
513		return genpd->gd->next_hrtimer;
514
515	return KTIME_MAX;
516}
517EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);
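/*
 * Illustrative sketch (hypothetical genpd provider, names such as cpu_dev and
 * latency_limit are assumptions): from a GENPD_NOTIFY_PRE_OFF handler, compare
 * the domain's next hrtimer with the latency budget of a candidate idle state.
 *
 *	ktime_t next = dev_pm_genpd_get_next_hrtimer(cpu_dev);
 *
 *	if (next != KTIME_MAX && ktime_before(next, latency_limit))
 *		... choose a shallower domain idle state ...
 */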
518
/**
520 * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
521 *
522 * @dev: A device that is attached to the genpd.
523 *
524 * Allows a consumer of the genpd to notify the provider that the next power off
525 * should be synchronous.
526 *
527 * It is assumed that the users guarantee that the genpd wouldn't be detached
528 * while this routine is getting called.
529 */
530void dev_pm_genpd_synced_poweroff(struct device *dev)
531{
532	struct generic_pm_domain *genpd;
533
534	genpd = dev_to_genpd_safe(dev);
535	if (!genpd)
536		return;
537
538	genpd_lock(genpd);
539	genpd->synced_poweroff = true;
540	genpd_unlock(genpd);
541}
542EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
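/*
 * Illustrative sketch (hypothetical consumer): request that the upcoming
 * power off of the PM domain is carried out synchronously, for instance
 * before handing over control to firmware, and then trigger it via runtime PM.
 *
 *	dev_pm_genpd_synced_poweroff(dev);
 *	pm_runtime_put_sync(dev);
 */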
543
544static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
545{
546	unsigned int state_idx = genpd->state_idx;
547	ktime_t time_start;
548	s64 elapsed_ns;
549	int ret;
550
551	/* Notify consumers that we are about to power on. */
552	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
553					     GENPD_NOTIFY_PRE_ON,
554					     GENPD_NOTIFY_OFF, NULL);
555	ret = notifier_to_errno(ret);
556	if (ret)
557		return ret;
558
559	if (!genpd->power_on)
560		goto out;
561
562	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
563	if (!timed) {
564		ret = genpd->power_on(genpd);
565		if (ret)
566			goto err;
567
568		goto out;
569	}
570
571	time_start = ktime_get();
572	ret = genpd->power_on(genpd);
573	if (ret)
574		goto err;
575
576	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
577	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
578		goto out;
579
580	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
581	genpd->gd->max_off_time_changed = true;
582	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
583		 genpd->name, "on", elapsed_ns);
584
585out:
586	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
587	genpd->synced_poweroff = false;
588	return 0;
589err:
590	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
591				NULL);
592	return ret;
593}
594
595static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
596{
597	unsigned int state_idx = genpd->state_idx;
598	ktime_t time_start;
599	s64 elapsed_ns;
600	int ret;
601
602	/* Notify consumers that we are about to power off. */
603	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
604					     GENPD_NOTIFY_PRE_OFF,
605					     GENPD_NOTIFY_ON, NULL);
606	ret = notifier_to_errno(ret);
607	if (ret)
608		return ret;
609
610	if (!genpd->power_off)
611		goto out;
612
613	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
614	if (!timed) {
615		ret = genpd->power_off(genpd);
616		if (ret)
617			goto busy;
618
619		goto out;
620	}
621
622	time_start = ktime_get();
623	ret = genpd->power_off(genpd);
624	if (ret)
625		goto busy;
626
627	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
628	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
629		goto out;
630
631	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
632	genpd->gd->max_off_time_changed = true;
633	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
634		 genpd->name, "off", elapsed_ns);
635
636out:
637	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
638				NULL);
639	return 0;
640busy:
641	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
642	return ret;
643}
644
645/**
646 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
647 * @genpd: PM domain to power off.
648 *
649 * Queue up the execution of genpd_power_off() unless it's already been done
650 * before.
651 */
652static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
653{
654	queue_work(pm_wq, &genpd->power_off_work);
655}
656
657/**
658 * genpd_power_off - Remove power from a given PM domain.
659 * @genpd: PM domain to power down.
660 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
662 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
663 * be RPM_SUSPENDED, while it tries to power off the PM domain.
664 * @depth: nesting count for lockdep.
665 *
666 * If all of the @genpd's devices have been suspended and all of its subdomains
667 * have been powered down, remove power from @genpd.
668 */
669static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
670			   unsigned int depth)
671{
672	struct pm_domain_data *pdd;
673	struct gpd_link *link;
674	unsigned int not_suspended = 0;
675	int ret;
676
677	/*
678	 * Do not try to power off the domain in the following situations:
679	 * (1) The domain is already in the "power off" state.
680	 * (2) System suspend is in progress.
681	 */
682	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
683		return 0;
684
685	/*
686	 * Abort power off for the PM domain in the following situations:
687	 * (1) The domain is configured as always on.
688	 * (2) When the domain has a subdomain being powered on.
689	 */
690	if (genpd_is_always_on(genpd) ||
691			genpd_is_rpm_always_on(genpd) ||
692			atomic_read(&genpd->sd_count) > 0)
693		return -EBUSY;
694
695	/*
696	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that there's no need for
	 * additional locking, as powering on a child requires the parent's
699	 * lock to be acquired first.
700	 */
701	list_for_each_entry(link, &genpd->parent_links, parent_node) {
702		struct generic_pm_domain *child = link->child;
703		if (child->state_idx < child->state_count - 1)
704			return -EBUSY;
705	}
706
707	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
708		/*
		 * Do not allow the PM domain to be powered off when an IRQ-safe
		 * device is part of a non-IRQ-safe domain.
711		 */
712		if (!pm_runtime_suspended(pdd->dev) ||
713			irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
714			not_suspended++;
715	}
716
717	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
718		return -EBUSY;
719
720	if (genpd->gov && genpd->gov->power_down_ok) {
721		if (!genpd->gov->power_down_ok(&genpd->domain))
722			return -EAGAIN;
723	}
724
725	/* Default to shallowest state. */
726	if (!genpd->gov)
727		genpd->state_idx = 0;
728
729	/* Don't power off, if a child domain is waiting to power on. */
730	if (atomic_read(&genpd->sd_count) > 0)
731		return -EBUSY;
732
733	ret = _genpd_power_off(genpd, true);
734	if (ret) {
735		genpd->states[genpd->state_idx].rejected++;
736		return ret;
737	}
738
739	genpd->status = GENPD_STATE_OFF;
740	genpd_update_accounting(genpd);
741	genpd->states[genpd->state_idx].usage++;
742
743	list_for_each_entry(link, &genpd->child_links, child_node) {
744		genpd_sd_counter_dec(link->parent);
745		genpd_lock_nested(link->parent, depth + 1);
746		genpd_power_off(link->parent, false, depth + 1);
747		genpd_unlock(link->parent);
748	}
749
750	return 0;
751}
752
753/**
754 * genpd_power_on - Restore power to a given PM domain and its parents.
755 * @genpd: PM domain to power up.
756 * @depth: nesting count for lockdep.
757 *
758 * Restore power to @genpd and all of its parents so that it is possible to
759 * resume a device belonging to it.
760 */
761static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
762{
763	struct gpd_link *link;
764	int ret = 0;
765
766	if (genpd_status_on(genpd))
767		return 0;
768
769	/*
770	 * The list is guaranteed not to change while the loop below is being
771	 * executed, unless one of the parents' .power_on() callbacks fiddles
772	 * with it.
773	 */
774	list_for_each_entry(link, &genpd->child_links, child_node) {
775		struct generic_pm_domain *parent = link->parent;
776
777		genpd_sd_counter_inc(parent);
778
779		genpd_lock_nested(parent, depth + 1);
780		ret = genpd_power_on(parent, depth + 1);
781		genpd_unlock(parent);
782
783		if (ret) {
784			genpd_sd_counter_dec(parent);
785			goto err;
786		}
787	}
788
789	ret = _genpd_power_on(genpd, true);
790	if (ret)
791		goto err;
792
793	genpd->status = GENPD_STATE_ON;
794	genpd_update_accounting(genpd);
795
796	return 0;
797
798 err:
799	list_for_each_entry_continue_reverse(link,
800					&genpd->child_links,
801					child_node) {
802		genpd_sd_counter_dec(link->parent);
803		genpd_lock_nested(link->parent, depth + 1);
804		genpd_power_off(link->parent, false, depth + 1);
805		genpd_unlock(link->parent);
806	}
807
808	return ret;
809}
810
811static int genpd_dev_pm_start(struct device *dev)
812{
813	struct generic_pm_domain *genpd = dev_to_genpd(dev);
814
815	return genpd_start_dev(genpd, dev);
816}
817
818static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
819				     unsigned long val, void *ptr)
820{
821	struct generic_pm_domain_data *gpd_data;
822	struct device *dev;
823
824	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
825	dev = gpd_data->base.dev;
826
827	for (;;) {
828		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
829		struct pm_domain_data *pdd;
830		struct gpd_timing_data *td;
831
832		spin_lock_irq(&dev->power.lock);
833
834		pdd = dev->power.subsys_data ?
835				dev->power.subsys_data->domain_data : NULL;
836		if (pdd) {
837			td = to_gpd_data(pdd)->td;
838			if (td) {
839				td->constraint_changed = true;
840				genpd = dev_to_genpd(dev);
841			}
842		}
843
844		spin_unlock_irq(&dev->power.lock);
845
846		if (!IS_ERR(genpd)) {
847			genpd_lock(genpd);
848			genpd->gd->max_off_time_changed = true;
849			genpd_unlock(genpd);
850		}
851
852		dev = dev->parent;
853		if (!dev || dev->power.ignore_children)
854			break;
855	}
856
857	return NOTIFY_DONE;
858}
859
860/**
861 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
862 * @work: Work structure used for scheduling the execution of this function.
863 */
864static void genpd_power_off_work_fn(struct work_struct *work)
865{
866	struct generic_pm_domain *genpd;
867
868	genpd = container_of(work, struct generic_pm_domain, power_off_work);
869
870	genpd_lock(genpd);
871	genpd_power_off(genpd, false, 0);
872	genpd_unlock(genpd);
873}
874
875/**
876 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
877 * @dev: Device to handle.
878 */
879static int __genpd_runtime_suspend(struct device *dev)
880{
881	int (*cb)(struct device *__dev);
882
883	if (dev->type && dev->type->pm)
884		cb = dev->type->pm->runtime_suspend;
885	else if (dev->class && dev->class->pm)
886		cb = dev->class->pm->runtime_suspend;
887	else if (dev->bus && dev->bus->pm)
888		cb = dev->bus->pm->runtime_suspend;
889	else
890		cb = NULL;
891
892	if (!cb && dev->driver && dev->driver->pm)
893		cb = dev->driver->pm->runtime_suspend;
894
895	return cb ? cb(dev) : 0;
896}
897
898/**
899 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
900 * @dev: Device to handle.
901 */
902static int __genpd_runtime_resume(struct device *dev)
903{
904	int (*cb)(struct device *__dev);
905
906	if (dev->type && dev->type->pm)
907		cb = dev->type->pm->runtime_resume;
908	else if (dev->class && dev->class->pm)
909		cb = dev->class->pm->runtime_resume;
910	else if (dev->bus && dev->bus->pm)
911		cb = dev->bus->pm->runtime_resume;
912	else
913		cb = NULL;
914
915	if (!cb && dev->driver && dev->driver->pm)
916		cb = dev->driver->pm->runtime_resume;
917
918	return cb ? cb(dev) : 0;
919}
920
921/**
922 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
923 * @dev: Device to suspend.
924 *
925 * Carry out a runtime suspend of a device under the assumption that its
926 * pm_domain field points to the domain member of an object of type
927 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
928 */
929static int genpd_runtime_suspend(struct device *dev)
930{
931	struct generic_pm_domain *genpd;
932	bool (*suspend_ok)(struct device *__dev);
933	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
934	struct gpd_timing_data *td = gpd_data->td;
935	bool runtime_pm = pm_runtime_enabled(dev);
936	ktime_t time_start = 0;
937	s64 elapsed_ns;
938	int ret;
939
940	dev_dbg(dev, "%s()\n", __func__);
941
942	genpd = dev_to_genpd(dev);
943	if (IS_ERR(genpd))
944		return -EINVAL;
945
946	/*
947	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for purposes other than runtime PM. In those scenarios
949	 * runtime PM is disabled. Under these circumstances, we shall skip
950	 * validating/measuring the PM QoS latency.
951	 */
952	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
953	if (runtime_pm && suspend_ok && !suspend_ok(dev))
954		return -EBUSY;
955
956	/* Measure suspend latency. */
957	if (td && runtime_pm)
958		time_start = ktime_get();
959
960	ret = __genpd_runtime_suspend(dev);
961	if (ret)
962		return ret;
963
964	ret = genpd_stop_dev(genpd, dev);
965	if (ret) {
966		__genpd_runtime_resume(dev);
967		return ret;
968	}
969
970	/* Update suspend latency value if the measured time exceeds it. */
971	if (td && runtime_pm) {
972		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
973		if (elapsed_ns > td->suspend_latency_ns) {
974			td->suspend_latency_ns = elapsed_ns;
975			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
976				elapsed_ns);
977			genpd->gd->max_off_time_changed = true;
978			td->constraint_changed = true;
979		}
980	}
981
982	/*
983	 * If power.irq_safe is set, this routine may be run with
984	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
985	 */
986	if (irq_safe_dev_in_sleep_domain(dev, genpd))
987		return 0;
988
989	genpd_lock(genpd);
990	genpd_power_off(genpd, true, 0);
991	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
992	genpd_unlock(genpd);
993
994	return 0;
995}
996
997/**
998 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
999 * @dev: Device to resume.
1000 *
1001 * Carry out a runtime resume of a device under the assumption that its
1002 * pm_domain field points to the domain member of an object of type
1003 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
1004 */
1005static int genpd_runtime_resume(struct device *dev)
1006{
1007	struct generic_pm_domain *genpd;
1008	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
1009	struct gpd_timing_data *td = gpd_data->td;
1010	bool timed = td && pm_runtime_enabled(dev);
1011	ktime_t time_start = 0;
1012	s64 elapsed_ns;
1013	int ret;
1014
1015	dev_dbg(dev, "%s()\n", __func__);
1016
1017	genpd = dev_to_genpd(dev);
1018	if (IS_ERR(genpd))
1019		return -EINVAL;
1020
1021	/*
	 * As we don't power off a non-IRQ-safe domain that holds
	 * an IRQ-safe device, we don't need to restore power to it.
1024	 */
1025	if (irq_safe_dev_in_sleep_domain(dev, genpd))
1026		goto out;
1027
1028	genpd_lock(genpd);
1029	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
1030	ret = genpd_power_on(genpd, 0);
1031	genpd_unlock(genpd);
1032
1033	if (ret)
1034		return ret;
1035
1036 out:
1037	/* Measure resume latency. */
1038	if (timed)
1039		time_start = ktime_get();
1040
1041	ret = genpd_start_dev(genpd, dev);
1042	if (ret)
1043		goto err_poweroff;
1044
1045	ret = __genpd_runtime_resume(dev);
1046	if (ret)
1047		goto err_stop;
1048
1049	/* Update resume latency value if the measured time exceeds it. */
1050	if (timed) {
1051		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
1052		if (elapsed_ns > td->resume_latency_ns) {
1053			td->resume_latency_ns = elapsed_ns;
1054			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
1055				elapsed_ns);
1056			genpd->gd->max_off_time_changed = true;
1057			td->constraint_changed = true;
1058		}
1059	}
1060
1061	return 0;
1062
1063err_stop:
1064	genpd_stop_dev(genpd, dev);
1065err_poweroff:
1066	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
1067		genpd_lock(genpd);
1068		genpd_power_off(genpd, true, 0);
1069		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
1070		genpd_unlock(genpd);
1071	}
1072
1073	return ret;
1074}
1075
1076static bool pd_ignore_unused;
1077static int __init pd_ignore_unused_setup(char *__unused)
1078{
1079	pd_ignore_unused = true;
1080	return 1;
1081}
1082__setup("pd_ignore_unused", pd_ignore_unused_setup);
1083
1084/**
1085 * genpd_power_off_unused - Power off all PM domains with no devices in use.
1086 */
1087static int __init genpd_power_off_unused(void)
1088{
1089	struct generic_pm_domain *genpd;
1090
1091	if (pd_ignore_unused) {
1092		pr_warn("genpd: Not disabling unused power domains\n");
1093		return 0;
1094	}
1095
1096	mutex_lock(&gpd_list_lock);
1097
1098	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1099		genpd_queue_power_off_work(genpd);
1100
1101	mutex_unlock(&gpd_list_lock);
1102
1103	return 0;
1104}
1105late_initcall_sync(genpd_power_off_unused);
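/*
 * For example, booting with "pd_ignore_unused" on the kernel command line
 * skips this power off of unused domains, which can be helpful while
 * debugging a driver that has not yet claimed its devices.
 */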
1106
1107#ifdef CONFIG_PM_SLEEP
1108
1109/**
1110 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
1111 * @genpd: PM domain to power off, if possible.
1112 * @use_lock: use the lock.
1113 * @depth: nesting count for lockdep.
1114 *
1115 * Check if the given PM domain can be powered off (during system suspend or
1116 * hibernation) and do that if so.  Also, in that case propagate to its parents.
1117 *
1118 * This function is only called in "noirq" and "syscore" stages of system power
1119 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1120 * these cases the lock must be held.
1121 */
1122static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
1123				 unsigned int depth)
1124{
1125	struct gpd_link *link;
1126
1127	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
1128		return;
1129
1130	if (genpd->suspended_count != genpd->device_count
1131	    || atomic_read(&genpd->sd_count) > 0)
1132		return;
1133
1134	/* Check that the children are in their deepest (powered-off) state. */
1135	list_for_each_entry(link, &genpd->parent_links, parent_node) {
1136		struct generic_pm_domain *child = link->child;
1137		if (child->state_idx < child->state_count - 1)
1138			return;
1139	}
1140
1141	/* Choose the deepest state when suspending */
1142	genpd->state_idx = genpd->state_count - 1;
1143	if (_genpd_power_off(genpd, false))
1144		return;
1145
1146	genpd->status = GENPD_STATE_OFF;
1147
1148	list_for_each_entry(link, &genpd->child_links, child_node) {
1149		genpd_sd_counter_dec(link->parent);
1150
1151		if (use_lock)
1152			genpd_lock_nested(link->parent, depth + 1);
1153
1154		genpd_sync_power_off(link->parent, use_lock, depth + 1);
1155
1156		if (use_lock)
1157			genpd_unlock(link->parent);
1158	}
1159}
1160
1161/**
1162 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
1163 * @genpd: PM domain to power on.
1164 * @use_lock: use the lock.
1165 * @depth: nesting count for lockdep.
1166 *
1167 * This function is only called in "noirq" and "syscore" stages of system power
1168 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
1169 * these cases the lock must be held.
1170 */
1171static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
1172				unsigned int depth)
1173{
1174	struct gpd_link *link;
1175
1176	if (genpd_status_on(genpd))
1177		return;
1178
1179	list_for_each_entry(link, &genpd->child_links, child_node) {
1180		genpd_sd_counter_inc(link->parent);
1181
1182		if (use_lock)
1183			genpd_lock_nested(link->parent, depth + 1);
1184
1185		genpd_sync_power_on(link->parent, use_lock, depth + 1);
1186
1187		if (use_lock)
1188			genpd_unlock(link->parent);
1189	}
1190
1191	_genpd_power_on(genpd, false);
1192	genpd->status = GENPD_STATE_ON;
1193}
1194
1195/**
1196 * genpd_prepare - Start power transition of a device in a PM domain.
1197 * @dev: Device to start the transition of.
1198 *
1199 * Start a power transition of a device (during a system-wide power transition)
1200 * under the assumption that its pm_domain field points to the domain member of
1201 * an object of type struct generic_pm_domain representing a PM domain
1202 * consisting of I/O devices.
1203 */
1204static int genpd_prepare(struct device *dev)
1205{
1206	struct generic_pm_domain *genpd;
1207	int ret;
1208
1209	dev_dbg(dev, "%s()\n", __func__);
1210
1211	genpd = dev_to_genpd(dev);
1212	if (IS_ERR(genpd))
1213		return -EINVAL;
1214
1215	genpd_lock(genpd);
1216
1217	if (genpd->prepared_count++ == 0)
1218		genpd->suspended_count = 0;
1219
1220	genpd_unlock(genpd);
1221
1222	ret = pm_generic_prepare(dev);
1223	if (ret < 0) {
1224		genpd_lock(genpd);
1225
1226		genpd->prepared_count--;
1227
1228		genpd_unlock(genpd);
1229	}
1230
	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1232	return ret >= 0 ? 0 : ret;
1233}
1234
1235/**
1236 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1237 *   I/O pm domain.
1238 * @dev: Device to suspend.
1239 * @suspend_noirq: Generic suspend_noirq callback.
1240 * @resume_noirq: Generic resume_noirq callback.
1241 *
1242 * Stop the device and remove power from the domain if all devices in it have
1243 * been stopped.
1244 */
1245static int genpd_finish_suspend(struct device *dev,
1246				int (*suspend_noirq)(struct device *dev),
1247				int (*resume_noirq)(struct device *dev))
1248{
1249	struct generic_pm_domain *genpd;
1250	int ret = 0;
1251
1252	genpd = dev_to_genpd(dev);
1253	if (IS_ERR(genpd))
1254		return -EINVAL;
1255
1256	ret = suspend_noirq(dev);
1257	if (ret)
1258		return ret;
1259
1260	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1261		return 0;
1262
1263	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1264	    !pm_runtime_status_suspended(dev)) {
1265		ret = genpd_stop_dev(genpd, dev);
1266		if (ret) {
1267			resume_noirq(dev);
1268			return ret;
1269		}
1270	}
1271
1272	genpd_lock(genpd);
1273	genpd->suspended_count++;
1274	genpd_sync_power_off(genpd, true, 0);
1275	genpd_unlock(genpd);
1276
1277	return 0;
1278}
1279
1280/**
1281 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
1282 * @dev: Device to suspend.
1283 *
1284 * Stop the device and remove power from the domain if all devices in it have
1285 * been stopped.
1286 */
1287static int genpd_suspend_noirq(struct device *dev)
1288{
1289	dev_dbg(dev, "%s()\n", __func__);
1290
1291	return genpd_finish_suspend(dev,
1292				    pm_generic_suspend_noirq,
1293				    pm_generic_resume_noirq);
1294}
1295
1296/**
1297 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
1298 * @dev: Device to resume.
1299 * @resume_noirq: Generic resume_noirq callback.
1300 *
1301 * Restore power to the device's PM domain, if necessary, and start the device.
1302 */
1303static int genpd_finish_resume(struct device *dev,
1304			       int (*resume_noirq)(struct device *dev))
1305{
1306	struct generic_pm_domain *genpd;
1307	int ret;
1308
1309	dev_dbg(dev, "%s()\n", __func__);
1310
1311	genpd = dev_to_genpd(dev);
1312	if (IS_ERR(genpd))
1313		return -EINVAL;
1314
1315	if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
1316		return resume_noirq(dev);
1317
1318	genpd_lock(genpd);
1319	genpd_sync_power_on(genpd, true, 0);
1320	genpd->suspended_count--;
1321	genpd_unlock(genpd);
1322
1323	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1324	    !pm_runtime_status_suspended(dev)) {
1325		ret = genpd_start_dev(genpd, dev);
1326		if (ret)
1327			return ret;
1328	}
1329
	return resume_noirq(dev);
1331}
1332
1333/**
1334 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1335 * @dev: Device to resume.
1336 *
1337 * Restore power to the device's PM domain, if necessary, and start the device.
1338 */
1339static int genpd_resume_noirq(struct device *dev)
1340{
1341	dev_dbg(dev, "%s()\n", __func__);
1342
1343	return genpd_finish_resume(dev, pm_generic_resume_noirq);
1344}
1345
1346/**
1347 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
1348 * @dev: Device to freeze.
1349 *
1350 * Carry out a late freeze of a device under the assumption that its
1351 * pm_domain field points to the domain member of an object of type
1352 * struct generic_pm_domain representing a power domain consisting of I/O
1353 * devices.
1354 */
1355static int genpd_freeze_noirq(struct device *dev)
1356{
1357	dev_dbg(dev, "%s()\n", __func__);
1358
1359	return genpd_finish_suspend(dev,
1360				    pm_generic_freeze_noirq,
1361				    pm_generic_thaw_noirq);
1362}
1363
1364/**
1365 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
1366 * @dev: Device to thaw.
1367 *
1368 * Start the device, unless power has been removed from the domain already
1369 * before the system transition.
1370 */
1371static int genpd_thaw_noirq(struct device *dev)
1372{
1373	dev_dbg(dev, "%s()\n", __func__);
1374
1375	return genpd_finish_resume(dev, pm_generic_thaw_noirq);
1376}
1377
1378/**
1379 * genpd_poweroff_noirq - Completion of hibernation of device in an
1380 *   I/O PM domain.
1381 * @dev: Device to poweroff.
1382 *
1383 * Stop the device and remove power from the domain if all devices in it have
1384 * been stopped.
1385 */
1386static int genpd_poweroff_noirq(struct device *dev)
1387{
1388	dev_dbg(dev, "%s()\n", __func__);
1389
1390	return genpd_finish_suspend(dev,
1391				    pm_generic_poweroff_noirq,
1392				    pm_generic_restore_noirq);
1393}
1394
1395/**
1396 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1397 * @dev: Device to resume.
1398 *
1399 * Make sure the domain will be in the same power state as before the
1400 * hibernation the system is resuming from and start the device if necessary.
1401 */
1402static int genpd_restore_noirq(struct device *dev)
1403{
1404	dev_dbg(dev, "%s()\n", __func__);
1405
1406	return genpd_finish_resume(dev, pm_generic_restore_noirq);
1407}
1408
1409/**
1410 * genpd_complete - Complete power transition of a device in a power domain.
1411 * @dev: Device to complete the transition of.
1412 *
1413 * Complete a power transition of a device (during a system-wide power
1414 * transition) under the assumption that its pm_domain field points to the
1415 * domain member of an object of type struct generic_pm_domain representing
1416 * a power domain consisting of I/O devices.
1417 */
1418static void genpd_complete(struct device *dev)
1419{
1420	struct generic_pm_domain *genpd;
1421
1422	dev_dbg(dev, "%s()\n", __func__);
1423
1424	genpd = dev_to_genpd(dev);
1425	if (IS_ERR(genpd))
1426		return;
1427
1428	pm_generic_complete(dev);
1429
1430	genpd_lock(genpd);
1431
1432	genpd->prepared_count--;
1433	if (!genpd->prepared_count)
1434		genpd_queue_power_off_work(genpd);
1435
1436	genpd_unlock(genpd);
1437}
1438
1439static void genpd_switch_state(struct device *dev, bool suspend)
1440{
1441	struct generic_pm_domain *genpd;
1442	bool use_lock;
1443
1444	genpd = dev_to_genpd_safe(dev);
1445	if (!genpd)
1446		return;
1447
1448	use_lock = genpd_is_irq_safe(genpd);
1449
1450	if (use_lock)
1451		genpd_lock(genpd);
1452
1453	if (suspend) {
1454		genpd->suspended_count++;
1455		genpd_sync_power_off(genpd, use_lock, 0);
1456	} else {
1457		genpd_sync_power_on(genpd, use_lock, 0);
1458		genpd->suspended_count--;
1459	}
1460
1461	if (use_lock)
1462		genpd_unlock(genpd);
1463}
1464
1465/**
1466 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1467 * @dev: The device that is attached to the genpd, that can be suspended.
1468 *
1469 * This routine should typically be called for a device that needs to be
1470 * suspended during the syscore suspend phase. It may also be called during
1471 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1472 * genpd.
1473 */
1474void dev_pm_genpd_suspend(struct device *dev)
1475{
1476	genpd_switch_state(dev, true);
1477}
1478EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
1479
1480/**
1481 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1482 * @dev: The device that is attached to the genpd, which needs to be resumed.
1483 *
1484 * This routine should typically be called for a device that needs to be resumed
1485 * during the syscore resume phase. It may also be called during suspend-to-idle
1486 * to resume a corresponding CPU device that is attached to a genpd.
1487 */
1488void dev_pm_genpd_resume(struct device *dev)
1489{
1490	genpd_switch_state(dev, false);
1491}
1492EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
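/*
 * Illustrative sketch (hypothetical syscore user, my_dev is an assumption):
 * suspend and resume the genpd of a device from syscore operations, where
 * regular device PM callbacks are not run.
 *
 *	static int my_syscore_suspend(void)
 *	{
 *		dev_pm_genpd_suspend(my_dev);
 *		return 0;
 *	}
 *
 *	static void my_syscore_resume(void)
 *	{
 *		dev_pm_genpd_resume(my_dev);
 *	}
 *
 *	static struct syscore_ops my_syscore_ops = {
 *		.suspend = my_syscore_suspend,
 *		.resume = my_syscore_resume,
 *	};
 *
 *	register_syscore_ops(&my_syscore_ops);
 */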
1493
1494#else /* !CONFIG_PM_SLEEP */
1495
1496#define genpd_prepare		NULL
1497#define genpd_suspend_noirq	NULL
1498#define genpd_resume_noirq	NULL
1499#define genpd_freeze_noirq	NULL
1500#define genpd_thaw_noirq	NULL
1501#define genpd_poweroff_noirq	NULL
1502#define genpd_restore_noirq	NULL
1503#define genpd_complete		NULL
1504
1505#endif /* CONFIG_PM_SLEEP */
1506
1507static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1508							   bool has_governor)
1509{
1510	struct generic_pm_domain_data *gpd_data;
1511	struct gpd_timing_data *td;
1512	int ret;
1513
1514	ret = dev_pm_get_subsys_data(dev);
1515	if (ret)
1516		return ERR_PTR(ret);
1517
1518	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
1519	if (!gpd_data) {
1520		ret = -ENOMEM;
1521		goto err_put;
1522	}
1523
1524	gpd_data->base.dev = dev;
1525	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1526
1527	/* Allocate data used by a governor. */
1528	if (has_governor) {
1529		td = kzalloc(sizeof(*td), GFP_KERNEL);
1530		if (!td) {
1531			ret = -ENOMEM;
1532			goto err_free;
1533		}
1534
1535		td->constraint_changed = true;
1536		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
1537		td->next_wakeup = KTIME_MAX;
1538		gpd_data->td = td;
1539	}
1540
1541	spin_lock_irq(&dev->power.lock);
1542
1543	if (dev->power.subsys_data->domain_data)
1544		ret = -EINVAL;
1545	else
1546		dev->power.subsys_data->domain_data = &gpd_data->base;
1547
1548	spin_unlock_irq(&dev->power.lock);
1549
1550	if (ret)
1551		goto err_free;
1552
1553	return gpd_data;
1554
1555 err_free:
1556	kfree(gpd_data->td);
1557	kfree(gpd_data);
1558 err_put:
1559	dev_pm_put_subsys_data(dev);
1560	return ERR_PTR(ret);
1561}
1562
1563static void genpd_free_dev_data(struct device *dev,
1564				struct generic_pm_domain_data *gpd_data)
1565{
1566	spin_lock_irq(&dev->power.lock);
1567
1568	dev->power.subsys_data->domain_data = NULL;
1569
1570	spin_unlock_irq(&dev->power.lock);
1571
1572	kfree(gpd_data->td);
1573	kfree(gpd_data);
1574	dev_pm_put_subsys_data(dev);
1575}
1576
1577static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1578				 int cpu, bool set, unsigned int depth)
1579{
1580	struct gpd_link *link;
1581
1582	if (!genpd_is_cpu_domain(genpd))
1583		return;
1584
1585	list_for_each_entry(link, &genpd->child_links, child_node) {
1586		struct generic_pm_domain *parent = link->parent;
1587
1588		genpd_lock_nested(parent, depth + 1);
1589		genpd_update_cpumask(parent, cpu, set, depth + 1);
1590		genpd_unlock(parent);
1591	}
1592
1593	if (set)
1594		cpumask_set_cpu(cpu, genpd->cpus);
1595	else
1596		cpumask_clear_cpu(cpu, genpd->cpus);
1597}
1598
1599static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1600{
1601	if (cpu >= 0)
1602		genpd_update_cpumask(genpd, cpu, true, 0);
1603}
1604
1605static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1606{
1607	if (cpu >= 0)
1608		genpd_update_cpumask(genpd, cpu, false, 0);
1609}
1610
1611static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
1612{
1613	int cpu;
1614
1615	if (!genpd_is_cpu_domain(genpd))
1616		return -1;
1617
1618	for_each_possible_cpu(cpu) {
1619		if (get_cpu_device(cpu) == dev)
1620			return cpu;
1621	}
1622
1623	return -1;
1624}
1625
1626static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1627			    struct device *base_dev)
1628{
1629	struct genpd_governor_data *gd = genpd->gd;
1630	struct generic_pm_domain_data *gpd_data;
1631	int ret;
1632
1633	dev_dbg(dev, "%s()\n", __func__);
1634
1635	gpd_data = genpd_alloc_dev_data(dev, gd);
1636	if (IS_ERR(gpd_data))
1637		return PTR_ERR(gpd_data);
1638
1639	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
1640
1641	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1642	if (ret)
1643		goto out;
1644
1645	genpd_lock(genpd);
1646
1647	genpd_set_cpumask(genpd, gpd_data->cpu);
1648	dev_pm_domain_set(dev, &genpd->domain);
1649
1650	genpd->device_count++;
1651	if (gd)
1652		gd->max_off_time_changed = true;
1653
1654	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1655
1656	genpd_unlock(genpd);
1657 out:
1658	if (ret)
1659		genpd_free_dev_data(dev, gpd_data);
1660	else
1661		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1662					DEV_PM_QOS_RESUME_LATENCY);
1663
1664	return ret;
1665}
1666
1667/**
1668 * pm_genpd_add_device - Add a device to an I/O PM domain.
1669 * @genpd: PM domain to add the device to.
1670 * @dev: Device to be added.
1671 */
1672int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1673{
1674	int ret;
1675
1676	if (!genpd || !dev)
1677		return -EINVAL;
1678
1679	mutex_lock(&gpd_list_lock);
1680	ret = genpd_add_device(genpd, dev, dev);
1681	mutex_unlock(&gpd_list_lock);
1682
1683	return ret;
1684}
1685EXPORT_SYMBOL_GPL(pm_genpd_add_device);
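/*
 * Illustrative sketch (hypothetical genpd provider, my_pd and its callbacks
 * are assumptions): initialize a domain and add a platform device to it.
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my_pd",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 *	if (ret)
 *		return ret;
 *
 *	ret = pm_genpd_add_device(&my_pd, &pdev->dev);
 */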
1686
1687static int genpd_remove_device(struct generic_pm_domain *genpd,
1688			       struct device *dev)
1689{
1690	struct generic_pm_domain_data *gpd_data;
1691	struct pm_domain_data *pdd;
1692	int ret = 0;
1693
1694	dev_dbg(dev, "%s()\n", __func__);
1695
1696	pdd = dev->power.subsys_data->domain_data;
1697	gpd_data = to_gpd_data(pdd);
1698	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1699				   DEV_PM_QOS_RESUME_LATENCY);
1700
1701	genpd_lock(genpd);
1702
1703	if (genpd->prepared_count > 0) {
1704		ret = -EAGAIN;
1705		goto out;
1706	}
1707
1708	genpd->device_count--;
1709	if (genpd->gd)
1710		genpd->gd->max_off_time_changed = true;
1711
1712	genpd_clear_cpumask(genpd, gpd_data->cpu);
1713	dev_pm_domain_set(dev, NULL);
1714
1715	list_del_init(&pdd->list_node);
1716
1717	genpd_unlock(genpd);
1718
1719	if (genpd->detach_dev)
1720		genpd->detach_dev(genpd, dev);
1721
1722	genpd_free_dev_data(dev, gpd_data);
1723
1724	return 0;
1725
1726 out:
1727	genpd_unlock(genpd);
1728	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
1729
1730	return ret;
1731}
1732
1733/**
1734 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1735 * @dev: Device to be removed.
1736 */
1737int pm_genpd_remove_device(struct device *dev)
1738{
1739	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
1740
1741	if (!genpd)
1742		return -EINVAL;
1743
1744	return genpd_remove_device(genpd, dev);
1745}
1746EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
1747
1748/**
1749 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1750 *
1751 * @dev: Device that should be associated with the notifier
1752 * @nb: The notifier block to register
1753 *
1754 * Users may call this function to add a genpd power on/off notifier for an
1755 * attached @dev. Only one notifier per device is allowed. The notifier is
1756 * sent when genpd is powering on/off the PM domain.
1757 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
1759 * while this routine is getting called.
1760 *
1761 * Returns 0 on success and negative error values on failures.
1762 */
1763int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
1764{
1765	struct generic_pm_domain *genpd;
1766	struct generic_pm_domain_data *gpd_data;
1767	int ret;
1768
1769	genpd = dev_to_genpd_safe(dev);
1770	if (!genpd)
1771		return -ENODEV;
1772
1773	if (WARN_ON(!dev->power.subsys_data ||
1774		     !dev->power.subsys_data->domain_data))
1775		return -EINVAL;
1776
1777	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1778	if (gpd_data->power_nb)
1779		return -EEXIST;
1780
1781	genpd_lock(genpd);
1782	ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
1783	genpd_unlock(genpd);
1784
1785	if (ret) {
1786		dev_warn(dev, "failed to add notifier for PM domain %s\n",
1787			 genpd->name);
1788		return ret;
1789	}
1790
1791	gpd_data->power_nb = nb;
1792	return 0;
1793}
1794EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
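/*
 * Illustrative sketch (hypothetical consumer, my_nb is an assumption):
 * save/restore context around the PM domain being powered off/on.
 *
 *	static int my_genpd_notifier(struct notifier_block *nb,
 *				     unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case GENPD_NOTIFY_PRE_OFF:
 *			... save context ...
 *			break;
 *		case GENPD_NOTIFY_ON:
 *			... restore context ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	my_nb.notifier_call = my_genpd_notifier;
 *	ret = dev_pm_genpd_add_notifier(dev, &my_nb);
 */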
1795
1796/**
1797 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1798 *
1799 * @dev: Device that is associated with the notifier
1800 *
1801 * Users may call this function to remove a genpd power on/off notifier for an
1802 * attached @dev.
1803 *
 * It is assumed that the user guarantees that the genpd wouldn't be detached
1805 * while this routine is getting called.
1806 *
1807 * Returns 0 on success and negative error values on failures.
1808 */
1809int dev_pm_genpd_remove_notifier(struct device *dev)
1810{
1811	struct generic_pm_domain *genpd;
1812	struct generic_pm_domain_data *gpd_data;
1813	int ret;
1814
1815	genpd = dev_to_genpd_safe(dev);
1816	if (!genpd)
1817		return -ENODEV;
1818
1819	if (WARN_ON(!dev->power.subsys_data ||
1820		     !dev->power.subsys_data->domain_data))
1821		return -EINVAL;
1822
1823	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1824	if (!gpd_data->power_nb)
1825		return -ENODEV;
1826
1827	genpd_lock(genpd);
1828	ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1829					    gpd_data->power_nb);
1830	genpd_unlock(genpd);
1831
1832	if (ret) {
1833		dev_warn(dev, "failed to remove notifier for PM domain %s\n",
1834			 genpd->name);
1835		return ret;
1836	}
1837
1838	gpd_data->power_nb = NULL;
1839	return 0;
1840}
1841EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
1842
1843static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1844			       struct generic_pm_domain *subdomain)
1845{
1846	struct gpd_link *link, *itr;
1847	int ret = 0;
1848
1849	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1850	    || genpd == subdomain)
1851		return -EINVAL;
1852
1853	/*
1854	 * If the domain can be powered on/off in an IRQ safe
1855	 * context, ensure that the subdomain can also be
1856	 * powered on/off in that context.
1857	 */
1858	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
1859		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
1860				genpd->name, subdomain->name);
1861		return -EINVAL;
1862	}
1863
1864	link = kzalloc(sizeof(*link), GFP_KERNEL);
1865	if (!link)
1866		return -ENOMEM;
1867
1868	genpd_lock(subdomain);
1869	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1870
1871	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
1872		ret = -EINVAL;
1873		goto out;
1874	}
1875
1876	list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1877		if (itr->child == subdomain && itr->parent == genpd) {
1878			ret = -EINVAL;
1879			goto out;
1880		}
1881	}
1882
1883	link->parent = genpd;
1884	list_add_tail(&link->parent_node, &genpd->parent_links);
1885	link->child = subdomain;
1886	list_add_tail(&link->child_node, &subdomain->child_links);
1887	if (genpd_status_on(subdomain))
1888		genpd_sd_counter_inc(genpd);
1889
1890 out:
1891	genpd_unlock(genpd);
1892	genpd_unlock(subdomain);
1893	if (ret)
1894		kfree(link);
1895	return ret;
1896}
1897
1898/**
1899 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1900 * @genpd: Leader PM domain to add the subdomain to.
1901 * @subdomain: Subdomain to be added.
1902 */
1903int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1904			   struct generic_pm_domain *subdomain)
1905{
1906	int ret;
1907
1908	mutex_lock(&gpd_list_lock);
1909	ret = genpd_add_subdomain(genpd, subdomain);
1910	mutex_unlock(&gpd_list_lock);
1911
1912	return ret;
1913}
1914EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
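/*
 * Illustrative sketch (hypothetical provider, parent_pd and child_pd are
 * assumptions): make child_pd a subdomain of parent_pd, so that the parent
 * can only be powered off once the child is. Both domains are assumed to
 * have been initialized with pm_genpd_init().
 *
 *	ret = pm_genpd_add_subdomain(&parent_pd, &child_pd);
 *	if (ret)
 *		return ret;
 */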
1915
1916/**
1917 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1918 * @genpd: Leader PM domain to remove the subdomain from.
1919 * @subdomain: Subdomain to be removed.
1920 */
1921int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1922			      struct generic_pm_domain *subdomain)
1923{
1924	struct gpd_link *l, *link;
1925	int ret = -EINVAL;
1926
1927	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1928		return -EINVAL;
1929
1930	genpd_lock(subdomain);
1931	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
1932
1933	if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
1934		pr_warn("%s: unable to remove subdomain %s\n",
1935			genpd->name, subdomain->name);
1936		ret = -EBUSY;
1937		goto out;
1938	}
1939
1940	list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1941		if (link->child != subdomain)
1942			continue;
1943
1944		list_del(&link->parent_node);
1945		list_del(&link->child_node);
1946		kfree(link);
1947		if (genpd_status_on(subdomain))
1948			genpd_sd_counter_dec(genpd);
1949
1950		ret = 0;
1951		break;
1952	}
1953
1954out:
1955	genpd_unlock(genpd);
1956	genpd_unlock(subdomain);
1957
1958	return ret;
1959}
1960EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
1961
1962static void genpd_free_default_power_state(struct genpd_power_state *states,
1963					   unsigned int state_count)
1964{
1965	kfree(states);
1966}
1967
1968static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1969{
1970	struct genpd_power_state *state;
1971
1972	state = kzalloc(sizeof(*state), GFP_KERNEL);
1973	if (!state)
1974		return -ENOMEM;
1975
1976	genpd->states = state;
1977	genpd->state_count = 1;
1978	genpd->free_states = genpd_free_default_power_state;
1979
1980	return 0;
1981}
1982
1983static int genpd_alloc_data(struct generic_pm_domain *genpd)
1984{
1985	struct genpd_governor_data *gd = NULL;
1986	int ret;
1987
1988	if (genpd_is_cpu_domain(genpd) &&
1989	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
1990		return -ENOMEM;
1991
1992	if (genpd->gov) {
1993		gd = kzalloc(sizeof(*gd), GFP_KERNEL);
1994		if (!gd) {
1995			ret = -ENOMEM;
1996			goto free;
1997		}
1998
1999		gd->max_off_time_ns = -1;
2000		gd->max_off_time_changed = true;
2001		gd->next_wakeup = KTIME_MAX;
2002		gd->next_hrtimer = KTIME_MAX;
2003	}
2004
2005	/* Use only one "off" state if there were no states declared */
2006	if (genpd->state_count == 0) {
2007		ret = genpd_set_default_power_state(genpd);
2008		if (ret)
2009			goto free;
2010	}
2011
2012	genpd->gd = gd;
2013	return 0;
2014
2015free:
2016	if (genpd_is_cpu_domain(genpd))
2017		free_cpumask_var(genpd->cpus);
2018	kfree(gd);
2019	return ret;
2020}
2021
2022static void genpd_free_data(struct generic_pm_domain *genpd)
2023{
2024	if (genpd_is_cpu_domain(genpd))
2025		free_cpumask_var(genpd->cpus);
2026	if (genpd->free_states)
2027		genpd->free_states(genpd->states, genpd->state_count);
2028	kfree(genpd->gd);
2029}
2030
2031static void genpd_lock_init(struct generic_pm_domain *genpd)
2032{
2033	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
2034		spin_lock_init(&genpd->slock);
2035		genpd->lock_ops = &genpd_spin_ops;
2036	} else {
2037		mutex_init(&genpd->mlock);
2038		genpd->lock_ops = &genpd_mtx_ops;
2039	}
2040}
2041
2042/**
2043 * pm_genpd_init - Initialize a generic I/O PM domain object.
2044 * @genpd: PM domain object to initialize.
2045 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial power state of the domain (true means initially powered off).
2047 *
2048 * Returns 0 on successful initialization, else a negative error code.
2049 */
2050int pm_genpd_init(struct generic_pm_domain *genpd,
2051		  struct dev_power_governor *gov, bool is_off)
2052{
2053	int ret;
2054
2055	if (IS_ERR_OR_NULL(genpd))
2056		return -EINVAL;
2057
2058	INIT_LIST_HEAD(&genpd->parent_links);
2059	INIT_LIST_HEAD(&genpd->child_links);
2060	INIT_LIST_HEAD(&genpd->dev_list);
2061	RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
2062	genpd_lock_init(genpd);
2063	genpd->gov = gov;
2064	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
2065	atomic_set(&genpd->sd_count, 0);
2066	genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
2067	genpd->device_count = 0;
2068	genpd->provider = NULL;
2069	genpd->has_provider = false;
2070	genpd->accounting_time = ktime_get_mono_fast_ns();
2071	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2072	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
2073	genpd->domain.ops.prepare = genpd_prepare;
2074	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2075	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2076	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2077	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2078	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2079	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2080	genpd->domain.ops.complete = genpd_complete;
2081	genpd->domain.start = genpd_dev_pm_start;
2082
2083	if (genpd->flags & GENPD_FLAG_PM_CLK) {
2084		genpd->dev_ops.stop = pm_clk_suspend;
2085		genpd->dev_ops.start = pm_clk_resume;
2086	}
2087
2088	/* The always-on governor works better with the corresponding flag. */
2089	if (gov == &pm_domain_always_on_gov)
2090		genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2091
2092	/* Always-on domains must be powered on at initialization. */
2093	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
2094			!genpd_status_on(genpd)) {
2095		pr_err("always-on PM domain %s is not on\n", genpd->name);
2096		return -EINVAL;
2097	}
2098
2099	/* Multiple states but no governor doesn't make sense. */
2100	if (!gov && genpd->state_count > 1)
2101		pr_warn("%s: no governor for states\n", genpd->name);
2102
2103	ret = genpd_alloc_data(genpd);
2104	if (ret)
2105		return ret;
2106
2107	device_initialize(&genpd->dev);
2108	dev_set_name(&genpd->dev, "%s", genpd->name);
2109
2110	mutex_lock(&gpd_list_lock);
2111	list_add(&genpd->gpd_list_node, &gpd_list);
2112	mutex_unlock(&gpd_list_lock);
2113	genpd_debug_add(genpd);
2114
2115	return 0;
2116}
2117EXPORT_SYMBOL_GPL(pm_genpd_init);
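
/*
 * Illustrative sketch of registering a domain (hypothetical names, regmap
 * handle and register bits; error handling trimmed):
 *
 *	static int soc_pd_power_on(struct generic_pm_domain *pd)
 *	{
 *		return regmap_update_bits(map, PWR_CTRL, PWR_ON, PWR_ON);
 *	}
 *
 *	static int soc_pd_power_off(struct generic_pm_domain *pd)
 *	{
 *		return regmap_update_bits(map, PWR_CTRL, PWR_ON, 0);
 *	}
 *
 *	static struct generic_pm_domain soc_pd = {
 *		.name = "soc",
 *		.power_on = soc_pd_power_on,
 *		.power_off = soc_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&soc_pd, &simple_qos_governor, false);
 *
 * A NULL governor is fine for domains with a single idle state; "false"
 * registers the domain as initially powered on.
 */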
2118
2119static int genpd_remove(struct generic_pm_domain *genpd)
2120{
2121	struct gpd_link *l, *link;
2122
2123	if (IS_ERR_OR_NULL(genpd))
2124		return -EINVAL;
2125
2126	genpd_lock(genpd);
2127
2128	if (genpd->has_provider) {
2129		genpd_unlock(genpd);
2130		pr_err("Provider present, unable to remove %s\n", genpd->name);
2131		return -EBUSY;
2132	}
2133
2134	if (!list_empty(&genpd->parent_links) || genpd->device_count) {
2135		genpd_unlock(genpd);
2136		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2137		return -EBUSY;
2138	}
2139
2140	list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2141		list_del(&link->parent_node);
2142		list_del(&link->child_node);
2143		kfree(link);
2144	}
2145
2146	list_del(&genpd->gpd_list_node);
2147	genpd_unlock(genpd);
2148	genpd_debug_remove(genpd);
2149	cancel_work_sync(&genpd->power_off_work);
2150	genpd_free_data(genpd);
2151
2152	pr_debug("%s: removed %s\n", __func__, genpd->name);
2153
2154	return 0;
2155}
2156
2157/**
2158 * pm_genpd_remove - Remove a generic I/O PM domain
2159 * @genpd: Pointer to PM domain that is to be removed.
2160 *
2161 * To remove the PM domain, this function:
2162 *  - Removes the PM domain as a subdomain to any parent domains,
2163 *    if it was added.
2164 *  - Removes the PM domain from the list of registered PM domains.
2165 *
 * The PM domain will only be removed if the associated provider has been
 * removed, it is not a parent to any other PM domain, and it has no
 * devices associated with it.
2169 */
2170int pm_genpd_remove(struct generic_pm_domain *genpd)
2171{
2172	int ret;
2173
2174	mutex_lock(&gpd_list_lock);
2175	ret = genpd_remove(genpd);
2176	mutex_unlock(&gpd_list_lock);
2177
2178	return ret;
2179}
2180EXPORT_SYMBOL_GPL(pm_genpd_remove);
2181
2182#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
2183
2184/*
2185 * Device Tree based PM domain providers.
2186 *
2187 * The code below implements generic device tree based PM domain providers that
2188 * bind device tree nodes with generic PM domains registered in the system.
2189 *
2190 * Any driver that registers generic PM domains and needs to support binding of
2191 * devices to these domains is supposed to register a PM domain provider, which
2192 * maps a PM domain specifier retrieved from the device tree to a PM domain.
2193 *
2194 * Two simple mapping functions have been provided for convenience:
2195 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2196 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
2197 *    index.
2198 */
2199
2200/**
2201 * struct of_genpd_provider - PM domain provider registration structure
2202 * @link: Entry in global list of PM domain providers
2203 * @node: Pointer to device tree node of PM domain provider
2204 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2205 *         into a PM domain.
2206 * @data: context pointer to be passed into @xlate callback
2207 */
2208struct of_genpd_provider {
2209	struct list_head link;
2210	struct device_node *node;
2211	genpd_xlate_t xlate;
2212	void *data;
2213};
2214
2215/* List of registered PM domain providers. */
2216static LIST_HEAD(of_genpd_providers);
2217/* Mutex to protect the list above. */
2218static DEFINE_MUTEX(of_genpd_mutex);
2219
2220/**
2221 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
2222 * @genpdspec: OF phandle args to map into a PM domain
2223 * @data: xlate function private data - pointer to struct generic_pm_domain
2224 *
2225 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of the xlate function
 * needs to be a valid pointer to a struct generic_pm_domain.
2228 */
2229static struct generic_pm_domain *genpd_xlate_simple(
2230					struct of_phandle_args *genpdspec,
2231					void *data)
2232{
2233	return data;
2234}
2235
2236/**
2237 * genpd_xlate_onecell() - Xlate function using a single index.
2238 * @genpdspec: OF phandle args to map into a PM domain
2239 * @data: xlate function private data - pointer to struct genpd_onecell_data
2240 *
2241 * This is a generic xlate function that can be used to model simple PM domain
2242 * controllers that have one device tree node and provide multiple PM domains.
2243 * A single cell is used as an index into an array of PM domains specified in
2244 * the genpd_onecell_data struct when registering the provider.
2245 */
2246static struct generic_pm_domain *genpd_xlate_onecell(
2247					struct of_phandle_args *genpdspec,
2248					void *data)
2249{
2250	struct genpd_onecell_data *genpd_data = data;
2251	unsigned int idx = genpdspec->args[0];
2252
2253	if (genpdspec->args_count != 1)
2254		return ERR_PTR(-EINVAL);
2255
2256	if (idx >= genpd_data->num_domains) {
2257		pr_err("%s: invalid domain index %u\n", __func__, idx);
2258		return ERR_PTR(-EINVAL);
2259	}
2260
2261	if (!genpd_data->domains[idx])
2262		return ERR_PTR(-ENOENT);
2263
2264	return genpd_data->domains[idx];
2265}
2266
2267/**
2268 * genpd_add_provider() - Register a PM domain provider for a node
2269 * @np: Device node pointer associated with the PM domain provider.
2270 * @xlate: Callback for decoding PM domain from phandle arguments.
2271 * @data: Context pointer for @xlate callback.
2272 */
2273static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2274			      void *data)
2275{
2276	struct of_genpd_provider *cp;
2277
2278	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2279	if (!cp)
2280		return -ENOMEM;
2281
2282	cp->node = of_node_get(np);
2283	cp->data = data;
2284	cp->xlate = xlate;
2285	fwnode_dev_initialized(&np->fwnode, true);
2286
2287	mutex_lock(&of_genpd_mutex);
2288	list_add(&cp->link, &of_genpd_providers);
2289	mutex_unlock(&of_genpd_mutex);
2290	pr_debug("Added domain provider from %pOF\n", np);
2291
2292	return 0;
2293}
2294
2295static bool genpd_present(const struct generic_pm_domain *genpd)
2296{
2297	bool ret = false;
2298	const struct generic_pm_domain *gpd;
2299
2300	mutex_lock(&gpd_list_lock);
2301	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2302		if (gpd == genpd) {
2303			ret = true;
2304			break;
2305		}
2306	}
2307	mutex_unlock(&gpd_list_lock);
2308
2309	return ret;
2310}
2311
2312/**
2313 * of_genpd_add_provider_simple() - Register a simple PM domain provider
2314 * @np: Device node pointer associated with the PM domain provider.
2315 * @genpd: Pointer to PM domain associated with the PM domain provider.
2316 */
2317int of_genpd_add_provider_simple(struct device_node *np,
2318				 struct generic_pm_domain *genpd)
2319{
2320	int ret;
2321
2322	if (!np || !genpd)
2323		return -EINVAL;
2324
2325	if (!genpd_present(genpd))
2326		return -EINVAL;
2327
2328	genpd->dev.of_node = np;
2329
2330	/* Parse genpd OPP table */
2331	if (genpd->set_performance_state) {
2332		ret = dev_pm_opp_of_add_table(&genpd->dev);
2333		if (ret)
2334			return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
2335
2336		/*
2337		 * Save table for faster processing while setting performance
2338		 * state.
2339		 */
2340		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2341		WARN_ON(IS_ERR(genpd->opp_table));
2342	}
2343
2344	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2345	if (ret) {
2346		if (genpd->set_performance_state) {
2347			dev_pm_opp_put_opp_table(genpd->opp_table);
2348			dev_pm_opp_of_remove_table(&genpd->dev);
2349		}
2350
2351		return ret;
2352	}
2353
2354	genpd->provider = &np->fwnode;
2355	genpd->has_provider = true;
2356
2357	return 0;
2358}
2359EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
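
/*
 * Illustrative provider sketch for the 1:1 case (hypothetical names): the
 * provider node is expected to have #power-domain-cells = 0.
 *
 *	ret = pm_genpd_init(&soc_pd, NULL, false);
 *	if (ret)
 *		return ret;
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &soc_pd);
 *	if (ret)
 *		pm_genpd_remove(&soc_pd);
 */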
2360
2361/**
2362 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2363 * @np: Device node pointer associated with the PM domain provider.
2364 * @data: Pointer to the data associated with the PM domain provider.
2365 */
2366int of_genpd_add_provider_onecell(struct device_node *np,
2367				  struct genpd_onecell_data *data)
2368{
2369	struct generic_pm_domain *genpd;
2370	unsigned int i;
2371	int ret = -EINVAL;
2372
2373	if (!np || !data)
2374		return -EINVAL;
2375
2376	if (!data->xlate)
2377		data->xlate = genpd_xlate_onecell;
2378
2379	for (i = 0; i < data->num_domains; i++) {
2380		genpd = data->domains[i];
2381
2382		if (!genpd)
2383			continue;
2384		if (!genpd_present(genpd))
2385			goto error;
2386
2387		genpd->dev.of_node = np;
2388
2389		/* Parse genpd OPP table */
2390		if (genpd->set_performance_state) {
2391			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2392			if (ret) {
2393				dev_err_probe(&genpd->dev, ret,
2394					      "Failed to add OPP table for index %d\n", i);
2395				goto error;
2396			}
2397
2398			/*
2399			 * Save table for faster processing while setting
2400			 * performance state.
2401			 */
2402			genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
2403			WARN_ON(IS_ERR(genpd->opp_table));
2404		}
2405
2406		genpd->provider = &np->fwnode;
2407		genpd->has_provider = true;
2408	}
2409
2410	ret = genpd_add_provider(np, data->xlate, data);
2411	if (ret < 0)
2412		goto error;
2413
2414	return 0;
2415
2416error:
2417	while (i--) {
2418		genpd = data->domains[i];
2419
2420		if (!genpd)
2421			continue;
2422
2423		genpd->provider = NULL;
2424		genpd->has_provider = false;
2425
2426		if (genpd->set_performance_state) {
2427			dev_pm_opp_put_opp_table(genpd->opp_table);
2428			dev_pm_opp_of_remove_table(&genpd->dev);
2429		}
2430	}
2431
2432	return ret;
2433}
2434EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
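
/*
 * Illustrative onecell provider sketch (hypothetical names and indices):
 * the provider node is expected to have #power-domain-cells = 1, with the
 * cell used as an index into .domains.
 *
 *	static struct generic_pm_domain *soc_domains[] = {
 *		[SOC_PD_CPU] = &cpu_pd,
 *		[SOC_PD_GPU] = &gpu_pd,
 *	};
 *	static struct genpd_onecell_data soc_pd_data = {
 *		.domains = soc_domains,
 *		.num_domains = ARRAY_SIZE(soc_domains),
 *	};
 *
 *	ret = of_genpd_add_provider_onecell(np, &soc_pd_data);
 *
 * Leaving .xlate as NULL selects genpd_xlate_onecell() above.
 */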
2435
2436/**
2437 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2438 * @np: Device node pointer associated with the PM domain provider
2439 */
2440void of_genpd_del_provider(struct device_node *np)
2441{
2442	struct of_genpd_provider *cp, *tmp;
2443	struct generic_pm_domain *gpd;
2444
2445	mutex_lock(&gpd_list_lock);
2446	mutex_lock(&of_genpd_mutex);
2447	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
2448		if (cp->node == np) {
2449			/*
2450			 * For each PM domain associated with the
2451			 * provider, set the 'has_provider' to false
2452			 * so that the PM domain can be safely removed.
2453			 */
2454			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2455				if (gpd->provider == &np->fwnode) {
2456					gpd->has_provider = false;
2457
2458					if (!gpd->set_performance_state)
2459						continue;
2460
2461					dev_pm_opp_put_opp_table(gpd->opp_table);
2462					dev_pm_opp_of_remove_table(&gpd->dev);
2463				}
2464			}
2465
2466			fwnode_dev_initialized(&cp->node->fwnode, false);
2467			list_del(&cp->link);
2468			of_node_put(cp->node);
2469			kfree(cp);
2470			break;
2471		}
2472	}
2473	mutex_unlock(&of_genpd_mutex);
2474	mutex_unlock(&gpd_list_lock);
2475}
2476EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2477
2478/**
 * genpd_get_from_provider() - Look up a PM domain
2480 * @genpdspec: OF phandle args to use for look-up
2481 *
2482 * Looks for a PM domain provider under the node specified by @genpdspec and if
2483 * found, uses xlate function of the provider to map phandle args to a PM
2484 * domain.
2485 *
2486 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2487 * on failure.
2488 */
2489static struct generic_pm_domain *genpd_get_from_provider(
2490					struct of_phandle_args *genpdspec)
2491{
2492	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2493	struct of_genpd_provider *provider;
2494
2495	if (!genpdspec)
2496		return ERR_PTR(-EINVAL);
2497
2498	mutex_lock(&of_genpd_mutex);
2499
	/* Check if we have such a provider in our list */
2501	list_for_each_entry(provider, &of_genpd_providers, link) {
2502		if (provider->node == genpdspec->np)
2503			genpd = provider->xlate(genpdspec, provider->data);
2504		if (!IS_ERR(genpd))
2505			break;
2506	}
2507
2508	mutex_unlock(&of_genpd_mutex);
2509
2510	return genpd;
2511}
2512
2513/**
2514 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for looking up the PM domain
2516 * @dev: Device to be added.
2517 *
 * Looks up an I/O PM domain based upon the phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
2520 */
2521int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2522{
2523	struct generic_pm_domain *genpd;
2524	int ret;
2525
2526	if (!dev)
2527		return -EINVAL;
2528
2529	mutex_lock(&gpd_list_lock);
2530
2531	genpd = genpd_get_from_provider(genpdspec);
2532	if (IS_ERR(genpd)) {
2533		ret = PTR_ERR(genpd);
2534		goto out;
2535	}
2536
2537	ret = genpd_add_device(genpd, dev, dev);
2538
2539out:
2540	mutex_unlock(&gpd_list_lock);
2541
2542	return ret;
2543}
2544EXPORT_SYMBOL_GPL(of_genpd_add_device);
2545
2546/**
2547 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2548 * @parent_spec: OF phandle args to use for parent PM domain look-up
2549 * @subdomain_spec: OF phandle args to use for subdomain look-up
2550 *
 * Looks up a parent PM domain and subdomain based upon the phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
2554 */
2555int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2556			   struct of_phandle_args *subdomain_spec)
2557{
2558	struct generic_pm_domain *parent, *subdomain;
2559	int ret;
2560
2561	mutex_lock(&gpd_list_lock);
2562
2563	parent = genpd_get_from_provider(parent_spec);
2564	if (IS_ERR(parent)) {
2565		ret = PTR_ERR(parent);
2566		goto out;
2567	}
2568
2569	subdomain = genpd_get_from_provider(subdomain_spec);
2570	if (IS_ERR(subdomain)) {
2571		ret = PTR_ERR(subdomain);
2572		goto out;
2573	}
2574
2575	ret = genpd_add_subdomain(parent, subdomain);
2576
2577out:
2578	mutex_unlock(&gpd_list_lock);
2579
2580	return ret == -ENOENT ? -EPROBE_DEFER : ret;
2581}
2582EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
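
/*
 * Illustrative sketch (hypothetical parent_np/child_np, both providers
 * registered with #power-domain-cells = 0):
 *
 *	struct of_phandle_args parent, child;
 *
 *	parent.np = parent_np;
 *	parent.args_count = 0;
 *	child.np = child_np;
 *	child.args_count = 0;
 *
 *	ret = of_genpd_add_subdomain(&parent, &child);
 *
 * A return value of -EPROBE_DEFER indicates that one of the providers has
 * not been registered yet, so the caller can retry later.
 */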
2583
2584/**
2585 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2586 * @parent_spec: OF phandle args to use for parent PM domain look-up
2587 * @subdomain_spec: OF phandle args to use for subdomain look-up
2588 *
 * Looks up a parent PM domain and subdomain based upon the phandle args
 * provided and removes the subdomain from the parent PM domain. Returns a
 * negative error code on failure.
2592 */
2593int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2594			      struct of_phandle_args *subdomain_spec)
2595{
2596	struct generic_pm_domain *parent, *subdomain;
2597	int ret;
2598
2599	mutex_lock(&gpd_list_lock);
2600
2601	parent = genpd_get_from_provider(parent_spec);
2602	if (IS_ERR(parent)) {
2603		ret = PTR_ERR(parent);
2604		goto out;
2605	}
2606
2607	subdomain = genpd_get_from_provider(subdomain_spec);
2608	if (IS_ERR(subdomain)) {
2609		ret = PTR_ERR(subdomain);
2610		goto out;
2611	}
2612
2613	ret = pm_genpd_remove_subdomain(parent, subdomain);
2614
2615out:
2616	mutex_unlock(&gpd_list_lock);
2617
2618	return ret;
2619}
2620EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2621
2622/**
2623 * of_genpd_remove_last - Remove the last PM domain registered for a provider
2624 * @np: Pointer to device node associated with provider
2625 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the device node @np that is passed in. The PM domain
 * will only be removed if the provider associated with the domain has
 * been removed.
2631 *
2632 * Returns a valid pointer to struct generic_pm_domain on success or
2633 * ERR_PTR() on failure.
2634 */
2635struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2636{
2637	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
2638	int ret;
2639
2640	if (IS_ERR_OR_NULL(np))
2641		return ERR_PTR(-EINVAL);
2642
2643	mutex_lock(&gpd_list_lock);
2644	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
2645		if (gpd->provider == &np->fwnode) {
2646			ret = genpd_remove(gpd);
2647			genpd = ret ? ERR_PTR(ret) : gpd;
2648			break;
2649		}
2650	}
2651	mutex_unlock(&gpd_list_lock);
2652
2653	return genpd;
2654}
2655EXPORT_SYMBOL_GPL(of_genpd_remove_last);
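
/*
 * Illustrative teardown sketch for a provider driver's .remove() path
 * (hypothetical names). The provider must be deleted first, since
 * genpd_remove() refuses to remove a domain that still has a provider:
 *
 *	of_genpd_del_provider(np);
 *
 *	pd = of_genpd_remove_last(np);
 *	while (!IS_ERR(pd))
 *		pd = of_genpd_remove_last(np);
 */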
2656
2657static void genpd_release_dev(struct device *dev)
2658{
2659	of_node_put(dev->of_node);
2660	kfree(dev);
2661}
2662
2663static struct bus_type genpd_bus_type = {
2664	.name		= "genpd",
2665};
2666
2667/**
2668 * genpd_dev_pm_detach - Detach a device from its PM domain.
2669 * @dev: Device to detach.
2670 * @power_off: Currently not used
2671 *
2672 * Try to locate a corresponding generic PM domain, which the device was
2673 * attached to previously. If such is found, the device is detached from it.
2674 */
2675static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2676{
2677	struct generic_pm_domain *pd;
2678	unsigned int i;
2679	int ret = 0;
2680
2681	pd = dev_to_genpd(dev);
2682	if (IS_ERR(pd))
2683		return;
2684
2685	dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2686
2687	/* Drop the default performance state */
2688	if (dev_gpd_data(dev)->default_pstate) {
2689		dev_pm_genpd_set_performance_state(dev, 0);
2690		dev_gpd_data(dev)->default_pstate = 0;
2691	}
2692
2693	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2694		ret = genpd_remove_device(pd, dev);
2695		if (ret != -EAGAIN)
2696			break;
2697
2698		mdelay(i);
2699		cond_resched();
2700	}
2701
2702	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d\n",
2704			pd->name, ret);
2705		return;
2706	}
2707
2708	/* Check if PM domain can be powered off after removing this device. */
2709	genpd_queue_power_off_work(pd);
2710
2711	/* Unregister the device if it was created by genpd. */
2712	if (dev->bus == &genpd_bus_type)
2713		device_unregister(dev);
2714}
2715
2716static void genpd_dev_pm_sync(struct device *dev)
2717{
2718	struct generic_pm_domain *pd;
2719
2720	pd = dev_to_genpd(dev);
2721	if (IS_ERR(pd))
2722		return;
2723
2724	genpd_queue_power_off_work(pd);
2725}
2726
2727static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2728				 unsigned int index, bool power_on)
2729{
2730	struct of_phandle_args pd_args;
2731	struct generic_pm_domain *pd;
2732	int pstate;
2733	int ret;
2734
2735	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2736				"#power-domain-cells", index, &pd_args);
2737	if (ret < 0)
2738		return ret;
2739
2740	mutex_lock(&gpd_list_lock);
2741	pd = genpd_get_from_provider(&pd_args);
2742	of_node_put(pd_args.np);
2743	if (IS_ERR(pd)) {
2744		mutex_unlock(&gpd_list_lock);
2745		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2746			__func__, PTR_ERR(pd));
2747		return driver_deferred_probe_check_state(base_dev);
2748	}
2749
2750	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2751
2752	ret = genpd_add_device(pd, dev, base_dev);
2753	mutex_unlock(&gpd_list_lock);
2754
2755	if (ret < 0)
2756		return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
2757
2758	dev->pm_domain->detach = genpd_dev_pm_detach;
2759	dev->pm_domain->sync = genpd_dev_pm_sync;
2760
2761	/* Set the default performance state */
2762	pstate = of_get_required_opp_performance_state(dev->of_node, index);
2763	if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
2764		ret = pstate;
2765		goto err;
2766	} else if (pstate > 0) {
2767		ret = dev_pm_genpd_set_performance_state(dev, pstate);
2768		if (ret)
2769			goto err;
2770		dev_gpd_data(dev)->default_pstate = pstate;
2771	}
2772
2773	if (power_on) {
2774		genpd_lock(pd);
2775		ret = genpd_power_on(pd, 0);
2776		genpd_unlock(pd);
2777	}
2778
2779	if (ret) {
2780		/* Drop the default performance state */
2781		if (dev_gpd_data(dev)->default_pstate) {
2782			dev_pm_genpd_set_performance_state(dev, 0);
2783			dev_gpd_data(dev)->default_pstate = 0;
2784		}
2785
2786		genpd_remove_device(pd, dev);
2787		return -EPROBE_DEFER;
2788	}
2789
2790	return 1;
2791
2792err:
2793	dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
2794		pd->name, ret);
2795	genpd_remove_device(pd, dev);
2796	return ret;
2797}
2798
2799/**
2800 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2801 * @dev: Device to attach.
2802 *
 * Parse the device's OF node to find a PM domain specifier. If one is found,
 * attaches the device to the retrieved pm_domain ops.
2805 *
 * Returns 1 when a PM domain has successfully been attached, 0 when the device
 * doesn't need a PM domain or when multiple power-domains exist for it, else a
 * negative error code. Note that if a power-domain exists for the device, but
 * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
 * that the device is not probed and to retry later.
2811 */
2812int genpd_dev_pm_attach(struct device *dev)
2813{
2814	if (!dev->of_node)
2815		return 0;
2816
2817	/*
2818	 * Devices with multiple PM domains must be attached separately, as we
2819	 * can only attach one PM domain per device.
2820	 */
2821	if (of_count_phandle_with_args(dev->of_node, "power-domains",
2822				       "#power-domain-cells") != 1)
2823		return 0;
2824
2825	return __genpd_dev_pm_attach(dev, dev, 0, true);
2826}
2827EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
2828
2829/**
2830 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2831 * @dev: The device used to lookup the PM domain.
2832 * @index: The index of the PM domain.
2833 *
 * Parse the device's OF node to find a PM domain specifier at the provided
 * @index. If one is found, create a virtual device and attach it to the
 * retrieved pm_domain ops. To deal with detaching of the virtual device, the
 * ->detach() callback in the struct dev_pm_domain is assigned to
 * genpd_dev_pm_detach().
 *
 * Returns the created virtual device when a PM domain has successfully been
 * attached, NULL when the device doesn't need a PM domain, else an ERR_PTR()
 * in case of failures. If a power-domain exists for the device, but it cannot
 * be found or turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure
 * that the device is not probed and to retry later.
2844 */
2845struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2846					 unsigned int index)
2847{
2848	struct device *virt_dev;
2849	int num_domains;
2850	int ret;
2851
2852	if (!dev->of_node)
2853		return NULL;
2854
2855	/* Verify that the index is within a valid range. */
2856	num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2857						 "#power-domain-cells");
	if (num_domains < 0 || index >= num_domains)
2859		return NULL;
2860
2861	/* Allocate and register device on the genpd bus. */
2862	virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2863	if (!virt_dev)
2864		return ERR_PTR(-ENOMEM);
2865
2866	dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2867	virt_dev->bus = &genpd_bus_type;
2868	virt_dev->release = genpd_release_dev;
2869	virt_dev->of_node = of_node_get(dev->of_node);
2870
2871	ret = device_register(virt_dev);
2872	if (ret) {
2873		put_device(virt_dev);
2874		return ERR_PTR(ret);
2875	}
2876
2877	/* Try to attach the device to the PM domain at the specified index. */
2878	ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
2879	if (ret < 1) {
2880		device_unregister(virt_dev);
2881		return ret ? ERR_PTR(ret) : NULL;
2882	}
2883
2884	pm_runtime_enable(virt_dev);
2885	genpd_queue_power_off_work(dev_to_genpd(virt_dev));
2886
2887	return virt_dev;
2888}
2889EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
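
/*
 * Illustrative consumer sketch: a driver with multiple power-domains
 * typically reaches this helper via dev_pm_domain_attach_by_id() or
 * dev_pm_domain_attach_by_name() and ties the returned virtual device to
 * itself with a device link (the "perf" name is hypothetical):
 *
 *	pd_dev = dev_pm_domain_attach_by_name(dev, "perf");
 *	if (IS_ERR_OR_NULL(pd_dev))
 *		return PTR_ERR(pd_dev) ? : -ENODATA;
 *
 *	link = device_link_add(dev, pd_dev,
 *			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 */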
2890
2891/**
2892 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2893 * @dev: The device used to lookup the PM domain.
2894 * @name: The name of the PM domain.
2895 *
2896 * Parse device's OF node to find a PM domain specifier using the
2897 * power-domain-names DT property. For further description see
2898 * genpd_dev_pm_attach_by_id().
2899 */
2900struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
2901{
2902	int index;
2903
2904	if (!dev->of_node)
2905		return NULL;
2906
2907	index = of_property_match_string(dev->of_node, "power-domain-names",
2908					 name);
2909	if (index < 0)
2910		return NULL;
2911
2912	return genpd_dev_pm_attach_by_id(dev, index);
2913}
2914
2915static const struct of_device_id idle_state_match[] = {
2916	{ .compatible = "domain-idle-state", },
2917	{ }
2918};
2919
2920static int genpd_parse_state(struct genpd_power_state *genpd_state,
2921				    struct device_node *state_node)
2922{
2923	int err;
2924	u32 residency;
2925	u32 entry_latency, exit_latency;
2926
2927	err = of_property_read_u32(state_node, "entry-latency-us",
2928						&entry_latency);
2929	if (err) {
2930		pr_debug(" * %pOF missing entry-latency-us property\n",
2931			 state_node);
2932		return -EINVAL;
2933	}
2934
2935	err = of_property_read_u32(state_node, "exit-latency-us",
2936						&exit_latency);
2937	if (err) {
2938		pr_debug(" * %pOF missing exit-latency-us property\n",
2939			 state_node);
2940		return -EINVAL;
2941	}
2942
2943	err = of_property_read_u32(state_node, "min-residency-us", &residency);
2944	if (!err)
2945		genpd_state->residency_ns = 1000LL * residency;
2946
2947	genpd_state->power_on_latency_ns = 1000LL * exit_latency;
2948	genpd_state->power_off_latency_ns = 1000LL * entry_latency;
2949	genpd_state->fwnode = &state_node->fwnode;
2950
2951	return 0;
2952}
2953
2954static int genpd_iterate_idle_states(struct device_node *dn,
2955				     struct genpd_power_state *states)
2956{
2957	int ret;
2958	struct of_phandle_iterator it;
2959	struct device_node *np;
2960	int i = 0;
2961
2962	ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2963	if (ret <= 0)
2964		return ret == -ENOENT ? 0 : ret;
2965
	/* Loop over the phandles until all the requested entries are found */
2967	of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2968		np = it.node;
2969		if (!of_match_node(idle_state_match, np))
2970			continue;
2971
2972		if (!of_device_is_available(np))
2973			continue;
2974
2975		if (states) {
2976			ret = genpd_parse_state(&states[i], np);
2977			if (ret) {
2978				pr_err("Parsing idle state node %pOF failed with err %d\n",
2979				       np, ret);
2980				of_node_put(np);
2981				return ret;
2982			}
2983		}
2984		i++;
2985	}
2986
2987	return i;
2988}
2989
2990/**
 * of_genpd_parse_idle_states - Return an array of idle states for the genpd.
2992 *
2993 * @dn: The genpd device node
2994 * @states: The pointer to which the state array will be saved.
2995 * @n: The count of elements in the array returned from this function.
2996 *
 * Returns the domain idle states parsed from the OF node. The memory for the
 * states is allocated by this function, and it is the caller's responsibility
 * to free it after use. If zero or more compatible domain idle states are
 * found, it returns 0; in case of errors, a negative error code is returned.
3001 */
3002int of_genpd_parse_idle_states(struct device_node *dn,
3003			struct genpd_power_state **states, int *n)
3004{
3005	struct genpd_power_state *st;
3006	int ret;
3007
3008	ret = genpd_iterate_idle_states(dn, NULL);
3009	if (ret < 0)
3010		return ret;
3011
3012	if (!ret) {
3013		*states = NULL;
3014		*n = 0;
3015		return 0;
3016	}
3017
3018	st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
3019	if (!st)
3020		return -ENOMEM;
3021
3022	ret = genpd_iterate_idle_states(dn, st);
3023	if (ret <= 0) {
3024		kfree(st);
3025		return ret < 0 ? ret : -EINVAL;
3026	}
3027
3028	*states = st;
3029	*n = ret;
3030
3031	return 0;
3032}
3033EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
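
/*
 * Illustrative provider sketch (hypothetical names): parse the idle states
 * from DT before registering the domain.
 *
 *	struct genpd_power_state *states;
 *	int nr_states;
 *
 *	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *	if (ret)
 *		return ret;
 *
 *	soc_pd.states = states;
 *	soc_pd.state_count = nr_states;
 *	soc_pd.free_states = soc_pd_free_states;
 *
 *	ret = pm_genpd_init(&soc_pd, &simple_qos_governor, false);
 *
 * Assigning a free_states callback (here a hypothetical helper that
 * kfree()s the array) lets genpd_free_data() release the memory when the
 * domain is removed.
 */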
3034
3035/**
3036 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
3037 *
3038 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
3039 * @opp: struct dev_pm_opp of the OPP for which we need to find performance
3040 *	state.
3041 *
 * Returns the performance state encoded in the OPP of the genpd. This calls
 * the platform-specific genpd->opp_to_performance_state() callback to
 * translate a power domain OPP to a performance state.
 *
 * Returns the performance state on success and 0 on failure.
3047 */
3048unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
3049					       struct dev_pm_opp *opp)
3050{
3051	struct generic_pm_domain *genpd = NULL;
3052	int state;
3053
3054	genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
3055
3056	if (unlikely(!genpd->opp_to_performance_state))
3057		return 0;
3058
3059	genpd_lock(genpd);
3060	state = genpd->opp_to_performance_state(genpd, opp);
3061	genpd_unlock(genpd);
3062
3063	return state;
3064}
3065EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
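
/*
 * Illustrative sketch of the callback this helper relies on: providers
 * commonly map an OPP to its "opp-level" value (hypothetical name shown,
 * minimal version):
 *
 *	static unsigned int soc_opp_to_performance_state(
 *				struct generic_pm_domain *genpd,
 *				struct dev_pm_opp *opp)
 *	{
 *		return dev_pm_opp_get_level(opp);
 *	}
 *
 *	soc_pd.opp_to_performance_state = soc_opp_to_performance_state;
 */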
3066
3067static int __init genpd_bus_init(void)
3068{
3069	return bus_register(&genpd_bus_type);
3070}
3071core_initcall(genpd_bus_init);
3072
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */

/***        debugfs support        ***/
3077
3078#ifdef CONFIG_DEBUG_FS
3079/*
3080 * TODO: This function is a slightly modified version of rtpm_status_show
3081 * from sysfs.c, so generalize it.
3082 */
3083static void rtpm_status_str(struct seq_file *s, struct device *dev)
3084{
3085	static const char * const status_lookup[] = {
3086		[RPM_ACTIVE] = "active",
3087		[RPM_RESUMING] = "resuming",
3088		[RPM_SUSPENDED] = "suspended",
3089		[RPM_SUSPENDING] = "suspending"
3090	};
3091	const char *p = "";
3092
3093	if (dev->power.runtime_error)
3094		p = "error";
3095	else if (dev->power.disable_depth)
3096		p = "unsupported";
3097	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3098		p = status_lookup[dev->power.runtime_status];
3099	else
3100		WARN_ON(1);
3101
3102	seq_printf(s, "%-25s  ", p);
3103}
3104
3105static void perf_status_str(struct seq_file *s, struct device *dev)
3106{
3107	struct generic_pm_domain_data *gpd_data;
3108
3109	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3110	seq_put_decimal_ull(s, "", gpd_data->performance_state);
3111}
3112
3113static int genpd_summary_one(struct seq_file *s,
3114			struct generic_pm_domain *genpd)
3115{
3116	static const char * const status_lookup[] = {
3117		[GENPD_STATE_ON] = "on",
3118		[GENPD_STATE_OFF] = "off"
3119	};
3120	struct pm_domain_data *pm_data;
3121	const char *kobj_path;
3122	struct gpd_link *link;
3123	char state[16];
3124	int ret;
3125
3126	ret = genpd_lock_interruptible(genpd);
3127	if (ret)
3128		return -ERESTARTSYS;
3129
3130	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
3131		goto exit;
3132	if (!genpd_status_on(genpd))
3133		snprintf(state, sizeof(state), "%s-%u",
3134			 status_lookup[genpd->status], genpd->state_idx);
3135	else
3136		snprintf(state, sizeof(state), "%s",
3137			 status_lookup[genpd->status]);
3138	seq_printf(s, "%-30s  %-50s %u", genpd->name, state, genpd->performance_state);
3139
3140	/*
3141	 * Modifications on the list require holding locks on both
3142	 * parent and child, so we are safe.
3143	 * Also genpd->name is immutable.
3144	 */
3145	list_for_each_entry(link, &genpd->parent_links, parent_node) {
3146		if (list_is_first(&link->parent_node, &genpd->parent_links))
3147			seq_printf(s, "\n%48s", " ");
3148		seq_printf(s, "%s", link->child->name);
3149		if (!list_is_last(&link->parent_node, &genpd->parent_links))
3150			seq_puts(s, ", ");
3151	}
3152
3153	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3154		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3155				genpd_is_irq_safe(genpd) ?
3156				GFP_ATOMIC : GFP_KERNEL);
3157		if (kobj_path == NULL)
3158			continue;
3159
3160		seq_printf(s, "\n    %-50s  ", kobj_path);
3161		rtpm_status_str(s, pm_data->dev);
3162		perf_status_str(s, pm_data->dev);
3163		kfree(kobj_path);
3164	}
3165
3166	seq_puts(s, "\n");
3167exit:
3168	genpd_unlock(genpd);
3169
3170	return 0;
3171}
3172
3173static int summary_show(struct seq_file *s, void *data)
3174{
3175	struct generic_pm_domain *genpd;
3176	int ret = 0;
3177
3178	seq_puts(s, "domain                          status          children                           performance\n");
3179	seq_puts(s, "    /device                                             runtime status\n");
3180	seq_puts(s, "----------------------------------------------------------------------------------------------\n");
3181
3182	ret = mutex_lock_interruptible(&gpd_list_lock);
3183	if (ret)
3184		return -ERESTARTSYS;
3185
3186	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
3187		ret = genpd_summary_one(s, genpd);
3188		if (ret)
3189			break;
3190	}
3191	mutex_unlock(&gpd_list_lock);
3192
3193	return ret;
3194}
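
/*
 * Illustrative pm_genpd_summary output (values hypothetical), matching the
 * format strings used by genpd_summary_one() above:
 *
 *	domain                          status          children                           performance
 *	    /device                                             runtime status
 *	----------------------------------------------------------------------------------------------
 *	gpu                             off-0                                              0
 *	    /devices/platform/1c40000.gpu                       suspended                  0
 */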
3195
3196static int status_show(struct seq_file *s, void *data)
3197{
3198	static const char * const status_lookup[] = {
3199		[GENPD_STATE_ON] = "on",
3200		[GENPD_STATE_OFF] = "off"
3201	};
3202
3203	struct generic_pm_domain *genpd = s->private;
3204	int ret = 0;
3205
3206	ret = genpd_lock_interruptible(genpd);
3207	if (ret)
3208		return -ERESTARTSYS;
3209
3210	if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3211		goto exit;
3212
3213	if (genpd->status == GENPD_STATE_OFF)
3214		seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3215			genpd->state_idx);
3216	else
3217		seq_printf(s, "%s\n", status_lookup[genpd->status]);
3218exit:
3219	genpd_unlock(genpd);
3220	return ret;
3221}
3222
3223static int sub_domains_show(struct seq_file *s, void *data)
3224{
3225	struct generic_pm_domain *genpd = s->private;
3226	struct gpd_link *link;
3227	int ret = 0;
3228
3229	ret = genpd_lock_interruptible(genpd);
3230	if (ret)
3231		return -ERESTARTSYS;
3232
3233	list_for_each_entry(link, &genpd->parent_links, parent_node)
3234		seq_printf(s, "%s\n", link->child->name);
3235
3236	genpd_unlock(genpd);
3237	return ret;
3238}
3239
3240static int idle_states_show(struct seq_file *s, void *data)
3241{
3242	struct generic_pm_domain *genpd = s->private;
3243	u64 now, delta, idle_time = 0;
3244	unsigned int i;
3245	int ret = 0;
3246
3247	ret = genpd_lock_interruptible(genpd);
3248	if (ret)
3249		return -ERESTARTSYS;
3250
3251	seq_puts(s, "State          Time Spent(ms) Usage          Rejected\n");
3252
3253	for (i = 0; i < genpd->state_count; i++) {
3254		idle_time += genpd->states[i].idle_time;
3255
3256		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3257			now = ktime_get_mono_fast_ns();
3258			if (now > genpd->accounting_time) {
3259				delta = now - genpd->accounting_time;
3260				idle_time += delta;
3261			}
3262		}
3263
3264		do_div(idle_time, NSEC_PER_MSEC);
3265		seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
3266			   genpd->states[i].usage, genpd->states[i].rejected);
3267	}
3268
3269	genpd_unlock(genpd);
3270	return ret;
3271}
3272
3273static int active_time_show(struct seq_file *s, void *data)
3274{
3275	struct generic_pm_domain *genpd = s->private;
3276	u64 now, on_time, delta = 0;
3277	int ret = 0;
3278
3279	ret = genpd_lock_interruptible(genpd);
3280	if (ret)
3281		return -ERESTARTSYS;
3282
3283	if (genpd->status == GENPD_STATE_ON) {
3284		now = ktime_get_mono_fast_ns();
3285		if (now > genpd->accounting_time)
3286			delta = now - genpd->accounting_time;
3287	}
3288
3289	on_time = genpd->on_time + delta;
3290	do_div(on_time, NSEC_PER_MSEC);
3291	seq_printf(s, "%llu ms\n", on_time);
3292
3293	genpd_unlock(genpd);
3294	return ret;
3295}
3296
3297static int total_idle_time_show(struct seq_file *s, void *data)
3298{
3299	struct generic_pm_domain *genpd = s->private;
3300	u64 now, delta, total = 0;
3301	unsigned int i;
3302	int ret = 0;
3303
3304	ret = genpd_lock_interruptible(genpd);
3305	if (ret)
3306		return -ERESTARTSYS;
3307
3308	for (i = 0; i < genpd->state_count; i++) {
3309		total += genpd->states[i].idle_time;
3310
3311		if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3312			now = ktime_get_mono_fast_ns();
3313			if (now > genpd->accounting_time) {
3314				delta = now - genpd->accounting_time;
3315				total += delta;
3316			}
3317		}
3318	}
3319
3320	do_div(total, NSEC_PER_MSEC);
3321	seq_printf(s, "%llu ms\n", total);
3322
3323	genpd_unlock(genpd);
3324	return ret;
}

static int devices_show(struct seq_file *s, void *data)
3329{
3330	struct generic_pm_domain *genpd = s->private;
3331	struct pm_domain_data *pm_data;
3332	const char *kobj_path;
3333	int ret = 0;
3334
3335	ret = genpd_lock_interruptible(genpd);
3336	if (ret)
3337		return -ERESTARTSYS;
3338
3339	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3340		kobj_path = kobject_get_path(&pm_data->dev->kobj,
3341				genpd_is_irq_safe(genpd) ?
3342				GFP_ATOMIC : GFP_KERNEL);
3343		if (kobj_path == NULL)
3344			continue;
3345
3346		seq_printf(s, "%s\n", kobj_path);
3347		kfree(kobj_path);
3348	}
3349
3350	genpd_unlock(genpd);
3351	return ret;
3352}
3353
3354static int perf_state_show(struct seq_file *s, void *data)
3355{
3356	struct generic_pm_domain *genpd = s->private;
3357
3358	if (genpd_lock_interruptible(genpd))
3359		return -ERESTARTSYS;
3360
3361	seq_printf(s, "%u\n", genpd->performance_state);
3362
3363	genpd_unlock(genpd);
3364	return 0;
3365}
3366
3367DEFINE_SHOW_ATTRIBUTE(summary);
3368DEFINE_SHOW_ATTRIBUTE(status);
3369DEFINE_SHOW_ATTRIBUTE(sub_domains);
3370DEFINE_SHOW_ATTRIBUTE(idle_states);
3371DEFINE_SHOW_ATTRIBUTE(active_time);
3372DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3373DEFINE_SHOW_ATTRIBUTE(devices);
3374DEFINE_SHOW_ATTRIBUTE(perf_state);
3375
3376static void genpd_debug_add(struct generic_pm_domain *genpd)
3377{
3378	struct dentry *d;
3379
3380	if (!genpd_debugfs_dir)
3381		return;
3382
3383	d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3384
3385	debugfs_create_file("current_state", 0444,
3386			    d, genpd, &status_fops);
3387	debugfs_create_file("sub_domains", 0444,
3388			    d, genpd, &sub_domains_fops);
3389	debugfs_create_file("idle_states", 0444,
3390			    d, genpd, &idle_states_fops);
3391	debugfs_create_file("active_time", 0444,
3392			    d, genpd, &active_time_fops);
3393	debugfs_create_file("total_idle_time", 0444,
3394			    d, genpd, &total_idle_time_fops);
3395	debugfs_create_file("devices", 0444,
3396			    d, genpd, &devices_fops);
3397	if (genpd->set_performance_state)
3398		debugfs_create_file("perf_state", 0444,
3399				    d, genpd, &perf_state_fops);
3400}
3401
3402static int __init genpd_debug_init(void)
3403{
3404	struct generic_pm_domain *genpd;
3405
3406	genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
3407
	debugfs_create_file("pm_genpd_summary", 0444, genpd_debugfs_dir,
3409			    NULL, &summary_fops);
3410
3411	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3412		genpd_debug_add(genpd);
3413
3414	return 0;
3415}
3416late_initcall(genpd_debug_init);
3417
3418static void __exit genpd_debug_exit(void)
3419{
3420	debugfs_remove_recursive(genpd_debugfs_dir);
3421}
3422__exitcall(genpd_debug_exit);
3423#endif /* CONFIG_DEBUG_FS */
3424