/*
 * kernel/power/wakeup_reason.c
 *
 * Logs the reasons which caused the kernel to resume from
 * the suspend mode.
 *
 * Copyright (C) 2020 Google, Inc.
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/wakeup_reason.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/slab.h>

/*
 * struct wakeup_irq_node - stores data and relationships for IRQs logged as
 * either base or nested wakeup reasons during suspend/resume flow.
 * @siblings: for membership on leaf or parent IRQ lists
 * @irq: the IRQ number
 * @irq_name: the name associated with the IRQ, or a default if none
 */
struct wakeup_irq_node {
	struct list_head siblings;
	int irq;
	const char *irq_name;
};

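/*
 * Overall wakeup state for the current suspend/resume cycle. RESUME_IRQ
 * means one or more hardware IRQs were logged as the wake reason;
 * RESUME_ABORT means suspend was aborted before completing; RESUME_ABNORMAL
 * covers non-IRQ wake reasons. Abort/abnormal states take precedence over
 * IRQ logging (see log_irq_wakeup_reason() below).
 */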
enum wakeup_reason_flag {
	RESUME_NONE = 0,
	RESUME_IRQ,
	RESUME_ABORT,
	RESUME_ABNORMAL,
};

static DEFINE_SPINLOCK(wakeup_reason_lock);

static LIST_HEAD(leaf_irqs);   /* kept in ascending IRQ sorted order */
static LIST_HEAD(parent_irqs); /* unordered */

static struct kmem_cache *wakeup_irq_nodes_cache;

static const char *default_irq_name = "(unnamed)";

static struct kobject *kobj;

static bool capture_reasons;
static int wakeup_reason;
static char non_irq_wake_reason[MAX_SUSPEND_ABORT_LEN];

static ktime_t last_monotime; /* monotonic time before last suspend */
static ktime_t curr_monotime; /* monotonic time after last suspend */
static ktime_t last_stime; /* monotonic boottime offset before last suspend */
static ktime_t curr_stime; /* monotonic boottime offset after last suspend */

static void init_node(struct wakeup_irq_node *p, int irq)
{
	struct irq_desc *desc;

	INIT_LIST_HEAD(&p->siblings);

	p->irq = irq;
	desc = irq_to_desc(irq);
	if (desc && desc->action && desc->action->name)
		p->irq_name = desc->action->name;
	else
		p->irq_name = default_irq_name;
}

static struct wakeup_irq_node *create_node(int irq)
{
	struct wakeup_irq_node *result;

	result = kmem_cache_alloc(wakeup_irq_nodes_cache, GFP_ATOMIC);
	if (unlikely(!result))
		pr_warn("Failed to log wakeup IRQ %d\n", irq);
	else
		init_node(result, irq);

	return result;
}

static void delete_list(struct list_head *head)
{
	struct wakeup_irq_node *n;

	while (!list_empty(head)) {
		n = list_first_entry(head, struct wakeup_irq_node, siblings);
		list_del(&n->siblings);
		kmem_cache_free(wakeup_irq_nodes_cache, n);
	}
}

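/*
 * Add an IRQ to a list kept sorted by ascending IRQ number, skipping
 * duplicates. Returns true if the IRQ is on the list on exit (whether it
 * was already present or newly added), false on allocation failure.
 */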
static bool add_sibling_node_sorted(struct list_head *head, int irq)
{
	struct wakeup_irq_node *n = NULL;
	struct list_head *predecessor = head;

	if (unlikely(WARN_ON(!head)))
		return false;

	if (!list_empty(head)) {
		list_for_each_entry(n, head, siblings) {
			if (n->irq < irq)
				predecessor = &n->siblings;
			else if (n->irq == irq)
				return true;
			else
				break;
		}
	}

	n = create_node(irq);
	if (n) {
		list_add(&n->siblings, predecessor);
		return true;
	}

	return false;
}

static struct wakeup_irq_node *find_node_in_list(struct list_head *head,
						 int irq)
{
	struct wakeup_irq_node *n;

	if (unlikely(WARN_ON(!head)))
		return NULL;

	list_for_each_entry(n, head, siblings)
		if (n->irq == irq)
			return n;

	return NULL;
}

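/*
 * Record @irq as a base (leaf) wakeup reason, unless an abort or abnormal
 * wake has already been logged, or capture is disabled for this cycle.
 * IRQs already known as parents of threaded handlers are not re-added as
 * leaves.
 */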
void log_irq_wakeup_reason(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);
	if (wakeup_reason == RESUME_ABNORMAL || wakeup_reason == RESUME_ABORT) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (!capture_reasons) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (find_node_in_list(&parent_irqs, irq) == NULL)
		add_sibling_node_sorted(&leaf_irqs, irq);

	wakeup_reason = RESUME_IRQ;
	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}

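/*
 * Record a nested wakeup reason: @irq is a child handled by a threaded
 * handler whose parent is @parent_irq. If the parent was previously logged
 * as a leaf, it is moved to the parent list, so that only the most specific
 * (leaf) IRQs are reported as wakeup reasons.
 */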
void log_threaded_irq_wakeup_reason(int irq, int parent_irq)
{
	struct wakeup_irq_node *parent;
	unsigned long flags;

	/*
	 * Intentionally unsynchronized. Calls that come in after we have
	 * resumed should have a fast exit path since there's no work to be
	 * done, and any coherence issue that could cause a wrong value here
	 * is both highly improbable - given the set/clear timing - and very
	 * low impact (parent IRQ gets logged instead of the specific child).
	 */
	if (!capture_reasons)
		return;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	if (wakeup_reason == RESUME_ABNORMAL || wakeup_reason == RESUME_ABORT) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (!capture_reasons || (find_node_in_list(&leaf_irqs, irq) != NULL)) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	parent = find_node_in_list(&parent_irqs, parent_irq);
	if (parent != NULL) {
		add_sibling_node_sorted(&leaf_irqs, irq);
	} else {
		parent = find_node_in_list(&leaf_irqs, parent_irq);
		if (parent != NULL) {
			list_del_init(&parent->siblings);
			list_add_tail(&parent->siblings, &parent_irqs);
			add_sibling_node_sorted(&leaf_irqs, irq);
		}
	}

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}
EXPORT_SYMBOL_GPL(log_threaded_irq_wakeup_reason);

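/*
 * Common helper for the two non-IRQ wake paths below. The call is ignored
 * if any wakeup reason has already been recorded for this cycle; otherwise
 * the formatted reason string is stored for reporting.
 */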
static void _log_abort_or_abnormal_wake(bool abort, const char *fmt,
					va_list args)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	/* Suspend abort or abnormal wake reason has already been logged. */
	if (wakeup_reason != RESUME_NONE) {
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (abort)
		wakeup_reason = RESUME_ABORT;
	else
		wakeup_reason = RESUME_ABNORMAL;

	(void)vsnprintf(non_irq_wake_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}

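/* Record a printf-style description of why suspend was aborted. */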
void log_suspend_abort_reason(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	_log_abort_or_abnormal_wake(true, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(log_suspend_abort_reason);

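/* Record a printf-style description of an abnormal (non-IRQ) wakeup. */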
void log_abnormal_wakeup_reason(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	_log_abort_or_abnormal_wake(false, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(log_abnormal_wakeup_reason);

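/*
 * Reset all logged reasons and re-enable capture; invoked from the PM
 * notifier below before each suspend attempt.
 */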
void clear_wakeup_reasons(void)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	delete_list(&leaf_irqs);
	delete_list(&parent_irqs);
	wakeup_reason = RESUME_NONE;
	capture_reasons = true;

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}

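/*
 * Print the captured wakeup reasons to the kernel log and stop capturing
 * until the next suspend; invoked from the PM notifier after resume.
 */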
static void print_wakeup_sources(void)
{
	struct wakeup_irq_node *n;
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	capture_reasons = false;

	if (wakeup_reason == RESUME_ABORT) {
		pr_info("Abort: %s\n", non_irq_wake_reason);
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return;
	}

	if (wakeup_reason == RESUME_IRQ && !list_empty(&leaf_irqs))
		list_for_each_entry(n, &leaf_irqs, siblings)
			pr_info("Resume caused by IRQ %d, %s\n", n->irq,
				n->irq_name);
	else if (wakeup_reason == RESUME_ABNORMAL)
		pr_info("Resume caused by %s\n", non_irq_wake_reason);
	else
		pr_info("Resume cause unknown\n");

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
}

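/*
 * sysfs show handler for /sys/kernel/wakeup_reasons/last_resume_reason.
 * Emits one "<irq> <name>" line per leaf wakeup IRQ, "Abort: <reason>" if
 * suspend was aborted, or "-1 <reason>" for an abnormal wake.
 */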
static ssize_t last_resume_reason_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	ssize_t buf_offset = 0;
	struct wakeup_irq_node *n;
	unsigned long flags;

	spin_lock_irqsave(&wakeup_reason_lock, flags);

	if (wakeup_reason == RESUME_ABORT) {
		buf_offset = scnprintf(buf, PAGE_SIZE, "Abort: %s",
				       non_irq_wake_reason);
		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
		return buf_offset;
	}

	if (wakeup_reason == RESUME_IRQ && !list_empty(&leaf_irqs))
		list_for_each_entry(n, &leaf_irqs, siblings)
			buf_offset += scnprintf(buf + buf_offset,
						PAGE_SIZE - buf_offset,
						"%d %s\n", n->irq, n->irq_name);
	else if (wakeup_reason == RESUME_ABNORMAL)
		buf_offset = scnprintf(buf, PAGE_SIZE, "-1 %s",
				       non_irq_wake_reason);

	spin_unlock_irqrestore(&wakeup_reason_lock, flags);

	return buf_offset;
}

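/*
 * sysfs show handler for /sys/kernel/wakeup_reasons/last_suspend_time.
 * Prints two space-separated values in seconds: time spent in the
 * suspend/resume process itself and time spent actually suspended,
 * e.g. "0.123456789 42.000000000" (values illustrative).
 */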
static ssize_t last_suspend_time_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	struct timespec64 sleep_time;
	struct timespec64 total_time;
	struct timespec64 suspend_resume_time;

	/*
	 * total_time is calculated from monotonic boottime values because,
	 * unlike CLOCK_MONOTONIC, they include the time spent in the suspend
	 * state.
	 */
	total_time = ktime_to_timespec64(ktime_sub(curr_stime, last_stime));

	/*
	 * suspend_resume_time is calculated as the monotonic
	 * (CLOCK_MONOTONIC) time interval between entering suspend and
	 * returning from it.
	 */
	suspend_resume_time =
		ktime_to_timespec64(ktime_sub(curr_monotime, last_monotime));

	/* sleep_time = total_time - suspend_resume_time */
	sleep_time = timespec64_sub(total_time, suspend_resume_time);

	/* Export suspend_resume_time and sleep_time as a pair here. */
	return sprintf(buf, "%llu.%09lu %llu.%09lu\n",
		       (unsigned long long)suspend_resume_time.tv_sec,
		       suspend_resume_time.tv_nsec,
		       (unsigned long long)sleep_time.tv_sec,
		       sleep_time.tv_nsec);
}

static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);

static struct attribute *attrs[] = {
	&resume_reason.attr,
	&suspend_time.attr,
	NULL,
};
static struct attribute_group attr_group = {
	.attrs = attrs,
};

/* Detects a suspend and clears all the previous wake up reasons */
static int wakeup_reason_pm_event(struct notifier_block *notifier,
				  unsigned long pm_event, void *unused)
{
	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		/* monotonic time since boot */
		last_monotime = ktime_get();
		/* monotonic time since boot including the time spent in suspend */
		last_stime = ktime_get_boottime();
		clear_wakeup_reasons();
		break;
	case PM_POST_SUSPEND:
		/* monotonic time since boot */
		curr_monotime = ktime_get();
		/* monotonic time since boot including the time spent in suspend */
		curr_stime = ktime_get_boottime();
		print_wakeup_sources();
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block wakeup_reason_pm_notifier_block = {
	.notifier_call = wakeup_reason_pm_event,
};

static int __init wakeup_reason_init(void)
{
	if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) {
		pr_warn("[%s] failed to register PM notifier\n", __func__);
		goto fail;
	}

	kobj = kobject_create_and_add("wakeup_reasons", kernel_kobj);
	if (!kobj) {
		pr_warn("[%s] failed to create a sysfs kobject\n", __func__);
		goto fail_unregister_pm_notifier;
	}

	if (sysfs_create_group(kobj, &attr_group)) {
		pr_warn("[%s] failed to create a sysfs group\n", __func__);
		goto fail_kobject_put;
	}

	wakeup_irq_nodes_cache =
		kmem_cache_create("wakeup_irq_node_cache",
				  sizeof(struct wakeup_irq_node), 0, 0, NULL);
	if (!wakeup_irq_nodes_cache)
		goto fail_remove_group;

	return 0;

fail_remove_group:
	sysfs_remove_group(kobj, &attr_group);
fail_kobject_put:
	kobject_put(kobj);
fail_unregister_pm_notifier:
	unregister_pm_notifier(&wakeup_reason_pm_notifier_block);
fail:
	return 1;
}

late_initcall(wakeup_reason_init);