1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * ti-sysc.c - Texas Instruments sysc interconnect target driver
4 */
5
6 #include <linux/io.h>
7 #include <linux/clk.h>
8 #include <linux/clkdev.h>
9 #include <linux/cpu_pm.h>
10 #include <linux/delay.h>
11 #include <linux/list.h>
12 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_domain.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/reset.h>
17 #include <linux/of_address.h>
18 #include <linux/of_platform.h>
19 #include <linux/slab.h>
20 #include <linux/sys_soc.h>
21 #include <linux/timekeeping.h>
22 #include <linux/iopoll.h>
23
24 #include <linux/platform_data/ti-sysc.h>
25
26 #include <dt-bindings/bus/ti-sysc.h>
27
/*
 * Feature-disable bits, presumably read from an OMAP3 feature register to
 * detect AM35xx class devices -- NOTE(review): usage is not visible in this
 * chunk, confirm against the SoC detection code.
 */
#define DIS_ISP BIT(2)
#define DIS_IVA BIT(1)
#define DIS_SGX BIT(0)

/* Build one soc_device_attribute match entry with the flag stashed in .data */
#define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), }

/*
 * Softreset poll budget: used both as the timeout in usecs for the iopoll
 * helpers and as the retry count for the timekeeping-suspended fallback loop.
 */
#define MAX_MODULE_SOFTRESET_WAIT 10000
35
/* SoC families known to this driver, SOC_UNKNOWN until detection runs */
enum sysc_soc {
	SOC_UNKNOWN,
	SOC_2420,
	SOC_2430,
	SOC_3430,
	SOC_AM35,
	SOC_3630,
	SOC_4430,
	SOC_4460,
	SOC_4470,
	SOC_5430,
	SOC_AM3,
	SOC_AM4,
	SOC_DRA7,
};
51
/* Module physical base address list entry, queued on the sysc_soc_info lists */
struct sysc_address {
	unsigned long base;
	struct list_head node;
};
56
/* List entry wrapping one sysc instance */
struct sysc_module {
	struct sysc *ddata;
	struct list_head node;
};
61
/* SoC-wide data shared by all sysc instances (see the sysc_soc static below) */
struct sysc_soc_info {
	unsigned long general_purpose:1;
	enum sysc_soc soc;
	struct mutex list_lock; /* disabled and restored modules list lock */
	struct list_head disabled_modules;
	struct list_head restored_modules;
	struct notifier_block nb;
};
70
/*
 * Index into ddata->clocks and ddata->clock_roles. Slots 0 and 1 are
 * reserved for the main functional and interface clocks, the rest are
 * optional functional clocks. Names must match clock_names[] below.
 */
enum sysc_clocks {
	SYSC_FCK,
	SYSC_ICK,
	SYSC_OPTFCK0,
	SYSC_OPTFCK1,
	SYSC_OPTFCK2,
	SYSC_OPTFCK3,
	SYSC_OPTFCK4,
	SYSC_OPTFCK5,
	SYSC_OPTFCK6,
	SYSC_OPTFCK7,
	SYSC_MAX_CLOCKS,
};
84
/* Detected SoC data, shared by all interconnect target module instances */
static struct sysc_soc_info *sysc_soc;
/* Memory resource names, indexed by enum sysc_registers */
static const char * const reg_names[] = { "rev", "sysc", "syss", };
/* Clock role names, indexed by enum sysc_clocks */
static const char * const clock_names[SYSC_MAX_CLOCKS] = {
	"fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
	"opt5", "opt6", "opt7",
};

#define SYSC_IDLEMODE_MASK 3
#define SYSC_CLOCKACTIVITY_MASK 3
94
/**
 * struct sysc - TI sysc interconnect target module registers and capabilities
 * @dev: struct device pointer
 * @module_pa: physical address of the interconnect target module
 * @module_size: size of the interconnect target module
 * @module_va: virtual address of the interconnect target module
 * @offsets: register offsets from module base
 * @mdata: ti-sysc to hwmod translation data for a module
 * @clocks: clocks used by the interconnect target module
 * @clock_roles: clock role names for the found clocks
 * @nr_clocks: number of clocks used by the interconnect target module
 * @rsts: resets used by the interconnect target module
 * @legacy_mode: configured for legacy mode if set
 * @cap: interconnect target module capabilities
 * @cfg: interconnect target module configuration
 * @cookie: data used by legacy platform callbacks
 * @name: name if available
 * @revision: interconnect target module revision
 * @reserved: target module is reserved and already in use
 * @enabled: sysc runtime enabled status
 * @needs_resume: runtime resume needed on resume from suspend
 * @child_needs_resume: runtime resume needed for child on resume from suspend
 * @idle_work: work structure used to perform delayed idle on a module
 * @pre_reset_quirk: module specific pre-reset quirk
 * @post_reset_quirk: module specific post-reset quirk
 * @reset_done_quirk: module specific reset done quirk
 * @module_enable_quirk: module specific enable quirk
 * @module_disable_quirk: module specific disable quirk
 * @module_unlock_quirk: module specific sysconfig unlock quirk
 * @module_lock_quirk: module specific sysconfig lock quirk
 */
struct sysc {
	struct device *dev;
	u64 module_pa;
	u32 module_size;
	void __iomem *module_va;
	int offsets[SYSC_MAX_REGS];	/* negative offset means register not mapped */
	struct ti_sysc_module_data *mdata;
	struct clk **clocks;
	const char **clock_roles;
	int nr_clocks;
	struct reset_control *rsts;
	const char *legacy_mode;
	const struct sysc_capabilities *cap;
	struct sysc_config cfg;
	struct ti_sysc_cookie cookie;
	const char *name;
	u32 revision;
	unsigned int reserved:1;
	unsigned int enabled:1;
	unsigned int needs_resume:1;
	unsigned int child_needs_resume:1;
	struct delayed_work idle_work;
	void (*pre_reset_quirk)(struct sysc *sysc);
	void (*post_reset_quirk)(struct sysc *sysc);
	void (*reset_done_quirk)(struct sysc *sysc);
	void (*module_enable_quirk)(struct sysc *sysc);
	void (*module_disable_quirk)(struct sysc *sysc);
	void (*module_unlock_quirk)(struct sysc *sysc);
	void (*module_lock_quirk)(struct sysc *sysc);
};
157
158 static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
159 bool is_child);
160
/*
 * sysc_write - write a module register, honoring the 16-bit access quirk
 *
 * With SYSC_QUIRK_16BIT only 16-bit accesses are used. The i2c revision
 * register is split into LO and HI halves with a stride of 4, so a write
 * to the revision offset also writes the high half.
 */
static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
	if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
		writew_relaxed(value & 0xffff, ddata->module_va + offset);

		/* Only i2c revision has LO and HI register with stride of 4 */
		if (ddata->offsets[SYSC_REVISION] >= 0 &&
		    offset == ddata->offsets[SYSC_REVISION]) {
			u16 hi = value >> 16;

			writew_relaxed(hi, ddata->module_va + offset + 4);
		}

		return;
	}

	writel_relaxed(value, ddata->module_va + offset);
}
179
/*
 * sysc_read - read a module register, honoring the 16-bit access quirk
 *
 * Mirror of sysc_write(): with SYSC_QUIRK_16BIT the revision register is
 * assembled from LO and HI 16-bit halves at stride 4.
 */
static u32 sysc_read(struct sysc *ddata, int offset)
{
	if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
		u32 val;

		val = readw_relaxed(ddata->module_va + offset);

		/* Only i2c revision has LO and HI register with stride of 4 */
		if (ddata->offsets[SYSC_REVISION] >= 0 &&
		    offset == ddata->offsets[SYSC_REVISION]) {
			u16 tmp = readw_relaxed(ddata->module_va + offset + 4);

			val |= tmp << 16;
		}

		return val;
	}

	return readl_relaxed(ddata->module_va + offset);
}
200
sysc_opt_clks_needed(struct sysc *ddata)201 static bool sysc_opt_clks_needed(struct sysc *ddata)
202 {
203 return !!(ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_NEEDED);
204 }
205
sysc_read_revision(struct sysc *ddata)206 static u32 sysc_read_revision(struct sysc *ddata)
207 {
208 int offset = ddata->offsets[SYSC_REVISION];
209
210 if (offset < 0)
211 return 0;
212
213 return sysc_read(ddata, offset);
214 }
215
sysc_read_sysconfig(struct sysc *ddata)216 static u32 sysc_read_sysconfig(struct sysc *ddata)
217 {
218 int offset = ddata->offsets[SYSC_SYSCONFIG];
219
220 if (offset < 0)
221 return 0;
222
223 return sysc_read(ddata, offset);
224 }
225
sysc_read_sysstatus(struct sysc *ddata)226 static u32 sysc_read_sysstatus(struct sysc *ddata)
227 {
228 int offset = ddata->offsets[SYSC_SYSSTATUS];
229
230 if (offset < 0)
231 return 0;
232
233 return sysc_read(ddata, offset);
234 }
235
/*
 * sysc_poll_reset_sysstatus - poll SYSSTATUS until OCP softreset completes
 *
 * With SYSS_QUIRK_RESETDONE_INVERTED the done condition is all status bits
 * clear instead of syss_mask set. Uses the iopoll helper while timekeeping
 * is active; falls back to a bounded udelay() loop when timekeeping is
 * suspended (e.g. early resume paths). Returns 0 on success, -ETIMEDOUT
 * otherwise.
 */
static int sysc_poll_reset_sysstatus(struct sysc *ddata)
{
	int error, retries;
	u32 syss_done, rstval;

	if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
		syss_done = 0;
	else
		syss_done = ddata->cfg.syss_mask;

	if (likely(!timekeeping_suspended)) {
		error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
				rstval, (rstval & ddata->cfg.syss_mask) ==
				syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
	} else {
		retries = MAX_MODULE_SOFTRESET_WAIT;
		while (retries--) {
			rstval = sysc_read_sysstatus(ddata);
			if ((rstval & ddata->cfg.syss_mask) == syss_done)
				return 0;
			udelay(2); /* Account for udelay flakeyness */
		}
		error = -ETIMEDOUT;
	}

	return error;
}
263
/*
 * sysc_poll_reset_sysconfig - poll SYSCONFIG SOFTRESET bit until it clears
 *
 * Used for modules with no separate SYSSTATUS register where the SOFTRESET
 * bit in SYSCONFIG self-clears when reset is done. Same dual polling
 * strategy as sysc_poll_reset_sysstatus(). Returns 0 on success,
 * -ETIMEDOUT otherwise.
 */
static int sysc_poll_reset_sysconfig(struct sysc *ddata)
{
	int error, retries;
	u32 sysc_mask, rstval;

	sysc_mask = BIT(ddata->cap->regbits->srst_shift);

	if (likely(!timekeeping_suspended)) {
		error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
				rstval, !(rstval & sysc_mask),
				100, MAX_MODULE_SOFTRESET_WAIT);
	} else {
		retries = MAX_MODULE_SOFTRESET_WAIT;
		while (retries--) {
			rstval = sysc_read_sysconfig(ddata);
			if (!(rstval & sysc_mask))
				return 0;
			udelay(2); /* Account for udelay flakeyness */
		}
		error = -ETIMEDOUT;
	}

	return error;
}
288
/*
 * sysc_wait_softreset - wait for OCP softreset completion
 *
 * Prefers the SYSSTATUS register when one is mapped; otherwise polls the
 * SYSCONFIG SOFTRESET bit if SYSC_QUIRK_RESET_STATUS is set. Returns 0
 * when reset completed, or immediately when the module has no softreset
 * bit or nothing to poll.
 */
static int sysc_wait_softreset(struct sysc *ddata)
{
	int syss_offset, error = 0;

	if (ddata->cap->regbits->srst_shift < 0)
		return 0;

	syss_offset = ddata->offsets[SYSC_SYSSTATUS];

	if (syss_offset >= 0)
		error = sysc_poll_reset_sysstatus(ddata);
	else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS)
		error = sysc_poll_reset_sysconfig(ddata);

	return error;
}
306
sysc_add_named_clock_from_child(struct sysc *ddata, const char *name, const char *optfck_name)307 static int sysc_add_named_clock_from_child(struct sysc *ddata,
308 const char *name,
309 const char *optfck_name)
310 {
311 struct device_node *np = ddata->dev->of_node;
312 struct device_node *child;
313 struct clk_lookup *cl;
314 struct clk *clock;
315 const char *n;
316
317 if (name)
318 n = name;
319 else
320 n = optfck_name;
321
322 /* Does the clock alias already exist? */
323 clock = of_clk_get_by_name(np, n);
324 if (!IS_ERR(clock)) {
325 clk_put(clock);
326
327 return 0;
328 }
329
330 child = of_get_next_available_child(np, NULL);
331 if (!child)
332 return -ENODEV;
333
334 clock = devm_get_clk_from_child(ddata->dev, child, name);
335 if (IS_ERR(clock))
336 return PTR_ERR(clock);
337
338 /*
339 * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
340 * limit for clk_get(). If cl ever needs to be freed, it should be done
341 * with clkdev_drop().
342 */
343 cl = kcalloc(1, sizeof(*cl), GFP_KERNEL);
344 if (!cl)
345 return -ENOMEM;
346
347 cl->con_id = n;
348 cl->dev_id = dev_name(ddata->dev);
349 cl->clk = clock;
350 clkdev_add(cl);
351
352 clk_put(clock);
353
354 return 0;
355 }
356
/*
 * sysc_init_ext_opt_clock - register an external optional functional clock
 *
 * Used with SYSC_QUIRK_EXT_OPT_CLOCK: adds a clkdev alias for a clock
 * defined in a child node and records its role in the next free opt
 * clock slot. NOTE(review): index is not bounds-checked here against
 * SYSC_MAX_CLOCKS; the caller checks nr_clocks only after this runs --
 * confirm DT input cannot push index past the clock_roles array.
 */
static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
{
	const char *optfck_name;
	int error, index;

	if (ddata->nr_clocks < SYSC_OPTFCK0)
		index = SYSC_OPTFCK0;
	else
		index = ddata->nr_clocks;

	if (name)
		optfck_name = name;
	else
		optfck_name = clock_names[index];

	error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
	if (error)
		return error;

	ddata->clock_roles[index] = optfck_name;
	ddata->nr_clocks++;

	return 0;
}
381
/*
 * sysc_get_one_clock - get and prepare one clock by role name
 *
 * Names prefixed "fck"/"ick" map to the fixed main clock slots, anything
 * else goes into the first free optional clock slot. The clock is only
 * prepared here; enabling happens separately (see
 * sysc_enable_main_clocks() and sysc_enable_opt_clocks()).
 */
static int sysc_get_one_clock(struct sysc *ddata, const char *name)
{
	int error, i, index = -ENODEV;

	if (!strncmp(clock_names[SYSC_FCK], name, 3))
		index = SYSC_FCK;
	else if (!strncmp(clock_names[SYSC_ICK], name, 3))
		index = SYSC_ICK;

	if (index < 0) {
		/* Not a main clock, find a free optional clock slot */
		for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
			if (!ddata->clocks[i]) {
				index = i;
				break;
			}
		}
	}

	if (index < 0) {
		dev_err(ddata->dev, "clock %s not added\n", name);
		return index;
	}

	ddata->clocks[index] = devm_clk_get(ddata->dev, name);
	if (IS_ERR(ddata->clocks[index])) {
		dev_err(ddata->dev, "clock get error for %s: %li\n",
			name, PTR_ERR(ddata->clocks[index]));

		return PTR_ERR(ddata->clocks[index]);
	}

	error = clk_prepare(ddata->clocks[index]);
	if (error) {
		dev_err(ddata->dev, "clock prepare error for %s: %i\n",
			name, error);

		return error;
	}

	return 0;
}
423
sysc_get_clocks(struct sysc *ddata)424 static int sysc_get_clocks(struct sysc *ddata)
425 {
426 struct device_node *np = ddata->dev->of_node;
427 struct property *prop;
428 const char *name;
429 int nr_fck = 0, nr_ick = 0, i, error = 0;
430
431 ddata->clock_roles = devm_kcalloc(ddata->dev,
432 SYSC_MAX_CLOCKS,
433 sizeof(*ddata->clock_roles),
434 GFP_KERNEL);
435 if (!ddata->clock_roles)
436 return -ENOMEM;
437
438 of_property_for_each_string(np, "clock-names", prop, name) {
439 if (!strncmp(clock_names[SYSC_FCK], name, 3))
440 nr_fck++;
441 if (!strncmp(clock_names[SYSC_ICK], name, 3))
442 nr_ick++;
443 ddata->clock_roles[ddata->nr_clocks] = name;
444 ddata->nr_clocks++;
445 }
446
447 if (ddata->nr_clocks < 1)
448 return 0;
449
450 if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
451 error = sysc_init_ext_opt_clock(ddata, NULL);
452 if (error)
453 return error;
454 }
455
456 if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
457 dev_err(ddata->dev, "too many clocks for %pOF\n", np);
458
459 return -EINVAL;
460 }
461
462 if (nr_fck > 1 || nr_ick > 1) {
463 dev_err(ddata->dev, "max one fck and ick for %pOF\n", np);
464
465 return -EINVAL;
466 }
467
468 /* Always add a slot for main clocks fck and ick even if unused */
469 if (!nr_fck)
470 ddata->nr_clocks++;
471 if (!nr_ick)
472 ddata->nr_clocks++;
473
474 ddata->clocks = devm_kcalloc(ddata->dev,
475 ddata->nr_clocks, sizeof(*ddata->clocks),
476 GFP_KERNEL);
477 if (!ddata->clocks)
478 return -ENOMEM;
479
480 for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
481 const char *name = ddata->clock_roles[i];
482
483 if (!name)
484 continue;
485
486 error = sysc_get_one_clock(ddata, name);
487 if (error)
488 return error;
489 }
490
491 return 0;
492 }
493
sysc_enable_main_clocks(struct sysc *ddata)494 static int sysc_enable_main_clocks(struct sysc *ddata)
495 {
496 struct clk *clock;
497 int i, error;
498
499 if (!ddata->clocks)
500 return 0;
501
502 for (i = 0; i < SYSC_OPTFCK0; i++) {
503 clock = ddata->clocks[i];
504
505 /* Main clocks may not have ick */
506 if (IS_ERR_OR_NULL(clock))
507 continue;
508
509 error = clk_enable(clock);
510 if (error)
511 goto err_disable;
512 }
513
514 return 0;
515
516 err_disable:
517 for (i--; i >= 0; i--) {
518 clock = ddata->clocks[i];
519
520 /* Main clocks may not have ick */
521 if (IS_ERR_OR_NULL(clock))
522 continue;
523
524 clk_disable(clock);
525 }
526
527 return error;
528 }
529
sysc_disable_main_clocks(struct sysc *ddata)530 static void sysc_disable_main_clocks(struct sysc *ddata)
531 {
532 struct clk *clock;
533 int i;
534
535 if (!ddata->clocks)
536 return;
537
538 for (i = 0; i < SYSC_OPTFCK0; i++) {
539 clock = ddata->clocks[i];
540 if (IS_ERR_OR_NULL(clock))
541 continue;
542
543 clk_disable(clock);
544 }
545 }
546
sysc_enable_opt_clocks(struct sysc *ddata)547 static int sysc_enable_opt_clocks(struct sysc *ddata)
548 {
549 struct clk *clock;
550 int i, error;
551
552 if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
553 return 0;
554
555 for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
556 clock = ddata->clocks[i];
557
558 /* Assume no holes for opt clocks */
559 if (IS_ERR_OR_NULL(clock))
560 return 0;
561
562 error = clk_enable(clock);
563 if (error)
564 goto err_disable;
565 }
566
567 return 0;
568
569 err_disable:
570 for (i--; i >= 0; i--) {
571 clock = ddata->clocks[i];
572 if (IS_ERR_OR_NULL(clock))
573 continue;
574
575 clk_disable(clock);
576 }
577
578 return error;
579 }
580
sysc_disable_opt_clocks(struct sysc *ddata)581 static void sysc_disable_opt_clocks(struct sysc *ddata)
582 {
583 struct clk *clock;
584 int i;
585
586 if (!ddata->clocks || ddata->nr_clocks < SYSC_OPTFCK0 + 1)
587 return;
588
589 for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
590 clock = ddata->clocks[i];
591
592 /* Assume no holes for opt clocks */
593 if (IS_ERR_OR_NULL(clock))
594 return;
595
596 clk_disable(clock);
597 }
598 }
599
sysc_clkdm_deny_idle(struct sysc *ddata)600 static void sysc_clkdm_deny_idle(struct sysc *ddata)
601 {
602 struct ti_sysc_platform_data *pdata;
603
604 if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
605 return;
606
607 pdata = dev_get_platdata(ddata->dev);
608 if (pdata && pdata->clkdm_deny_idle)
609 pdata->clkdm_deny_idle(ddata->dev, &ddata->cookie);
610 }
611
sysc_clkdm_allow_idle(struct sysc *ddata)612 static void sysc_clkdm_allow_idle(struct sysc *ddata)
613 {
614 struct ti_sysc_platform_data *pdata;
615
616 if (ddata->legacy_mode || (ddata->cfg.quirks & SYSC_QUIRK_CLKDM_NOAUTO))
617 return;
618
619 pdata = dev_get_platdata(ddata->dev);
620 if (pdata && pdata->clkdm_allow_idle)
621 pdata->clkdm_allow_idle(ddata->dev, &ddata->cookie);
622 }
623
/**
 * sysc_init_resets - init rstctrl reset line if configured
 * @ddata: device driver data
 *
 * The "rstctrl" reset is optional: ddata->rsts stays NULL when the device
 * tree does not define one. See sysc_rstctrl_reset_deassert().
 */
static int sysc_init_resets(struct sysc *ddata)
{
	ddata->rsts =
		devm_reset_control_get_optional_shared(ddata->dev, "rstctrl");

	return PTR_ERR_OR_ZERO(ddata->rsts);
}
637
/**
 * sysc_parse_and_check_child_range - parses module IO region from ranges
 * @ddata: device driver data
 *
 * In general we only need rev, syss, and sysc registers and not the whole
 * module range. But we do want the offsets for these registers from the
 * module base. This allows us to check them against the legacy hwmod
 * platform data. Let's also check the ranges are configured properly.
 */
static int sysc_parse_and_check_child_range(struct sysc *ddata)
{
	struct device_node *np = ddata->dev->of_node;
	const __be32 *ranges;
	u32 nr_addr, nr_size;
	int len, error;

	ranges = of_get_property(np, "ranges", &len);
	if (!ranges) {
		dev_err(ddata->dev, "missing ranges for %pOF\n", np);

		return -ENOENT;
	}

	len /= sizeof(*ranges);

	/* Need at least <child-addr parent-addr size> with one cell each */
	if (len < 3) {
		dev_err(ddata->dev, "incomplete ranges for %pOF\n", np);

		return -EINVAL;
	}

	error = of_property_read_u32(np, "#address-cells", &nr_addr);
	if (error)
		return -ENOENT;

	error = of_property_read_u32(np, "#size-cells", &nr_size);
	if (error)
		return -ENOENT;

	/* The parsing below assumes single-cell addresses and sizes */
	if (nr_addr != 1 || nr_size != 1) {
		dev_err(ddata->dev, "invalid ranges for %pOF\n", np);

		return -EINVAL;
	}

	/* Skip the child address cell, translate the parent address cell */
	ranges++;
	ddata->module_pa = of_translate_address(np, ranges++);
	ddata->module_size = be32_to_cpup(ranges);

	return 0;
}
689
/* Interconnect instances to probe before l4_per instances */
static struct resource early_bus_ranges[] = {
	/* am3/4 l4_wkup */
	{ .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
	/* omap4/5 and dra7 l4_cfg */
	{ .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
	/* omap4 l4_wkup */
	{ .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
	/* omap5 and dra7 l4_wkup without dra7 dcan segment */
	{ .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
};

/* Remaining defer attempts; deferring stops once this reaches zero */
static atomic_t sysc_defer = ATOMIC_INIT(10);

/**
 * sysc_defer_non_critical - defer non_critical interconnect probing
 * @ddata: device driver data
 *
 * We want to probe l4_cfg and l4_wkup interconnect instances before any
 * l4_per instances as l4_per instances depend on resources on l4_cfg and
 * l4_wkup interconnects.
 */
static int sysc_defer_non_critical(struct sysc *ddata)
{
	struct resource *res;
	int i;

	if (!atomic_read(&sysc_defer))
		return 0;

	for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
		res = &early_bus_ranges[i];
		if (ddata->module_pa >= res->start &&
		    ddata->module_pa <= res->end) {
			/* Early bus seen, stop deferring from now on */
			atomic_set(&sysc_defer, 0);

			return 0;
		}
	}

	/* Cap the number of defer rounds so probing can't stall forever */
	atomic_dec_if_positive(&sysc_defer);

	return -EPROBE_DEFER;
}
734
/* Resolved "stdout-path" node, or ERR_PTR(-ENODEV) if lookup failed once */
static struct device_node *stdout_path;

/*
 * sysc_init_stdout_path - resolve /chosen stdout-path once
 *
 * Result is cached in stdout_path: either the console uart node, or an
 * error pointer so the lookup is never retried. NOTE(review): the
 * reference taken on the /chosen node is not dropped before np is reused
 * for the uart lookup -- one-time leak, confirm if worth fixing.
 */
static void sysc_init_stdout_path(struct sysc *ddata)
{
	struct device_node *np = NULL;
	const char *uart;

	if (IS_ERR(stdout_path))
		return;

	if (stdout_path)
		return;

	np = of_find_node_by_path("/chosen");
	if (!np)
		goto err;

	uart = of_get_property(np, "stdout-path", NULL);
	if (!uart)
		goto err;

	np = of_find_node_by_path(uart);
	if (!np)
		goto err;

	stdout_path = np;

	return;

err:
	stdout_path = ERR_PTR(-ENODEV);
}
767
sysc_check_quirk_stdout(struct sysc *ddata, struct device_node *np)768 static void sysc_check_quirk_stdout(struct sysc *ddata,
769 struct device_node *np)
770 {
771 sysc_init_stdout_path(ddata);
772 if (np != stdout_path)
773 return;
774
775 ddata->cfg.quirks |= SYSC_QUIRK_NO_IDLE_ON_INIT |
776 SYSC_QUIRK_NO_RESET_ON_INIT;
777 }
778
779 /**
780 * sysc_check_one_child - check child configuration
781 * @ddata: device driver data
782 * @np: child device node
783 *
784 * Let's avoid messy situations where we have new interconnect target
785 * node but children have "ti,hwmods". These belong to the interconnect
786 * target node and are managed by this driver.
787 */
sysc_check_one_child(struct sysc *ddata, struct device_node *np)788 static void sysc_check_one_child(struct sysc *ddata,
789 struct device_node *np)
790 {
791 const char *name;
792
793 name = of_get_property(np, "ti,hwmods", NULL);
794 if (name && !of_device_is_compatible(np, "ti,sysc"))
795 dev_warn(ddata->dev, "really a child ti,hwmods property?");
796
797 sysc_check_quirk_stdout(ddata, np);
798 sysc_parse_dts_quirks(ddata, np, true);
799 }
800
sysc_check_children(struct sysc *ddata)801 static void sysc_check_children(struct sysc *ddata)
802 {
803 struct device_node *child;
804
805 for_each_child_of_node(ddata->dev->of_node, child)
806 sysc_check_one_child(ddata, child);
807 }
808
/*
 * So far only I2C uses 16-bit read access with clockactivity with revision
 * in two registers with stride of 4. We can detect this based on the rev
 * register size to configure things far enough to be able to properly read
 * the revision register.
 */
static void sysc_check_quirk_16bit(struct sysc *ddata, struct resource *res)
{
	/* An 8-byte rev resource means two 16-bit halves at stride 4 */
	if (resource_size(res) == 8)
		ddata->cfg.quirks |= SYSC_QUIRK_16BIT | SYSC_QUIRK_USE_CLOCKACT;
}
820
/**
 * sysc_parse_one - parses the interconnect target module registers
 * @ddata: device driver data
 * @reg: register to parse
 *
 * A missing memory resource is not an error: the offset is set to the
 * -ENODEV sentinel and callers check for negative offsets.
 */
static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
{
	struct resource *res;
	const char *name;

	switch (reg) {
	case SYSC_REVISION:
	case SYSC_SYSCONFIG:
	case SYSC_SYSSTATUS:
		name = reg_names[reg];
		break;
	default:
		return -EINVAL;
	}

	res = platform_get_resource_byname(to_platform_device(ddata->dev),
					   IORESOURCE_MEM, name);
	if (!res) {
		/* Register not configured for this module, not an error */
		ddata->offsets[reg] = -ENODEV;

		return 0;
	}

	/* Store the offset relative to the module base */
	ddata->offsets[reg] = res->start - ddata->module_pa;
	if (reg == SYSC_REVISION)
		sysc_check_quirk_16bit(ddata, res);

	return 0;
}
855
sysc_parse_registers(struct sysc *ddata)856 static int sysc_parse_registers(struct sysc *ddata)
857 {
858 int i, error;
859
860 for (i = 0; i < SYSC_MAX_REGS; i++) {
861 error = sysc_parse_one(ddata, i);
862 if (error)
863 return error;
864 }
865
866 return 0;
867 }
868
/**
 * sysc_check_registers - check for misconfigured register overlaps
 * @ddata: device driver data
 *
 * Every configured register matches at least itself in the inner loop, so
 * nr_matches equals nr_regs when all offsets are distinct; any extra match
 * means two registers share an offset.
 */
static int sysc_check_registers(struct sysc *ddata)
{
	int i, j, nr_regs = 0, nr_matches = 0;

	for (i = 0; i < SYSC_MAX_REGS; i++) {
		if (ddata->offsets[i] < 0)
			continue;

		/* Each register needs a full 32-bit word inside the module */
		if (ddata->offsets[i] > (ddata->module_size - 4)) {
			dev_err(ddata->dev, "register outside module range");

			return -EINVAL;
		}

		for (j = 0; j < SYSC_MAX_REGS; j++) {
			if (ddata->offsets[j] < 0)
				continue;

			if (ddata->offsets[i] == ddata->offsets[j])
				nr_matches++;
		}
		nr_regs++;
	}

	if (nr_matches > nr_regs) {
		dev_err(ddata->dev, "overlapping registers: (%i/%i)",
			nr_regs, nr_matches);

		return -EINVAL;
	}

	return 0;
}
906
/**
 * sysc_ioremap - ioremap register space for the interconnect target module
 * @ddata: device driver data
 *
 * Note that the interconnect target module registers can be anywhere
 * within the interconnect target module range. For example, SGX has
 * them at offset 0x1fc00 in the 32MB module address space. And cpsw
 * has them at offset 0x1200 in the CPSW_WR child. Usually the
 * interconnect target module registers are at the beginning of
 * the module range though.
 */
static int sysc_ioremap(struct sysc *ddata)
{
	int size;

	if (ddata->offsets[SYSC_REVISION] < 0 &&
	    ddata->offsets[SYSC_SYSCONFIG] < 0 &&
	    ddata->offsets[SYSC_SYSSTATUS] < 0) {
		/* No registers configured, map the whole module range */
		size = ddata->module_size;
	} else {
		/* Map at least up to the highest configured register */
		size = max3(ddata->offsets[SYSC_REVISION],
			    ddata->offsets[SYSC_SYSCONFIG],
			    ddata->offsets[SYSC_SYSSTATUS]);

		if (size < SZ_1K)
			size = SZ_1K;

		/* Clamp so the +sizeof(u32) below stays inside the module */
		if ((size + sizeof(u32)) > ddata->module_size)
			size = ddata->module_size;
	}

	ddata->module_va = devm_ioremap(ddata->dev,
					ddata->module_pa,
					size + sizeof(u32));
	if (!ddata->module_va)
		return -EIO;

	return 0;
}
946
/**
 * sysc_map_and_check_registers - ioremap and check device registers
 * @ddata: device driver data
 *
 * Parses the module range, optionally defers non-critical instances,
 * then parses, maps and sanity-checks the module registers.
 */
static int sysc_map_and_check_registers(struct sysc *ddata)
{
	int error;

	error = sysc_parse_and_check_child_range(ddata);
	if (error)
		return error;

	error = sysc_defer_non_critical(ddata);
	if (error)
		return error;

	sysc_check_children(ddata);

	error = sysc_parse_registers(ddata);
	if (error)
		return error;

	error = sysc_ioremap(ddata);
	if (error)
		return error;

	return sysc_check_registers(ddata);
}
979
980 /**
981 * sysc_show_rev - read and show interconnect target module revision
982 * @bufp: buffer to print the information to
983 * @ddata: device driver data
984 */
sysc_show_rev(char *bufp, struct sysc *ddata)985 static int sysc_show_rev(char *bufp, struct sysc *ddata)
986 {
987 int len;
988
989 if (ddata->offsets[SYSC_REVISION] < 0)
990 return sprintf(bufp, ":NA");
991
992 len = sprintf(bufp, ":%08x", ddata->revision);
993
994 return len;
995 }
996
sysc_show_reg(struct sysc *ddata, char *bufp, enum sysc_registers reg)997 static int sysc_show_reg(struct sysc *ddata,
998 char *bufp, enum sysc_registers reg)
999 {
1000 if (ddata->offsets[reg] < 0)
1001 return sprintf(bufp, ":NA");
1002
1003 return sprintf(bufp, ":%x", ddata->offsets[reg]);
1004 }
1005
sysc_show_name(char *bufp, struct sysc *ddata)1006 static int sysc_show_name(char *bufp, struct sysc *ddata)
1007 {
1008 if (!ddata->name)
1009 return 0;
1010
1011 return sprintf(bufp, ":%s", ddata->name);
1012 }
1013
/**
 * sysc_show_registers - show information about interconnect target module
 * @ddata: device driver data
 *
 * Builds a debug line of register offsets, revision and name. NOTE(review):
 * buf[128] is assumed large enough for all sprintf() fragments -- confirm
 * against the maximum possible offsets and name length.
 */
static void sysc_show_registers(struct sysc *ddata)
{
	char buf[128];
	char *bufp = buf;
	int i;

	for (i = 0; i < SYSC_MAX_REGS; i++)
		bufp += sysc_show_reg(ddata, bufp, i);

	bufp += sysc_show_rev(bufp, ddata);
	bufp += sysc_show_name(bufp, ddata);

	dev_dbg(ddata->dev, "%llx:%x%s\n",
		ddata->module_pa, ddata->module_size,
		buf);
}
1034
/**
 * sysc_write_sysconfig - handle sysconfig quirks for register write
 * @ddata: device driver data
 * @value: register value
 *
 * Some modules need their sysconfig register unlocked before writing and
 * locked again afterwards; the per-module quirk hooks handle that.
 */
static void sysc_write_sysconfig(struct sysc *ddata, u32 value)
{
	if (ddata->module_unlock_quirk)
		ddata->module_unlock_quirk(ddata);

	sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);

	if (ddata->module_lock_quirk)
		ddata->module_lock_quirk(ddata);
}
1050
#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
#define SYSC_CLOCACT_ICK 2

/*
 * sysc_enable_module - configure sysconfig for an enabled module
 *
 * Waits for any automatic OCP softreset, then programs CLOCKACTIVITY,
 * SIDLE, MIDLE and AUTOIDLE as the quirks and capabilities dictate.
 * A negative regbits shift means that field does not exist on this module.
 *
 * Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle().
 */
static int sysc_enable_module(struct device *dev)
{
	struct sysc *ddata;
	const struct sysc_regbits *regbits;
	u32 reg, idlemodes, best_mode;
	int error;

	ddata = dev_get_drvdata(dev);

	/*
	 * Some modules like DSS reset automatically on idle. Enable optional
	 * reset clocks and wait for OCP softreset to complete.
	 */
	if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
		error = sysc_enable_opt_clocks(ddata);
		if (error) {
			dev_err(ddata->dev,
				"Optional clocks failed for enable: %i\n",
				error);
			return error;
		}
	}
	/*
	 * Some modules like i2c and hdq1w have unusable reset status unless
	 * the module reset quirk is enabled. Skip status check on enable.
	 */
	if (!(ddata->cfg.quirks & SYSC_MODULE_QUIRK_ENA_RESETDONE)) {
		error = sysc_wait_softreset(ddata);
		if (error)
			dev_warn(ddata->dev, "OCP softreset timed out\n");
	}
	if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
		sysc_disable_opt_clocks(ddata);

	/*
	 * Some subsystem private interconnects, like DSS top level module,
	 * need only the automatic OCP softreset handling with no sysconfig
	 * register bits to configure.
	 */
	if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
		return 0;

	regbits = ddata->cap->regbits;
	reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	/*
	 * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
	 * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
	 * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
	 */
	if (regbits->clkact_shift >= 0 &&
	    (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
		reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;

	/* Set SIDLE mode */
	idlemodes = ddata->cfg.sidlemodes;
	if (!idlemodes || regbits->sidle_shift < 0)
		goto set_midle;

	if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
				 SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
		/* Software-supervised idle: force no-idle while enabled */
		best_mode = SYSC_IDLE_NO;

		/* Clear WAKEUP */
		if (regbits->enwkup_shift >= 0 &&
		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
			reg &= ~BIT(regbits->enwkup_shift);
	} else {
		/* Pick the highest supported idle mode */
		best_mode = fls(ddata->cfg.sidlemodes) - 1;
		if (best_mode > SYSC_IDLE_MASK) {
			dev_err(dev, "%s: invalid sidlemode\n", __func__);
			return -EINVAL;
		}

		/* Set WAKEUP */
		if (regbits->enwkup_shift >= 0 &&
		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
			reg |= BIT(regbits->enwkup_shift);
	}

	reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
	reg |= best_mode << regbits->sidle_shift;
	sysc_write_sysconfig(ddata, reg);

set_midle:
	/* Set MIDLE mode */
	idlemodes = ddata->cfg.midlemodes;
	if (!idlemodes || regbits->midle_shift < 0)
		goto set_autoidle;

	best_mode = fls(ddata->cfg.midlemodes) - 1;
	if (best_mode > SYSC_IDLE_MASK) {
		dev_err(dev, "%s: invalid midlemode\n", __func__);
		return -EINVAL;
	}

	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
		best_mode = SYSC_IDLE_NO;

	reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
	reg |= best_mode << regbits->midle_shift;
	sysc_write_sysconfig(ddata, reg);

set_autoidle:
	/* Autoidle bit must enabled separately if available */
	if (regbits->autoidle_shift >= 0 &&
	    ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
		reg |= 1 << regbits->autoidle_shift;
		sysc_write_sysconfig(ddata, reg);
	}

	/* Flush posted write */
	sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	if (ddata->module_enable_quirk)
		ddata->module_enable_quirk(ddata);

	return 0;
}
1174
/*
 * Pick the best supported idle mode, preferring smart-idle with wakeup,
 * then smart-idle, then force-idle. Returns 0 with *best_mode set, or
 * -EINVAL if none of the preferred modes is available in @idlemodes.
 */
static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
{
	static const u32 preferred[] = {
		SYSC_IDLE_SMART_WKUP,
		SYSC_IDLE_SMART,
		SYSC_IDLE_FORCE,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(preferred); i++) {
		if (idlemodes & BIT(preferred[i])) {
			*best_mode = preferred[i];

			return 0;
		}
	}

	return -EINVAL;
}
1188
1189 /* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
/*
 * sysc_disable_module() - program SYSCONFIG idle bits for module idle
 * @dev: device for the interconnect target module
 *
 * Picks and writes the MSTANDBY and SIDLE modes, and sets the autoidle
 * bit when available, so the module can idle. Returns 0 on success, 0
 * if the module has no SYSCONFIG register, or a negative error if no
 * valid idle mode is configured.
 */
static int sysc_disable_module(struct device *dev)
{
	struct sysc *ddata;
	const struct sysc_regbits *regbits;
	u32 reg, idlemodes, best_mode;
	int ret;

	ddata = dev_get_drvdata(dev);
	/* Nothing to do if the module has no SYSCONFIG register */
	if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
		return 0;

	/* Module specific fixups needed before touching SYSCONFIG */
	if (ddata->module_disable_quirk)
		ddata->module_disable_quirk(ddata);

	regbits = ddata->cap->regbits;
	reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	/* Set MIDLE mode */
	idlemodes = ddata->cfg.midlemodes;
	if (!idlemodes || regbits->midle_shift < 0)
		goto set_sidle;

	ret = sysc_best_idle_mode(idlemodes, &best_mode);
	if (ret) {
		dev_err(dev, "%s: invalid midlemode\n", __func__);
		return ret;
	}

	/* Quirky modules need force-standby instead of smart-standby */
	if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_MSTANDBY) ||
	    ddata->cfg.quirks & (SYSC_QUIRK_FORCE_MSTANDBY))
		best_mode = SYSC_IDLE_FORCE;

	reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
	reg |= best_mode << regbits->midle_shift;
	sysc_write_sysconfig(ddata, reg);

set_sidle:
	/* Set SIDLE mode */
	idlemodes = ddata->cfg.sidlemodes;
	if (!idlemodes || regbits->sidle_shift < 0)
		return 0;

	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
		/* Software supported idle needs force-idle on disable */
		best_mode = SYSC_IDLE_FORCE;
	} else {
		ret = sysc_best_idle_mode(idlemodes, &best_mode);
		if (ret) {
			dev_err(dev, "%s: invalid sidlemode\n", __func__);
			return ret;
		}
	}

	if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) {
		/* Set WAKEUP */
		if (regbits->enwkup_shift >= 0 &&
		    ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
			reg |= BIT(regbits->enwkup_shift);
	}

	reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
	reg |= best_mode << regbits->sidle_shift;
	/* Autoidle can be written together with SIDLE on disable */
	if (regbits->autoidle_shift >= 0 &&
	    ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
		reg |= 1 << regbits->autoidle_shift;
	sysc_write_sysconfig(ddata, reg);

	/* Flush posted write */
	sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);

	return 0;
}
1261
sysc_runtime_suspend_legacy(struct device *dev, struct sysc *ddata)1262 static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
1263 struct sysc *ddata)
1264 {
1265 struct ti_sysc_platform_data *pdata;
1266 int error;
1267
1268 pdata = dev_get_platdata(ddata->dev);
1269 if (!pdata)
1270 return 0;
1271
1272 if (!pdata->idle_module)
1273 return -ENODEV;
1274
1275 error = pdata->idle_module(dev, &ddata->cookie);
1276 if (error)
1277 dev_err(dev, "%s: could not idle: %i\n",
1278 __func__, error);
1279
1280 reset_control_assert(ddata->rsts);
1281
1282 return 0;
1283 }
1284
sysc_runtime_resume_legacy(struct device *dev, struct sysc *ddata)1285 static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
1286 struct sysc *ddata)
1287 {
1288 struct ti_sysc_platform_data *pdata;
1289 int error;
1290
1291 pdata = dev_get_platdata(ddata->dev);
1292 if (!pdata)
1293 return 0;
1294
1295 if (!pdata->enable_module)
1296 return -ENODEV;
1297
1298 error = pdata->enable_module(dev, &ddata->cookie);
1299 if (error)
1300 dev_err(dev, "%s: could not enable: %i\n",
1301 __func__, error);
1302
1303 reset_control_deassert(ddata->rsts);
1304
1305 return 0;
1306 }
1307
/*
 * Runtime PM suspend: idle the module and gate its clocks.
 *
 * Clockdomain autoidle is blocked for the duration of the programming
 * and re-allowed at the end, on both the success and error paths.
 */
static int __maybe_unused sysc_runtime_suspend(struct device *dev)
{
	struct sysc *ddata;
	int error = 0;

	ddata = dev_get_drvdata(dev);

	/* Already idled, nothing to do */
	if (!ddata->enabled)
		return 0;

	sysc_clkdm_deny_idle(ddata);

	if (ddata->legacy_mode) {
		error = sysc_runtime_suspend_legacy(dev, ddata);
		if (error)
			goto err_allow_idle;
	} else {
		error = sysc_disable_module(dev);
		if (error)
			goto err_allow_idle;
	}

	/* Idle configured, now gate the clocks */
	sysc_disable_main_clocks(ddata);

	if (sysc_opt_clks_needed(ddata))
		sysc_disable_opt_clocks(ddata);

	ddata->enabled = false;

err_allow_idle:
	/* Reset is asserted on the error path too */
	reset_control_assert(ddata->rsts);

	sysc_clkdm_allow_idle(ddata);

	return error;
}
1344
/*
 * Runtime PM resume: ungate clocks, deassert reset and re-enable the
 * module. On failure the clocks enabled so far are backed out in
 * reverse order before allowing clockdomain autoidle again.
 */
static int __maybe_unused sysc_runtime_resume(struct device *dev)
{
	struct sysc *ddata;
	int error = 0;

	ddata = dev_get_drvdata(dev);

	/* Already enabled, nothing to do */
	if (ddata->enabled)
		return 0;

	sysc_clkdm_deny_idle(ddata);

	if (sysc_opt_clks_needed(ddata)) {
		error = sysc_enable_opt_clocks(ddata);
		if (error)
			goto err_allow_idle;
	}

	error = sysc_enable_main_clocks(ddata);
	if (error)
		goto err_opt_clocks;

	/* Clocks are running, safe to release reset */
	reset_control_deassert(ddata->rsts);

	if (ddata->legacy_mode) {
		error = sysc_runtime_resume_legacy(dev, ddata);
		if (error)
			goto err_main_clocks;
	} else {
		error = sysc_enable_module(dev);
		if (error)
			goto err_main_clocks;
	}

	ddata->enabled = true;

	sysc_clkdm_allow_idle(ddata);

	return 0;

err_main_clocks:
	sysc_disable_main_clocks(ddata);
err_opt_clocks:
	if (sysc_opt_clks_needed(ddata))
		sysc_disable_opt_clocks(ddata);
err_allow_idle:
	sysc_clkdm_allow_idle(ddata);

	return error;
}
1396
/*
 * Cycle the target module through suspend and resume to reinitialize
 * it, optionally leaving it enabled. Failures are logged but the
 * sequence continues; the last error (if any) is returned.
 */
static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
{
	struct device *dev = ddata->dev;
	int error = 0;

	/* Make sure the module is off before re-enabling it */
	if (ddata->enabled) {
		error = sysc_runtime_suspend(dev);
		if (error)
			dev_warn(dev, "reinit suspend failed: %i\n", error);
	}

	error = sysc_runtime_resume(dev);
	if (error)
		dev_warn(dev, "reinit resume failed: %i\n", error);

	/* Disable again unless the caller wants the module left enabled */
	if (!leave_enabled) {
		error = sysc_runtime_suspend(dev);
		if (error)
			dev_warn(dev, "reinit suspend failed: %i\n", error);
	}

	return error;
}
1424
sysc_noirq_suspend(struct device *dev)1425 static int __maybe_unused sysc_noirq_suspend(struct device *dev)
1426 {
1427 struct sysc *ddata;
1428
1429 ddata = dev_get_drvdata(dev);
1430
1431 if (ddata->cfg.quirks &
1432 (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
1433 return 0;
1434
1435 if (!ddata->enabled)
1436 return 0;
1437
1438 ddata->needs_resume = 1;
1439
1440 return sysc_runtime_suspend(dev);
1441 }
1442
sysc_noirq_resume(struct device *dev)1443 static int __maybe_unused sysc_noirq_resume(struct device *dev)
1444 {
1445 struct sysc *ddata;
1446 int error = 0;
1447
1448 ddata = dev_get_drvdata(dev);
1449
1450 if (ddata->cfg.quirks &
1451 (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
1452 return 0;
1453
1454 if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
1455 error = sysc_reinit_module(ddata, ddata->needs_resume);
1456 if (error)
1457 dev_warn(dev, "noirq_resume failed: %i\n", error);
1458 } else if (ddata->needs_resume) {
1459 error = sysc_runtime_resume(dev);
1460 if (error)
1461 dev_warn(dev, "noirq_resume failed: %i\n", error);
1462 }
1463
1464 ddata->needs_resume = 0;
1465
1466 return error;
1467 }
1468
/* PM callbacks: system sleep handled at noirq time plus runtime PM hooks */
static const struct dev_pm_ops sysc_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_noirq_suspend, sysc_noirq_resume)
	SET_RUNTIME_PM_OPS(sysc_runtime_suspend,
			   sysc_runtime_resume,
			   NULL)
};
1475
1476 /* Module revision register based quirks */
struct sysc_revision_quirk {
	const char *name;	/* module name to set when matched */
	u32 base;		/* module physical base address, 0 matches any */
	int rev_offset;		/* revision register offset, -ENODEV if none */
	int sysc_offset;	/* sysconfig register offset, -ENODEV if none */
	int syss_offset;	/* sysstatus register offset, -ENODEV if none */
	u32 revision;		/* expected revision register value */
	u32 revision_mask;	/* mask applied when comparing revision */
	u32 quirks;		/* SYSC_QUIRK_* and SYSC_MODULE_QUIRK_* flags */
};

/* Initializer shorthand for a sysc_revision_quirk table entry */
#define SYSC_QUIRK(optname, optbase, optrev, optsysc, optsyss,		\
		   optrev_val, optrevmask, optquirkmask)		\
	{								\
		.name = (optname),					\
		.base = (optbase),					\
		.rev_offset = (optrev),					\
		.sysc_offset = (optsysc),				\
		.syss_offset = (optsyss),				\
		.revision = (optrev_val),				\
		.revision_mask = (optrevmask),				\
		.quirks = (optquirkmask),				\
	}
1500
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
	/* Entry order: name, base, rev, sysc, syss offsets, revision, mask, quirks */
	/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
	SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
		   SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
	SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
		   SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff,
		   SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
		   SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
	/* Uarts on omap4 and later */
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),

	/* Quirks that need to be set based on the module address */
	SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
		   SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
		   SYSC_QUIRK_SWSUP_SIDLE),

	/* Quirks that need to be set based on detected module */
	SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
		   SYSC_MODULE_QUIRK_AESS),
	/* Errata i893 handling for dra7 dcan1 and 2 */
	SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
	SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
	SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
	SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
		   SYSC_QUIRK_CLKDM_NOAUTO),
	SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
		   SYSC_QUIRK_GPMC_DEBUG),
	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
		   SYSC_QUIRK_OPT_CLKS_NEEDED),
	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
		   SYSC_MODULE_QUIRK_HDQ1W | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
		   SYSC_MODULE_QUIRK_I2C | SYSC_MODULE_QUIRK_ENA_RESETDONE),
	SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
	SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
		   SYSC_MODULE_QUIRK_SGX),
	SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
		   SYSC_MODULE_QUIRK_RTC_UNLOCK),
	SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
	SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
		   SYSC_QUIRK_REINIT_ON_CTX_LOST),
	SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
		   SYSC_MODULE_QUIRK_WDT),
	/* PRUSS on am3, am4 and am5 */
	SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000,
		   SYSC_MODULE_QUIRK_PRUSS),
	/* Watchdog on am3 and am4 */
	SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
		   SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),

#ifdef DEBUG
	/* No-quirk entries below only name detected modules for debugging */
	SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0),
	SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0),
	SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0),
	SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
	SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
		   0xffff00f0, 0),
	SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0),
	SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0),
	SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
	SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0),
	SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0),
	SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0),
	SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0),
	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0),
	SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
	SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0),
	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
	SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
	SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
	SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0),
	SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0),
	SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0),
	SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0),
	SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0),
	SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0),
	SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0),
	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0),
	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0),
	SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
	SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
	SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0),
	SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0),
	SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
	SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
	SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
	SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
	SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
	SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
	SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
	/* Some timers on omap4 and later */
	SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
	SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
	SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
	SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
	SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
	SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
	SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
	SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
#endif
};
1660
1661 /*
1662 * Early quirks based on module base and register offsets only that are
1663 * needed before the module revision can be read
1664 */
sysc_init_early_quirks(struct sysc *ddata)1665 static void sysc_init_early_quirks(struct sysc *ddata)
1666 {
1667 const struct sysc_revision_quirk *q;
1668 int i;
1669
1670 for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
1671 q = &sysc_revision_quirks[i];
1672
1673 if (!q->base)
1674 continue;
1675
1676 if (q->base != ddata->module_pa)
1677 continue;
1678
1679 if (q->rev_offset != ddata->offsets[SYSC_REVISION])
1680 continue;
1681
1682 if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
1683 continue;
1684
1685 if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
1686 continue;
1687
1688 ddata->name = q->name;
1689 ddata->cfg.quirks |= q->quirks;
1690 }
1691 }
1692
1693 /* Quirks that also consider the revision register value */
sysc_init_revision_quirks(struct sysc *ddata)1694 static void sysc_init_revision_quirks(struct sysc *ddata)
1695 {
1696 const struct sysc_revision_quirk *q;
1697 int i;
1698
1699 for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
1700 q = &sysc_revision_quirks[i];
1701
1702 if (q->base && q->base != ddata->module_pa)
1703 continue;
1704
1705 if (q->rev_offset != ddata->offsets[SYSC_REVISION])
1706 continue;
1707
1708 if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
1709 continue;
1710
1711 if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
1712 continue;
1713
1714 if (q->revision == ddata->revision ||
1715 (q->revision & q->revision_mask) ==
1716 (ddata->revision & q->revision_mask)) {
1717 ddata->name = q->name;
1718 ddata->cfg.quirks |= q->quirks;
1719 }
1720 }
1721 }
1722
1723 /*
1724 * DSS needs dispc outputs disabled to reset modules. Returns mask of
1725 * enabled DSS interrupts. Eventually we may be able to do this on
1726 * dispc init rather than top-level DSS init.
1727 */
sysc_quirk_dispc(struct sysc *ddata, int dispc_offset, bool disable)1728 static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
1729 bool disable)
1730 {
1731 bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
1732 const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
1733 int manager_count;
1734 bool framedonetv_irq = true;
1735 u32 val, irq_mask = 0;
1736
1737 switch (sysc_soc->soc) {
1738 case SOC_2420 ... SOC_3630:
1739 manager_count = 2;
1740 framedonetv_irq = false;
1741 break;
1742 case SOC_4430 ... SOC_4470:
1743 manager_count = 3;
1744 break;
1745 case SOC_5430:
1746 case SOC_DRA7:
1747 manager_count = 4;
1748 break;
1749 case SOC_AM4:
1750 manager_count = 1;
1751 framedonetv_irq = false;
1752 break;
1753 case SOC_UNKNOWN:
1754 default:
1755 return 0;
1756 };
1757
1758 /* Remap the whole module range to be able to reset dispc outputs */
1759 devm_iounmap(ddata->dev, ddata->module_va);
1760 ddata->module_va = devm_ioremap(ddata->dev,
1761 ddata->module_pa,
1762 ddata->module_size);
1763 if (!ddata->module_va)
1764 return -EIO;
1765
1766 /* DISP_CONTROL, shut down lcd and digit on disable if enabled */
1767 val = sysc_read(ddata, dispc_offset + 0x40);
1768 lcd_en = val & lcd_en_mask;
1769 digit_en = val & digit_en_mask;
1770 if (lcd_en)
1771 irq_mask |= BIT(0); /* FRAMEDONE */
1772 if (digit_en) {
1773 if (framedonetv_irq)
1774 irq_mask |= BIT(24); /* FRAMEDONETV */
1775 else
1776 irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */
1777 }
1778 if (disable && (lcd_en || digit_en))
1779 sysc_write(ddata, dispc_offset + 0x40,
1780 val & ~(lcd_en_mask | digit_en_mask));
1781
1782 if (manager_count <= 2)
1783 return irq_mask;
1784
1785 /* DISPC_CONTROL2 */
1786 val = sysc_read(ddata, dispc_offset + 0x238);
1787 lcd2_en = val & lcd_en_mask;
1788 if (lcd2_en)
1789 irq_mask |= BIT(22); /* FRAMEDONE2 */
1790 if (disable && lcd2_en)
1791 sysc_write(ddata, dispc_offset + 0x238,
1792 val & ~lcd_en_mask);
1793
1794 if (manager_count <= 3)
1795 return irq_mask;
1796
1797 /* DISPC_CONTROL3 */
1798 val = sysc_read(ddata, dispc_offset + 0x848);
1799 lcd3_en = val & lcd_en_mask;
1800 if (lcd3_en)
1801 irq_mask |= BIT(30); /* FRAMEDONE3 */
1802 if (disable && lcd3_en)
1803 sysc_write(ddata, dispc_offset + 0x848,
1804 val & ~lcd_en_mask);
1805
1806 return irq_mask;
1807 }
1808
1809 /* DSS needs child outputs disabled and SDI registers cleared for reset */
static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
{
	const int dispc_offset = 0x1000;
	int error;
	u32 irq_mask, val;

	/* Get enabled outputs */
	irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false);
	if (!irq_mask)
		return;

	/* Clear IRQSTATUS */
	sysc_write(ddata, dispc_offset + 0x18, irq_mask);

	/* Disable outputs */
	val = sysc_quirk_dispc(ddata, dispc_offset, true);

	/*
	 * Poll IRQSTATUS until the frame done interrupts have fired.
	 * NOTE(review): the timeout (50 us) is shorter than the sleep
	 * step (100 us), and the warning text "!+" looks like a typo
	 * for "!=" — confirm both against upstream intent.
	 */
	error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18,
				   val, val != irq_mask, 100, 50);
	if (error)
		dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
			 __func__, val, irq_mask);

	if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) {
		/* Clear DSS_SDI_CONTROL */
		sysc_write(ddata, 0x44, 0);

		/* Clear DSS_PLL_CONTROL */
		sysc_write(ddata, 0x48, 0);
	}

	/* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not */
	sysc_write(ddata, 0x40, 0);
}
1845
1846 /* 1-wire needs module's internal clocks enabled for reset */
sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)1847 static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
1848 {
1849 int offset = 0x0c; /* HDQ_CTRL_STATUS */
1850 u16 val;
1851
1852 val = sysc_read(ddata, offset);
1853 val |= BIT(5);
1854 sysc_write(ddata, offset, val);
1855 }
1856
1857 /* AESS (Audio Engine SubSystem) needs autogating set after enable */
/* Enable AESS auto gating after the module has been enabled */
static void sysc_module_enable_quirk_aess(struct sysc *ddata)
{
	const int offset = 0x7c;	/* AESS_AUTO_GATING_ENABLE */

	sysc_write(ddata, offset, 1);
}
1864
1865 /* I2C needs to be disabled for reset */
/* Toggle the I2C_EN bit in I2C_CON; I2C must be disabled for reset */
static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
{
	int offset;
	u16 val;

	/* I2C_CON, omap2/3 is different from omap4 and later */
	offset = ((ddata->revision & 0xffffff00) == 0x001f0000) ? 0x24 : 0xa4;

	/* I2C_EN is BIT(15) */
	val = sysc_read(ddata, offset);
	if (enable)
		val |= BIT(15);
	else
		val &= ~BIT(15);
	sysc_write(ddata, offset, val);
}
1885
/* Disable I2C before reset */
static void sysc_pre_reset_quirk_i2c(struct sysc *ddata)
{
	sysc_clk_quirk_i2c(ddata, false);
}
1890
/* Re-enable I2C after reset */
static void sysc_post_reset_quirk_i2c(struct sysc *ddata)
{
	sysc_clk_quirk_i2c(ddata, true);
}
1895
1896 /* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */
static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
{
	u32 val, kick0_val = 0, kick1_val = 0;
	unsigned long flags;
	int error;

	/* Nonzero kick values unlock the RTC registers, zeroes lock them */
	if (!lock) {
		kick0_val = 0x83e70b13;
		kick1_val = 0x95a4f1e0;
	}

	/* The write window is timing sensitive, keep interrupts off */
	local_irq_save(flags);
	/* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
	error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
					  !(val & BIT(0)), 100, 50);
	if (error)
		dev_warn(ddata->dev, "rtc busy timeout\n");
	/* Now we have ~15 microseconds to read/write various registers */
	sysc_write(ddata, 0x6c, kick0_val);	/* KICK0 */
	sysc_write(ddata, 0x70, kick1_val);	/* KICK1 */
	local_irq_restore(flags);
}
1919
/* Unlock the RTC registers before touching sysconfig */
static void sysc_module_unlock_quirk_rtc(struct sysc *ddata)
{
	sysc_quirk_rtc(ddata, false);
}
1924
/* Re-lock the RTC registers after sysconfig has been written */
static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
{
	sysc_quirk_rtc(ddata, true);
}
1929
1930 /* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */
/* Bypass the OCP IPG interrupt logic for 36xx SGX after enable */
static void sysc_module_enable_quirk_sgx(struct sysc *ddata)
{
	const int offset = 0xff08;	/* OCP_DEBUG_CONFIG */

	/* Set THALIA_INT_BYPASS */
	sysc_write(ddata, offset, BIT(31));
}
1938
1939 /* Watchdog timer needs a disable sequence after reset */
static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
{
	int wps, spr, error;
	u32 val;

	wps = 0x34;	/* write posted status register */
	spr = 0x48;	/* start/stop register */

	/* First half of the 0xaaaa/0x5555 watchdog disable sequence */
	sysc_write(ddata, spr, 0xaaaa);
	/* Wait for the posted write (bit 0x10) to complete */
	error = readl_poll_timeout(ddata->module_va + wps, val,
				   !(val & 0x10), 100,
				   MAX_MODULE_SOFTRESET_WAIT);
	if (error)
		dev_warn(ddata->dev, "wdt disable step1 failed\n");

	/* Second half of the disable sequence */
	sysc_write(ddata, spr, 0x5555);
	error = readl_poll_timeout(ddata->module_va + wps, val,
				   !(val & 0x10), 100,
				   MAX_MODULE_SOFTRESET_WAIT);
	if (error)
		dev_warn(ddata->dev, "wdt disable step2 failed\n");
}
1962
1963 /* PRUSS needs to set MSTANDBY_INIT inorder to idle properly */
sysc_module_disable_quirk_pruss(struct sysc *ddata)1964 static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
1965 {
1966 u32 reg;
1967
1968 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
1969 reg |= SYSC_PRUSS_STANDBY_INIT;
1970 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
1971 }
1972
/*
 * Install module specific quirk handlers based on the quirk flags set
 * from the revision register match. Not used for legacy mode or for
 * modules that were never identified by name.
 */
static void sysc_init_module_quirks(struct sysc *ddata)
{
	if (ddata->legacy_mode || !ddata->name)
		return;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w;

		return;
	}

#ifdef CONFIG_OMAP_GPMC_DEBUG
	if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
		/* With GPMC debug enabled, skip the init time reset */
		ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;

		return;
	}
#endif

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
		ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;

		return;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
		ddata->module_enable_quirk = sysc_module_enable_quirk_aess;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET)
		ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) {
		ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc;
		ddata->module_lock_quirk = sysc_module_lock_quirk_rtc;

		return;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
		ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
		ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
		/* Watchdog also needs the disable sequence when idling */
		ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
	}

	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
		ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
}
2023
sysc_clockdomain_init(struct sysc *ddata)2024 static int sysc_clockdomain_init(struct sysc *ddata)
2025 {
2026 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
2027 struct clk *fck = NULL, *ick = NULL;
2028 int error;
2029
2030 if (!pdata || !pdata->init_clockdomain)
2031 return 0;
2032
2033 switch (ddata->nr_clocks) {
2034 case 2:
2035 ick = ddata->clocks[SYSC_ICK];
2036 fallthrough;
2037 case 1:
2038 fck = ddata->clocks[SYSC_FCK];
2039 break;
2040 case 0:
2041 return 0;
2042 }
2043
2044 error = pdata->init_clockdomain(ddata->dev, fck, ick, &ddata->cookie);
2045 if (!error || error == -ENODEV)
2046 return 0;
2047
2048 return error;
2049 }
2050
2051 /*
2052 * Note that pdata->init_module() typically does a reset first. After
2053 * pdata->init_module() is done, PM runtime can be used for the interconnect
2054 * target module.
2055 */
sysc_legacy_init(struct sysc *ddata)2056 static int sysc_legacy_init(struct sysc *ddata)
2057 {
2058 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
2059 int error;
2060
2061 if (!pdata || !pdata->init_module)
2062 return 0;
2063
2064 error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
2065 if (error == -EEXIST)
2066 error = 0;
2067
2068 return error;
2069 }
2070
2071 /*
2072 * Note that the caller must ensure the interconnect target module is enabled
2073 * before calling reset. Otherwise reset will not complete.
2074 */
/*
 * sysc_reset() - issue an OCP softreset via the SYSCONFIG SOFTRESET bit
 * @ddata: device driver data
 *
 * Skipped for legacy mode, for modules without a SOFTRESET bit, and
 * when SYSC_QUIRK_NO_RESET_ON_INIT is set. Runs pre/post/done reset
 * quirk handlers around the reset when installed. Returns 0 on success
 * or the error from waiting for the softreset to complete.
 */
static int sysc_reset(struct sysc *ddata)
{
	int sysc_offset, sysc_val, error;
	u32 sysc_mask;

	sysc_offset = ddata->offsets[SYSC_SYSCONFIG];

	if (ddata->legacy_mode ||
	    ddata->cap->regbits->srst_shift < 0 ||
	    ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
		return 0;

	sysc_mask = BIT(ddata->cap->regbits->srst_shift);

	if (ddata->pre_reset_quirk)
		ddata->pre_reset_quirk(ddata);

	if (sysc_offset >= 0) {
		/* Trigger SOFTRESET */
		sysc_val = sysc_read_sysconfig(ddata);
		sysc_val |= sysc_mask;
		sysc_write(ddata, sysc_offset, sysc_val);

		/*
		 * Some devices need a delay before reading registers
		 * after reset. Presumably a srst_udelay is not needed
		 * for devices that use a rstctrl register reset.
		 */
		if (ddata->cfg.srst_udelay)
			fsleep(ddata->cfg.srst_udelay);

		/*
		 * Flush posted write. For devices needing srst_udelay
		 * this should trigger an interconnect error if the
		 * srst_udelay value is needed but not configured.
		 */
		sysc_val = sysc_read_sysconfig(ddata);
	}

	if (ddata->post_reset_quirk)
		ddata->post_reset_quirk(ddata);

	error = sysc_wait_softreset(ddata);
	if (error)
		dev_warn(ddata->dev, "OCP softreset timed out\n");

	if (ddata->reset_done_quirk)
		ddata->reset_done_quirk(ddata);

	return error;
}
2125
/*
 * At this point the module is configured enough to read the revision but
 * module may not be completely configured yet to use PM runtime. Enable
 * all clocks directly during init to configure the quirks needed for PM
 * runtime based on the revision register.
 */
static int sysc_init_module(struct sysc *ddata)
{
	int error = 0;

	error = sysc_clockdomain_init(ddata);
	if (error)
		return error;

	/* Keep the clockdomain active while the module is being set up */
	sysc_clkdm_deny_idle(ddata);

	/*
	 * Always enable clocks. The bootloader may or may not have enabled
	 * the related clocks.
	 */
	error = sysc_enable_opt_clocks(ddata);
	if (error)
		return error;

	error = sysc_enable_main_clocks(ddata);
	if (error)
		goto err_opt_clocks;

	if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
		error = reset_control_deassert(ddata->rsts);
		if (error)
			goto err_main_clocks;
	}

	/* Revision must be read before the quirk handlers can be selected */
	ddata->revision = sysc_read_revision(ddata);
	sysc_init_revision_quirks(ddata);
	sysc_init_module_quirks(ddata);

	if (ddata->legacy_mode) {
		error = sysc_legacy_init(ddata);
		if (error)
			goto err_reset;
	}

	if (!ddata->legacy_mode) {
		error = sysc_enable_module(ddata->dev);
		if (error)
			goto err_reset;
	}

	error = sysc_reset(ddata);
	if (error)
		dev_err(ddata->dev, "Reset failed with %d\n", error);

	if (error && !ddata->legacy_mode)
		sysc_disable_module(ddata->dev);

	/* The labels below unwind only on error and are no-ops on success */
err_reset:
	if (error && !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
		reset_control_assert(ddata->rsts);

err_main_clocks:
	if (error)
		sysc_disable_main_clocks(ddata);
err_opt_clocks:
	/* No re-enable of clockdomain autoidle to prevent module autoidle */
	if (error) {
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	return error;
}
2199
sysc_init_sysc_mask(struct sysc *ddata)2200 static int sysc_init_sysc_mask(struct sysc *ddata)
2201 {
2202 struct device_node *np = ddata->dev->of_node;
2203 int error;
2204 u32 val;
2205
2206 error = of_property_read_u32(np, "ti,sysc-mask", &val);
2207 if (error)
2208 return 0;
2209
2210 ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
2211
2212 return 0;
2213 }
2214
/*
 * Collect the supported idlemodes for one dts property (ti,sysc-midle or
 * ti,sysc-sidle) into a bitmask. Returns 0 on success or -EINVAL if a
 * listed idlemode is out of range.
 */
static int sysc_init_idlemode(struct sysc *ddata, u8 *idlemodes,
			      const char *name)
{
	struct device_node *np = ddata->dev->of_node;
	struct property *prop;
	const __be32 *p;
	u32 val;

	of_property_for_each_u32(np, name, prop, p, val) {
		if (val >= SYSC_NR_IDLEMODES) {
			/* val is u32, so use the unsigned format specifier */
			dev_err(ddata->dev, "invalid idlemode: %u\n", val);
			return -EINVAL;
		}
		*idlemodes |= (1 << val);
	}

	return 0;
}
2233
sysc_init_idlemodes(struct sysc *ddata)2234 static int sysc_init_idlemodes(struct sysc *ddata)
2235 {
2236 int error;
2237
2238 error = sysc_init_idlemode(ddata, &ddata->cfg.midlemodes,
2239 "ti,sysc-midle");
2240 if (error)
2241 return error;
2242
2243 error = sysc_init_idlemode(ddata, &ddata->cfg.sidlemodes,
2244 "ti,sysc-sidle");
2245 if (error)
2246 return error;
2247
2248 return 0;
2249 }
2250
2251 /*
2252 * Only some devices on omap4 and later have SYSCONFIG reset done
2253 * bit. We can detect this if there is no SYSSTATUS at all, or the
2254 * SYSTATUS bit 0 is not used. Note that some SYSSTATUS registers
2255 * have multiple bits for the child devices like OHCI and EHCI.
2256 * Depends on SYSC being parsed first.
2257 */
sysc_init_syss_mask(struct sysc *ddata)2258 static int sysc_init_syss_mask(struct sysc *ddata)
2259 {
2260 struct device_node *np = ddata->dev->of_node;
2261 int error;
2262 u32 val;
2263
2264 error = of_property_read_u32(np, "ti,syss-mask", &val);
2265 if (error) {
2266 if ((ddata->cap->type == TI_SYSC_OMAP4 ||
2267 ddata->cap->type == TI_SYSC_OMAP4_TIMER) &&
2268 (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
2269 ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;
2270
2271 return 0;
2272 }
2273
2274 if (!(val & 1) && (ddata->cfg.sysc_val & SYSC_OMAP4_SOFTRESET))
2275 ddata->cfg.quirks |= SYSC_QUIRK_RESET_STATUS;
2276
2277 ddata->cfg.syss_mask = val;
2278
2279 return 0;
2280 }
2281
2282 /*
2283 * Many child device drivers need to have fck and opt clocks available
2284 * to get the clock rate for device internal configuration etc.
2285 */
sysc_child_add_named_clock(struct sysc *ddata, struct device *child, const char *name)2286 static int sysc_child_add_named_clock(struct sysc *ddata,
2287 struct device *child,
2288 const char *name)
2289 {
2290 struct clk *clk;
2291 struct clk_lookup *l;
2292 int error = 0;
2293
2294 if (!name)
2295 return 0;
2296
2297 clk = clk_get(child, name);
2298 if (!IS_ERR(clk)) {
2299 error = -EEXIST;
2300 goto put_clk;
2301 }
2302
2303 clk = clk_get(ddata->dev, name);
2304 if (IS_ERR(clk))
2305 return -ENODEV;
2306
2307 l = clkdev_create(clk, name, dev_name(child));
2308 if (!l)
2309 error = -ENOMEM;
2310 put_clk:
2311 clk_put(clk);
2312
2313 return error;
2314 }
2315
sysc_child_add_clocks(struct sysc *ddata, struct device *child)2316 static int sysc_child_add_clocks(struct sysc *ddata,
2317 struct device *child)
2318 {
2319 int i, error;
2320
2321 for (i = 0; i < ddata->nr_clocks; i++) {
2322 error = sysc_child_add_named_clock(ddata,
2323 child,
2324 ddata->clock_roles[i]);
2325 if (error && error != -EEXIST) {
2326 dev_err(ddata->dev, "could not add child clock %s: %i\n",
2327 ddata->clock_roles[i], error);
2328
2329 return error;
2330 }
2331 }
2332
2333 return 0;
2334 }
2335
/* Marker type used only to identify children of a ti-sysc module */
static struct device_type sysc_device_type = {
};
2338
sysc_child_to_parent(struct device *dev)2339 static struct sysc *sysc_child_to_parent(struct device *dev)
2340 {
2341 struct device *parent = dev->parent;
2342
2343 if (!parent || parent->type != &sysc_device_type)
2344 return NULL;
2345
2346 return dev_get_drvdata(parent);
2347 }
2348
sysc_child_runtime_suspend(struct device *dev)2349 static int __maybe_unused sysc_child_runtime_suspend(struct device *dev)
2350 {
2351 struct sysc *ddata;
2352 int error;
2353
2354 ddata = sysc_child_to_parent(dev);
2355
2356 error = pm_generic_runtime_suspend(dev);
2357 if (error)
2358 return error;
2359
2360 if (!ddata->enabled)
2361 return 0;
2362
2363 return sysc_runtime_suspend(ddata->dev);
2364 }
2365
sysc_child_runtime_resume(struct device *dev)2366 static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
2367 {
2368 struct sysc *ddata;
2369 int error;
2370
2371 ddata = sysc_child_to_parent(dev);
2372
2373 if (!ddata->enabled) {
2374 error = sysc_runtime_resume(ddata->dev);
2375 if (error < 0)
2376 dev_err(ddata->dev,
2377 "%s error: %i\n", __func__, error);
2378 }
2379
2380 return pm_generic_runtime_resume(dev);
2381 }
2382
2383 #ifdef CONFIG_PM_SLEEP
/* System suspend noirq phase: idle the child and its parent module */
static int sysc_child_suspend_noirq(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	dev_dbg(ddata->dev, "%s %s\n", __func__,
		ddata->name ? ddata->name : "");

	error = pm_generic_suspend_noirq(dev);
	if (error) {
		dev_err(dev, "%s error at %i: %i\n",
			__func__, __LINE__, error);

		return error;
	}

	if (!pm_runtime_status_suspended(dev)) {
		/* Child is still runtime active, try to idle it now */
		error = pm_generic_runtime_suspend(dev);
		if (error) {
			/* Busy children are left enabled, not an error */
			dev_dbg(dev, "%s busy at %i: %i\n",
				__func__, __LINE__, error);

			return 0;
		}

		error = sysc_runtime_suspend(ddata->dev);
		if (error) {
			dev_err(dev, "%s error at %i: %i\n",
				__func__, __LINE__, error);

			return error;
		}

		/* Tell resume_noirq that it must re-enable the module */
		ddata->child_needs_resume = true;
	}

	return 0;
}
2424
/* System resume noirq phase: undo what suspend_noirq idled, if anything */
static int sysc_child_resume_noirq(struct device *dev)
{
	struct sysc *ddata;
	int error;

	ddata = sysc_child_to_parent(dev);

	dev_dbg(ddata->dev, "%s %s\n", __func__,
		ddata->name ? ddata->name : "");

	/* Only resume what suspend_noirq flagged; errors are logged only */
	if (ddata->child_needs_resume) {
		ddata->child_needs_resume = false;

		/* Parent module must be re-enabled before the child */
		error = sysc_runtime_resume(ddata->dev);
		if (error)
			dev_err(ddata->dev,
				"%s runtime resume error: %i\n",
				__func__, error);

		error = pm_generic_runtime_resume(dev);
		if (error)
			dev_err(ddata->dev,
				"%s generic runtime resume: %i\n",
				__func__, error);
	}

	return pm_generic_resume_noirq(dev);
}
2453 #endif
2454
/*
 * PM domain installed on legacy-idle children so their runtime PM and
 * noirq system sleep calls also manage the parent interconnect module.
 */
static struct dev_pm_domain sysc_child_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
				   sysc_child_runtime_resume,
				   NULL)
		USE_PLATFORM_PM_SLEEP_OPS
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
					      sysc_child_resume_noirq)
	}
};
2465
2466 /* Caller needs to take list_lock if ever used outside of cpu_pm */
sysc_reinit_modules(struct sysc_soc_info *soc)2467 static void sysc_reinit_modules(struct sysc_soc_info *soc)
2468 {
2469 struct sysc_module *module;
2470 struct list_head *pos;
2471 struct sysc *ddata;
2472
2473 list_for_each(pos, &sysc_soc->restored_modules) {
2474 module = list_entry(pos, struct sysc_module, node);
2475 ddata = module->ddata;
2476 sysc_reinit_module(ddata, ddata->enabled);
2477 }
2478 }
2479
2480 /**
2481 * sysc_context_notifier - optionally reset and restore module after idle
2482 * @nb: notifier block
2483 * @cmd: unused
2484 * @v: unused
2485 *
2486 * Some interconnect target modules need to be restored, or reset and restored
2487 * on CPU_PM CPU_PM_CLUSTER_EXIT notifier. This is needed at least for am335x
2488 * OTG and GPMC target modules even if the modules are unused.
2489 */
sysc_context_notifier(struct notifier_block *nb, unsigned long cmd, void *v)2490 static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd,
2491 void *v)
2492 {
2493 struct sysc_soc_info *soc;
2494
2495 soc = container_of(nb, struct sysc_soc_info, nb);
2496
2497 switch (cmd) {
2498 case CPU_CLUSTER_PM_ENTER:
2499 break;
2500 case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
2501 break;
2502 case CPU_CLUSTER_PM_EXIT:
2503 sysc_reinit_modules(soc);
2504 break;
2505 }
2506
2507 return NOTIFY_OK;
2508 }
2509
/**
 * sysc_add_restored - optionally add reset and restore quirk handling
 * @ddata: device data
 */
static void sysc_add_restored(struct sysc *ddata)
{
	struct sysc_module *restored_module;

	/* Best effort: on allocation failure the module is simply not
	 * restored after CPU cluster power transitions */
	restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL);
	if (!restored_module)
		return;

	restored_module->ddata = ddata;

	mutex_lock(&sysc_soc->list_lock);

	list_add(&restored_module->node, &sysc_soc->restored_modules);

	/* Register the cpu_pm notifier once, when the first module is added */
	if (sysc_soc->nb.notifier_call)
		goto out_unlock;

	sysc_soc->nb.notifier_call = sysc_context_notifier;
	cpu_pm_register_notifier(&sysc_soc->nb);

out_unlock:
	mutex_unlock(&sysc_soc->list_lock);
}
2537
2538 /**
2539 * sysc_legacy_idle_quirk - handle children in omap_device compatible way
2540 * @ddata: device driver data
2541 * @child: child device driver
2542 *
2543 * Allow idle for child devices as done with _od_runtime_suspend().
2544 * Otherwise many child devices will not idle because of the permanent
2545 * parent usecount set in pm_runtime_irq_safe().
2546 *
2547 * Note that the long term solution is to just modify the child device
2548 * drivers to not set pm_runtime_irq_safe() and then this can be just
2549 * dropped.
2550 */
sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)2551 static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
2552 {
2553 if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
2554 dev_pm_domain_set(child, &sysc_child_pm_domain);
2555 }
2556
sysc_notifier_call(struct notifier_block *nb, unsigned long event, void *device)2557 static int sysc_notifier_call(struct notifier_block *nb,
2558 unsigned long event, void *device)
2559 {
2560 struct device *dev = device;
2561 struct sysc *ddata;
2562 int error;
2563
2564 ddata = sysc_child_to_parent(dev);
2565 if (!ddata)
2566 return NOTIFY_DONE;
2567
2568 switch (event) {
2569 case BUS_NOTIFY_ADD_DEVICE:
2570 error = sysc_child_add_clocks(ddata, dev);
2571 if (error)
2572 return error;
2573 sysc_legacy_idle_quirk(ddata, dev);
2574 break;
2575 default:
2576 break;
2577 }
2578
2579 return NOTIFY_DONE;
2580 }
2581
/* Platform bus notifier for configuring ti-sysc child devices */
static struct notifier_block sysc_nb = {
	.notifier_call = sysc_notifier_call,
};
2585
/* Device tree configured quirks */
struct sysc_dts_quirk {
	const char *name;	/* dts property name */
	u32 mask;		/* quirk flag(s) to set when present */
};

static const struct sysc_dts_quirk sysc_dts_quirks[] = {
	{ .name = "ti,no-idle-on-init",
	  .mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
	{ .name = "ti,no-reset-on-init",
	  .mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
	{ .name = "ti,no-idle",
	  .mask = SYSC_QUIRK_NO_IDLE, },
};
2600
/* Translate dts quirk properties into SYSC_QUIRK_* configuration flags */
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
				  bool is_child)
{
	const struct property *prop;
	int len, i;

	for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) {
		const struct sysc_dts_quirk *quirk = &sysc_dts_quirks[i];

		prop = of_get_property(np, quirk->name, &len);
		if (!prop)
			continue;

		ddata->cfg.quirks |= quirk->mask;

		/* Quirks belong on the module node, not on its children */
		if (is_child)
			dev_warn(ddata->dev,
				 "dts flag should be at module level for %s\n",
				 quirk->name);
	}
}
2622
sysc_init_dts_quirks(struct sysc *ddata)2623 static int sysc_init_dts_quirks(struct sysc *ddata)
2624 {
2625 struct device_node *np = ddata->dev->of_node;
2626 int error;
2627 u32 val;
2628
2629 ddata->legacy_mode = of_get_property(np, "ti,hwmods", NULL);
2630
2631 sysc_parse_dts_quirks(ddata, np, false);
2632 error = of_property_read_u32(np, "ti,sysc-delay-us", &val);
2633 if (!error) {
2634 if (val > 255) {
2635 dev_warn(ddata->dev, "bad ti,sysc-delay-us: %i\n",
2636 val);
2637 }
2638
2639 ddata->cfg.srst_udelay = (u8)val;
2640 }
2641
2642 return 0;
2643 }
2644
sysc_unprepare(struct sysc *ddata)2645 static void sysc_unprepare(struct sysc *ddata)
2646 {
2647 int i;
2648
2649 if (!ddata->clocks)
2650 return;
2651
2652 for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
2653 if (!IS_ERR_OR_NULL(ddata->clocks[i]))
2654 clk_unprepare(ddata->clocks[i]);
2655 }
2656 }
2657
/*
 * Common sysc register bits found on omap2, also known as type1.
 * A negative shift (-ENODEV) marks a bit that does not exist for
 * the module type.
 */
static const struct sysc_regbits sysc_regbits_omap2 = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = 12,
	.sidle_shift = 3,
	.clkact_shift = 8,
	.emufree_shift = 5,
	.enwkup_shift = 2,
	.srst_shift = 1,
	.autoidle_shift = 0,
};

static const struct sysc_capabilities sysc_omap2 = {
	.type = TI_SYSC_OMAP2,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
		     SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
		     SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap2,
};

/* All omap2 and 3 timers, and timers 1, 2 & 10 on omap 4 and 5 */
static const struct sysc_capabilities sysc_omap2_timer = {
	.type = TI_SYSC_OMAP2_TIMER,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY | SYSC_OMAP2_EMUFREE |
		     SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_SOFTRESET |
		     SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap2,
	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT,
};

/*
 * SHAM2 (SHA1/MD5) sysc found on omap3, a variant of sysc_regbits_omap2
 * with different sidle position
 */
static const struct sysc_regbits sysc_regbits_omap3_sham = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 4,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = 1,
	.autoidle_shift = 0,
	.emufree_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap3_sham = {
	.type = TI_SYSC_OMAP3_SHAM,
	.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap3_sham,
};

/*
 * AES register bits found on omap3 and later, a variant of
 * sysc_regbits_omap2 with different sidle position
 */
static const struct sysc_regbits sysc_regbits_omap3_aes = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 6,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = 1,
	.autoidle_shift = 0,
	.emufree_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap3_aes = {
	.type = TI_SYSC_OMAP3_AES,
	.sysc_mask = SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE,
	.regbits = &sysc_regbits_omap3_aes,
};
2731
/*
 * Common sysc register bits found on omap4, also known as type2
 */
static const struct sysc_regbits sysc_regbits_omap4 = {
	.dmadisable_shift = 16,
	.midle_shift = 4,
	.sidle_shift = 2,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.emufree_shift = 1,
	.srst_shift = 0,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4 = {
	.type = TI_SYSC_OMAP4,
	.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
		     SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_omap4,
};

static const struct sysc_capabilities sysc_omap4_timer = {
	.type = TI_SYSC_OMAP4_TIMER,
	.sysc_mask = SYSC_OMAP4_DMADISABLE | SYSC_OMAP4_FREEEMU |
		     SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_omap4,
};

/*
 * Common sysc register bits found on omap4, also known as type3
 */
static const struct sysc_regbits sysc_regbits_omap4_simple = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = 2,
	.sidle_shift = 0,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_simple = {
	.type = TI_SYSC_OMAP4_SIMPLE,
	.regbits = &sysc_regbits_omap4_simple,
};

/*
 * SmartReflex sysc found on omap34xx
 */
static const struct sysc_regbits sysc_regbits_omap34xx_sr = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = -ENODEV,
	.clkact_shift = 20,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_34xx_sr = {
	.type = TI_SYSC_OMAP34XX_SR,
	.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
	.regbits = &sysc_regbits_omap34xx_sr,
	.mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
		      SYSC_QUIRK_LEGACY_IDLE,
};
2800
/*
 * SmartReflex sysc found on omap36xx and later
 */
static const struct sysc_regbits sysc_regbits_omap36xx_sr = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 24,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 26,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_36xx_sr = {
	.type = TI_SYSC_OMAP36XX_SR,
	.sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
	.regbits = &sysc_regbits_omap36xx_sr,
	.mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
};

/* omap4 SmartReflex shares the omap36xx register bit layout */
static const struct sysc_capabilities sysc_omap4_sr = {
	.type = TI_SYSC_OMAP4_SR,
	.regbits = &sysc_regbits_omap36xx_sr,
	.mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
};

/*
 * McASP register bits found on omap4 and later
 */
static const struct sysc_regbits sysc_regbits_omap4_mcasp = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 0,
	.clkact_shift = -ENODEV,
	.enwkup_shift = -ENODEV,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_mcasp = {
	.type = TI_SYSC_OMAP4_MCASP,
	.regbits = &sysc_regbits_omap4_mcasp,
	.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};

/*
 * McASP found on dra7 and later
 */
static const struct sysc_capabilities sysc_dra7_mcasp = {
	.type = TI_SYSC_OMAP4_SIMPLE,
	.regbits = &sysc_regbits_omap4_simple,
	.mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
};

/*
 * FS USB host found on omap4 and later
 */
static const struct sysc_regbits sysc_regbits_omap4_usb_host_fs = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = 24,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 26,
	.srst_shift = -ENODEV,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
	.type = TI_SYSC_OMAP4_USB_HOST_FS,
	.sysc_mask = SYSC_OMAP2_ENAWAKEUP,
	.regbits = &sysc_regbits_omap4_usb_host_fs,
};

/* MCAN on dra76x with its own enawakeup and softreset bit positions */
static const struct sysc_regbits sysc_regbits_dra7_mcan = {
	.dmadisable_shift = -ENODEV,
	.midle_shift = -ENODEV,
	.sidle_shift = -ENODEV,
	.clkact_shift = -ENODEV,
	.enwkup_shift = 4,
	.srst_shift = 0,
	.emufree_shift = -ENODEV,
	.autoidle_shift = -ENODEV,
};

static const struct sysc_capabilities sysc_dra7_mcan = {
	.type = TI_SYSC_DRA7_MCAN,
	.sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
	.regbits = &sysc_regbits_dra7_mcan,
	.mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
};

/*
 * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
 */
static const struct sysc_capabilities sysc_pruss = {
	.type = TI_SYSC_PRUSS,
	.sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
	.regbits = &sysc_regbits_omap4_simple,
	.mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
};
2904
sysc_init_pdata(struct sysc *ddata)2905 static int sysc_init_pdata(struct sysc *ddata)
2906 {
2907 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
2908 struct ti_sysc_module_data *mdata;
2909
2910 if (!pdata)
2911 return 0;
2912
2913 mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
2914 if (!mdata)
2915 return -ENOMEM;
2916
2917 if (ddata->legacy_mode) {
2918 mdata->name = ddata->legacy_mode;
2919 mdata->module_pa = ddata->module_pa;
2920 mdata->module_size = ddata->module_size;
2921 mdata->offsets = ddata->offsets;
2922 mdata->nr_offsets = SYSC_MAX_REGS;
2923 mdata->cap = ddata->cap;
2924 mdata->cfg = &ddata->cfg;
2925 }
2926
2927 ddata->mdata = mdata;
2928
2929 return 0;
2930 }
2931
sysc_init_match(struct sysc *ddata)2932 static int sysc_init_match(struct sysc *ddata)
2933 {
2934 const struct sysc_capabilities *cap;
2935
2936 cap = of_device_get_match_data(ddata->dev);
2937 if (!cap)
2938 return -EINVAL;
2939
2940 ddata->cap = cap;
2941 if (ddata->cap)
2942 ddata->cfg.quirks |= ddata->cap->mod_quirks;
2943
2944 return 0;
2945 }
2946
/* Deferred work to drop the clock and PM usage counts held since init */
static void ti_sysc_idle(struct work_struct *work)
{
	struct sysc *ddata;

	ddata = container_of(work, struct sysc, idle_work.work);

	/*
	 * One time decrement of clock usage counts if left on from init.
	 * Note that we disable opt clocks unconditionally in this case
	 * as they are enabled unconditionally during init without
	 * considering sysc_opt_clks_needed() at that point.
	 */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT)) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	/* Keep permanent PM runtime usage count for SYSC_QUIRK_NO_IDLE */
	if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
		return;

	/*
	 * Decrement PM runtime usage count for SYSC_QUIRK_NO_IDLE_ON_INIT
	 * and SYSC_QUIRK_NO_RESET_ON_INIT
	 */
	if (pm_runtime_active(ddata->dev))
		pm_runtime_put_sync(ddata->dev);
}
2977
/*
 * SoC model and features detection. Only needed for SoCs that need
 * special handling for quirks, no need to list others. The machine
 * strings are glob patterns matched by soc_device_match().
 */
static const struct soc_device_attribute sysc_soc_match[] = {
	SOC_FLAG("OMAP242*", SOC_2420),
	SOC_FLAG("OMAP243*", SOC_2430),
	SOC_FLAG("AM35*", SOC_AM35),
	SOC_FLAG("OMAP3[45]*", SOC_3430),
	SOC_FLAG("OMAP3[67]*", SOC_3630),
	SOC_FLAG("OMAP443*", SOC_4430),
	SOC_FLAG("OMAP446*", SOC_4460),
	SOC_FLAG("OMAP447*", SOC_4470),
	SOC_FLAG("OMAP54*", SOC_5430),
	SOC_FLAG("AM433", SOC_AM3),
	SOC_FLAG("AM43*", SOC_AM4),
	SOC_FLAG("DRA7*", SOC_DRA7),

	{ /* sentinel */ },
};
2998
/*
 * List of SoCs variants with disabled features. By default we assume all
 * devices in the device tree are available so no need to list those SoCs.
 * The data is a DIS_* bitmask of accelerators absent on the variant.
 */
static const struct soc_device_attribute sysc_soc_feat_match[] = {
	/* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
	SOC_FLAG("AM3505", DIS_SGX),
	SOC_FLAG("OMAP3525", DIS_SGX),
	SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
	SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),

	/* OMAP3630/DM3730 variants with some accelerators disabled */
	SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
	SOC_FLAG("DM3725", DIS_SGX),
	SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
	SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
	SOC_FLAG("OMAP3621", DIS_ISP),

	{ /* sentinel */ },
};
3019
sysc_add_disabled(unsigned long base)3020 static int sysc_add_disabled(unsigned long base)
3021 {
3022 struct sysc_address *disabled_module;
3023
3024 disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
3025 if (!disabled_module)
3026 return -ENOMEM;
3027
3028 disabled_module->base = base;
3029
3030 mutex_lock(&sysc_soc->list_lock);
3031 list_add(&disabled_module->node, &sysc_soc->disabled_modules);
3032 mutex_unlock(&sysc_soc->list_lock);
3033
3034 return 0;
3035 }
3036
3037 /*
3038 * One time init to detect the booted SoC, disable unavailable features
3039 * and initialize list for optional cpu_pm notifier.
3040 *
3041 * Note that we initialize static data shared across all ti-sysc instances
3042 * so ddata is only used for SoC type. This can be called from module_init
3043 * once we no longer need to rely on platform data.
3044 */
sysc_init_static_data(struct sysc *ddata)3045 static int sysc_init_static_data(struct sysc *ddata)
3046 {
3047 const struct soc_device_attribute *match;
3048 struct ti_sysc_platform_data *pdata;
3049 unsigned long features = 0;
3050
3051 if (sysc_soc)
3052 return 0;
3053
3054 sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
3055 if (!sysc_soc)
3056 return -ENOMEM;
3057
3058 mutex_init(&sysc_soc->list_lock);
3059 INIT_LIST_HEAD(&sysc_soc->disabled_modules);
3060 INIT_LIST_HEAD(&sysc_soc->restored_modules);
3061 sysc_soc->general_purpose = true;
3062
3063 pdata = dev_get_platdata(ddata->dev);
3064 if (pdata && pdata->soc_type_gp)
3065 sysc_soc->general_purpose = pdata->soc_type_gp();
3066
3067 match = soc_device_match(sysc_soc_match);
3068 if (match && match->data)
3069 sysc_soc->soc = (enum sysc_soc)(uintptr_t)match->data;
3070
3071 /* Ignore devices that are not available on HS and EMU SoCs */
3072 if (!sysc_soc->general_purpose) {
3073 switch (sysc_soc->soc) {
3074 case SOC_3430 ... SOC_3630:
3075 sysc_add_disabled(0x48304000); /* timer12 */
3076 break;
3077 case SOC_AM3:
3078 sysc_add_disabled(0x48310000); /* rng */
3079 break;
3080 default:
3081 break;
3082 };
3083 }
3084
3085 match = soc_device_match(sysc_soc_feat_match);
3086 if (!match)
3087 return 0;
3088
3089 if (match->data)
3090 features = (unsigned long)match->data;
3091
3092 /*
3093 * Add disabled devices to the list based on the module base.
3094 * Note that this must be done before we attempt to access the
3095 * device and have module revision checks working.
3096 */
3097 if (features & DIS_ISP)
3098 sysc_add_disabled(0x480bd400);
3099 if (features & DIS_IVA)
3100 sysc_add_disabled(0x5d000000);
3101 if (features & DIS_SGX)
3102 sysc_add_disabled(0x50000000);
3103
3104 return 0;
3105 }
3106
sysc_cleanup_static_data(void)3107 static void sysc_cleanup_static_data(void)
3108 {
3109 struct sysc_module *restored_module;
3110 struct sysc_address *disabled_module;
3111 struct list_head *pos, *tmp;
3112
3113 if (!sysc_soc)
3114 return;
3115
3116 if (sysc_soc->nb.notifier_call)
3117 cpu_pm_unregister_notifier(&sysc_soc->nb);
3118
3119 mutex_lock(&sysc_soc->list_lock);
3120 list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) {
3121 restored_module = list_entry(pos, struct sysc_module, node);
3122 list_del(pos);
3123 kfree(restored_module);
3124 }
3125 list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
3126 disabled_module = list_entry(pos, struct sysc_address, node);
3127 list_del(pos);
3128 kfree(disabled_module);
3129 }
3130 mutex_unlock(&sysc_soc->list_lock);
3131 }
3132
sysc_check_disabled_devices(struct sysc *ddata)3133 static int sysc_check_disabled_devices(struct sysc *ddata)
3134 {
3135 struct sysc_address *disabled_module;
3136 struct list_head *pos;
3137 int error = 0;
3138
3139 mutex_lock(&sysc_soc->list_lock);
3140 list_for_each(pos, &sysc_soc->disabled_modules) {
3141 disabled_module = list_entry(pos, struct sysc_address, node);
3142 if (ddata->module_pa == disabled_module->base) {
3143 dev_dbg(ddata->dev, "module disabled for this SoC\n");
3144 error = -ENODEV;
3145 break;
3146 }
3147 }
3148 mutex_unlock(&sysc_soc->list_lock);
3149
3150 return error;
3151 }
3152
/*
 * Ignore timers tagged with no-reset and no-idle. These are likely in use,
 * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
 * are needed, we could also look at the timer register configuration.
 */
static int sysc_check_active_timer(struct sysc *ddata)
{
	int error;

	/* Only timer modules can be claimed by a system timer */
	if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
	    ddata->cap->type != TI_SYSC_OMAP4_TIMER)
		return 0;

	/*
	 * Quirk for omap3 beagleboard revision A to B4 to use gpt12.
	 * Revision C and later are fixed with commit 23885389dbbb ("ARM:
	 * dts: Fix timer regression for beagleboard revision c"). This all
	 * can be dropped if we stop supporting old beagleboard revisions
	 * A to B4 at some point.
	 */
	if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
		error = -ENXIO;	/* caller marks the timer as reserved */
	else
		error = -EBUSY;	/* caller fails the probe */

	if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
	    (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
		return error;

	return 0;
}
3184
/* Match table used when populating child devices of a sysc module */
static const struct of_device_id sysc_match_table[] = {
	{ .compatible = "simple-bus", },
	{ /* sentinel */ },
};
3189
/**
 * sysc_probe() - probe a ti-sysc interconnect target module
 * @pdev: platform device for the interconnect target
 *
 * Initializes the module in a fixed sequence: static SoC data, match data,
 * devicetree quirks, register mapping, sysconfig/sysstatus masks, idlemodes
 * and legacy platform data. Then checks whether the module is disabled on
 * this SoC or reserved as an active system timer, sets up clocks and resets,
 * enables the module via PM runtime, and finally populates child devices.
 *
 * Returns 0 on success or a negative error code. On failure after clocks
 * were acquired, unwinds via the unprepare/err labels.
 */
static int sysc_probe(struct platform_device *pdev)
{
	struct ti_sysc_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct sysc *ddata;
	int error;

	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	ddata->dev = &pdev->dev;
	platform_set_drvdata(pdev, ddata);

	/* Ordered init steps; each returns a negative error code on failure.
	 * Before sysc_get_clocks() nothing needs explicit unwinding as all
	 * earlier allocations are devm-managed.
	 */
	error = sysc_init_static_data(ddata);
	if (error)
		return error;

	error = sysc_init_match(ddata);
	if (error)
		return error;

	error = sysc_init_dts_quirks(ddata);
	if (error)
		return error;

	error = sysc_map_and_check_registers(ddata);
	if (error)
		return error;

	error = sysc_init_sysc_mask(ddata);
	if (error)
		return error;

	error = sysc_init_idlemodes(ddata);
	if (error)
		return error;

	error = sysc_init_syss_mask(ddata);
	if (error)
		return error;

	error = sysc_init_pdata(ddata);
	if (error)
		return error;

	sysc_init_early_quirks(ddata);

	/* Bail out if this module is on the SoC's disabled-modules list */
	error = sysc_check_disabled_devices(ddata);
	if (error)
		return error;

	/*
	 * A timer in use by the system timer code is normally busy (-EBUSY),
	 * but on omap3/am35 it is only marked reserved (-ENXIO) so the probe
	 * continues without populating children. See sysc_check_active_timer().
	 */
	error = sysc_check_active_timer(ddata);
	if (error == -ENXIO)
		ddata->reserved = true;
	else if (error)
		return error;

	error = sysc_get_clocks(ddata);
	if (error)
		return error;

	error = sysc_init_resets(ddata);
	if (error)
		goto unprepare;

	error = sysc_init_module(ddata);
	if (error)
		goto unprepare;

	pm_runtime_enable(ddata->dev);
	error = pm_runtime_get_sync(ddata->dev);
	if (error < 0) {
		/* get_sync bumps the use count even on failure; drop it */
		pm_runtime_put_noidle(ddata->dev);
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	/* Balance use counts as PM runtime should have enabled these all */
	if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
		reset_control_assert(ddata->rsts);

	if (!(ddata->cfg.quirks &
	      (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
		sysc_disable_main_clocks(ddata);
		sysc_disable_opt_clocks(ddata);
		sysc_clkdm_allow_idle(ddata);
	}

	sysc_show_registers(ddata);

	ddata->dev->type = &sysc_device_type;

	/* Reserved modules (active system timers) get no child devices */
	if (!ddata->reserved) {
		error = of_platform_populate(ddata->dev->of_node,
					     sysc_match_table,
					     pdata ? pdata->auxdata : NULL,
					     ddata->dev);
		if (error)
			goto err;
	}

	INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);

	/* At least earlycon won't survive without deferred idle */
	if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
				 SYSC_QUIRK_NO_IDLE_ON_INIT |
				 SYSC_QUIRK_NO_RESET_ON_INIT)) {
		/* Delay is in jiffies; actual time depends on HZ */
		schedule_delayed_work(&ddata->idle_work, 3000);
	} else {
		pm_runtime_put(&pdev->dev);
	}

	/* Modules losing context need reinit tracking across CPU PM events */
	if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST)
		sysc_add_restored(ddata);

	return 0;

err:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
unprepare:
	sysc_unprepare(ddata);

	return error;
}
3315
/**
 * sysc_remove() - remove a ti-sysc interconnect target module
 * @pdev: platform device being removed
 *
 * Flushes any pending deferred idle work, resumes the module so the
 * children can be removed safely, depopulates child devices, drops the
 * PM runtime state and asserts the module reset if not already asserted.
 * Always returns 0 to match the platform driver remove convention.
 */
static int sysc_remove(struct platform_device *pdev)
{
	struct sysc *ddata = platform_get_drvdata(pdev);
	int error;

	/* Device can still be enabled, see deferred idle quirk in probe */
	if (cancel_delayed_work_sync(&ddata->idle_work))
		ti_sysc_idle(&ddata->idle_work.work);

	error = pm_runtime_get_sync(ddata->dev);
	if (error < 0) {
		/* Cannot resume; skip depopulate and just unprepare clocks */
		pm_runtime_put_noidle(ddata->dev);
		pm_runtime_disable(ddata->dev);
		goto unprepare;
	}

	of_platform_depopulate(&pdev->dev);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Leave the module in reset unless something still holds it out */
	if (!reset_control_status(ddata->rsts))
		reset_control_assert(ddata->rsts);

unprepare:
	sysc_unprepare(ddata);

	return 0;
}
3345
/* Compatible strings mapped to per-module register layout capabilities */
static const struct of_device_id sysc_match[] = {
	{ .compatible = "ti,sysc-omap2", .data = &sysc_omap2, },
	{ .compatible = "ti,sysc-omap2-timer", .data = &sysc_omap2_timer, },
	{ .compatible = "ti,sysc-omap4", .data = &sysc_omap4, },
	{ .compatible = "ti,sysc-omap4-timer", .data = &sysc_omap4_timer, },
	{ .compatible = "ti,sysc-omap4-simple", .data = &sysc_omap4_simple, },
	{ .compatible = "ti,sysc-omap3430-sr", .data = &sysc_34xx_sr, },
	{ .compatible = "ti,sysc-omap3630-sr", .data = &sysc_36xx_sr, },
	{ .compatible = "ti,sysc-omap4-sr", .data = &sysc_omap4_sr, },
	{ .compatible = "ti,sysc-omap3-sham", .data = &sysc_omap3_sham, },
	{ .compatible = "ti,sysc-omap-aes", .data = &sysc_omap3_aes, },
	{ .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, },
	{ .compatible = "ti,sysc-dra7-mcasp", .data = &sysc_dra7_mcasp, },
	{ .compatible = "ti,sysc-usb-host-fs",
	  .data = &sysc_omap4_usb_host_fs, },
	{ .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
	{ .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
	{ },
};
MODULE_DEVICE_TABLE(of, sysc_match);
3366
/* Platform driver glue; PM ops handle runtime suspend/resume and context */
static struct platform_driver sysc_driver = {
	.probe		= sysc_probe,
	.remove		= sysc_remove,
	.driver         = {
		.name   = "ti-sysc",
		.of_match_table	= sysc_match,
		.pm = &sysc_pm_ops,
	},
};
3376
sysc_init(void)3377 static int __init sysc_init(void)
3378 {
3379 bus_register_notifier(&platform_bus_type, &sysc_nb);
3380
3381 return platform_driver_register(&sysc_driver);
3382 }
3383 module_init(sysc_init);
3384
/* Module exit: mirror sysc_init(), then free the SoC static data lists */
static void __exit sysc_exit(void)
{
	bus_unregister_notifier(&platform_bus_type, &sysc_nb);
	platform_driver_unregister(&sysc_driver);
	sysc_cleanup_static_data();
}
module_exit(sysc_exit);
3392
/* Module metadata */
MODULE_DESCRIPTION("TI sysc interconnect target driver");
MODULE_LICENSE("GPL v2");
3395