// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-pxa/pxa3xx.c
 *
 * code specific to pxa3xx aka Monahans
 *
 * Copyright (C) 2006 Marvell International Ltd.
 *
 * 2007-09-02: eric miao <eric.miao@marvell.com>
 *             initial version
 */
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio-pxa.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/platform_data/mmp_dma.h>

#include <asm/mach/map.h>
#include <asm/suspend.h>
#include <mach/hardware.h>
#include <mach/pxa3xx-regs.h>
#include <mach/reset.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include "pm.h"
#include <mach/dma.h>
#include <mach/smemc.h>
#include <mach/irqs.h>

#include "generic.h"
#include "devices.h"

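/*
 * PECR bits for the two external wakeup pins: PECR_IE(n) is the interrupt
 * enable bit for EXT_WAKEUP<n> (bit 28 or 30), PECR_IS(n) the matching
 * interrupt status bit (bit 29 or 31), which the ack handler below writes
 * to clear a pending wakeup.
 */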
#define PECR_IE(n)	((1 << ((n) * 2)) << 28)
#define PECR_IS(n)	((1 << ((n) * 2)) << 29)

extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));

/*
 * NAND NFC: DFI bus arbitration subset
 */
#define NDCR			(*(volatile u32 __iomem*)(NAND_VIRT + 0))
#define NDCR_ND_ARB_EN		(1 << 12)
#define NDCR_ND_ARB_CNTL	(1 << 19)

#ifdef CONFIG_PM

#define ISRAM_START	0x5c000000
#define ISRAM_SIZE	SZ_256K

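/*
 * "sram" maps the on-chip ISRAM which holds the standby entry code,
 * "wakeup_src" accumulates the wakeup-source mask built up by
 * pxa3xx_set_wake().
 */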
static void __iomem *sram;
static unsigned long wakeup_src;

/*
 * Enter a standby mode (S0D1C2 or S0D2C2).  Upon wakeup, the dynamic
 * memory controller has to be reinitialised, so we place some code
 * in the SRAM to perform this function.
 *
 * We disable FIQs across the standby - otherwise, we might receive a
 * FIQ while the SDRAM is unavailable.
 */
static void pxa3xx_cpu_standby(unsigned int pwrmode)
{
	void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);

	memcpy_toio(sram + 0x8000, pm_enter_standby_start,
		    pm_enter_standby_end - pm_enter_standby_start);

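	/*
	 * Clear stale wakeup status, enable the requested wakeup sources
	 * for D2D0 only, and clear the write-1-to-clear status bits in
	 * ASCR/ARSR by writing their current values back.
	 */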
	AD2D0SR = ~0;
	AD2D1SR = ~0;
	AD2D0ER = wakeup_src;
	AD2D1ER = 0;
	ASCR = ASCR;
	ARSR = ARSR;

	local_fiq_disable();
	fn(pwrmode);
	local_fiq_enable();

	AD2D0ER = 0;
	AD2D1ER = 0;
}

/*
 * NOTE: currently, the OBM (OEM Boot Module) binary that comes with the
 * PXA3xx development kits assumes that the resume process continues from
 * the address stored in the first 4 bytes of SDRAM.  The PSPR register is
 * used privately by the BootROM and the OBM, and _must_ be set to
 * 0x5c014000 for the moment.
 */
static void pxa3xx_cpu_pm_suspend(void)
{
	volatile unsigned long *p = (volatile void *)0xc0000000;
	unsigned long saved_data = *p;
#ifndef CONFIG_IWMMXT
	u64 acc0;

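	/*
	 * The XScale accumulator acc0 is not saved elsewhere when iWMMXt
	 * support is disabled, so preserve it manually across the suspend.
	 */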
	asm volatile(".arch_extension xscale\n\t"
		     "mra %Q0, %R0, acc0" : "=r" (acc0));
#endif

	/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
	CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
	CKENB |= 1 << (CKEN_HSIO2 & 0x1f);

	/* clear and setup wakeup source */
	AD3SR = ~0;
	AD3ER = wakeup_src;
	ASCR = ASCR;
	ARSR = ARSR;

	PCFR |= (1u << 13);			/* L1_DIS */
	PCFR &= ~((1u << 12) | (1u << 1));	/* L0_EN | SL_ROD */

	PSPR = 0x5c014000;

	/* overwrite with the resume address */
	*p = __pa_symbol(cpu_resume);

	cpu_suspend(0, pxa3xx_finish_suspend);

	*p = saved_data;

	AD3ER = 0;

#ifndef CONFIG_IWMMXT
	/* mar reads the saved value, so acc0 is an input operand here */
	asm volatile(".arch_extension xscale\n\t"
		     "mar acc0, %Q0, %R0" : : "r" (acc0));
#endif
}

static void pxa3xx_cpu_pm_enter(suspend_state_t state)
{
	/*
	 * Don't sleep if no wakeup sources are defined
	 */
	if (wakeup_src == 0) {
		printk(KERN_ERR "Not suspending: no wakeup sources\n");
		return;
	}

	switch (state) {
	case PM_SUSPEND_STANDBY:
		pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
		break;

	case PM_SUSPEND_MEM:
		pxa3xx_cpu_pm_suspend();
		break;
	}
}

static int pxa3xx_cpu_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
}

static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
	.valid		= pxa3xx_cpu_pm_valid,
	.enter		= pxa3xx_cpu_pm_enter,
};

static void __init pxa3xx_init_pm(void)
{
	sram = ioremap(ISRAM_START, ISRAM_SIZE);
	if (!sram) {
		printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
		return;
	}

	/*
	 * Since we copy wakeup code into the SRAM, we need to ensure
	 * that it is preserved over the low power modes.  Note: bit 8
	 * is undocumented in the developer manual, but must be set.
	 */
	AD1R |= ADXR_L2 | ADXR_R0;
	AD2R |= ADXR_L2 | ADXR_R0;
	AD3R |= ADXR_L2 | ADXR_R0;

	/*
	 * Clear the resume enable registers.
	 */
	AD1D0ER = 0;
	AD2D0ER = 0;
	AD2D1ER = 0;
	AD3ER = 0;

	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
}

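/*
 * Translate a peripheral IRQ into its wakeup-enable bit; the resulting
 * mask is accumulated in wakeup_src and programmed into AD2D0ER/AD3ER
 * when entering standby or suspend.
 */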
static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
{
	unsigned long flags, mask = 0;

	switch (d->irq) {
	case IRQ_SSP3:
		mask = ADXER_MFP_WSSP3;
		break;
	case IRQ_MSL:
		mask = ADXER_WMSL0;
		break;
	case IRQ_USBH2:
	case IRQ_USBH1:
		mask = ADXER_WUSBH;
		break;
	case IRQ_KEYPAD:
		mask = ADXER_WKP;
		break;
	case IRQ_AC97:
		mask = ADXER_MFP_WAC97;
		break;
	case IRQ_USIM:
		mask = ADXER_WUSIM0;
		break;
	case IRQ_SSP2:
		mask = ADXER_MFP_WSSP2;
		break;
	case IRQ_I2C:
		mask = ADXER_MFP_WI2C;
		break;
	case IRQ_STUART:
		mask = ADXER_MFP_WUART3;
		break;
	case IRQ_BTUART:
		mask = ADXER_MFP_WUART2;
		break;
	case IRQ_FFUART:
		mask = ADXER_MFP_WUART1;
		break;
	case IRQ_MMC:
		mask = ADXER_MFP_WMMC1;
		break;
	case IRQ_SSP:
		mask = ADXER_MFP_WSSP1;
		break;
	case IRQ_RTCAlrm:
		mask = ADXER_WRTC;
		break;
	case IRQ_SSP4:
		mask = ADXER_MFP_WSSP4;
		break;
	case IRQ_TSI:
		mask = ADXER_WTSI;
		break;
	case IRQ_USIM2:
		mask = ADXER_WUSIM1;
		break;
	case IRQ_MMC2:
		mask = ADXER_MFP_WMMC2;
		break;
	case IRQ_NAND:
		mask = ADXER_MFP_WFLASH;
		break;
	case IRQ_USB2:
		mask = ADXER_WUSB2;
		break;
	case IRQ_WAKEUP0:
		mask = ADXER_WEXTWAKE0;
		break;
	case IRQ_WAKEUP1:
		mask = ADXER_WEXTWAKE1;
		break;
	case IRQ_MMC3:
		mask = ADXER_MFP_GEN12;
		break;
	default:
		return -EINVAL;
	}

	local_irq_save(flags);
	if (on)
		wakeup_src |= mask;
	else
		wakeup_src &= ~mask;
	local_irq_restore(flags);

	return 0;
}
#else
static inline void pxa3xx_init_pm(void) {}
#define pxa3xx_set_wake	NULL
#endif

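/*
 * The two EXT_WAKEUP pins have their own irq_chip: besides the normal
 * interrupt controller mask/unmask, their enable and status bits in the
 * PECR register must be managed as well.
 */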
static void pxa_ack_ext_wakeup(struct irq_data *d)
{
	PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
}

static void pxa_mask_ext_wakeup(struct irq_data *d)
{
	pxa_mask_irq(d);
	PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
}

static void pxa_unmask_ext_wakeup(struct irq_data *d)
{
	pxa_unmask_irq(d);
	PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
}

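/*
 * Edge selection for the EXT_WAKEUP pins is done through PWER: the low
 * bits enable rising-edge detection, the same bits shifted up by two
 * enable falling-edge detection.
 */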
static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
{
	if (flow_type & IRQ_TYPE_EDGE_RISING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0);

	if (flow_type & IRQ_TYPE_EDGE_FALLING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);

	return 0;
}

static struct irq_chip pxa_ext_wakeup_chip = {
	.name		= "WAKEUP",
	.irq_ack	= pxa_ack_ext_wakeup,
	.irq_mask	= pxa_mask_ext_wakeup,
	.irq_unmask	= pxa_unmask_ext_wakeup,
	.irq_set_type	= pxa_set_ext_wakeup_type,
};

static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *,
					   unsigned int))
{
	int irq;

	for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
		irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
					 handle_edge_irq);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	pxa_ext_wakeup_chip.irq_set_wake = fn;
}

static void __init __pxa3xx_init_irq(void)
{
	/*
	 * Enable CP6 access: on PXA3xx the interrupt controller registers
	 * are also exposed through coprocessor 6 (the ICHP-based IRQ entry
	 * path relies on this), and bit 6 of the coprocessor access
	 * register gates that access.
	 */
	u32 value;
	__asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n": "=r"(value));
	value |= (1 << 6);
	__asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value));

	pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
}

void __init pxa3xx_init_irq(void)
{
	__pxa3xx_init_irq();
	pxa_init_irq(56, pxa3xx_set_wake);
}

#ifdef CONFIG_OF
static int __init
pxa3xx_dt_init_irq(struct device_node *node, struct device_node *parent)
{
	__pxa3xx_init_irq();
	pxa_dt_irq_init(pxa3xx_set_wake);
	set_handle_irq(ichp_handle_irq);

	return 0;
}
IRQCHIP_DECLARE(pxa3xx_intc, "marvell,pxa-intc", pxa3xx_dt_init_irq);
#endif	/* CONFIG_OF */

static struct map_desc pxa3xx_io_desc[] __initdata = {
	{	/* Mem Ctl */
		.virtual	= (unsigned long)SMEMC_VIRT,
		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
		.length		= SMEMC_SIZE,
		.type		= MT_DEVICE
	}, {
		.virtual	= (unsigned long)NAND_VIRT,
		.pfn		= __phys_to_pfn(NAND_PHYS),
		.length		= NAND_SIZE,
		.type		= MT_DEVICE
	},
};

void __init pxa3xx_map_io(void)
{
	pxa_map_io();
	iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc));
	pxa3xx_get_clk_frequency_khz(1);
}

/*
 * device registration specific to PXA3xx.
 */

void __init pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
	pxa_register_device(&pxa3xx_device_i2c_power, info);
}

static struct pxa_gpio_platform_data pxa3xx_gpio_pdata = {
	.irq_base	= PXA_GPIO_TO_IRQ(0),
};

static struct platform_device *devices[] __initdata = {
	&pxa27x_device_udc,
	&pxa_device_pmu,
	&pxa_device_i2s,
	&pxa_device_asoc_ssp1,
	&pxa_device_asoc_ssp2,
	&pxa_device_asoc_ssp3,
	&pxa_device_asoc_ssp4,
	&pxa_device_asoc_platform,
	&pxa_device_rtc,
	&pxa3xx_device_ssp1,
	&pxa3xx_device_ssp2,
	&pxa3xx_device_ssp3,
	&pxa3xx_device_ssp4,
	&pxa27x_device_pwm0,
	&pxa27x_device_pwm1,
};

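/*
 * dmaengine slave map: the first PDMA_FILTER_PARAM() argument is the
 * channel priority requested from the pxa-dma driver, the second the
 * DMA requestor (DRCMR) number the peripheral is wired to.
 */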
static const struct dma_slave_map pxa3xx_slave_map[] = {
	/* PXA25x, PXA27x and PXA3xx common entries */
	{ "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) },
	{ "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) },
	{ "pxa2xx-ac97", "pcm_pcm_aux_mono_out",
	  PDMA_FILTER_PARAM(LOWEST, 10) },
	{ "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) },
	{ "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) },
	{ "pxa-ssp-dai.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
	{ "pxa-ssp-dai.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
	{ "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
	{ "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
	{ "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) },
	{ "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) },
	{ "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) },
	{ "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) },
	{ "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 66) },
	{ "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 67) },

	/* PXA3xx specific map */
	{ "pxa-ssp-dai.3", "rx", PDMA_FILTER_PARAM(LOWEST, 2) },
	{ "pxa-ssp-dai.3", "tx", PDMA_FILTER_PARAM(LOWEST, 3) },
	{ "pxa2xx-mci.1", "rx", PDMA_FILTER_PARAM(LOWEST, 93) },
	{ "pxa2xx-mci.1", "tx", PDMA_FILTER_PARAM(LOWEST, 94) },
	{ "pxa3xx-nand", "data", PDMA_FILTER_PARAM(LOWEST, 97) },
	{ "pxa2xx-mci.2", "rx", PDMA_FILTER_PARAM(LOWEST, 100) },
	{ "pxa2xx-mci.2", "tx", PDMA_FILTER_PARAM(LOWEST, 101) },
};

static struct mmp_dma_platdata pxa3xx_dma_pdata = {
	.dma_channels	= 32,
	.nb_requestors	= 100,
	.slave_map	= pxa3xx_slave_map,
	.slave_map_cnt	= ARRAY_SIZE(pxa3xx_slave_map),
};

static int __init pxa3xx_init(void)
{
	int ret = 0;

	if (cpu_is_pxa3xx()) {

		reset_status = ARSR;

		/*
		 * Clear the RDH bit every time after reset.
		 *
		 * Note: the last 3 bits DxS are write-1-to-clear, so they are
		 * deliberately masked off here to avoid clearing them, in case
		 * they are referenced later.
		 */
		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

		/*
		 * Disable DFI bus arbitration, to prevent a system bus lock if
		 * somebody disables the NAND clock (unused clock) while this
		 * bit remains set.
		 */
		NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;

		pxa3xx_init_pm();

		enable_irq_wake(IRQ_WAKEUP0);
		if (cpu_is_pxa320())
			enable_irq_wake(IRQ_WAKEUP1);

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa3xx_mfp_syscore_ops);

		if (of_have_populated_dt())
			return 0;

		pxa2xx_set_dmac_info(&pxa3xx_dma_pdata);
		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
		if (ret)
			return ret;
		if (cpu_is_pxa300() || cpu_is_pxa310() || cpu_is_pxa320()) {
			platform_device_add_data(&pxa3xx_device_gpio,
						 &pxa3xx_gpio_pdata,
						 sizeof(pxa3xx_gpio_pdata));
			ret = platform_device_register(&pxa3xx_device_gpio);
		}
	}

	return ret;
}

postcore_initcall(pxa3xx_init);