/*
 * linux/arch/arm/mach-omap2/irq.c
 *
 * Interrupt handler for OMAP2 boards.
 *
 * Copyright (C) 2005 Nokia Corporation
 * Author: Paul Mundt <paul.mundt@nokia.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#include <asm/exception.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <linux/irqchip/irq-omap-intc.h>

/* selected INTC register offsets */

#define INTC_REVISION		0x0000
#define INTC_SYSCONFIG		0x0010
#define INTC_SYSSTATUS		0x0014
#define INTC_SIR		0x0040
#define INTC_CONTROL		0x0048
#define INTC_PROTECTION		0x004C
#define INTC_IDLE		0x0050
#define INTC_THRESHOLD		0x0068
#define INTC_MIR0		0x0084
#define INTC_MIR_CLEAR0		0x0088
#define INTC_MIR_SET0		0x008c
#define INTC_PENDING_IRQ0	0x0098
#define INTC_PENDING_IRQ1	0x00b8
#define INTC_PENDING_IRQ2	0x00d8
#define INTC_PENDING_IRQ3	0x00f8
#define INTC_ILR0		0x0100

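/*
 * INTC_SIR layout: bits [6:0] hold the number of the highest-priority
 * active interrupt, bits [31:7] form the SPURIOUSIRQ field used to
 * detect a spurious sort result (see omap_intc_handle_irq() below).
 */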
#define ACTIVEIRQ_MASK		0x7f	/* omap2/3 active interrupt bits */
#define SPURIOUSIRQ_MASK	(0x1ffffff << 7)
#define INTCPS_NR_ILR_REGS	128
#define INTCPS_NR_MIR_REGS	4

#define INTC_IDLE_FUNCIDLE	(1 << 0)
#define INTC_IDLE_TURBO		(1 << 1)

#define INTC_PROTECTION_ENABLE	(1 << 0)

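/*
 * Snapshot of the INTC registers saved by omap_intc_save_context()
 * and written back by omap_intc_restore_context().
 */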
struct omap_intc_regs {
	u32 sysconfig;
	u32 protection;
	u32 idle;
	u32 threshold;
	u32 ilr[INTCPS_NR_ILR_REGS];
	u32 mir[INTCPS_NR_MIR_REGS];
};
static struct omap_intc_regs intc_context;

static struct irq_domain *domain;
static void __iomem *omap_irq_base;
static int omap_nr_pending;
static int omap_nr_irqs;

static void intc_writel(u32 reg, u32 val)
{
	writel_relaxed(val, omap_irq_base + reg);
}

static u32 intc_readl(u32 reg)
{
	return readl_relaxed(omap_irq_base + reg);
}

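/*
 * Save the INTC state (sysconfig, protection, idle, threshold, per-IRQ
 * ILR priority registers and MIR mask registers) so it can be written
 * back by omap_intc_restore_context() after a power-off transition.
 */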
void omap_intc_save_context(void)
{
	int i;

	intc_context.sysconfig =
		intc_readl(INTC_SYSCONFIG);
	intc_context.protection =
		intc_readl(INTC_PROTECTION);
	intc_context.idle =
		intc_readl(INTC_IDLE);
	intc_context.threshold =
		intc_readl(INTC_THRESHOLD);

	for (i = 0; i < omap_nr_irqs; i++)
		intc_context.ilr[i] =
			intc_readl((INTC_ILR0 + 0x4 * i));
	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
		intc_context.mir[i] =
			intc_readl(INTC_MIR0 + (0x20 * i));
}

void omap_intc_restore_context(void)
{
	int i;

	intc_writel(INTC_SYSCONFIG, intc_context.sysconfig);
	intc_writel(INTC_PROTECTION, intc_context.protection);
	intc_writel(INTC_IDLE, intc_context.idle);
	intc_writel(INTC_THRESHOLD, intc_context.threshold);

	for (i = 0; i < omap_nr_irqs; i++)
		intc_writel(INTC_ILR0 + 0x4 * i,
				intc_context.ilr[i]);

	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
		intc_writel(INTC_MIR0 + 0x20 * i,
			intc_context.mir[i]);
	/* MIRs are saved and restored with other PRCM registers */
}

void omap3_intc_prepare_idle(void)
{
	/*
	 * Disable autoidle as it can stall the interrupt controller,
	 * cf. errata ID i540 for 3430 (all revisions up to 3.1.x)
	 */
	intc_writel(INTC_SYSCONFIG, 0);
	intc_writel(INTC_IDLE, INTC_IDLE_TURBO);
}

void omap3_intc_resume_idle(void)
{
	/* Re-enable autoidle */
	intc_writel(INTC_SYSCONFIG, 1);
	intc_writel(INTC_IDLE, 0);
}

/* XXX: FIQ and additional INTC support (only MPU at the moment) */
static void omap_ack_irq(struct irq_data *d)
{
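	/*
	 * Writing NEWIRQAGR (bit 0) to INTC_CONTROL resets the IRQ output
	 * and lets the INTC assert the next pending interrupt.
	 */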
	intc_writel(INTC_CONTROL, 0x1);
}

static void omap_mask_ack_irq(struct irq_data *d)
{
	irq_gc_mask_disable_reg(d);
	omap_ack_irq(d);
}

static void __init omap_irq_soft_reset(void)
{
	unsigned long tmp;

	tmp = intc_readl(INTC_REVISION) & 0xff;

	pr_info("IRQ: Found an INTC at 0x%p (revision %ld.%ld) with %d interrupts\n",
		omap_irq_base, tmp >> 4, tmp & 0xf, omap_nr_irqs);

	tmp = intc_readl(INTC_SYSCONFIG);
	tmp |= 1 << 1;	/* soft reset */
	intc_writel(INTC_SYSCONFIG, tmp);

	while (!(intc_readl(INTC_SYSSTATUS) & 0x1))
		/* Wait for reset to complete */;

	/* Enable autoidle */
	intc_writel(INTC_SYSCONFIG, 1 << 0);
}

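/*
 * Return 1 if any interrupt is pending in the INTC_PENDING_IRQn
 * registers; the OMAP2/3 PM code uses this to abort an idle/suspend
 * attempt while an interrupt is still outstanding.
 */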
int omap_irq_pending(void)
{
	int i;

	for (i = 0; i < omap_nr_pending; i++)
		if (intc_readl(INTC_PENDING_IRQ0 + (0x20 * i)))
			return 1;
	return 0;
}

void omap3_intc_suspend(void)
{
	/* A pending interrupt would prevent OMAP from entering suspend */
	omap_ack_irq(NULL);
}

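/*
 * DT probe path: set up one generic irq_chip per bank of 32 interrupts,
 * using the per-bank MIR_CLEARn/MIR_SETn registers as the enable and
 * disable registers respectively.
 */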
static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base)
{
	int ret;
	int i;

	ret = irq_alloc_domain_generic_chips(d, 32, 1, "INTC",
			handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE,
			IRQ_LEVEL, 0);
	if (ret) {
		pr_warn("Failed to allocate irq chips\n");
		return ret;
	}

	for (i = 0; i < omap_nr_pending; i++) {
		struct irq_chip_generic *gc;
		struct irq_chip_type *ct;

		gc = irq_get_domain_generic_chip(d, 32 * i);
		gc->reg_base = base;
		ct = gc->chip_types;

		ct->type = IRQ_TYPE_LEVEL_MASK;

		ct->chip.irq_ack = omap_mask_ack_irq;
		ct->chip.irq_mask = irq_gc_mask_disable_reg;
		ct->chip.irq_unmask = irq_gc_unmask_enable_reg;

		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

		ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
		ct->regs.disable = INTC_MIR_SET0 + 32 * i;
	}

	return 0;
}

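/*
 * Legacy (non-DT) path: called once per bank of 32 interrupts.  The
 * register base passed in is already offset for the bank (banks are
 * 0x20 apart), so the bank 0 MIR_CLEAR0/MIR_SET0 offsets land on the
 * right bank's registers.
 */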
static void __init omap_alloc_gc_legacy(void __iomem *base,
		unsigned int irq_start, unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("INTC", 1, irq_start, base,
			handle_level_irq);
	ct = gc->chip_types;
	ct->chip.irq_ack = omap_mask_ack_irq;
	ct->chip.irq_mask = irq_gc_mask_disable_reg;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

	ct->regs.enable = INTC_MIR_CLEAR0;
	ct->regs.disable = INTC_MIR_SET0;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}

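/*
 * DT initialization: map the INTC, create a linear IRQ domain and hook
 * up the generic chips.
 */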
static int __init omap_init_irq_of(struct device_node *node)
{
	int ret;

	omap_irq_base = of_iomap(node, 0);
	if (WARN_ON(!omap_irq_base))
		return -ENOMEM;

	domain = irq_domain_add_linear(node, omap_nr_irqs,
			&irq_generic_chip_ops, NULL);

	omap_irq_soft_reset();

	ret = omap_alloc_gc_of(domain, omap_irq_base);
	if (ret < 0)
		irq_domain_remove(domain);

	return ret;
}

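/*
 * Legacy initialization: ioremap the INTC, allocate a contiguous range
 * of IRQ descriptors and register a legacy domain on top of it.
 */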
static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
{
	int j, irq_base;

	omap_irq_base = ioremap(base, SZ_4K);
	if (WARN_ON(!omap_irq_base))
		return -ENOMEM;

	irq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0);
	if (irq_base < 0) {
		pr_warn("Couldn't allocate IRQ numbers\n");
		irq_base = 0;
	}

	domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
			&irq_domain_simple_ops, NULL);

	omap_irq_soft_reset();

	for (j = 0; j < omap_nr_irqs; j += 32)
		omap_alloc_gc_legacy(omap_irq_base + j, j + irq_base, 32);

	return 0;
}

static void __init omap_irq_enable_protection(void)
{
	u32 reg;

	reg = intc_readl(INTC_PROTECTION);
	reg |= INTC_PROTECTION_ENABLE;
	intc_writel(INTC_PROTECTION, reg);
}

static int __init omap_init_irq(u32 base, struct device_node *node)
{
	int ret;

	/*
	 * FIXME: the legacy OMAP DMA driver sitting under
	 * arch/arm/plat-omap/dma.c is still not ready for linear IRQ domains;
	 * because of that we need to temporarily "blacklist" OMAP2 and OMAP3
	 * devices from using the linear IRQ domain until that driver is
	 * finally fixed.
	 */
	if (of_device_is_compatible(node, "ti,omap2-intc") ||
			of_device_is_compatible(node, "ti,omap3-intc")) {
		struct resource res;

		if (of_address_to_resource(node, 0, &res))
			return -ENOMEM;

		base = res.start;
		ret = omap_init_irq_legacy(base, node);
	} else if (node) {
		ret = omap_init_irq_of(node);
	} else {
		ret = omap_init_irq_legacy(base, NULL);
	}

	if (ret == 0)
		omap_irq_enable_protection();

	return ret;
}

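/*
 * Low-level entry point: read the active interrupt number from INTC_SIR
 * and dispatch it through the IRQ domain.
 */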
static asmlinkage void __exception_irq_entry
omap_intc_handle_irq(struct pt_regs *regs)
{
	extern unsigned long irq_err_count;
	u32 irqnr;

	irqnr = intc_readl(INTC_SIR);

	/*
	 * A spurious IRQ can result if the interrupt that triggered the
	 * priority sorting is no longer active by the time the sorting
	 * completes (10 INTC functional clock cycles after interrupt
	 * assertion), or if a change in the interrupt mask affected the
	 * result while sorting was in progress. No special handling is
	 * required beyond ignoring the SIR value just read and retrying.
	 * See section 6.2.5 of the AM335x TRM, Literature Number SPRUH73K.
	 *
	 * Often a spurious interrupt situation has been fixed by adding
	 * a read back to flush the posted write that acks the IRQ in the
	 * device driver, typically the driver whose interrupt was handled
	 * just before the spurious IRQ occurred. Pay attention to those
	 * device drivers if you run into the spurious IRQ condition below.
	 */
	if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
		pr_err_once("%s: spurious irq!\n", __func__);
		irq_err_count++;
		omap_ack_irq(NULL);
		return;
	}

	irqnr &= ACTIVEIRQ_MASK;
	handle_domain_irq(domain, irqnr, regs);
}

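/*
 * OMAP2/3 INTCs expose 96 interrupts (three pending registers); the
 * dm814x/dm816x and am33xx variants expose 128 (four pending registers).
 */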
static int __init intc_of_init(struct device_node *node,
			     struct device_node *parent)
{
	int ret;

	omap_nr_pending = 3;
	omap_nr_irqs = 96;

	if (WARN_ON(!node))
		return -ENODEV;

	if (of_device_is_compatible(node, "ti,dm814-intc") ||
	    of_device_is_compatible(node, "ti,dm816-intc") ||
	    of_device_is_compatible(node, "ti,am33xx-intc")) {
		omap_nr_irqs = 128;
		omap_nr_pending = 4;
	}

	ret = omap_init_irq(-1, of_node_get(node));
	if (ret < 0)
		return ret;

	set_handle_irq(omap_intc_handle_irq);

	return 0;
}

IRQCHIP_DECLARE(omap2_intc, "ti,omap2-intc", intc_of_init);
IRQCHIP_DECLARE(omap3_intc, "ti,omap3-intc", intc_of_init);
IRQCHIP_DECLARE(dm814x_intc, "ti,dm814-intc", intc_of_init);
IRQCHIP_DECLARE(dm816x_intc, "ti,dm816-intc", intc_of_init);
IRQCHIP_DECLARE(am33xx_intc, "ti,am33xx-intc", intc_of_init);