/*
 * Allwinner A20/A31 SoCs NMI IRQ chip driver.
 *
 * Carlo Caione <carlo.caione@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#define DRV_NAME	"sunxi-nmi"
#define pr_fmt(fmt)	DRV_NAME ": " fmt

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>

#define SUNXI_NMI_SRC_TYPE_MASK	0x00000003

#define SUNXI_NMI_IRQ_BIT	BIT(0)

#define SUN6I_R_INTC_CTRL	0x0c
#define SUN6I_R_INTC_PENDING	0x10
#define SUN6I_R_INTC_ENABLE	0x40

/*
 * For the deprecated sun6i-a31-sc-nmi compatible.
 * Registers are offset by 0x0c.
 */
#define SUN6I_R_INTC_NMI_OFFSET	0x0c
#define SUN6I_NMI_CTRL		(SUN6I_R_INTC_CTRL - SUN6I_R_INTC_NMI_OFFSET)
#define SUN6I_NMI_PENDING	(SUN6I_R_INTC_PENDING - SUN6I_R_INTC_NMI_OFFSET)
#define SUN6I_NMI_ENABLE	(SUN6I_R_INTC_ENABLE - SUN6I_R_INTC_NMI_OFFSET)

#define SUN7I_NMI_CTRL		0x00
#define SUN7I_NMI_PENDING	0x04
#define SUN7I_NMI_ENABLE	0x08

#define SUN9I_NMI_CTRL		0x00
#define SUN9I_NMI_ENABLE	0x04
#define SUN9I_NMI_PENDING	0x08

enum {
	SUNXI_SRC_TYPE_LEVEL_LOW = 0,
	SUNXI_SRC_TYPE_EDGE_FALLING,
	SUNXI_SRC_TYPE_LEVEL_HIGH,
	SUNXI_SRC_TYPE_EDGE_RISING,
};

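/*
 * Each supported SoC exposes the same three NMI registers (control,
 * pending, enable), only at different offsets; the per-variant tables
 * below capture those layouts.
 */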
struct sunxi_sc_nmi_reg_offs {
	u32 ctrl;
	u32 pend;
	u32 enable;
};

static const struct sunxi_sc_nmi_reg_offs sun6i_r_intc_reg_offs __initconst = {
	.ctrl	= SUN6I_R_INTC_CTRL,
	.pend	= SUN6I_R_INTC_PENDING,
	.enable	= SUN6I_R_INTC_ENABLE,
};

static const struct sunxi_sc_nmi_reg_offs sun6i_reg_offs __initconst = {
	.ctrl	= SUN6I_NMI_CTRL,
	.pend	= SUN6I_NMI_PENDING,
	.enable	= SUN6I_NMI_ENABLE,
};

static const struct sunxi_sc_nmi_reg_offs sun7i_reg_offs __initconst = {
	.ctrl	= SUN7I_NMI_CTRL,
	.pend	= SUN7I_NMI_PENDING,
	.enable	= SUN7I_NMI_ENABLE,
};

static const struct sunxi_sc_nmi_reg_offs sun9i_reg_offs __initconst = {
	.ctrl	= SUN9I_NMI_CTRL,
	.pend	= SUN9I_NMI_PENDING,
	.enable	= SUN9I_NMI_ENABLE,
};

static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
				      u32 val)
{
	irq_reg_writel(gc, val, off);
}

static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
{
	return irq_reg_readl(gc, off);
}

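/*
 * Chained handler for the parent interrupt: the domain holds a single
 * hwirq (0), so look up its mapping and forward the event, bracketed by
 * chained_irq_enter()/chained_irq_exit() on the parent chip.
 */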
static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int virq = irq_find_mapping(domain, 0);

	chained_irq_enter(chip, desc);
	generic_handle_irq(virq);
	chained_irq_exit(chip, desc);
}

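/*
 * Program the trigger mode: translate the generic IRQ_TYPE_* flags into
 * the hardware SRC_TYPE encoding, switch between the level and edge
 * chip_types via irq_setup_alt_chip(), then update the SRC_TYPE field in
 * the control register of whichever chip_type matches the new flow type.
 */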
static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	struct irq_chip_type *ct = gc->chip_types;
	u32 src_type_reg;
	u32 ctrl_off = ct->regs.type;
	unsigned int src_type;
	unsigned int i;

	irq_gc_lock(gc);

	switch (flow_type & IRQF_TRIGGER_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_RISING:
		src_type = SUNXI_SRC_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
		break;
	case IRQ_TYPE_NONE:
	case IRQ_TYPE_LEVEL_LOW:
		src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
		break;
	default:
		irq_gc_unlock(gc);
		pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
			data->irq);
		return -EBADR;
	}

	irqd_set_trigger_type(data, flow_type);
	irq_setup_alt_chip(data, flow_type);

	for (i = 0; i < gc->num_ct; i++, ct++)
		if (ct->type & flow_type)
			ctrl_off = ct->regs.type;

	src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
	src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
	src_type_reg |= src_type;
	sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);

	irq_gc_unlock(gc);

	return IRQ_SET_MASK_OK;
}

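/*
 * Common init path: create a one-interrupt linear domain, allocate a
 * generic chip with two chip_types (level and edge), map the registers
 * from the DT node, quiesce the hardware, and finally install the
 * chained handler on the parent interrupt.
 */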
static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
					const struct sunxi_sc_nmi_reg_offs *reg_offs)
{
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	unsigned int irq;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int ret;

	domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("Could not register interrupt domain.\n");
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
					     handle_fasteoi_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("Could not allocate generic interrupt chip.\n");
		goto fail_irqd_remove;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		pr_err("unable to parse irq\n");
		ret = -EINVAL;
		goto fail_irqd_remove;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
	if (IS_ERR(gc->reg_base)) {
		pr_err("unable to map resource\n");
		ret = PTR_ERR(gc->reg_base);
		goto fail_irqd_remove;
	}

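	/*
	 * Level triggers use the fasteoi flow: the pending bit is cleared
	 * from irq_eoi, deferred until the (possibly threaded) handler has
	 * actually run, hence IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED.
	 */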
	gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
	gc->chip_types[0].regs.ack		= reg_offs->pend;
	gc->chip_types[0].regs.mask		= reg_offs->enable;
	gc->chip_types[0].regs.type		= reg_offs->ctrl;

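	/*
	 * Edge triggers use handle_edge_irq instead: the pending bit is
	 * acked up front via irq_ack so that a new edge arriving while the
	 * handler runs is not lost.
	 */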
	gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[1].chip.name		= gc->chip_types[0].chip.name;
	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[1].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[1].regs.ack		= reg_offs->pend;
	gc->chip_types[1].regs.mask		= reg_offs->enable;
	gc->chip_types[1].regs.type		= reg_offs->ctrl;
	gc->chip_types[1].handler		= handle_edge_irq;

	/* Disable any active interrupts */
	sunxi_sc_nmi_write(gc, reg_offs->enable, 0);

	/* Clear any pending NMI interrupts */
	sunxi_sc_nmi_write(gc, reg_offs->pend, SUNXI_NMI_IRQ_BIT);

	irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);

	return 0;

fail_irqd_remove:
	irq_domain_remove(domain);

	return ret;
}

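/*
 * Per-compatible entry points: each one only selects the register layout
 * for its SoC and defers to the common init above.
 */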
static int __init sun6i_r_intc_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun6i_r_intc_reg_offs);
}
IRQCHIP_DECLARE(sun6i_r_intc, "allwinner,sun6i-a31-r-intc",
		sun6i_r_intc_irq_init);

static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);

static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);

static int __init sun9i_nmi_irq_init(struct device_node *node,
				     struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs);
}
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);