1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2012 Regents of the University of California
4 * Copyright (C) 2017-2018 SiFive
5 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
6 */
7
8#define pr_fmt(fmt) "riscv-intc: " fmt
9#include <linux/acpi.h>
10#include <linux/atomic.h>
11#include <linux/bits.h>
12#include <linux/cpu.h>
13#include <linux/irq.h>
14#include <linux/irqchip.h>
15#include <linux/irqdomain.h>
16#include <linux/interrupt.h>
17#include <linux/module.h>
18#include <linux/of.h>
19#include <linux/smp.h>
20
21static struct irq_domain *intc_domain;
22
23static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
24{
25	unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
26
27	if (unlikely(cause >= BITS_PER_LONG))
28		panic("unexpected interrupt cause");
29
30	generic_handle_domain_irq(intc_domain, cause);
31}
32
33/*
34 * On RISC-V systems local interrupts are masked or unmasked by writing
35 * the SIE (Supervisor Interrupt Enable) CSR.  As CSRs can only be written
36 * on the local hart, these functions can only be called on the hart that
37 * corresponds to the IRQ chip.
38 */
39
40static void riscv_intc_irq_mask(struct irq_data *d)
41{
42	csr_clear(CSR_IE, BIT(d->hwirq));
43}
44
45static void riscv_intc_irq_unmask(struct irq_data *d)
46{
47	csr_set(CSR_IE, BIT(d->hwirq));
48}
49
static void riscv_intc_irq_eoi(struct irq_data *d)
{
	/*
	 * Deliberately empty.
	 *
	 * Per-HART local interrupts use the handle_percpu_devid_irq()
	 * flow, and child irqchip drivers (PLIC, SBI IPI, CLINT, APLIC,
	 * IMSIC, ...) install chained handlers on them. When a parent
	 * irqchip provides no irq_eoi() callback, chained_irq_enter()
	 * and chained_irq_exit() fall back to masking and unmasking the
	 * parent interrupt around every chained dispatch. Supplying this
	 * no-op EOI callback avoids that needless mask/unmask churn.
	 */
}
65
/*
 * irqchip for the per-HART local interrupt lines. mask/unmask write the
 * IE CSR on the local hart only, and irq_eoi is intentionally a no-op
 * (see riscv_intc_irq_eoi()).
 */
static struct irq_chip riscv_intc_chip = {
	.name = "RISC-V INTC",
	.irq_mask = riscv_intc_irq_mask,
	.irq_unmask = riscv_intc_irq_unmask,
	.irq_eoi = riscv_intc_irq_eoi,
};
72
73static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
74				 irq_hw_number_t hwirq)
75{
76	irq_set_percpu_devid(irq);
77	irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
78			    handle_percpu_devid_irq, NULL, NULL);
79
80	return 0;
81}
82
83static int riscv_intc_domain_alloc(struct irq_domain *domain,
84				   unsigned int virq, unsigned int nr_irqs,
85				   void *arg)
86{
87	int i, ret;
88	irq_hw_number_t hwirq;
89	unsigned int type = IRQ_TYPE_NONE;
90	struct irq_fwspec *fwspec = arg;
91
92	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
93	if (ret)
94		return ret;
95
96	for (i = 0; i < nr_irqs; i++) {
97		ret = riscv_intc_domain_map(domain, virq + i, hwirq + i);
98		if (ret)
99			return ret;
100	}
101
102	return 0;
103}
104
/* Domain ops: legacy map/xlate plus hierarchy-capable alloc. */
static const struct irq_domain_ops riscv_intc_domain_ops = {
	.map	= riscv_intc_domain_map,
	.xlate	= irq_domain_xlate_onecell,
	.alloc	= riscv_intc_domain_alloc
};
110
111static struct fwnode_handle *riscv_intc_hwnode(void)
112{
113	return intc_domain->fwnode;
114}
115
116static int __init riscv_intc_init_common(struct fwnode_handle *fn)
117{
118	int rc;
119
120	intc_domain = irq_domain_create_linear(fn, BITS_PER_LONG,
121					       &riscv_intc_domain_ops, NULL);
122	if (!intc_domain) {
123		pr_err("unable to add IRQ domain\n");
124		return -ENXIO;
125	}
126
127	rc = set_handle_irq(&riscv_intc_irq);
128	if (rc) {
129		pr_err("failed to set irq handler\n");
130		return rc;
131	}
132
133	riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
134
135	pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
136
137	return 0;
138}
139
140static int __init riscv_intc_init(struct device_node *node,
141				  struct device_node *parent)
142{
143	int rc;
144	unsigned long hartid;
145
146	rc = riscv_of_parent_hartid(node, &hartid);
147	if (rc < 0) {
148		pr_warn("unable to find hart id for %pOF\n", node);
149		return 0;
150	}
151
152	/*
153	 * The DT will have one INTC DT node under each CPU (or HART)
154	 * DT node so riscv_intc_init() function will be called once
155	 * for each INTC DT node. We only need to do INTC initialization
156	 * for the INTC DT node belonging to boot CPU (or boot HART).
157	 */
158	if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
159		/*
160		 * The INTC nodes of each CPU are suppliers for downstream
161		 * interrupt controllers (such as PLIC, IMSIC and APLIC
162		 * direct-mode) so we should mark an INTC node as initialized
163		 * if we are not creating IRQ domain for it.
164		 */
165		fwnode_dev_initialized(of_fwnode_handle(node), true);
166		return 0;
167	}
168
169	return riscv_intc_init_common(of_node_to_fwnode(node));
170}
171
172IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
173
174#ifdef CONFIG_ACPI
175
176static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
177				       const unsigned long end)
178{
179	struct fwnode_handle *fn;
180	struct acpi_madt_rintc *rintc;
181
182	rintc = (struct acpi_madt_rintc *)header;
183
184	/*
185	 * The ACPI MADT will have one INTC for each CPU (or HART)
186	 * so riscv_intc_acpi_init() function will be called once
187	 * for each INTC. We only do INTC initialization
188	 * for the INTC belonging to the boot CPU (or boot HART).
189	 */
190	if (riscv_hartid_to_cpuid(rintc->hart_id) != smp_processor_id())
191		return 0;
192
193	fn = irq_domain_alloc_named_fwnode("RISCV-INTC");
194	if (!fn) {
195		pr_err("unable to allocate INTC FW node\n");
196		return -ENOMEM;
197	}
198
199	return riscv_intc_init_common(fn);
200}
201
/* Matches RINTC (version 1) subtables in the ACPI MADT. */
IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
		     ACPI_MADT_RINTC_VERSION_V1, riscv_intc_acpi_init);
204#endif
205