// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt) "GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/wakeup_reason.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)

#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)

#define IRQ_S_TO_US_VALUE 1000000
#define IRQ_HW_IRQ_VALUE 32
#define IRQ_HW_IRQ_VALUE_MUL 4
#define IRQ_GIC_REG_INDEX_MUL 8
#define GIC_IRQ_TYPE_SPI 0
#define GIC_IRQ_TYPE_PPI 1
#define GIC_IRQ_TYPE_ESPI 2
#define GIC_IRQ_TYPE_EPPI 3
#define GIC_IRQ_REG_OFFSET_ONE 16
#define GIC_IRQ_REG_OFFSET_TWO 32
#define GIC_IRQ_BUF_INDEX_TWO 2
#define GIC_IRQ_BUF_INDEX_THREE 3
#define GIC_IRQ_PARAMETER_COUNT_TWO 2
#define GIC_IRQ_PARAMETER_COUNT_THREE 3
#define GIC_IRQ_PARAMETER_COUNT_FOUR 4
#define GIC_IRQ_PARAMETER_VALUE_SIXTEEN 16
#define GIC_GEN_MASK_NINE 9
#define GIC_GEN_MASK_EIGHT 8

struct redist_region {
    void __iomem *redist_base;
    phys_addr_t phys_base;
    bool single_redist;
};

struct gic_chip_data {
    struct fwnode_handle *fwnode;
    void __iomem *dist_base;
    struct redist_region *redist_regions;
    struct rdists rdists;
    struct irq_domain *domain;
    u64 redist_stride;
    u32 nr_redist_regions;
    u64 flags;
    bool has_rss;
    unsigned int ppi_nr;
    struct partition_desc **ppi_descs;
};

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)

/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation, thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * See GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 */
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

/*
 * Global static key controlling whether an update to PMR allowing more
 * interrupts requires the change to be propagated to the redistributor
 * (DSB SY). This needs to be exported for modules to be able to enable
 * interrupts...
 */
DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
EXPORT_SYMBOL(gic_pmr_sync);

DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
EXPORT_SYMBOL(gic_nonsecure_priorities);

/*
 * When the Non-secure world has access to group 0 interrupts (as a
 * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
 * return the Distributor's view of the interrupt priority.
 *
 * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
 * written by software is moved to the Non-secure range by the Distributor.
 *
 * If both are true (which is when gic_nonsecure_priorities gets enabled),
 * we need to shift down the priority programmed by software to match it
 * against the value returned by ICC_RPR_EL1.
 */
#define GICD_INT_RPR_PRI(priority)                                 \
    ({                                                             \
        u32 __priority = (priority);                               \
        if (static_branch_unlikely(&gic_nonsecure_priorities))     \
            __priority = 0x80 | (__priority >> 1);                 \
                                                                   \
        __priority;                                                \
    })
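
/*
 * Worked example (illustrative, assuming the usual GICD_INT_DEF_PRI of
 * 0xa0): GICD_INT_NMI_PRI is then 0xa0 & ~0x80 = 0x20, and with
 * gic_nonsecure_priorities enabled, GICD_INT_RPR_PRI() yields
 * 0x80 | (0x20 >> 1) = 0x90, which is the value ICC_RPR_EL1 is expected
 * to return for an NMI.
 */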

/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t *ppi_nmi_refs;

static struct gic_kvm_info gic_v3_kvm_info;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE 0xf0

enum gic_intid_range { SGI_RANGE, PPI_RANGE, SPI_RANGE, EPPI_RANGE, ESPI_RANGE, LPI_RANGE, __INVALID_RANGE__ };

static enum gic_intid_range get_intid_range_func(irq_hw_number_t hwirq)
{
    switch (hwirq) {
        case 0 ... 0xf:
            return SGI_RANGE;
        case 0x10 ... 0x1f:
            return PPI_RANGE;
        case 0x20 ... 0x3fb:
            return SPI_RANGE;
        case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 0x3f):
            return EPPI_RANGE;
        case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 0x3ff):
            return ESPI_RANGE;
        case 0x2000 ... GENMASK(23, 0):
            return LPI_RANGE;
        default:
            return __INVALID_RANGE__;
    }
}
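
/*
 * For reference, the ranges above decode INTIDs as follows: SGIs 0-15,
 * PPIs 16-31, SPIs 32-1019, EPPIs 1056-1119 and ESPIs 4096-5119 (with
 * EPPI_BASE_INTID/ESPI_BASE_INTID being 1056/4096 per the GICv3.1
 * architecture), and LPIs from 8192 upwards.
 */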

static enum gic_intid_range get_intid_range(struct irq_data *d)
{
    return get_intid_range_func(d->hwirq);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
    return d->hwirq;
}

static inline bool gic_irq_in_rdist(struct irq_data *d)
{
    switch (get_intid_range(d)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case EPPI_RANGE:
            return true;
        default:
            return false;
    }
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
    switch (get_intid_range(d)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case EPPI_RANGE:
            /* SGI+PPI -> SGI_base for this CPU */
            return gic_data_rdist_sgi_base();

        case SPI_RANGE:
        case ESPI_RANGE:
            /* SPI -> dist_base */
            return gic_data.dist_base;

        default:
            return NULL;
    }
}

static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
    u32 count = IRQ_S_TO_US_VALUE; /* 1s! */

    while (readl_relaxed(base + GICD_CTLR) & bit) {
        count--;
        if (!count) {
            pr_err_ratelimited("RWP timeout, gone fishing\n");
            return;
        }
        cpu_relax();
        udelay(1);
    }
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
    gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
    gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}

#ifdef CONFIG_ARM64

static u64 __maybe_unused gic_read_iar(void)
{
    if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154)) {
        return gic_read_iar_cavium_thunderx();
    } else {
        return gic_read_iar_common();
    }
}
#endif

static void gic_enable_redist(bool enable)
{
    void __iomem *rbase;
    u32 count = IRQ_S_TO_US_VALUE; /* 1s! */
    u32 val;

    if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996) {
        return;
    }

    rbase = gic_data_rdist_rd_base();

    val = readl_relaxed(rbase + GICR_WAKER);
    if (enable) {
        /* Wake up this CPU redistributor */
        val &= ~GICR_WAKER_ProcessorSleep;
    } else {
        val |= GICR_WAKER_ProcessorSleep;
    }
    writel_relaxed(val, rbase + GICR_WAKER);

    if (!enable) { /* Check that GICR_WAKER is writeable */
        val = readl_relaxed(rbase + GICR_WAKER);
        if (!(val & GICR_WAKER_ProcessorSleep)) {
            return; /* No PM support in this redistributor */
        }
    }

    while (--count) {
        val = readl_relaxed(rbase + GICR_WAKER);
        if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) {
            break;
        }
        cpu_relax();
        udelay(1);
    }
    if (!count) {
        pr_err_ratelimited("redistributor failed to %s...\n", enable ? "wakeup" : "sleep");
    }
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
    switch (get_intid_range(d)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case SPI_RANGE:
            *index = d->hwirq;
            return offset;
        case EPPI_RANGE:
            /*
             * Contrary to the ESPI range, the EPPI range is contiguous
             * to the PPI range in the registers, so let's adjust the
             * displacement accordingly. Consistency is overrated.
             */
            *index = d->hwirq - EPPI_BASE_INTID + IRQ_HW_IRQ_VALUE;
            return offset;
        case ESPI_RANGE:
            *index = d->hwirq - ESPI_BASE_INTID;
            switch (offset) {
                case GICD_ISENABLER:
                    return GICD_ISENABLERnE;
                case GICD_ICENABLER:
                    return GICD_ICENABLERnE;
                case GICD_ISPENDR:
                    return GICD_ISPENDRnE;
                case GICD_ICPENDR:
                    return GICD_ICPENDRnE;
                case GICD_ISACTIVER:
                    return GICD_ISACTIVERnE;
                case GICD_ICACTIVER:
                    return GICD_ICACTIVERnE;
                case GICD_IPRIORITYR:
                    return GICD_IPRIORITYRnE;
                case GICD_ICFGR:
                    return GICD_ICFGRnE;
                case GICD_IROUTER:
                    return GICD_IROUTERnE;
                default:
                    break;
            }
            break;
        default:
            break;
    }

    WARN_ON(1);
    *index = d->hwirq;
    return offset;
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
    void __iomem *base;
    u32 index, mask;

    offset = convert_offset_index(d, offset, &index);
    mask = 1 << (index % IRQ_HW_IRQ_VALUE);

    if (gic_irq_in_rdist(d)) {
        base = gic_data_rdist_sgi_base();
    } else {
        base = gic_data.dist_base;
    }

    return !!(readl_relaxed(base + offset + (index / IRQ_HW_IRQ_VALUE) * IRQ_HW_IRQ_VALUE_MUL) & mask);
}
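
/*
 * Example of the index arithmetic above: the bitmap registers hold one
 * bit per INTID, 32 per 4-byte register. For SPI 45, index % 32 selects
 * bit 13, and (index / 32) * 4 selects the second register (byte offset
 * 4) past the base offset.
 */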

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
    void (*rwp_wait)(void);
    void __iomem *base;
    u32 index, mask;

    offset = convert_offset_index(d, offset, &index);
    mask = 1 << (index % IRQ_HW_IRQ_VALUE);

    if (gic_irq_in_rdist(d)) {
        base = gic_data_rdist_sgi_base();
        rwp_wait = gic_redist_wait_for_rwp;
    } else {
        base = gic_data.dist_base;
        rwp_wait = gic_dist_wait_for_rwp;
    }

    writel_relaxed(mask, base + offset + (index / IRQ_HW_IRQ_VALUE) * IRQ_HW_IRQ_VALUE_MUL);
    rwp_wait();
}

static void gic_mask_irq(struct irq_data *d)
{
    gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
    gic_mask_irq(d);
    /*
     * When masking a forwarded interrupt, make sure it is
     * deactivated as well.
     *
     * This ensures that an interrupt that is getting
     * disabled/masked will not get "stuck", because there is
     * no one to deactivate it (the guest is being terminated).
     */
    if (irqd_is_forwarded_to_vcpu(d)) {
        gic_poke_irq(d, GICD_ICACTIVER);
    }
}

static void gic_unmask_irq(struct irq_data *d)
{
    gic_poke_irq(d, GICD_ISENABLER);
}

static inline bool gic_supports_nmi(void)
{
    return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && static_branch_likely(&supports_pseudo_nmis);
}

static int gic_irq_set_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool val)
{
    u32 reg;

    if (d->hwirq >= 0x2000) { /* SGI/PPI/SPI only */
        return -EINVAL;
    }

    switch (which) {
        case IRQCHIP_STATE_PENDING:
            reg = val ? GICD_ISPENDR : GICD_ICPENDR;
            break;

        case IRQCHIP_STATE_ACTIVE:
            reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
            break;

        case IRQCHIP_STATE_MASKED:
            reg = val ? GICD_ICENABLER : GICD_ISENABLER;
            break;

        default:
            return -EINVAL;
    }

    gic_poke_irq(d, reg);
    return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool *val)
{
    if (d->hwirq >= 0x2000) { /* SGI/PPI/SPI only */
        return -EINVAL;
    }

    switch (which) {
        case IRQCHIP_STATE_PENDING:
            *val = gic_peek_irq(d, GICD_ISPENDR);
            break;

        case IRQCHIP_STATE_ACTIVE:
            *val = gic_peek_irq(d, GICD_ISACTIVER);
            break;

        case IRQCHIP_STATE_MASKED:
            *val = !gic_peek_irq(d, GICD_ISENABLER);
            break;

        default:
            return -EINVAL;
    }

    return 0;
}

static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
    void __iomem *base = gic_dist_base(d);
    u32 offset, index;

    offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

    writeb_relaxed(prio, base + offset + index);
}
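
/*
 * Unlike the one-bit-per-interrupt registers above, GICD_IPRIORITYR is
 * byte-indexed: the priority of hwirq N lives at byte offset N, so e.g.
 * setting the priority of SPI 45 writes to GICD_IPRIORITYR + 45.
 */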

static u32 gic_get_ppi_index(struct irq_data *d)
{
    switch (get_intid_range(d)) {
        case PPI_RANGE:
            return d->hwirq - 0x10;
        case EPPI_RANGE:
            return d->hwirq - EPPI_BASE_INTID + 0x10;
        default:
            unreachable();
    }
}

static int gic_irq_nmi_setup(struct irq_data *d)
{
    struct irq_desc *desc = irq_to_desc(d->irq);

    if (!gic_supports_nmi()) {
        return -EINVAL;
    }

    if (gic_peek_irq(d, GICD_ISENABLER)) {
        pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
        return -EINVAL;
    }

    /*
     * A secondary irq_chip should be in charge of LPI request,
     * it should not be possible to get there
     */
    if (WARN_ON(gic_irq(d) >= 0x2000)) {
        return -EINVAL;
    }

    /* desc lock should already be held */
    if (gic_irq_in_rdist(d)) {
        u32 idx = gic_get_ppi_index(d);
        /* Setting up PPI as NMI, only switch handler for first NMI */
        if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
            refcount_set(&ppi_nmi_refs[idx], 1);
            desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
        }
    } else {
        desc->handle_irq = handle_fasteoi_nmi;
    }

    gic_irq_set_prio(d, GICD_INT_NMI_PRI);

    return 0;
}

static void gic_irq_nmi_teardown(struct irq_data *d)
{
    struct irq_desc *desc = irq_to_desc(d->irq);

    if (WARN_ON(!gic_supports_nmi())) {
        return;
    }

    if (gic_peek_irq(d, GICD_ISENABLER)) {
        pr_err("Cannot clear NMI property of enabled IRQ %u\n", d->irq);
        return;
    }

    /*
     * A secondary irq_chip should be in charge of LPI request,
     * it should not be possible to get there
     */
    if (WARN_ON(gic_irq(d) >= 0x2000)) {
        return;
    }

    /* desc lock should already be held */
    if (gic_irq_in_rdist(d)) {
        u32 idx = gic_get_ppi_index(d);
        /* Tearing down NMI, only switch handler for last NMI */
        if (refcount_dec_and_test(&ppi_nmi_refs[idx])) {
            desc->handle_irq = handle_percpu_devid_irq;
        }
    } else {
        desc->handle_irq = handle_fasteoi_irq;
    }

    gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}

static void gic_eoi_irq(struct irq_data *d)
{
    gic_write_eoir(gic_irq(d));
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
    /*
     * No need to deactivate an LPI, or an interrupt that
     * is getting forwarded to a vcpu.
     */
    if (gic_irq(d) >= 0x2000 || irqd_is_forwarded_to_vcpu(d)) {
        return;
    }
    gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
    enum gic_intid_range range;
    unsigned int irq = gic_irq(d);
    void (*rwp_wait)(void);
    void __iomem *base;
    u32 offset, index;
    int ret;

    range = get_intid_range(d);
    /* Interrupt configuration for SGIs can't be changed */
    if (range == SGI_RANGE) {
        return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
    }

    /* SPIs have restrictions on the supported types */
    if ((range == SPI_RANGE || range == ESPI_RANGE) && type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) {
        return -EINVAL;
    }

    if (gic_irq_in_rdist(d)) {
        base = gic_data_rdist_sgi_base();
        rwp_wait = gic_redist_wait_for_rwp;
    } else {
        base = gic_data.dist_base;
        rwp_wait = gic_dist_wait_for_rwp;
    }

    offset = convert_offset_index(d, GICD_ICFGR, &index);

    ret = gic_configure_irq(index, type, base + offset, rwp_wait);
    if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
        /* Misconfigured PPIs are usually not fatal */
        pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
        ret = 0;
    }

    return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
    if (get_intid_range(d) == SGI_RANGE) {
        return -EINVAL;
    }

    if (vcpu) {
        irqd_set_forwarded_to_vcpu(d);
    } else {
        irqd_clr_forwarded_to_vcpu(d);
    }
    return 0;
}

static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
    u64 aff;

    aff = (((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32) | (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16) |
           (MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8) | MPIDR_AFFINITY_LEVEL(mpidr, 0));

    return aff;
}
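
/*
 * Illustration: for an MPIDR with Aff3=1, Aff2=2, Aff1=3 and Aff0=4,
 * this packs (1 << 32) | (2 << 16) | (3 << 8) | 4 = 0x0000000100020304,
 * matching the GICD_IROUTER layout (Aff3 in bits [39:32], Aff2..Aff0 in
 * bits [23:0]).
 */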

static void gic_deactivate_unhandled(u32 irqnr)
{
    if (static_branch_likely(&supports_deactivate_key)) {
        if (irqnr < 0x2000) {
            gic_write_dir(irqnr);
        }
    } else {
        gic_write_eoir(irqnr);
    }
}

static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
    bool irqs_enabled = interrupts_enabled(regs);
    int err;

    if (irqs_enabled) {
        nmi_enter();
    }

    if (static_branch_likely(&supports_deactivate_key)) {
        gic_write_eoir(irqnr);
    }
    /*
     * Leave the PSR.I bit set to prevent other NMIs from being
     * received while handling this one.
     * PSR.I will be restored when we ERET to the
     * interrupted context.
     */
    err = handle_domain_nmi(gic_data.domain, irqnr, regs);
    if (err) {
        gic_deactivate_unhandled(irqnr);
    }

    if (irqs_enabled) {
        nmi_exit();
    }
}

static u32 do_read_iar(struct pt_regs *regs)
{
    u32 iar;

    if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
        u64 pmr;

        /*
         * We were in a context with IRQs disabled. However, the
         * entry code has set PMR to a value that allows any
         * interrupt to be acknowledged, and not just NMIs. This can
         * lead to surprising effects if the NMI has been retired in
         * the meantime and an IRQ is pending. The IRQ would then be
         * taken in NMI context, something that nobody wants to debug
         * twice.
         *
         * Until we sort this out, drop PMR again to a level that will
         * actually only allow NMIs before reading IAR, and then
         * restore it to what it was.
         */
        pmr = gic_read_pmr();
        gic_pmr_mask_irqs();
        isb();

        iar = gic_read_iar();

        gic_write_pmr(pmr);
    } else {
        iar = gic_read_iar();
    }

    return iar;
}

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
    u32 irqnr;

    irqnr = do_read_iar(regs);

    /* Check for special IDs first */
    if (irqnr >= 1020 && irqnr <= 1023) {
        return;
    }

    if (gic_supports_nmi() && unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI))) {
        gic_handle_nmi(irqnr, regs);
        return;
    }

    if (gic_prio_masking_enabled()) {
        gic_pmr_mask_irqs();
        gic_arch_enable_irqs();
    }

    if (static_branch_likely(&supports_deactivate_key)) {
        gic_write_eoir(irqnr);
    } else {
        isb();
    }

    if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
        WARN_ONCE(true, "Unexpected interrupt received!\n");
        log_abnormal_wakeup_reason("unexpected HW IRQ %u", irqnr);
        gic_deactivate_unhandled(irqnr);
    }
}

static u32 gic_get_pribits(void)
{
    u32 pribits;

    pribits = gic_read_ctlr();
    pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
    pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
    pribits++;

    return pribits;
}

static bool gic_has_group0(void)
{
    u32 val;
    u32 old_pmr;

    old_pmr = gic_read_pmr();

    /*
     * Let's find out if Group0 is under control of EL3 or not by
     * setting the highest possible, non-zero priority in PMR.
     *
     * If SCR_EL3.FIQ is set, the priority gets shifted down in
     * order for the CPU interface to set bit 7, and keep the
     * actual priority in the non-secure range. In the process, it
     * loses the least significant bit and the actual priority
     * becomes 0x80. Reading it back returns 0, indicating that
     * we don't have access to Group0.
     */
    gic_write_pmr(BIT(8 - gic_get_pribits()));
    val = gic_read_pmr();

    gic_write_pmr(old_pmr);

    return val != 0;
}
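
/*
 * Probe example (illustrative): with 5 priority bits implemented,
 * BIT(8 - 5) = 0x08 is written to PMR. If EL3 owns Group0, the value is
 * shifted down into the non-secure range and the single set bit is lost,
 * so the read-back is 0; otherwise 0x08 comes back unchanged and Group0
 * is accessible.
 */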

static void __init gic_dist_init(void)
{
    unsigned int i;
    u64 affinity;
    void __iomem *base = gic_data.dist_base;
    u32 val;

    /* Disable the distributor */
    writel_relaxed(0, base + GICD_CTLR);
    gic_dist_wait_for_rwp();

    /*
     * Configure SPIs as non-secure Group-1. This will only matter
     * if the GIC only has a single security state. This will not
     * do the right thing if the kernel is running in secure mode,
     * but that's not the intended use case anyway.
     */
    for (i = 32; i < GIC_LINE_NR; i += 32) {
        writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
    }

    /* Extended SPI range, not handled by the GICv2/GICv3 common code */
    for (i = 0; i < GIC_ESPI_NR; i += 32) {
        writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
        writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
    }

    for (i = 0; i < GIC_ESPI_NR; i += 32) {
        writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
    }

    for (i = 0; i < GIC_ESPI_NR; i += 16) {
        writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
    }

    for (i = 0; i < GIC_ESPI_NR; i += 4) {
        writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
    }

    /* Now do the common stuff, and wait for the distributor to drain */
    gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);

    val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
    if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
        pr_info("Enabling SGIs without active state\n");
        val |= GICD_CTLR_nASSGIreq;
    }

    /* Enable distributor with ARE, Group1 */
    writel_relaxed(val, base + GICD_CTLR);

    /*
     * Set all global interrupts to the boot CPU only. ARE must be
     * enabled.
     */
    affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
    for (i = 32; i < GIC_LINE_NR; i++) {
        gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
    }

    for (i = 0; i < GIC_ESPI_NR; i++) {
        gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
    }
}

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
    int ret = -ENODEV;
    int i;

    for (i = 0; i < gic_data.nr_redist_regions; i++) {
        void __iomem *ptr = gic_data.redist_regions[i].redist_base;
        u64 typer;
        u32 reg;

        reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
        if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
            pr_warn("No redistributor present @%p\n", ptr);
            break;
        }

        do {
            typer = gic_read_typer(ptr + GICR_TYPER);
            ret = fn(gic_data.redist_regions + i, ptr);
            if (!ret) {
                return 0;
            }

            if (gic_data.redist_regions[i].single_redist) {
                break;
            }

            if (gic_data.redist_stride) {
                ptr += gic_data.redist_stride;
            } else {
                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
                if (typer & GICR_TYPER_VLPIS) {
                    ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
                }
            }
        } while (!(typer & GICR_TYPER_LAST));
    }

    return ret ? -ENODEV : 0;
}

static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
    unsigned long mpidr = cpu_logical_map(smp_processor_id());
    u64 typer;
    u32 aff;

    /*
     * Convert affinity to a 32bit value that can be matched to
     * GICR_TYPER bits [63:32].
     */
    aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
           MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | MPIDR_AFFINITY_LEVEL(mpidr, 0));

    typer = gic_read_typer(ptr + GICR_TYPER);
    if ((typer >> 32) == aff) {
        u64 offset = ptr - region->redist_base;
        raw_spin_lock_init(&gic_data_rdist()->rd_lock);
        gic_data_rdist_rd_base() = ptr;
        gic_data_rdist()->phys_base = region->phys_base + offset;

        pr_info("CPU%d: found redistributor %lx region %d:%pa\n", smp_processor_id(), mpidr,
                (int)(region - gic_data.redist_regions), &gic_data_rdist()->phys_base);
        return 0;
    }

    /* Try next one */
    return 1;
}
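
/*
 * The packed 'aff' value mirrors GICR_TYPER[63:32], which holds
 * Aff3:Aff2:Aff1:Aff0 one byte each; e.g. a CPU with affinities 0/0/1/2
 * matches a redistributor whose GICR_TYPER upper word reads 0x00000102.
 */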

static int gic_populate_rdist(void)
{
    if (gic_iterate_rdists(__gic_populate_rdist) == 0) {
        return 0;
    }

    /* We couldn't even deal with ourselves... */
    WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", smp_processor_id(),
         (unsigned long)cpu_logical_map(smp_processor_id()));
    return -ENODEV;
}

static int __gic_update_rdist_properties(struct redist_region *region, void __iomem *ptr)
{
    u64 typer = gic_read_typer(ptr + GICR_TYPER);

    /* Boot-time cleanup */
    if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
        u64 val;

        /* Deactivate any present vPE */
        val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
        if (val & GICR_VPENDBASER_Valid) {
            gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, ptr + SZ_128K + GICR_VPENDBASER);
        }

        /* Mark the VPE table as invalid */
        val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
        val &= ~GICR_VPROPBASER_4_1_VALID;
        gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
    }

    gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);

    /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
    gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
    gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) | gic_data.rdists.has_rvpeid);
    gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);

    /* Detect non-sensical configurations */
    if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
        gic_data.rdists.has_direct_lpi = false;
        gic_data.rdists.has_vlpis = false;
        gic_data.rdists.has_rvpeid = false;
    }

    gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

    return 1;
}

static void gic_update_rdist_properties(void)
{
    gic_data.ppi_nr = UINT_MAX;
    gic_iterate_rdists(__gic_update_rdist_properties);
    if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) {
        gic_data.ppi_nr = 0;
    }
    pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
    if (gic_data.rdists.has_vlpis) {
        pr_info("GICv4 features: %s%s%s\n", gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
                gic_data.rdists.has_rvpeid ? "RVPEID " : "",
                gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
    }
}

/* Check whether it's single security state view */
static inline bool gic_dist_security_disabled(void)
{
    return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static void gic_cpu_sys_reg_init(void)
{
    int i, cpu = smp_processor_id();
    u64 mpidr = cpu_logical_map(cpu);
    u64 need_rss = MPIDR_RS(mpidr);
    bool group0;
    u32 pribits;

    if (!gic_enable_sre()) {
        pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
    }

    pribits = gic_get_pribits();
    group0 = gic_has_group0();

    /* Set priority mask register */
    if (!gic_prio_masking_enabled()) {
        write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
    } else if (gic_supports_nmi()) {
        if (static_branch_unlikely(&gic_nonsecure_priorities)) {
            WARN_ON(!group0 || gic_dist_security_disabled());
        } else {
            WARN_ON(group0 && !gic_dist_security_disabled());
        }
    }
    gic_write_bpr1(0);

    if (static_branch_likely(&supports_deactivate_key)) {
        gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
    } else {
        gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
    }

    if (group0) {
        switch (pribits) {
            case 8:
            case 7:
                write_gicreg(0, ICC_AP0R3_EL1);
                write_gicreg(0, ICC_AP0R2_EL1);
                fallthrough;
            case 6:
                write_gicreg(0, ICC_AP0R1_EL1);
                fallthrough;
            case 5:
            case 4:
                write_gicreg(0, ICC_AP0R0_EL1);
        }
        isb();
    }

    switch (pribits) {
        case 8:
        case 7:
            write_gicreg(0, ICC_AP1R3_EL1);
            write_gicreg(0, ICC_AP1R2_EL1);
            fallthrough;
        case 6:
            write_gicreg(0, ICC_AP1R1_EL1);
            fallthrough;
        case 5:
        case 4:
            write_gicreg(0, ICC_AP1R0_EL1);
    }

    isb();
    gic_write_grpen1(1);
    per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
    for_each_online_cpu(i) {
        bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
        need_rss |= MPIDR_RS(cpu_logical_map(i));
        if (need_rss && (!have_rss)) {
            pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", cpu, (unsigned long)mpidr, i,
                    (unsigned long)cpu_logical_map(i));
        }
    }

    if (need_rss && (!gic_data.has_rss)) {
        pr_crit_once("RSS is required but GICD doesn't support it\n");
    }
}

static bool gicv3_nolpi;

static int __init gicv3_nolpi_cfg(char *buf)
{
    return strtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);

static int gic_dist_supports_lpis(void)
{
    return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
            !gicv3_nolpi);
}

static void gic_cpu_init(void)
{
    void __iomem *rbase;
    int i;

    /* Register ourselves with the rest of the world */
    if (gic_populate_rdist()) {
        return;
    }

    gic_enable_redist(true);

    WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) && !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
         "Distributor has extended ranges, but CPU%d doesn't\n", smp_processor_id());

    rbase = gic_data_rdist_sgi_base();

    /* Configure SGIs/PPIs as non-secure Group-1 */
    for (i = 0; i < gic_data.ppi_nr + 16; i += 32) {
        writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
    }

    gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);

    /* initialise system registers */
    gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)

static int gic_starting_cpu(unsigned int cpu)
{
    gic_cpu_init();

    if (gic_dist_supports_lpis()) {
        its_cpu_init();
    }

    return 0;
}

static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, unsigned long cluster_id)
{
    int next_cpu, cpu = *base_cpu;
    unsigned long mpidr = cpu_logical_map(cpu);
    u16 tlist = 0;

    while (cpu < nr_cpu_ids) {
        tlist |= 1 << (mpidr & 0xf);

        next_cpu = cpumask_next(cpu, mask);
        if (next_cpu >= nr_cpu_ids) {
            goto out;
        }
        cpu = next_cpu;

        mpidr = cpu_logical_map(cpu);

        if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
            cpu--;
            goto out;
        }
    }
out:
    *base_cpu = cpu;
    return tlist;
}
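
/*
 * Example: with CPUs 0-3 sharing a cluster (Aff0 = 0..3) and CPU 4
 * starting a new one, a mask covering CPUs 0-4 yields tlist 0x000f on
 * the first call, with *base_cpu left on the last CPU of the cluster so
 * the caller's cpumask iteration resumes at CPU 4.
 */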

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level)                                                                       \
    (MPIDR_AFFINITY_LEVEL(cluster_id, level) << ICC_SGI1R_AFFINITY_##level##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
    u64 val;

    val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
           (irq << ICC_SGI1R_SGI_ID_SHIFT) | MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
           MPIDR_TO_SGI_RS(cluster_id) | (tlist << ICC_SGI1R_TARGET_LIST_SHIFT));

    pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
    gic_write_sgi1r(val);
}
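
/*
 * Encoding example (illustrative, RS assumed 0): SGI 5 sent to target
 * list 0xf in the cluster Aff3/Aff2/Aff1 = 0/0/1 assembles to
 * (5 << 24) | (1 << 16) | 0xf = 0x0501000f in ICC_SGI1R_EL1.
 */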

static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
    int cpu;

    if (WARN_ON(d->hwirq >= 0x10)) {
        return;
    }

    /*
     * Ensure that stores to Normal memory are visible to the
     * other CPUs before issuing the IPI.
     */
    wmb();

    for_each_cpu(cpu, mask) {
        u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
        u16 tlist;

        tlist = gic_compute_target_list(&cpu, mask, cluster_id);
        gic_send_sgi(cluster_id, tlist, d->hwirq);
    }

    /* Force the above writes to ICC_SGI1R_EL1 to be executed */
    isb();
}

static void __init gic_smp_init(void)
{
    struct irq_fwspec sgi_fwspec = {
        .fwnode = gic_data.fwnode,
        .param_count = 1,
    };
    int base_sgi;

    cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, "irqchip/arm/gicv3:starting", gic_starting_cpu, NULL);

    /* Register all 8 non-secure SGIs */
    base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8, NUMA_NO_NODE, &sgi_fwspec, false, NULL);
    if (WARN_ON(base_sgi <= 0)) {
        return;
    }

    set_smp_ipi_range(base_sgi, 8);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force)
{
    unsigned int cpu;
    u32 offset, index;
    void __iomem *reg;
    int enabled;
    u64 val;

    if (force) {
        cpu = cpumask_first(mask_val);
    } else {
        cpu = cpumask_any_and(mask_val, cpu_online_mask);
    }

    if (cpu >= nr_cpu_ids) {
        return -EINVAL;
    }

    if (gic_irq_in_rdist(d)) {
        return -EINVAL;
    }

    /* If interrupt was enabled, disable it first */
    enabled = gic_peek_irq(d, GICD_ISENABLER);
    if (enabled) {
        gic_mask_irq(d);
    }

    offset = convert_offset_index(d, GICD_IROUTER, &index);
    reg = gic_dist_base(d) + offset + (index * IRQ_GIC_REG_INDEX_MUL);
    val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

    gic_write_irouter(val, reg);

    /*
     * If the interrupt was enabled, enable it again. Otherwise,
     * just wait for the distributor to have digested our changes.
     */
    if (enabled) {
        gic_unmask_irq(d);
    } else {
        gic_dist_wait_for_rwp();
    }

    irq_data_update_effective_affinity(d, cpumask_of(cpu));

    return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity NULL
#define gic_ipi_send_mask NULL
#define gic_smp_init()                                                                                                 \
    do {                                                                                                               \
    } while (0)
#endif

static int gic_retrigger(struct irq_data *data)
{
    return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}

#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
    if (cmd == CPU_PM_EXIT) {
        if (gic_dist_security_disabled()) {
            gic_enable_redist(true);
        }
        gic_cpu_sys_reg_init();
    } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
        gic_write_grpen1(0);
        gic_enable_redist(false);
    }
    return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
    .notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
    cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void)
{
}
#endif /* CONFIG_CPU_PM */

#ifdef CONFIG_PM
void gic_resume(void)
{
}
EXPORT_SYMBOL_GPL(gic_resume);

static struct syscore_ops gic_syscore_ops = {
    .resume = gic_resume,
};

static void gic_syscore_init(void)
{
    register_syscore_ops(&gic_syscore_ops);
}

#else
static inline void gic_syscore_init(void)
{
}
void gic_resume(void)
{
}
#endif

static struct irq_chip gic_chip = {
    .name = "GICv3",
    .irq_mask = gic_mask_irq,
    .irq_unmask = gic_unmask_irq,
    .irq_eoi = gic_eoi_irq,
    .irq_set_type = gic_set_type,
    .irq_set_affinity = gic_set_affinity,
    .irq_retrigger = gic_retrigger,
    .irq_get_irqchip_state = gic_irq_get_irqchip_state,
    .irq_set_irqchip_state = gic_irq_set_irqchip_state,
    .irq_nmi_setup = gic_irq_nmi_setup,
    .irq_nmi_teardown = gic_irq_nmi_teardown,
    .ipi_send_mask = gic_ipi_send_mask,
    .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};

static struct irq_chip gic_eoimode1_chip = {
    .name = "GICv3",
    .irq_mask = gic_eoimode1_mask_irq,
    .irq_unmask = gic_unmask_irq,
    .irq_eoi = gic_eoimode1_eoi_irq,
    .irq_set_type = gic_set_type,
    .irq_set_affinity = gic_set_affinity,
    .irq_retrigger = gic_retrigger,
    .irq_get_irqchip_state = gic_irq_get_irqchip_state,
    .irq_set_irqchip_state = gic_irq_set_irqchip_state,
    .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
    .irq_nmi_setup = gic_irq_nmi_setup,
    .irq_nmi_teardown = gic_irq_nmi_teardown,
    .ipi_send_mask = gic_ipi_send_mask,
    .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
    struct irq_chip *chip = &gic_chip;
    struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));

    if (static_branch_likely(&supports_deactivate_key)) {
        chip = &gic_eoimode1_chip;
    }

    switch (get_intid_range_func(hw)) {
        case SGI_RANGE:
            irq_set_percpu_devid(irq);
            irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_percpu_devid_fasteoi_ipi, NULL, NULL);
            break;

        case PPI_RANGE:
        case EPPI_RANGE:
            irq_set_percpu_devid(irq);
            irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_percpu_devid_irq, NULL, NULL);
            break;

        case SPI_RANGE:
        case ESPI_RANGE:
            irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL);
            irq_set_probe(irq);
            irqd_set_single_target(irqd);
            break;

        case LPI_RANGE:
            if (!gic_dist_supports_lpis()) {
                return -EPERM;
            }
            irq_domain_set_info(d, irq, hw, chip, d->host_data, handle_fasteoi_irq, NULL, NULL);
            break;

        default:
            return -EPERM;
    }

    /* Prevents SW retriggers which mess up the ACK/EOI ordering */
    irqd_set_handle_enforce_irqctx(irqd);
    return 0;
}

static int gic_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
    unsigned long *hwirq, unsigned int *type)
{
    if (fwspec->param_count == 1 && fwspec->param[0] < GIC_IRQ_PARAMETER_VALUE_SIXTEEN) {
        *hwirq = fwspec->param[0];
        *type = IRQ_TYPE_EDGE_RISING;
        return 0;
    }

    if (is_of_node(fwspec->fwnode)) {
        if (fwspec->param_count < GIC_IRQ_PARAMETER_COUNT_THREE) {
            return -EINVAL;
        }

        switch (fwspec->param[0]) {
            case GIC_IRQ_TYPE_SPI: /* SPI */
                *hwirq = fwspec->param[1] + GIC_IRQ_REG_OFFSET_TWO;
                break;
            case GIC_IRQ_TYPE_PPI: /* PPI */
                *hwirq = fwspec->param[1] + GIC_IRQ_REG_OFFSET_ONE;
                break;
            case GIC_IRQ_TYPE_ESPI: /* ESPI */
                *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
                break;
            case GIC_IRQ_TYPE_EPPI: /* EPPI */
                *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
                break;
            case GIC_IRQ_TYPE_LPI: /* LPI */
                *hwirq = fwspec->param[1];
                break;
            case GIC_IRQ_TYPE_PARTITION:
                *hwirq = fwspec->param[1];
                if (fwspec->param[1] >= GIC_IRQ_PARAMETER_VALUE_SIXTEEN) {
                    *hwirq += EPPI_BASE_INTID - GIC_IRQ_REG_OFFSET_ONE;
                } else {
                    *hwirq += GIC_IRQ_REG_OFFSET_ONE;
                }
                break;
            default:
                return -EINVAL;
        }

        *type = fwspec->param[GIC_IRQ_BUF_INDEX_TWO] & IRQ_TYPE_SENSE_MASK;

        /*
         * Make it clear that broken DTs are... broken.
         * Partitioned PPIs are an unfortunate exception.
         */
        WARN_ON(*type == IRQ_TYPE_NONE && fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
        return 0;
    }

    if (is_fwnode_irqchip(fwspec->fwnode)) {
        if (fwspec->param_count != GIC_IRQ_PARAMETER_COUNT_TWO) {
            return -EINVAL;
        }

        if (fwspec->param[0] < GIC_IRQ_PARAMETER_VALUE_SIXTEEN) {
            pr_err(FW_BUG "Illegal GSI%d translation request\n",
                   fwspec->param[0]);
            return -EINVAL;
        }

        *hwirq = fwspec->param[0];
        *type = fwspec->param[1];

        WARN_ON(*type == IRQ_TYPE_NONE);
        return 0;
    }

    return -EINVAL;
}
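
/*
 * DT example: an interrupts specifier of <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
 * i.e. param[] = { 0, 23, 4 }, translates to hwirq 23 + 32 = 55 with a
 * level-high trigger, landing in the SPI_RANGE handled above.
 */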

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg)
{
    int i, ret;
    irq_hw_number_t hwirq;
    unsigned int type = IRQ_TYPE_NONE;
    struct irq_fwspec *fwspec = arg;

    ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
    if (ret) {
        return ret;
    }

    for (i = 0; i < nr_irqs; i++) {
        ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
        if (ret) {
            return ret;
        }
    }

    return 0;
}

static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs)
{
    int i;

    for (i = 0; i < nr_irqs; i++) {
        struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
        irq_set_handler(virq + i, NULL);
        irq_domain_reset_irq_data(d);
    }
}

static int gic_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token)
{
    /* Not for us */
    if (fwspec->fwnode != d->fwnode) {
        return 0;
    }

    /* If this is not DT, then we have a single domain */
    if (!is_of_node(fwspec->fwnode)) {
        return 1;
    }

    /*
     * If this is a PPI and we have a 4th (non-null) parameter,
     * then we need to match the partition domain.
     */
    if (fwspec->param_count >= GIC_IRQ_PARAMETER_COUNT_FOUR && fwspec->param[0] == GIC_IRQ_TYPE_PPI &&
        fwspec->param[GIC_IRQ_BUF_INDEX_THREE] != 0 && gic_data.ppi_descs) {
        return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
    }

    return d == gic_data.domain;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
    .translate = gic_irq_domain_translate,
    .alloc = gic_irq_domain_alloc,
    .free = gic_irq_domain_free,
    .select = gic_irq_domain_select,
};

static int partition_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *hwirq,
                                      unsigned int *type)
{
    struct device_node *np;
    int ret;

    if (!gic_data.ppi_descs) {
        return -ENOMEM;
    }

    np = of_find_node_by_phandle(fwspec->param[GIC_IRQ_BUF_INDEX_THREE]);
    if (WARN_ON(!np)) {
        return -EINVAL;
    }

    ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]], of_node_to_fwnode(np));
    if (ret < 0) {
        return ret;
    }

    *hwirq = ret;
    *type = fwspec->param[GIC_IRQ_BUF_INDEX_TWO] & IRQ_TYPE_SENSE_MASK;

    return 0;
}

static const struct irq_domain_ops partition_domain_ops = {
    .translate = partition_domain_translate,
    .select = gic_irq_domain_select,
};

static bool gic_enable_quirk_msm8996(void *data)
{
    struct gic_chip_data *d = data;

    d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;

    return true;
}

static bool gic_enable_quirk_cavium_38539(void *data)
{
    struct gic_chip_data *d = data;

    d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;

    return true;
}

static bool gic_enable_quirk_hip06_07(void *data)
{
    struct gic_chip_data *d = data;

    /*
     * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
     * not being an actual ARM implementation). The saving grace is
     * that GIC-600 doesn't have ESPI, so nothing to do in that case.
     * HIP07 doesn't even have a proper IIDR, and still pretends to
     * have ESPI. In both cases, put them right.
     */
    if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
        /* Zero both ESPI and the RES0 field next to it... */
        d->rdists.gicd_typer &= ~GENMASK(GIC_GEN_MASK_NINE, GIC_GEN_MASK_EIGHT);
        return true;
    }

    return false;
}

static const struct gic_quirk gic_quirks[] = {
    {
        .desc = "GICv3: Qualcomm MSM8996 broken firmware",
        .compatible = "qcom,msm8996-gic-v3",
        .init = gic_enable_quirk_msm8996,
    },
    {
        .desc = "GICv3: HIP06 erratum 161010803",
        .iidr = 0x0204043b,
        .mask = 0xffffffff,
        .init = gic_enable_quirk_hip06_07,
    },
    {
        .desc = "GICv3: HIP07 erratum 161010803",
        .iidr = 0x00000000,
        .mask = 0xffffffff,
        .init = gic_enable_quirk_hip06_07,
    },
    {
        /*
         * Reserved register accesses generate a Synchronous
         * External Abort. This erratum applies to:
         * - ThunderX: CN88xx
         * - OCTEON TX: CN83xx, CN81xx
         * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
         */
        .desc = "GICv3: Cavium erratum 38539",
        .iidr = 0xa000034c,
        .mask = 0xe8f00fff,
        .init = gic_enable_quirk_cavium_38539,
    },
    {}
};
1665 
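/*
 * Enable pseudo-NMI support if interrupt priority masking is in use:
 * allocate the per-PPI NMI refcounts, decide how ICC_PMR_EL1 writes need
 * to be synchronised, and advertise IRQCHIP_SUPPORTS_NMI on the chip.
 */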
static void gic_enable_nmi_support(void)
{
    int i;

    if (!gic_prio_masking_enabled()) {
        return;
    }

    ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
    if (!ppi_nmi_refs) {
        return;
    }

    for (i = 0; i < gic_data.ppi_nr; i++) {
        refcount_set(&ppi_nmi_refs[i], 0);
    }

    /*
     * Linux itself doesn't use 1:N distribution, so has no need to
     * set PMHE. The only reason to have it set is if EL3 requires it
     * (and we can't change it).
     */
    if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) {
        static_branch_enable(&gic_pmr_sync);
    }

    pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
            static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");

    /*
     * How priority values are used by the GIC depends on two things:
     * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
     * and whether Group 0 interrupts can be delivered to Linux in the
     * non-secure world as FIQs (controlled by the SCR_EL3.FIQ bit). These
     * affect the ICC_PMR_EL1 register and the priority that software
     * assigns to interrupts:
     *
     * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
     * -----------------------------------------------------------
     *      1       |      -      |  unchanged  |    unchanged
     * -----------------------------------------------------------
     *      0       |      1      |  non-secure |    non-secure
     * -----------------------------------------------------------
     *      0       |      0      |  unchanged  |    non-secure
     *
     * where non-secure means that the value is right-shifted by one and
     * the MSB set, to make it fit in the non-secure priority range.
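     * For example, a Group 1 priority of 0xa0 in this last case is
     * presented to the CPU interface as (0xa0 >> 1) | 0x80 = 0xd0.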
     *
     * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
     * are both either modified or unchanged, we can use the same set of
     * priorities.
     *
     * In the last case, where only the interrupt priorities are modified to
     * be in the non-secure range, we use a different PMR value to mask IRQs
     * and the rest of the values that we use remain unchanged.
     */
    if (gic_has_group0() && !gic_dist_security_disabled()) {
        static_branch_enable(&gic_nonsecure_priorities);
    }

    static_branch_enable(&supports_pseudo_nmis);

    if (static_branch_likely(&supports_deactivate_key)) {
        gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
    } else {
        gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
    }
}

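/*
 * Common bring-up path shared by the DT and ACPI probes: cache the MMIO
 * bases, read GICD_TYPER, apply IIDR-keyed quirks, create the IRQ domain,
 * then initialise the distributor, the boot CPU interface, SMP/PM hooks
 * and, when LPIs are supported, the ITS.
 */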
static int __init gic_init_bases(void __iomem *dist_base, struct redist_region *rdist_regs, u32 nr_redist_regions,
                                 u64 redist_stride, struct fwnode_handle *handle)
{
    u32 typer;
    int err;

    if (!is_hyp_mode_available()) {
        static_branch_disable(&supports_deactivate_key);
    }

    if (static_branch_likely(&supports_deactivate_key)) {
        pr_info("GIC: Using split EOI/Deactivate mode\n");
    }

    gic_data.fwnode = handle;
    gic_data.dist_base = dist_base;
    gic_data.redist_regions = rdist_regs;
    gic_data.nr_redist_regions = nr_redist_regions;
    gic_data.redist_stride = redist_stride;

    /*
     * Find out how many interrupts are supported.
     */
    typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
    gic_data.rdists.gicd_typer = typer;

    gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), gic_quirks, &gic_data);

    pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
    pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);

    /*
     * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
     * architecture spec (which says that reserved registers are RES0).
     */
    if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539)) {
        gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
    }

    gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, &gic_data);
    gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
    gic_data.rdists.has_rvpeid = true;
    gic_data.rdists.has_vlpis = true;
    gic_data.rdists.has_direct_lpi = true;
    gic_data.rdists.has_vpend_valid_dirty = true;

    if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
        err = -ENOMEM;
        goto out_free;
    }

    irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);

    gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
    pr_info("Distributor has %sRange Selector support\n", gic_data.has_rss ? "" : "no ");

    if (typer & GICD_TYPER_MBIS) {
        err = mbi_init(handle, gic_data.domain);
        if (err) {
            pr_err("Failed to initialize MBIs\n");
        }
    }

    set_handle_irq(gic_handle_irq);

    gic_update_rdist_properties();

    gic_dist_init();
    gic_cpu_init();
    gic_smp_init();
    gic_cpu_pm_init();
    gic_syscore_init();

    if (gic_dist_supports_lpis()) {
        its_init(handle, &gic_data.rdists, gic_data.domain);
        its_cpu_init();
    } else if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) {
        gicv2m_init(handle, gic_data.domain);
    }

    gic_enable_nmi_support();

    return 0;

out_free:
    if (gic_data.domain) {
        irq_domain_remove(gic_data.domain);
    }
    free_percpu(gic_data.rdists.rdist);
    return err;
}

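/*
 * Sanity-check the component ID registers: only the GICv3 and GICv4
 * architecture revisions advertised in GICD_PIDR2 are handled by this
 * driver.
 */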
static int __init gic_validate_dist_version(void __iomem *dist_base)
{
    u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

    if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
        return -ENODEV;
    }

    return 0;
}

/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
    struct device_node *parts_node, *child_part;
    int part_idx = 0, i;
    int nr_parts;
    struct partition_affinity *parts;

    parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
    if (!parts_node) {
        return;
    }

    gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
    if (!gic_data.ppi_descs) {
        goto out_put_node;
    }

    nr_parts = of_get_child_count(parts_node);

    if (!nr_parts) {
        goto out_put_node;
    }

    parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
    if (WARN_ON(!parts)) {
        goto out_put_node;
    }

    for_each_child_of_node(parts_node, child_part) {
        struct partition_affinity *part;
        int n;

        part = &parts[part_idx];

        part->partition_id = of_node_to_fwnode(child_part);

        pr_info("GIC: PPI partition %pOFn[%d] { ", child_part, part_idx);

        n = of_property_count_elems_of_size(child_part, "affinity", sizeof(u32));
        WARN_ON(n <= 0);

        for (i = 0; i < n; i++) {
            int err, cpu;
            u32 cpu_phandle;
            struct device_node *cpu_node;

            err = of_property_read_u32_index(child_part, "affinity", i, &cpu_phandle);
            if (WARN_ON(err)) {
                continue;
            }

            cpu_node = of_find_node_by_phandle(cpu_phandle);
            if (WARN_ON(!cpu_node)) {
                continue;
            }

            cpu = of_cpu_node_to_id(cpu_node);
            if (WARN_ON(cpu < 0)) {
                /* Drop the node reference taken above before skipping */
                of_node_put(cpu_node);
                continue;
            }

            pr_cont("%pOF[%d] ", cpu_node, cpu);

            cpumask_set_cpu(cpu, &part->mask);
            of_node_put(cpu_node);
        }

        pr_cont("}\n");
        part_idx++;
    }

    for (i = 0; i < gic_data.ppi_nr; i++) {
        unsigned int irq;
        struct partition_desc *desc;
        struct irq_fwspec ppi_fwspec = {
            .fwnode = gic_data.fwnode,
            .param_count = GIC_IRQ_PARAMETER_COUNT_THREE,
            .param = {
                [0] = GIC_IRQ_TYPE_PARTITION,
                [1] = i,
                [2] = IRQ_TYPE_NONE,
            },
        };

        irq = irq_create_fwspec_mapping(&ppi_fwspec);
        if (WARN_ON(!irq)) {
            continue;
        }
        desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, irq, &partition_domain_ops);
        if (WARN_ON(!desc)) {
            continue;
        }

        gic_data.ppi_descs[i] = desc;
    }

out_put_node:
    of_node_put(parts_node);
}

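/*
 * Extract the virtualisation resources from DT for KVM: the maintenance
 * interrupt, and the GICV region found after the GICD, GICC, GICH and
 * redistributor entries in the node's reg property.
 */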
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
    int ret;
    struct resource r;
    u32 gicv_idx;

    gic_v3_kvm_info.type = GIC_V3;

    gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
    if (!gic_v3_kvm_info.maint_irq) {
        return;
    }

    if (of_property_read_u32(node, "#redistributor-regions", &gicv_idx)) {
        gicv_idx = 1;
    }

    gicv_idx += 3; /* Also skip GICD, GICC, GICH */
    ret = of_address_to_resource(node, gicv_idx, &r);
    if (!ret) {
        gic_v3_kvm_info.vcpu = r;
    }

    gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
    gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
    gic_set_kvm_info(&gic_v3_kvm_info);
}

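/*
 * DT probe: map the distributor and all redistributor regions described by
 * the node's reg property, then hand over to gic_init_bases().
 */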
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
    void __iomem *dist_base;
    struct redist_region *rdist_regs;
    u64 redist_stride;
    u32 nr_redist_regions;
    int err, i;

    dist_base = of_iomap(node, 0);
    if (!dist_base) {
        pr_err("%pOF: unable to map gic dist registers\n", node);
        return -ENXIO;
    }

    err = gic_validate_dist_version(dist_base);
    if (err) {
        pr_err("%pOF: no distributor detected, giving up\n", node);
        goto out_unmap_dist;
    }

    if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) {
        nr_redist_regions = 1;
    }

    rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), GFP_KERNEL);
    if (!rdist_regs) {
        err = -ENOMEM;
        goto out_unmap_dist;
    }

    for (i = 0; i < nr_redist_regions; i++) {
        struct resource res;
        int ret;

        ret = of_address_to_resource(node, 1 + i, &res);
        rdist_regs[i].redist_base = of_iomap(node, 1 + i);
        if (ret || !rdist_regs[i].redist_base) {
            pr_err("%pOF: couldn't map region %d\n", node, i);
            err = -ENODEV;
            goto out_unmap_rdist;
        }
        rdist_regs[i].phys_base = res.start;
    }

    if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) {
        redist_stride = 0;
    }

    gic_enable_of_quirks(node, gic_quirks, &gic_data);

    err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions, redist_stride, &node->fwnode);
    if (err) {
        goto out_unmap_rdist;
    }

    gic_populate_ppi_partitions(node);

    if (static_branch_likely(&supports_deactivate_key)) {
        gic_of_setup_kvm_info(node);
    }
    return 0;

out_unmap_rdist:
    for (i = 0; i < nr_redist_regions; i++) {
        if (rdist_regs[i].redist_base) {
            iounmap(rdist_regs[i].redist_base);
        }
    }
    kfree(rdist_regs);
out_unmap_dist:
    iounmap(dist_base);
    return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);

#ifdef CONFIG_ACPI
static struct {
    void __iomem *dist_base;
    struct redist_region *redist_regs;
    u32 nr_redist_regions;
    bool single_redist;
    int enabled_rdists;
    u32 maint_irq;
    int maint_irq_mode;
    phys_addr_t vcpu_base;
} acpi_data __initdata;

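/*
 * Record one redistributor region in acpi_data.redist_regs[]. The static
 * index relies on gic_acpi_count_gicr_regions() having sized the array to
 * match the number of MADT entries that will be parsed.
 */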
static void __init gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
    static int count;

    acpi_data.redist_regs[count].phys_base = phys_base;
    acpi_data.redist_regs[count].redist_base = redist_base;
    acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
    count++;
}

static int __init gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, const unsigned long end)
{
    struct acpi_madt_generic_redistributor *redist = (struct acpi_madt_generic_redistributor *)header;
    void __iomem *redist_base;

    redist_base = ioremap(redist->base_address, redist->length);
    if (!redist_base) {
        pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
        return -ENOMEM;
    }

    gic_acpi_register_redist(redist->base_address, redist_base);
    return 0;
}

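/*
 * When the redistributors are described per-CPU via GICC entries, map a
 * single redistributor's register frames: two 64K frames (RD + SGI) on
 * GICv3, four on GICv4, which adds the VLPI and reserved frames.
 */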
static int __init gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header, const unsigned long end)
{
    struct acpi_madt_generic_interrupt *gicc = (struct acpi_madt_generic_interrupt *)header;
    u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
    u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
    void __iomem *redist_base;

    /* A GICC entry without ACPI_MADT_ENABLED is unusable, so skip it */
    if (!(gicc->flags & ACPI_MADT_ENABLED)) {
        return 0;
    }

    redist_base = ioremap(gicc->gicr_base_address, size);
    if (!redist_base) {
        return -ENOMEM;
    }

    gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
    return 0;
}


static int __init gic_acpi_collect_gicr_base(void)
{
    acpi_tbl_entry_handler redist_parser;
    enum acpi_madt_type type;

    if (acpi_data.single_redist) {
        type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
        redist_parser = gic_acpi_parse_madt_gicc;
    } else {
        type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
        redist_parser = gic_acpi_parse_madt_redist;
    }

    /* Collect redistributor base addresses in GICR entries */
    if (acpi_table_parse_madt(type, redist_parser, 0) > 0) {
        return 0;
    }

    pr_info("No valid GICR entries exist\n");
    return -ENODEV;
}

static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header, const unsigned long end)
{
    /* Subtable presence means that redist exists, that's it */
    return 0;
}

static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header, const unsigned long end)
{
    struct acpi_madt_generic_interrupt *gicc = (struct acpi_madt_generic_interrupt *)header;

    /*
     * If GICC is enabled and has a valid GICR base address, then the
     * GICR base is presented via GICC.
     */
    if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
        acpi_data.enabled_rdists++;
        return 0;
    }

    /*
     * Firmware may legitimately pass a disabled GICC entry; don't treat
     * it as an error, skip the entry instead of failing the probe.
     */
    if (!(gicc->flags & ACPI_MADT_ENABLED)) {
        return 0;
    }

    return -ENODEV;
}

static int __init gic_acpi_count_gicr_regions(void)
{
    int count;

    /*
     * Count how many redistributor regions we have. Mixing redistributor
     * descriptions is not allowed: the GICR and GICC subtables have to be
     * mutually exclusive.
     */
    count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, gic_acpi_match_gicr, 0);
    if (count > 0) {
        acpi_data.single_redist = false;
        return count;
    }

    count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, gic_acpi_match_gicc, 0);
    if (count > 0) {
        acpi_data.single_redist = true;
        count = acpi_data.enabled_rdists;
    }

    return count;
}

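/*
 * MADT match callback: accept the distributor entry only if its version
 * matches what this probe entry expects, and cache the redistributor
 * region count while we are at it.
 */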
static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, struct acpi_probe_entry *ape)
{
    struct acpi_madt_generic_distributor *dist;
    int count;

    dist = (struct acpi_madt_generic_distributor *)header;
    if (dist->version != ape->driver_data) {
        return false;
    }

    /* We need to do that exercise anyway, the sooner the better */
    count = gic_acpi_count_gicr_regions();
    if (count <= 0) {
        return false;
    }

    acpi_data.nr_redist_regions = count;
    return true;
}

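/*
 * Record the virtual GIC maintenance interrupt and GICV base from the
 * first enabled GICC entry, then verify that every other CPU advertises
 * the same values.
 */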
static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header, const unsigned long end)
{
    struct acpi_madt_generic_interrupt *gicc = (struct acpi_madt_generic_interrupt *)header;
    int maint_irq_mode;
    static bool first_madt = true;

    /* Skip unusable CPUs */
    if (!(gicc->flags & ACPI_MADT_ENABLED)) {
        return 0;
    }

    maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

    if (first_madt) {
        first_madt = false;

        acpi_data.maint_irq = gicc->vgic_interrupt;
        acpi_data.maint_irq_mode = maint_irq_mode;
        acpi_data.vcpu_base = gicc->gicv_base_address;

        return 0;
    }

    /* The maintenance interrupt and GICV should be the same for every CPU */
    if ((acpi_data.maint_irq != gicc->vgic_interrupt) || (acpi_data.maint_irq_mode != maint_irq_mode) ||
        (acpi_data.vcpu_base != gicc->gicv_base_address)) {
        return -EINVAL;
    }

    return 0;
}

static bool __init gic_acpi_collect_virt_info(void)
{
    int count;

    count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, gic_acpi_parse_virt_madt_gicc, 0);

    return (count > 0);
}

#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)

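/*
 * ACPI counterpart of gic_of_setup_kvm_info(): register the maintenance
 * GSI and the (GICv2-compatible) GICV region with KVM.
 */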
static void __init gic_acpi_setup_kvm_info(void)
{
    int irq;

    if (!gic_acpi_collect_virt_info()) {
        pr_warn("Unable to get hardware information used for virtualization\n");
        return;
    }

    gic_v3_kvm_info.type = GIC_V3;

    irq = acpi_register_gsi(NULL, acpi_data.maint_irq, acpi_data.maint_irq_mode, ACPI_ACTIVE_HIGH);
    if (irq <= 0) {
        return;
    }

    gic_v3_kvm_info.maint_irq = irq;

    if (acpi_data.vcpu_base) {
        struct resource *vcpu = &gic_v3_kvm_info.vcpu;

        vcpu->flags = IORESOURCE_MEM;
        vcpu->start = acpi_data.vcpu_base;
        vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
    }

    gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
    gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
    gic_set_kvm_info(&gic_v3_kvm_info);
}

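/*
 * ACPI probe: the mirror of gic_of_init(). Map the distributor, size and
 * map the redistributor regions from the MADT, then call gic_init_bases()
 * with a freshly allocated fwnode as the domain identifier.
 */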
static int __init gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
    struct acpi_madt_generic_distributor *dist;
    struct fwnode_handle *domain_handle;
    size_t size;
    int i, err;

    /* Get distributor base address */
    dist = (struct acpi_madt_generic_distributor *)header;
    acpi_data.dist_base = ioremap(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE);
    if (!acpi_data.dist_base) {
        pr_err("Unable to map GICD registers\n");
        return -ENOMEM;
    }

    err = gic_validate_dist_version(acpi_data.dist_base);
    if (err) {
        pr_err("No distributor detected at @%p, giving up\n", acpi_data.dist_base);
        goto out_dist_unmap;
    }

    size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
    acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
    if (!acpi_data.redist_regs) {
        err = -ENOMEM;
        goto out_dist_unmap;
    }

    err = gic_acpi_collect_gicr_base();
    if (err) {
        goto out_redist_unmap;
    }

    domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
    if (!domain_handle) {
        err = -ENOMEM;
        goto out_redist_unmap;
    }

    err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs, acpi_data.nr_redist_regions, 0, domain_handle);
    if (err) {
        goto out_fwhandle_free;
    }

    acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);

    if (static_branch_likely(&supports_deactivate_key)) {
        gic_acpi_setup_kvm_info();
    }

    return 0;

out_fwhandle_free:
    irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
    for (i = 0; i < acpi_data.nr_redist_regions; i++) {
        if (acpi_data.redist_regs[i].redist_base) {
            iounmap(acpi_data.redist_regs[i].redist_base);
        }
    }
    kfree(acpi_data.redist_regs);
out_dist_unmap:
    iounmap(acpi_data.dist_base);
    return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
                     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
                     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, acpi_validate_gic_table,
                     ACPI_MADT_GIC_VERSION_NONE, gic_acpi_init);
#endif