// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt)	"GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#define GICD_INT_NMI_PRI	(GICD_INT_DEF_PRI & ~0x80)

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
#define FLAGS_WORKAROUND_MTK_GICR_SAVE		(1ULL << 2)

#define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)

struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
	bool			single_redist;
};

struct gic_chip_data {
	struct fwnode_handle	*fwnode;
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	u64			flags;
	bool			has_rss;
	unsigned int		ppi_nr;
	struct partition_desc	**ppi_descs;
};

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

#define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)

/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * See GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 */
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
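
/*
 * Worked example of the presentation rule above (illustrative values,
 * assuming the kernel's usual GICD_INT_DEF_PRI of 0xa0): software programs
 * 0xa0 into GICD_IPRIORITYRn; with security enabled, the CPU interface sees
 * (0xa0 >> 1) | 0x80 = 0xd0. With SCR_EL3.FIQ == 1, a PMR write undergoes
 * the same transformation, so the two views still compare correctly.
 */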

/*
 * Global static key controlling whether an update to PMR allowing more
 * interrupts requires to be propagated to the redistributor (DSB SY).
 * And this needs to be exported for modules to be able to enable
 * interrupts...
 */
DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
EXPORT_SYMBOL(gic_pmr_sync);

DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
EXPORT_SYMBOL(gic_nonsecure_priorities);

/*
 * When the Non-secure world has access to group 0 interrupts (as a
 * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
 * return the Distributor's view of the interrupt priority.
 *
 * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
 * written by software is moved to the Non-secure range by the Distributor.
 *
 * If both are true (which is when gic_nonsecure_priorities gets enabled),
 * we need to shift down the priority programmed by software to match it
 * against the value returned by ICC_RPR_EL1.
 */
#define GICD_INT_RPR_PRI(priority)					\
	({								\
		u32 __priority = (priority);				\
		if (static_branch_unlikely(&gic_nonsecure_priorities))	\
			__priority = 0x80 | (__priority >> 1);		\
									\
		__priority;						\
	})

/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t *ppi_nmi_refs;

static struct gic_kvm_info gic_v3_kvm_info;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0

enum gic_intid_range {
	SGI_RANGE,
	PPI_RANGE,
	SPI_RANGE,
	EPPI_RANGE,
	ESPI_RANGE,
	LPI_RANGE,
	__INVALID_RANGE__
};

static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
	switch (hwirq) {
	case 0 ... 15:
		return SGI_RANGE;
	case 16 ... 31:
		return PPI_RANGE;
	case 32 ... 1019:
		return SPI_RANGE;
	case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
		return EPPI_RANGE;
	case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
		return ESPI_RANGE;
	case 8192 ... GENMASK(23, 0):
		return LPI_RANGE;
	default:
		return __INVALID_RANGE__;
	}
}

static enum gic_intid_range get_intid_range(struct irq_data *d)
{
	return __get_intid_range(d->hwirq);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline bool gic_irq_in_rdist(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		return true;
	default:
		return false;
	}
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	case SPI_RANGE:
	case ESPI_RANGE:
		/* SPI -> dist_base */
		return gic_data.dist_base;

	default:
		return NULL;
	}
}
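
/*
 * Illustrative mapping of the ranges above (a sketch, not exhaustive):
 * hwirq 5 is an SGI and hwirq 27 a PPI, both handled through the per-CPU
 * SGI_base frame; hwirq 42 is an SPI handled through the distributor;
 * anything from 8192 up to GENMASK(23, 0) is an LPI and never goes
 * through gic_dist_base().
 */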

static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & bit) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}

#ifdef CONFIG_ARM64

static u64 __maybe_unused gic_read_iar(void)
{
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
#endif

static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
		return;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}
301 */ 302 *index = d->hwirq - EPPI_BASE_INTID + 32; 303 return offset; 304 case ESPI_RANGE: 305 *index = d->hwirq - ESPI_BASE_INTID; 306 switch (offset) { 307 case GICD_ISENABLER: 308 return GICD_ISENABLERnE; 309 case GICD_ICENABLER: 310 return GICD_ICENABLERnE; 311 case GICD_ISPENDR: 312 return GICD_ISPENDRnE; 313 case GICD_ICPENDR: 314 return GICD_ICPENDRnE; 315 case GICD_ISACTIVER: 316 return GICD_ISACTIVERnE; 317 case GICD_ICACTIVER: 318 return GICD_ICACTIVERnE; 319 case GICD_IPRIORITYR: 320 return GICD_IPRIORITYRnE; 321 case GICD_ICFGR: 322 return GICD_ICFGRnE; 323 case GICD_IROUTER: 324 return GICD_IROUTERnE; 325 default: 326 break; 327 } 328 break; 329 default: 330 break; 331 } 332 333 WARN_ON(1); 334 *index = d->hwirq; 335 return offset; 336} 337 338static int gic_peek_irq(struct irq_data *d, u32 offset) 339{ 340 void __iomem *base; 341 u32 index, mask; 342 343 offset = convert_offset_index(d, offset, &index); 344 mask = 1 << (index % 32); 345 346 if (gic_irq_in_rdist(d)) 347 base = gic_data_rdist_sgi_base(); 348 else 349 base = gic_data.dist_base; 350 351 return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask); 352} 353 354static void gic_poke_irq(struct irq_data *d, u32 offset) 355{ 356 void (*rwp_wait)(void); 357 void __iomem *base; 358 u32 index, mask; 359 360 offset = convert_offset_index(d, offset, &index); 361 mask = 1 << (index % 32); 362 363 if (gic_irq_in_rdist(d)) { 364 base = gic_data_rdist_sgi_base(); 365 rwp_wait = gic_redist_wait_for_rwp; 366 } else { 367 base = gic_data.dist_base; 368 rwp_wait = gic_dist_wait_for_rwp; 369 } 370 371 writel_relaxed(mask, base + offset + (index / 32) * 4); 372 rwp_wait(); 373} 374 375static void gic_mask_irq(struct irq_data *d) 376{ 377 gic_poke_irq(d, GICD_ICENABLER); 378} 379 380static void gic_eoimode1_mask_irq(struct irq_data *d) 381{ 382 gic_mask_irq(d); 383 /* 384 * When masking a forwarded interrupt, make sure it is 385 * deactivated as well. 386 * 387 * This ensures that an interrupt that is getting 388 * disabled/masked will not get "stuck", because there is 389 * noone to deactivate it (guest is being terminated). 390 */ 391 if (irqd_is_forwarded_to_vcpu(d)) 392 gic_poke_irq(d, GICD_ICACTIVER); 393} 394 395static void gic_unmask_irq(struct irq_data *d) 396{ 397 gic_poke_irq(d, GICD_ISENABLER); 398} 399 400static inline bool gic_supports_nmi(void) 401{ 402 return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && 403 static_branch_likely(&supports_pseudo_nmis); 404} 405 406static int gic_irq_set_irqchip_state(struct irq_data *d, 407 enum irqchip_irq_state which, bool val) 408{ 409 u32 reg; 410 411 if (d->hwirq >= 8192) /* SGI/PPI/SPI only */ 412 return -EINVAL; 413 414 switch (which) { 415 case IRQCHIP_STATE_PENDING: 416 reg = val ? GICD_ISPENDR : GICD_ICPENDR; 417 break; 418 419 case IRQCHIP_STATE_ACTIVE: 420 reg = val ? GICD_ISACTIVER : GICD_ICACTIVER; 421 break; 422 423 case IRQCHIP_STATE_MASKED: 424 reg = val ? 

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}

static inline bool gic_supports_nmi(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       static_branch_likely(&supports_pseudo_nmis);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
	void __iomem *base = gic_dist_base(d);
	u32 offset, index;

	offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

	writeb_relaxed(prio, base + offset + index);
}

static u32 gic_get_ppi_index(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case PPI_RANGE:
		return d->hwirq - 16;
	case EPPI_RANGE:
		return d->hwirq - EPPI_BASE_INTID + 16;
	default:
		unreachable();
	}
}

static int gic_irq_nmi_setup(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (!gic_supports_nmi())
		return -EINVAL;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return -EINVAL;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI request,
	 * it should not be possible to get there
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return -EINVAL;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Setting up PPI as NMI, only switch handler for first NMI */
		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
			refcount_set(&ppi_nmi_refs[idx], 1);
			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
		}
	} else {
		desc->handle_irq = handle_fasteoi_nmi;
	}

	gic_irq_set_prio(d, GICD_INT_NMI_PRI);

	return 0;
}

static void gic_irq_nmi_teardown(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (WARN_ON(!gic_supports_nmi()))
		return;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI request,
	 * it should not be possible to get there
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Tearing down NMI, only switch handler for last NMI */
		if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
			desc->handle_irq = handle_percpu_devid_irq;
	} else {
		desc->handle_irq = handle_fasteoi_irq;
	}

	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}

static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}
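
/*
 * A sketch of the two EOI modes handled below: in EOImode 0, a single
 * write to ICC_EOIR1_EL1 (gic_write_eoir()) both drops the running
 * priority and deactivates the interrupt. In EOImode 1, as used with
 * supports_deactivate_key, the EOIR write only drops the priority and a
 * separate write to ICC_DIR_EL1 (gic_write_dir()) deactivates the
 * interrupt, possibly from a vcpu when the interrupt is forwarded.
 */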
563 */ 564 if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) 565 return; 566 gic_write_dir(gic_irq(d)); 567} 568 569static int gic_set_type(struct irq_data *d, unsigned int type) 570{ 571 enum gic_intid_range range; 572 unsigned int irq = gic_irq(d); 573 void (*rwp_wait)(void); 574 void __iomem *base; 575 u32 offset, index; 576 int ret; 577 578 range = get_intid_range(d); 579 580 /* Interrupt configuration for SGIs can't be changed */ 581 if (range == SGI_RANGE) 582 return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0; 583 584 /* SPIs have restrictions on the supported types */ 585 if ((range == SPI_RANGE || range == ESPI_RANGE) && 586 type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) 587 return -EINVAL; 588 589 if (gic_irq_in_rdist(d)) { 590 base = gic_data_rdist_sgi_base(); 591 rwp_wait = gic_redist_wait_for_rwp; 592 } else { 593 base = gic_data.dist_base; 594 rwp_wait = gic_dist_wait_for_rwp; 595 } 596 597 offset = convert_offset_index(d, GICD_ICFGR, &index); 598 599 ret = gic_configure_irq(index, type, base + offset, rwp_wait); 600 if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) { 601 /* Misconfigured PPIs are usually not fatal */ 602 pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq); 603 ret = 0; 604 } 605 606 return ret; 607} 608 609static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) 610{ 611 if (get_intid_range(d) == SGI_RANGE) 612 return -EINVAL; 613 614 if (vcpu) 615 irqd_set_forwarded_to_vcpu(d); 616 else 617 irqd_clr_forwarded_to_vcpu(d); 618 return 0; 619} 620 621static u64 gic_mpidr_to_affinity(unsigned long mpidr) 622{ 623 u64 aff; 624 625 aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | 626 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | 627 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | 628 MPIDR_AFFINITY_LEVEL(mpidr, 0)); 629 630 return aff; 631} 632 633static void gic_deactivate_unhandled(u32 irqnr) 634{ 635 if (static_branch_likely(&supports_deactivate_key)) { 636 if (irqnr < 8192) 637 gic_write_dir(irqnr); 638 } else { 639 gic_write_eoir(irqnr); 640 } 641} 642 643static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs) 644{ 645 bool irqs_enabled = interrupts_enabled(regs); 646 int err; 647 648 if (irqs_enabled) 649 nmi_enter(); 650 651 if (static_branch_likely(&supports_deactivate_key)) 652 gic_write_eoir(irqnr); 653 /* 654 * Leave the PSR.I bit set to prevent other NMIs to be 655 * received while handling this one. 656 * PSR.I will be restored when we ERET to the 657 * interrupted context. 658 */ 659 err = handle_domain_nmi(gic_data.domain, irqnr, regs); 660 if (err) 661 gic_deactivate_unhandled(irqnr); 662 663 if (irqs_enabled) 664 nmi_exit(); 665} 666 667static u32 do_read_iar(struct pt_regs *regs) 668{ 669 u32 iar; 670 671 if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) { 672 u64 pmr; 673 674 /* 675 * We were in a context with IRQs disabled. However, the 676 * entry code has set PMR to a value that allows any 677 * interrupt to be acknowledged, and not just NMIs. This can 678 * lead to surprising effects if the NMI has been retired in 679 * the meantime, and that there is an IRQ pending. The IRQ 680 * would then be taken in NMI context, something that nobody 681 * wants to debug twice. 682 * 683 * Until we sort this, drop PMR again to a level that will 684 * actually only allow NMIs before reading IAR, and then 685 * restore it to what it was. 
686 */ 687 pmr = gic_read_pmr(); 688 gic_pmr_mask_irqs(); 689 isb(); 690 691 iar = gic_read_iar(); 692 693 gic_write_pmr(pmr); 694 } else { 695 iar = gic_read_iar(); 696 } 697 698 return iar; 699} 700 701static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) 702{ 703 u32 irqnr; 704 705 irqnr = do_read_iar(regs); 706 707 /* Check for special IDs first */ 708 if ((irqnr >= 1020 && irqnr <= 1023)) 709 return; 710 711 if (gic_supports_nmi() && 712 unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI))) { 713 gic_handle_nmi(irqnr, regs); 714 return; 715 } 716 717 if (gic_prio_masking_enabled()) { 718 gic_pmr_mask_irqs(); 719 gic_arch_enable_irqs(); 720 } 721 722 if (static_branch_likely(&supports_deactivate_key)) 723 gic_write_eoir(irqnr); 724 else 725 isb(); 726 727 if (handle_domain_irq(gic_data.domain, irqnr, regs)) { 728 WARN_ONCE(true, "Unexpected interrupt received!\n"); 729 gic_deactivate_unhandled(irqnr); 730 } 731} 732 733static u32 gic_get_pribits(void) 734{ 735 u32 pribits; 736 737 pribits = gic_read_ctlr(); 738 pribits &= ICC_CTLR_EL1_PRI_BITS_MASK; 739 pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT; 740 pribits++; 741 742 return pribits; 743} 744 745static bool gic_has_group0(void) 746{ 747 u32 val; 748 u32 old_pmr; 749 750 old_pmr = gic_read_pmr(); 751 752 /* 753 * Let's find out if Group0 is under control of EL3 or not by 754 * setting the highest possible, non-zero priority in PMR. 755 * 756 * If SCR_EL3.FIQ is set, the priority gets shifted down in 757 * order for the CPU interface to set bit 7, and keep the 758 * actual priority in the non-secure range. In the process, it 759 * looses the least significant bit and the actual priority 760 * becomes 0x80. Reading it back returns 0, indicating that 761 * we're don't have access to Group0. 762 */ 763 gic_write_pmr(BIT(8 - gic_get_pribits())); 764 val = gic_read_pmr(); 765 766 gic_write_pmr(old_pmr); 767 768 return val != 0; 769} 770 771static void __init gic_dist_init(void) 772{ 773 unsigned int i; 774 u64 affinity; 775 void __iomem *base = gic_data.dist_base; 776 u32 val; 777 778 /* Disable the distributor */ 779 writel_relaxed(0, base + GICD_CTLR); 780 gic_dist_wait_for_rwp(); 781 782 /* 783 * Configure SPIs as non-secure Group-1. This will only matter 784 * if the GIC only has a single security state. This will not 785 * do the right thing if the kernel is running in secure mode, 786 * but that's not the intended use case anyway. 
787 */ 788 for (i = 32; i < GIC_LINE_NR; i += 32) 789 writel_relaxed(~0, base + GICD_IGROUPR + i / 8); 790 791 /* Extended SPI range, not handled by the GICv2/GICv3 common code */ 792 for (i = 0; i < GIC_ESPI_NR; i += 32) { 793 writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8); 794 writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8); 795 } 796 797 for (i = 0; i < GIC_ESPI_NR; i += 32) 798 writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8); 799 800 for (i = 0; i < GIC_ESPI_NR; i += 16) 801 writel_relaxed(0, base + GICD_ICFGRnE + i / 4); 802 803 for (i = 0; i < GIC_ESPI_NR; i += 4) 804 writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i); 805 806 /* Now do the common stuff, and wait for the distributor to drain */ 807 gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp); 808 809 val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1; 810 if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) { 811 pr_info("Enabling SGIs without active state\n"); 812 val |= GICD_CTLR_nASSGIreq; 813 } 814 815 /* Enable distributor with ARE, Group1 */ 816 writel_relaxed(val, base + GICD_CTLR); 817 818 /* 819 * Set all global interrupts to the boot CPU only. ARE must be 820 * enabled. 821 */ 822 affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); 823 for (i = 32; i < GIC_LINE_NR; i++) 824 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); 825 826 for (i = 0; i < GIC_ESPI_NR; i++) 827 gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); 828} 829 830static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) 831{ 832 int ret = -ENODEV; 833 int i; 834 835 for (i = 0; i < gic_data.nr_redist_regions; i++) { 836 void __iomem *ptr = gic_data.redist_regions[i].redist_base; 837 u64 typer; 838 u32 reg; 839 840 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; 841 if (reg != GIC_PIDR2_ARCH_GICv3 && 842 reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */ 843 pr_warn("No redistributor present @%p\n", ptr); 844 break; 845 } 846 847 do { 848 typer = gic_read_typer(ptr + GICR_TYPER); 849 ret = fn(gic_data.redist_regions + i, ptr); 850 if (!ret) 851 return 0; 852 853 if (gic_data.redist_regions[i].single_redist) 854 break; 855 856 if (gic_data.redist_stride) { 857 ptr += gic_data.redist_stride; 858 } else { 859 ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ 860 if (typer & GICR_TYPER_VLPIS) 861 ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ 862 } 863 } while (!(typer & GICR_TYPER_LAST)); 864 } 865 866 return ret ? -ENODEV : 0; 867} 868 869static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) 870{ 871 unsigned long mpidr = cpu_logical_map(smp_processor_id()); 872 u64 typer; 873 u32 aff; 874 875 /* 876 * Convert affinity to a 32bit value that can be matched to 877 * GICR_TYPER bits [63:32]. 
878 */ 879 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | 880 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | 881 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | 882 MPIDR_AFFINITY_LEVEL(mpidr, 0)); 883 884 typer = gic_read_typer(ptr + GICR_TYPER); 885 if ((typer >> 32) == aff) { 886 u64 offset = ptr - region->redist_base; 887 raw_spin_lock_init(&gic_data_rdist()->rd_lock); 888 gic_data_rdist_rd_base() = ptr; 889 gic_data_rdist()->phys_base = region->phys_base + offset; 890 891 pr_info("CPU%d: found redistributor %lx region %d:%pa\n", 892 smp_processor_id(), mpidr, 893 (int)(region - gic_data.redist_regions), 894 &gic_data_rdist()->phys_base); 895 return 0; 896 } 897 898 /* Try next one */ 899 return 1; 900} 901 902static int gic_populate_rdist(void) 903{ 904 if (gic_iterate_rdists(__gic_populate_rdist) == 0) 905 return 0; 906 907 /* We couldn't even deal with ourselves... */ 908 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", 909 smp_processor_id(), 910 (unsigned long)cpu_logical_map(smp_processor_id())); 911 return -ENODEV; 912} 913 914static int __gic_update_rdist_properties(struct redist_region *region, 915 void __iomem *ptr) 916{ 917 u64 typer = gic_read_typer(ptr + GICR_TYPER); 918 919 /* Boot-time cleanip */ 920 if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { 921 u64 val; 922 923 /* Deactivate any present vPE */ 924 val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER); 925 if (val & GICR_VPENDBASER_Valid) 926 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, 927 ptr + SZ_128K + GICR_VPENDBASER); 928 929 /* Mark the VPE table as invalid */ 930 val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER); 931 val &= ~GICR_VPROPBASER_4_1_VALID; 932 gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER); 933 } 934 935 gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); 936 937 /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */ 938 gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID); 939 gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) | 940 gic_data.rdists.has_rvpeid); 941 gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY); 942 943 /* Detect non-sensical configurations */ 944 if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) { 945 gic_data.rdists.has_direct_lpi = false; 946 gic_data.rdists.has_vlpis = false; 947 gic_data.rdists.has_rvpeid = false; 948 } 949 950 gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr); 951 952 return 1; 953} 954 955static void gic_update_rdist_properties(void) 956{ 957 gic_data.ppi_nr = UINT_MAX; 958 gic_iterate_rdists(__gic_update_rdist_properties); 959 if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) 960 gic_data.ppi_nr = 0; 961 pr_info("%d PPIs implemented\n", gic_data.ppi_nr); 962 if (gic_data.rdists.has_vlpis) 963 pr_info("GICv4 features: %s%s%s\n", 964 gic_data.rdists.has_direct_lpi ? "DirectLPI " : "", 965 gic_data.rdists.has_rvpeid ? "RVPEID " : "", 966 gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : ""); 967} 968 969/* Check whether it's single security state view */ 970static inline bool gic_dist_security_disabled(void) 971{ 972 return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; 973} 974 975static void gic_cpu_sys_reg_init(void) 976{ 977 int i, cpu = smp_processor_id(); 978 u64 mpidr = cpu_logical_map(cpu); 979 u64 need_rss = MPIDR_RS(mpidr); 980 bool group0; 981 u32 pribits; 982 983 /* 984 * Need to check that the SRE bit has actually been set. 

static void gic_cpu_sys_reg_init(void)
{
	int i, cpu = smp_processor_id();
	u64 mpidr = cpu_logical_map(cpu);
	u64 need_rss = MPIDR_RS(mpidr);
	bool group0;
	u32 pribits;

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	pribits = gic_get_pribits();

	group0 = gic_has_group0();

	/* Set priority mask register */
	if (!gic_prio_masking_enabled()) {
		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
	} else if (gic_supports_nmi()) {
		/*
		 * Mismatched configuration with the boot CPU: the system
		 * is likely to die as interrupt masking will not work
		 * properly on all CPUs.
		 *
		 * The boot CPU calls this function before enabling NMI support,
		 * and as a result we'll never see this warning in the boot path
		 * for that CPU.
		 */
		if (static_branch_unlikely(&gic_nonsecure_priorities))
			WARN_ON(!group0 || gic_dist_security_disabled());
		else
			WARN_ON(group0 && !gic_dist_security_disabled());
	}

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);

	if (static_branch_likely(&supports_deactivate_key)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* Always whack Group0 before Group1 */
	if (group0) {
		switch (pribits) {
		case 8:
		case 7:
			write_gicreg(0, ICC_AP0R3_EL1);
			write_gicreg(0, ICC_AP0R2_EL1);
			fallthrough;
		case 6:
			write_gicreg(0, ICC_AP0R1_EL1);
			fallthrough;
		case 5:
		case 4:
			write_gicreg(0, ICC_AP0R0_EL1);
		}

		isb();
	}

	switch (pribits) {
	case 8:
	case 7:
		write_gicreg(0, ICC_AP1R3_EL1);
		write_gicreg(0, ICC_AP1R2_EL1);
		fallthrough;
	case 6:
		write_gicreg(0, ICC_AP1R1_EL1);
		fallthrough;
	case 5:
	case 4:
		write_gicreg(0, ICC_AP1R0_EL1);
	}

	isb();

	/* ... and let's hit the road... */
	gic_write_grpen1(1);

	/* Keep the RSS capability status in per_cpu variable */
	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

	/* Check that all the CPUs are capable of sending SGIs to other CPUs */
	for_each_online_cpu(i) {
		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

		need_rss |= MPIDR_RS(cpu_logical_map(i));
		if (need_rss && (!have_rss))
			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
				cpu, (unsigned long)mpidr,
				i, (unsigned long)cpu_logical_map(i));
	}
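
	/*
	 * RSS example (illustrative): an SGI target list only covers
	 * Aff0 values 0-15. A CPU with MPIDR Aff0 = 0x23 has
	 * MPIDR_RS() = 2, so it can only be targeted when both ends
	 * implement Range Selector support.
	 */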
1090 */ 1091 if (need_rss && (!gic_data.has_rss)) 1092 pr_crit_once("RSS is required but GICD doesn't support it\n"); 1093} 1094 1095static bool gicv3_nolpi; 1096 1097static int __init gicv3_nolpi_cfg(char *buf) 1098{ 1099 return strtobool(buf, &gicv3_nolpi); 1100} 1101early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); 1102 1103static int gic_dist_supports_lpis(void) 1104{ 1105 return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && 1106 !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && 1107 !gicv3_nolpi); 1108} 1109 1110static void gic_cpu_init(void) 1111{ 1112 void __iomem *rbase; 1113 int i; 1114 1115 /* Register ourselves with the rest of the world */ 1116 if (gic_populate_rdist()) 1117 return; 1118 1119 gic_enable_redist(true); 1120 1121 WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) && 1122 !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange), 1123 "Distributor has extended ranges, but CPU%d doesn't\n", 1124 smp_processor_id()); 1125 1126 rbase = gic_data_rdist_sgi_base(); 1127 1128 /* Configure SGIs/PPIs as non-secure Group-1 */ 1129 for (i = 0; i < gic_data.ppi_nr + 16; i += 32) 1130 writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); 1131 1132 gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp); 1133 1134 /* initialise system registers */ 1135 gic_cpu_sys_reg_init(); 1136} 1137 1138#ifdef CONFIG_SMP 1139 1140#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) 1141#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) 1142 1143static int gic_starting_cpu(unsigned int cpu) 1144{ 1145 gic_cpu_init(); 1146 1147 if (gic_dist_supports_lpis()) 1148 its_cpu_init(); 1149 1150 return 0; 1151} 1152 1153static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, 1154 unsigned long cluster_id) 1155{ 1156 int next_cpu, cpu = *base_cpu; 1157 unsigned long mpidr = cpu_logical_map(cpu); 1158 u16 tlist = 0; 1159 1160 while (cpu < nr_cpu_ids) { 1161 tlist |= 1 << (mpidr & 0xf); 1162 1163 next_cpu = cpumask_next(cpu, mask); 1164 if (next_cpu >= nr_cpu_ids) 1165 goto out; 1166 cpu = next_cpu; 1167 1168 mpidr = cpu_logical_map(cpu); 1169 1170 if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { 1171 cpu--; 1172 goto out; 1173 } 1174 } 1175out: 1176 *base_cpu = cpu; 1177 return tlist; 1178} 1179 1180#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ 1181 (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ 1182 << ICC_SGI1R_AFFINITY_## level ##_SHIFT) 1183 1184static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) 1185{ 1186 u64 val; 1187 1188 val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | 1189 MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | 1190 irq << ICC_SGI1R_SGI_ID_SHIFT | 1191 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | 1192 MPIDR_TO_SGI_RS(cluster_id) | 1193 tlist << ICC_SGI1R_TARGET_LIST_SHIFT); 1194 1195 pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); 1196 gic_write_sgi1r(val); 1197} 1198 1199static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) 1200{ 1201 int cpu; 1202 1203 if (WARN_ON(d->hwirq >= 16)) 1204 return; 1205 1206 /* 1207 * Ensure that stores to Normal memory are visible to the 1208 * other CPUs before issuing the IPI. 
1209 */ 1210 wmb(); 1211 1212 for_each_cpu(cpu, mask) { 1213 u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); 1214 u16 tlist; 1215 1216 tlist = gic_compute_target_list(&cpu, mask, cluster_id); 1217 gic_send_sgi(cluster_id, tlist, d->hwirq); 1218 } 1219 1220 /* Force the above writes to ICC_SGI1R_EL1 to be executed */ 1221 isb(); 1222} 1223 1224static void __init gic_smp_init(void) 1225{ 1226 struct irq_fwspec sgi_fwspec = { 1227 .fwnode = gic_data.fwnode, 1228 .param_count = 1, 1229 }; 1230 int base_sgi; 1231 1232 cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, 1233 "irqchip/arm/gicv3:starting", 1234 gic_starting_cpu, NULL); 1235 1236 /* Register all 8 non-secure SGIs */ 1237 base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8, 1238 NUMA_NO_NODE, &sgi_fwspec, 1239 false, NULL); 1240 if (WARN_ON(base_sgi <= 0)) 1241 return; 1242 1243 set_smp_ipi_range(base_sgi, 8); 1244} 1245 1246static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 1247 bool force) 1248{ 1249 unsigned int cpu; 1250 u32 offset, index; 1251 void __iomem *reg; 1252 int enabled; 1253 u64 val; 1254 1255 if (force) 1256 cpu = cpumask_first(mask_val); 1257 else 1258 cpu = cpumask_any_and(mask_val, cpu_online_mask); 1259 1260 if (cpu >= nr_cpu_ids) 1261 return -EINVAL; 1262 1263 if (gic_irq_in_rdist(d)) 1264 return -EINVAL; 1265 1266 /* If interrupt was enabled, disable it first */ 1267 enabled = gic_peek_irq(d, GICD_ISENABLER); 1268 if (enabled) 1269 gic_mask_irq(d); 1270 1271 offset = convert_offset_index(d, GICD_IROUTER, &index); 1272 reg = gic_dist_base(d) + offset + (index * 8); 1273 val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); 1274 1275 gic_write_irouter(val, reg); 1276 1277 /* 1278 * If the interrupt was enabled, enabled it again. Otherwise, 1279 * just wait for the distributor to have digested our changes. 
1280 */ 1281 if (enabled) 1282 gic_unmask_irq(d); 1283 else 1284 gic_dist_wait_for_rwp(); 1285 1286 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 1287 1288 return IRQ_SET_MASK_OK_DONE; 1289} 1290#else 1291#define gic_set_affinity NULL 1292#define gic_ipi_send_mask NULL 1293#define gic_smp_init() do { } while(0) 1294#endif 1295 1296static int gic_retrigger(struct irq_data *data) 1297{ 1298 return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); 1299} 1300 1301#ifdef CONFIG_CPU_PM 1302static int gic_cpu_pm_notifier(struct notifier_block *self, 1303 unsigned long cmd, void *v) 1304{ 1305 if (cmd == CPU_PM_EXIT) { 1306 if (gic_dist_security_disabled()) 1307 gic_enable_redist(true); 1308 gic_cpu_sys_reg_init(); 1309 } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { 1310 gic_write_grpen1(0); 1311 gic_enable_redist(false); 1312 } 1313 return NOTIFY_OK; 1314} 1315 1316static struct notifier_block gic_cpu_pm_notifier_block = { 1317 .notifier_call = gic_cpu_pm_notifier, 1318}; 1319 1320static void gic_cpu_pm_init(void) 1321{ 1322 cpu_pm_register_notifier(&gic_cpu_pm_notifier_block); 1323} 1324 1325#else 1326static inline void gic_cpu_pm_init(void) { } 1327#endif /* CONFIG_CPU_PM */ 1328 1329static struct irq_chip gic_chip = { 1330 .name = "GICv3", 1331 .irq_mask = gic_mask_irq, 1332 .irq_unmask = gic_unmask_irq, 1333 .irq_eoi = gic_eoi_irq, 1334 .irq_set_type = gic_set_type, 1335 .irq_set_affinity = gic_set_affinity, 1336 .irq_retrigger = gic_retrigger, 1337 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 1338 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 1339 .irq_nmi_setup = gic_irq_nmi_setup, 1340 .irq_nmi_teardown = gic_irq_nmi_teardown, 1341 .ipi_send_mask = gic_ipi_send_mask, 1342 .flags = IRQCHIP_SET_TYPE_MASKED | 1343 IRQCHIP_SKIP_SET_WAKE | 1344 IRQCHIP_MASK_ON_SUSPEND, 1345}; 1346 1347static struct irq_chip gic_eoimode1_chip = { 1348 .name = "GICv3", 1349 .irq_mask = gic_eoimode1_mask_irq, 1350 .irq_unmask = gic_unmask_irq, 1351 .irq_eoi = gic_eoimode1_eoi_irq, 1352 .irq_set_type = gic_set_type, 1353 .irq_set_affinity = gic_set_affinity, 1354 .irq_retrigger = gic_retrigger, 1355 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 1356 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 1357 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, 1358 .irq_nmi_setup = gic_irq_nmi_setup, 1359 .irq_nmi_teardown = gic_irq_nmi_teardown, 1360 .ipi_send_mask = gic_ipi_send_mask, 1361 .flags = IRQCHIP_SET_TYPE_MASKED | 1362 IRQCHIP_SKIP_SET_WAKE | 1363 IRQCHIP_MASK_ON_SUSPEND, 1364}; 1365 1366static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, 1367 irq_hw_number_t hw) 1368{ 1369 struct irq_chip *chip = &gic_chip; 1370 struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); 1371 1372 if (static_branch_likely(&supports_deactivate_key)) 1373 chip = &gic_eoimode1_chip; 1374 1375 switch (__get_intid_range(hw)) { 1376 case SGI_RANGE: 1377 irq_set_percpu_devid(irq); 1378 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1379 handle_percpu_devid_fasteoi_ipi, 1380 NULL, NULL); 1381 break; 1382 1383 case PPI_RANGE: 1384 case EPPI_RANGE: 1385 irq_set_percpu_devid(irq); 1386 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1387 handle_percpu_devid_irq, NULL, NULL); 1388 break; 1389 1390 case SPI_RANGE: 1391 case ESPI_RANGE: 1392 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1393 handle_fasteoi_irq, NULL, NULL); 1394 irq_set_probe(irq); 1395 irqd_set_single_target(irqd); 1396 break; 1397 1398 case LPI_RANGE: 1399 if 

	case LPI_RANGE:
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		break;

	default:
		return -EPERM;
	}

	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}

static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
		*hwirq = fwspec->param[0];
		*type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}

	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		case 2:			/* ESPI */
			*hwirq = fwspec->param[1] + ESPI_BASE_INTID;
			break;
		case 3:			/* EPPI */
			*hwirq = fwspec->param[1] + EPPI_BASE_INTID;
			break;
		case GIC_IRQ_TYPE_LPI:	/* LPI */
			*hwirq = fwspec->param[1];
			break;
		case GIC_IRQ_TYPE_PARTITION:
			*hwirq = fwspec->param[1];
			if (fwspec->param[1] >= 16)
				*hwirq += EPPI_BASE_INTID - 16;
			else
				*hwirq += 16;
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

		/*
		 * Make it clear that broken DTs are... broken.
		 * Partitioned PPIs are an unfortunate exception.
		 */
		WARN_ON(*type == IRQ_TYPE_NONE &&
			fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		if (fwspec->param[0] < 16) {
			pr_err(FW_BUG "Illegal GSI%d translation request\n",
			       fwspec->param[0]);
			return -EINVAL;
		}

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];

		WARN_ON(*type == IRQ_TYPE_NONE);
		return 0;
	}

	return -EINVAL;
}
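
/*
 * DT translation example (illustrative): a device node using
 * interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH> has param[0] = 0 and
 * translates to hwirq 42 (10 + 32), while <GIC_PPI 9 ...> has
 * param[0] = 1 and yields hwirq 25 (9 + 16).
 */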
1535 */ 1536 if (fwspec->param_count >= 4 && 1537 fwspec->param[0] == 1 && fwspec->param[3] != 0 && 1538 gic_data.ppi_descs) 1539 return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]); 1540 1541 return d == gic_data.domain; 1542} 1543 1544static const struct irq_domain_ops gic_irq_domain_ops = { 1545 .translate = gic_irq_domain_translate, 1546 .alloc = gic_irq_domain_alloc, 1547 .free = gic_irq_domain_free, 1548 .select = gic_irq_domain_select, 1549}; 1550 1551static int partition_domain_translate(struct irq_domain *d, 1552 struct irq_fwspec *fwspec, 1553 unsigned long *hwirq, 1554 unsigned int *type) 1555{ 1556 struct device_node *np; 1557 int ret; 1558 1559 if (!gic_data.ppi_descs) 1560 return -ENOMEM; 1561 1562 np = of_find_node_by_phandle(fwspec->param[3]); 1563 if (WARN_ON(!np)) 1564 return -EINVAL; 1565 1566 ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]], 1567 of_node_to_fwnode(np)); 1568 if (ret < 0) 1569 return ret; 1570 1571 *hwirq = ret; 1572 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; 1573 1574 return 0; 1575} 1576 1577static const struct irq_domain_ops partition_domain_ops = { 1578 .translate = partition_domain_translate, 1579 .select = gic_irq_domain_select, 1580}; 1581 1582static bool gic_enable_quirk_msm8996(void *data) 1583{ 1584 struct gic_chip_data *d = data; 1585 1586 d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; 1587 1588 return true; 1589} 1590 1591static bool gic_enable_quirk_mtk_gicr(void *data) 1592{ 1593 struct gic_chip_data *d = data; 1594 1595 d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE; 1596 1597 return true; 1598} 1599 1600static bool gic_enable_quirk_cavium_38539(void *data) 1601{ 1602 struct gic_chip_data *d = data; 1603 1604 d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539; 1605 1606 return true; 1607} 1608 1609static bool gic_enable_quirk_hip06_07(void *data) 1610{ 1611 struct gic_chip_data *d = data; 1612 1613 /* 1614 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite 1615 * not being an actual ARM implementation). The saving grace is 1616 * that GIC-600 doesn't have ESPI, so nothing to do in that case. 1617 * HIP07 doesn't even have a proper IIDR, and still pretends to 1618 * have ESPI. In both cases, put them right. 1619 */ 1620 if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { 1621 /* Zero both ESPI and the RES0 field next to it... */ 1622 d->rdists.gicd_typer &= ~GENMASK(9, 8); 1623 return true; 1624 } 1625 1626 return false; 1627} 1628 1629static const struct gic_quirk gic_quirks[] = { 1630 { 1631 .desc = "GICv3: Qualcomm MSM8996 broken firmware", 1632 .compatible = "qcom,msm8996-gic-v3", 1633 .init = gic_enable_quirk_msm8996, 1634 }, 1635 { 1636 .desc = "GICv3: Mediatek Chromebook GICR save problem", 1637 .property = "mediatek,broken-save-restore-fw", 1638 .init = gic_enable_quirk_mtk_gicr, 1639 }, 1640 { 1641 .desc = "GICv3: HIP06 erratum 161010803", 1642 .iidr = 0x0204043b, 1643 .mask = 0xffffffff, 1644 .init = gic_enable_quirk_hip06_07, 1645 }, 1646 { 1647 .desc = "GICv3: HIP07 erratum 161010803", 1648 .iidr = 0x00000000, 1649 .mask = 0xffffffff, 1650 .init = gic_enable_quirk_hip06_07, 1651 }, 1652 { 1653 /* 1654 * Reserved register accesses generate a Synchronous 1655 * External Abort. 
	{
		/*
		 * Reserved register accesses generate a Synchronous
		 * External Abort. This erratum applies to:
		 * - ThunderX: CN88xx
		 * - OCTEON TX: CN83xx, CN81xx
		 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
		 */
		.desc	= "GICv3: Cavium erratum 38539",
		.iidr	= 0xa000034c,
		.mask	= 0xe8f00fff,
		.init	= gic_enable_quirk_cavium_38539,
	},
	{
	}
};

static void gic_enable_nmi_support(void)
{
	int i;

	if (!gic_prio_masking_enabled())
		return;

	if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) {
		pr_warn("Skipping NMI enable due to firmware issues\n");
		return;
	}

	ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
	if (!ppi_nmi_refs)
		return;

	for (i = 0; i < gic_data.ppi_nr; i++)
		refcount_set(&ppi_nmi_refs[i], 0);

	/*
	 * Linux itself doesn't use 1:N distribution, so has no need to
	 * set PMHE. The only reason to have it set is if EL3 requires it
	 * (and we can't change it).
	 */
	if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
		static_branch_enable(&gic_pmr_sync);

	pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
		static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");

	/*
	 * How priority values are used by the GIC depends on two things:
	 * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
	 * and if Group 0 interrupts can be delivered to Linux in the non-secure
	 * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
	 * ICC_PMR_EL1 register and the priority that software assigns to
	 * interrupts:
	 *
	 * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
	 * -----------------------------------------------------------
	 *      1       |      -      |  unchanged  |    unchanged
	 * -----------------------------------------------------------
	 *      0       |      1      |  non-secure |    non-secure
	 * -----------------------------------------------------------
	 *      0       |      0      |  unchanged  |    non-secure
	 *
	 * where non-secure means that the value is right-shifted by one and the
	 * MSB bit set, to make it fit in the non-secure priority range.
	 *
	 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
	 * are both either modified or unchanged, we can use the same set of
	 * priorities.
	 *
	 * In the last case, where only the interrupt priorities are modified to
	 * be in the non-secure range, we use a different PMR value to mask IRQs
	 * and the rest of the values that we use remain unchanged.
	 */
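	/*
	 * Worked example for the last row (illustrative, assuming the
	 * usual GICD_INT_DEF_PRI of 0xa0): GICD_INT_NMI_PRI is
	 * 0xa0 & ~0x80 = 0x20. With DS == 0 and SCR_EL3.FIQ == 0, the
	 * distributor presents it as (0x20 >> 1) | 0x80 = 0x90, which is
	 * exactly what GICD_INT_RPR_PRI() computes when the
	 * gic_nonsecure_priorities key is enabled, so the RPR comparison
	 * in gic_handle_irq() still identifies NMIs correctly.
	 */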
1725 */ 1726 if (gic_has_group0() && !gic_dist_security_disabled()) 1727 static_branch_enable(&gic_nonsecure_priorities); 1728 1729 static_branch_enable(&supports_pseudo_nmis); 1730 1731 if (static_branch_likely(&supports_deactivate_key)) 1732 gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; 1733 else 1734 gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; 1735} 1736 1737static int __init gic_init_bases(void __iomem *dist_base, 1738 struct redist_region *rdist_regs, 1739 u32 nr_redist_regions, 1740 u64 redist_stride, 1741 struct fwnode_handle *handle) 1742{ 1743 u32 typer; 1744 int err; 1745 1746 if (!is_hyp_mode_available()) 1747 static_branch_disable(&supports_deactivate_key); 1748 1749 if (static_branch_likely(&supports_deactivate_key)) 1750 pr_info("GIC: Using split EOI/Deactivate mode\n"); 1751 1752 gic_data.fwnode = handle; 1753 gic_data.dist_base = dist_base; 1754 gic_data.redist_regions = rdist_regs; 1755 gic_data.nr_redist_regions = nr_redist_regions; 1756 gic_data.redist_stride = redist_stride; 1757 1758 /* 1759 * Find out how many interrupts are supported. 1760 */ 1761 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); 1762 gic_data.rdists.gicd_typer = typer; 1763 1764 gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), 1765 gic_quirks, &gic_data); 1766 1767 pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); 1768 pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); 1769 1770 /* 1771 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the 1772 * architecture spec (which says that reserved registers are RES0). 1773 */ 1774 if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539)) 1775 gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); 1776 1777 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, 1778 &gic_data); 1779 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); 1780 gic_data.rdists.has_rvpeid = true; 1781 gic_data.rdists.has_vlpis = true; 1782 gic_data.rdists.has_direct_lpi = true; 1783 gic_data.rdists.has_vpend_valid_dirty = true; 1784 1785 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { 1786 err = -ENOMEM; 1787 goto out_free; 1788 } 1789 1790 irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); 1791 1792 gic_data.has_rss = !!(typer & GICD_TYPER_RSS); 1793 pr_info("Distributor has %sRange Selector support\n", 1794 gic_data.has_rss ? 
"" : "no "); 1795 1796 if (typer & GICD_TYPER_MBIS) { 1797 err = mbi_init(handle, gic_data.domain); 1798 if (err) 1799 pr_err("Failed to initialize MBIs\n"); 1800 } 1801 1802 set_handle_irq(gic_handle_irq); 1803 1804 gic_update_rdist_properties(); 1805 1806 gic_dist_init(); 1807 gic_cpu_init(); 1808 gic_smp_init(); 1809 gic_cpu_pm_init(); 1810 1811 if (gic_dist_supports_lpis()) { 1812 its_init(handle, &gic_data.rdists, gic_data.domain); 1813 its_cpu_init(); 1814 } else { 1815 if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) 1816 gicv2m_init(handle, gic_data.domain); 1817 } 1818 1819 gic_enable_nmi_support(); 1820 1821 return 0; 1822 1823out_free: 1824 if (gic_data.domain) 1825 irq_domain_remove(gic_data.domain); 1826 free_percpu(gic_data.rdists.rdist); 1827 return err; 1828} 1829 1830static int __init gic_validate_dist_version(void __iomem *dist_base) 1831{ 1832 u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; 1833 1834 if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) 1835 return -ENODEV; 1836 1837 return 0; 1838} 1839 1840/* Create all possible partitions at boot time */ 1841static void __init gic_populate_ppi_partitions(struct device_node *gic_node) 1842{ 1843 struct device_node *parts_node, *child_part; 1844 int part_idx = 0, i; 1845 int nr_parts; 1846 struct partition_affinity *parts; 1847 1848 parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); 1849 if (!parts_node) 1850 return; 1851 1852 gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); 1853 if (!gic_data.ppi_descs) 1854 goto out_put_node; 1855 1856 nr_parts = of_get_child_count(parts_node); 1857 1858 if (!nr_parts) 1859 goto out_put_node; 1860 1861 parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); 1862 if (WARN_ON(!parts)) 1863 goto out_put_node; 1864 1865 for_each_child_of_node(parts_node, child_part) { 1866 struct partition_affinity *part; 1867 int n; 1868 1869 part = &parts[part_idx]; 1870 1871 part->partition_id = of_node_to_fwnode(child_part); 1872 1873 pr_info("GIC: PPI partition %pOFn[%d] { ", 1874 child_part, part_idx); 1875 1876 n = of_property_count_elems_of_size(child_part, "affinity", 1877 sizeof(u32)); 1878 WARN_ON(n <= 0); 1879 1880 for (i = 0; i < n; i++) { 1881 int err, cpu; 1882 u32 cpu_phandle; 1883 struct device_node *cpu_node; 1884 1885 err = of_property_read_u32_index(child_part, "affinity", 1886 i, &cpu_phandle); 1887 if (WARN_ON(err)) 1888 continue; 1889 1890 cpu_node = of_find_node_by_phandle(cpu_phandle); 1891 if (WARN_ON(!cpu_node)) 1892 continue; 1893 1894 cpu = of_cpu_node_to_id(cpu_node); 1895 if (WARN_ON(cpu < 0)) { 1896 of_node_put(cpu_node); 1897 continue; 1898 } 1899 1900 pr_cont("%pOF[%d] ", cpu_node, cpu); 1901 1902 cpumask_set_cpu(cpu, &part->mask); 1903 of_node_put(cpu_node); 1904 } 1905 1906 pr_cont("}\n"); 1907 part_idx++; 1908 } 1909 1910 for (i = 0; i < gic_data.ppi_nr; i++) { 1911 unsigned int irq; 1912 struct partition_desc *desc; 1913 struct irq_fwspec ppi_fwspec = { 1914 .fwnode = gic_data.fwnode, 1915 .param_count = 3, 1916 .param = { 1917 [0] = GIC_IRQ_TYPE_PARTITION, 1918 [1] = i, 1919 [2] = IRQ_TYPE_NONE, 1920 }, 1921 }; 1922 1923 irq = irq_create_fwspec_mapping(&ppi_fwspec); 1924 if (WARN_ON(!irq)) 1925 continue; 1926 desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, 1927 irq, &partition_domain_ops); 1928 if (WARN_ON(!desc)) 1929 continue; 1930 1931 gic_data.ppi_descs[i] = desc; 1932 } 1933 1934out_put_node: 1935 of_node_put(parts_node); 1936} 1937 1938static void __init 

static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource r;
	u32 gicv_idx;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	if (of_property_read_u32(node, "#redistributor-regions",
				 &gicv_idx))
		gicv_idx = 1;

	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, gicv_idx, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	gic_set_kvm_info(&gic_v3_kvm_info);
}

static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%pOF: unable to map gic dist registers\n", node);
		return -ENXIO;
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
			     GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	gic_enable_of_quirks(node, gic_quirks, &gic_data);

	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
			     redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);

	if (static_branch_likely(&supports_deactivate_key))
		gic_of_setup_kvm_info(node);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);

#ifdef CONFIG_ACPI
static struct
{
	void __iomem *dist_base;
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;
	int enabled_rdists;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vcpu_base;
} acpi_data __initdata;

static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
	static int count = 0;

	acpi_data.redist_regs[count].phys_base = phys_base;
	acpi_data.redist_regs[count].redist_base = redist_base;
	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
	count++;
}
static int __init
gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
			   const unsigned long end)
{
	struct acpi_madt_generic_redistributor *redist =
		(struct acpi_madt_generic_redistributor *)header;
	void __iomem *redist_base;

	redist_base = ioremap(redist->base_address, redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}

	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}

static int __init
gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	/* A GICC entry with !ACPI_MADT_ENABLED is unusable, so skip it */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}

static int __init gic_acpi_collect_gicr_base(void)
{
	acpi_tbl_entry_handler redist_parser;
	enum acpi_madt_type type;

	if (acpi_data.single_redist) {
		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
		redist_parser = gic_acpi_parse_madt_gicc;
	} else {
		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
		redist_parser = gic_acpi_parse_madt_redist;
	}

	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}

static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}

static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If GICC is enabled and has a valid GICR base address, then
	 * the GICR base is presented via GICC.
	 */
	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
		acpi_data.enabled_rdists++;
		return 0;
	}

	/*
	 * It is perfectly valid for firmware to pass a disabled GICC
	 * entry; the driver should not treat that as an error, so skip
	 * the entry instead of failing the probe.
	 */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	return -ENODEV;
}
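/*
 * Summary of the two (mutually exclusive) ways the MADT can describe
 * redistributors, as handled by the matchers above:
 *
 *  - GICR subtables: each entry describes a contiguous region holding
 *    the redistributors of several CPUs (single_redist == false).
 *  - GICC subtables: each enabled CPU entry carries its own
 *    gicr_base_address (single_redist == true), so one region is
 *    registered per enabled CPU.
 */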
static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. Mixing
	 * redistributor descriptions is not allowed: GICR and GICC
	 * subtables have to be mutually exclusive.
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0) {
		acpi_data.single_redist = true;
		count = acpi_data.enabled_rdists;
	}

	return count;
}

static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}

static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
						const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	int maint_irq_mode;
	static bool first_madt = true;

	/* Skip unusable CPUs */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}

	/*
	 * The maintenance interrupt and GICV should be the same for every CPU
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}

static bool __init gic_acpi_collect_virt_info(void)
{
	int count;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_virt_madt_gicc, 0);

	return (count > 0);
}

#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	gic_set_kvm_info(&gic_v3_kvm_info);
}
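/*
 * ACPI probe path: map the distributor from the MADT GICD entry,
 * allocate and populate the redistributor regions counted earlier,
 * then hand everything to gic_init_bases() just as the DT path does,
 * with a synthesised fwnode and a redistributor stride of 0.
 */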
static int __init
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct fwnode_handle *domain_handle;
	size_t size;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up\n",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}

	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
			     acpi_data.nr_redist_regions, 0, domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif