// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * Common code for the XIVE interrupt controller (POWER9 and later),
 * shared between the native (OPAL) and pSeries (hcall) backends.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/vmalloc.h>

#include <asm/debugfs.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"

#undef DEBUG_FLUSH
#undef DEBUG_ALL

/* Per-CPU verbose tracing; compiled out unless DEBUG_ALL is defined above */
#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
					 smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)	do { } while(0)
#endif

/* Set once a XIVE backend has been successfully initialized */
bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
/* Set when "xive=off" (or equivalent) is passed on the command line */
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA (Thread Interrupt Management Area) exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops (native or pSeries), set at init time */
static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPIs all use the same logical irq number */
static u32 xive_ipi_irq;
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)

/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
798c2ecf20Sopenharmony_ci * 808c2ecf20Sopenharmony_ci * The queue pointer is moved forward unless "just_peek" is set 818c2ecf20Sopenharmony_ci */ 828c2ecf20Sopenharmony_cistatic u32 xive_read_eq(struct xive_q *q, bool just_peek) 838c2ecf20Sopenharmony_ci{ 848c2ecf20Sopenharmony_ci u32 cur; 858c2ecf20Sopenharmony_ci 868c2ecf20Sopenharmony_ci if (!q->qpage) 878c2ecf20Sopenharmony_ci return 0; 888c2ecf20Sopenharmony_ci cur = be32_to_cpup(q->qpage + q->idx); 898c2ecf20Sopenharmony_ci 908c2ecf20Sopenharmony_ci /* Check valid bit (31) vs current toggle polarity */ 918c2ecf20Sopenharmony_ci if ((cur >> 31) == q->toggle) 928c2ecf20Sopenharmony_ci return 0; 938c2ecf20Sopenharmony_ci 948c2ecf20Sopenharmony_ci /* If consuming from the queue ... */ 958c2ecf20Sopenharmony_ci if (!just_peek) { 968c2ecf20Sopenharmony_ci /* Next entry */ 978c2ecf20Sopenharmony_ci q->idx = (q->idx + 1) & q->msk; 988c2ecf20Sopenharmony_ci 998c2ecf20Sopenharmony_ci /* Wrap around: flip valid toggle */ 1008c2ecf20Sopenharmony_ci if (q->idx == 0) 1018c2ecf20Sopenharmony_ci q->toggle ^= 1; 1028c2ecf20Sopenharmony_ci } 1038c2ecf20Sopenharmony_ci /* Mask out the valid bit (31) */ 1048c2ecf20Sopenharmony_ci return cur & 0x7fffffff; 1058c2ecf20Sopenharmony_ci} 1068c2ecf20Sopenharmony_ci 1078c2ecf20Sopenharmony_ci/* 1088c2ecf20Sopenharmony_ci * Scans all the queue that may have interrupts in them 1098c2ecf20Sopenharmony_ci * (based on "pending_prio") in priority order until an 1108c2ecf20Sopenharmony_ci * interrupt is found or all the queues are empty. 1118c2ecf20Sopenharmony_ci * 1128c2ecf20Sopenharmony_ci * Then updates the CPPR (Current Processor Priority 1138c2ecf20Sopenharmony_ci * Register) based on the most favored interrupt found 1148c2ecf20Sopenharmony_ci * (0xff if none) and return what was found (0 if none). 
 *
 * If just_peek is set, return the most favored pending
 * interrupt if any but don't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" "the pending_count" of a queue
 * into the "count" when that queue is observed to be empty.
 * This is used to keep track of the amount of interrupts
 * targeting a queue. When an interrupt is moved away from
 * a queue, we only decrement that queue count once the queue
 * has been observed empty to avoid races.
 */
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
	u32 irq = 0;
	u8 prio = 0;

	/* Find highest pending priority (lowest set bit = most favored) */
	while (xc->pending_prio != 0) {
		struct xive_q *q;

		prio = ffs(xc->pending_prio) - 1;
		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

		/* Try to fetch */
		irq = xive_read_eq(&xc->queue[prio], just_peek);

		/* Found something ? That's it */
		if (irq) {
			if (just_peek || irq_to_desc(irq))
				break;
			/*
			 * We should never get here; if we do then we must
			 * have failed to synchronize the interrupt properly
			 * when shutting it down.
			 */
			pr_crit("xive: got interrupt %d without descriptor, dropping\n",
				irq);
			WARN_ON(1);
			continue;
		}

		/* Queue at this prio observed empty: clear its pending bit */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away. See description of
		 * xive_dec_target_count()
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);
			if (p) {
				/* pending_count must never exceed the live count */
				WARN_ON(p > atomic_read(&q->count));
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If nothing was found, set CPPR to 0xff (accept everything) */
	if (irq == 0)
		prio = 0xff;

	/* Update HW CPPR to match if necessary (MMIO store to the TIMA) */
	if (prio != xc->cppr) {
		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
		xc->cppr = prio;
		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
	}

	return irq;
}

/*
 * This is used to perform the magic loads from an ESB
 * described in xive-regs.h
 */
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	/* Use the load-with-memory-ordering variant for PQ=10 with store EOI */
	if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		offset |= XIVE_ESB_LD_ST_MO;

	/* Handle HW errata */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	/* Sources behind a hypervisor go through a firmware call, not MMIO */
	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
	else
		val = in_be64(xd->eoi_mmio + offset);

	return (u8)val;
}

/* Store counterpart of xive_esb_read(): magic stores to an ESB page */
static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
	/* Handle HW errata */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	/* Sources behind a hypervisor go through a firmware call, not MMIO */
	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
	else
		out_be64(xd->eoi_mmio + offset, data);
}

#ifdef CONFIG_XMON
/* Dump the first two entries of an event queue for the xmon debugger */
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
	u32 i0, i1, idx;

	if (!q->qpage)
		return;
	idx = q->idx;
	i0 = be32_to_cpup(q->qpage + idx);
	idx = (idx + 1) & q->msk;
	i1 = be32_to_cpup(q->qpage + idx);
	xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
		    q->idx, q->toggle, i0, i1);
}

/* xmon helper: dump a CPU's XIVE state (pending prios, CPPR, IPI PQ, EQ) */
notrace void xmon_xive_do_dump(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	xmon_printf("CPU %d:", cpu);
	if (xc) {
		xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

#ifdef CONFIG_SMP
		{
			/* Non-destructive read of the IPI source PQ bits */
			u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);

			xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
				    val & XIVE_ESB_VAL_P ? 'P' : '-',
				    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
		}
#endif
		xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
	}
	xmon_printf("\n");
}

/* Map a HW irq number back to its irq_data via the XIVE domain, if mapped */
static struct irq_data *xive_get_irq_data(u32 hw_irq)
{
	unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);

	return irq ? irq_get_irq_data(irq) : NULL;
}

/*
 * xmon helper: print the backend configuration (target/prio/lirq) and,
 * when available, the flags and PQ state of a HW interrupt.
 * Returns 0 on success or the backend error code.
 */
int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return rc;
	}

	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		    hw_irq, target, prio, lirq);

	if (!d)
		d = xive_get_irq_data(hw_irq);

	if (d) {
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
		u64 val = xive_esb_read(xd, XIVE_ESB_GET);

		xmon_printf("flags=%c%c%c PQ=%c%c",
			    xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
			    val & XIVE_ESB_VAL_P ? 'P' : '-',
			    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	}

	xmon_printf("\n");
	return 0;
}

#endif /* CONFIG_XMON */

/* ppc_md.get_irq hook: acknowledge and fetch the next pending interrupt */
static unsigned int xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because EOI decided there was still something
	 * in one of the queues.
	 *
	 * First we perform an ACK cycle in order to update our mask
	 * of pending priorities. This will also have the effect of
	 * updating the CPPR to the most favored pending interrupts.
	 *
	 * In the future, if we have a way to differentiate a first
	 * entry (on HW interrupt) from a replay triggered by EOI,
	 * we could skip this on replays unless we soft-mask tells us
	 * that a new HW interrupt occurred.
	 */
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any (0 means "nothing to service") */
	if (irq == XIVE_BAD_IRQ)
		return 0;
	return irq;
}

/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	/* Peek only: the entry will be consumed by the replayed interrupt */
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
	}
}

/*
 * EOI an interrupt at the source.
There are several methods 3638c2ecf20Sopenharmony_ci * to do this depending on the HW version and source type 3648c2ecf20Sopenharmony_ci */ 3658c2ecf20Sopenharmony_cistatic void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd) 3668c2ecf20Sopenharmony_ci{ 3678c2ecf20Sopenharmony_ci xd->stale_p = false; 3688c2ecf20Sopenharmony_ci /* If the XIVE supports the new "store EOI facility, use it */ 3698c2ecf20Sopenharmony_ci if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) 3708c2ecf20Sopenharmony_ci xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0); 3718c2ecf20Sopenharmony_ci else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { 3728c2ecf20Sopenharmony_ci /* 3738c2ecf20Sopenharmony_ci * The FW told us to call it. This happens for some 3748c2ecf20Sopenharmony_ci * interrupt sources that need additional HW whacking 3758c2ecf20Sopenharmony_ci * beyond the ESB manipulation. For example LPC interrupts 3768c2ecf20Sopenharmony_ci * on P9 DD1.0 needed a latch to be clared in the LPC bridge 3778c2ecf20Sopenharmony_ci * itself. The Firmware will take care of it. 3788c2ecf20Sopenharmony_ci */ 3798c2ecf20Sopenharmony_ci if (WARN_ON_ONCE(!xive_ops->eoi)) 3808c2ecf20Sopenharmony_ci return; 3818c2ecf20Sopenharmony_ci xive_ops->eoi(hw_irq); 3828c2ecf20Sopenharmony_ci } else { 3838c2ecf20Sopenharmony_ci u8 eoi_val; 3848c2ecf20Sopenharmony_ci 3858c2ecf20Sopenharmony_ci /* 3868c2ecf20Sopenharmony_ci * Otherwise for EOI, we use the special MMIO that does 3878c2ecf20Sopenharmony_ci * a clear of both P and Q and returns the old Q, 3888c2ecf20Sopenharmony_ci * except for LSIs where we use the "EOI cycle" special 3898c2ecf20Sopenharmony_ci * load. 
3908c2ecf20Sopenharmony_ci * 3918c2ecf20Sopenharmony_ci * This allows us to then do a re-trigger if Q was set 3928c2ecf20Sopenharmony_ci * rather than synthesizing an interrupt in software 3938c2ecf20Sopenharmony_ci * 3948c2ecf20Sopenharmony_ci * For LSIs the HW EOI cycle is used rather than PQ bits, 3958c2ecf20Sopenharmony_ci * as they are automatically re-triggred in HW when still 3968c2ecf20Sopenharmony_ci * pending. 3978c2ecf20Sopenharmony_ci */ 3988c2ecf20Sopenharmony_ci if (xd->flags & XIVE_IRQ_FLAG_LSI) 3998c2ecf20Sopenharmony_ci xive_esb_read(xd, XIVE_ESB_LOAD_EOI); 4008c2ecf20Sopenharmony_ci else { 4018c2ecf20Sopenharmony_ci eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00); 4028c2ecf20Sopenharmony_ci DBG_VERBOSE("eoi_val=%x\n", eoi_val); 4038c2ecf20Sopenharmony_ci 4048c2ecf20Sopenharmony_ci /* Re-trigger if needed */ 4058c2ecf20Sopenharmony_ci if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio) 4068c2ecf20Sopenharmony_ci out_be64(xd->trig_mmio, 0); 4078c2ecf20Sopenharmony_ci } 4088c2ecf20Sopenharmony_ci } 4098c2ecf20Sopenharmony_ci} 4108c2ecf20Sopenharmony_ci 4118c2ecf20Sopenharmony_ci/* irq_chip eoi callback, called with irq descriptor lock held */ 4128c2ecf20Sopenharmony_cistatic void xive_irq_eoi(struct irq_data *d) 4138c2ecf20Sopenharmony_ci{ 4148c2ecf20Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 4158c2ecf20Sopenharmony_ci struct xive_cpu *xc = __this_cpu_read(xive_cpu); 4168c2ecf20Sopenharmony_ci 4178c2ecf20Sopenharmony_ci DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n", 4188c2ecf20Sopenharmony_ci d->irq, irqd_to_hwirq(d), xc->pending_prio); 4198c2ecf20Sopenharmony_ci 4208c2ecf20Sopenharmony_ci /* 4218c2ecf20Sopenharmony_ci * EOI the source if it hasn't been disabled and hasn't 4228c2ecf20Sopenharmony_ci * been passed-through to a KVM guest 4238c2ecf20Sopenharmony_ci */ 4248c2ecf20Sopenharmony_ci if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) && 4258c2ecf20Sopenharmony_ci !(xd->flags & XIVE_IRQ_NO_EOI)) 
4268c2ecf20Sopenharmony_ci xive_do_source_eoi(irqd_to_hwirq(d), xd); 4278c2ecf20Sopenharmony_ci else 4288c2ecf20Sopenharmony_ci xd->stale_p = true; 4298c2ecf20Sopenharmony_ci 4308c2ecf20Sopenharmony_ci /* 4318c2ecf20Sopenharmony_ci * Clear saved_p to indicate that it's no longer occupying 4328c2ecf20Sopenharmony_ci * a queue slot on the target queue 4338c2ecf20Sopenharmony_ci */ 4348c2ecf20Sopenharmony_ci xd->saved_p = false; 4358c2ecf20Sopenharmony_ci 4368c2ecf20Sopenharmony_ci /* Check for more work in the queue */ 4378c2ecf20Sopenharmony_ci xive_do_queue_eoi(xc); 4388c2ecf20Sopenharmony_ci} 4398c2ecf20Sopenharmony_ci 4408c2ecf20Sopenharmony_ci/* 4418c2ecf20Sopenharmony_ci * Helper used to mask and unmask an interrupt source. This 4428c2ecf20Sopenharmony_ci * is only called for normal interrupts that do not require 4438c2ecf20Sopenharmony_ci * masking/unmasking via firmware. 4448c2ecf20Sopenharmony_ci */ 4458c2ecf20Sopenharmony_cistatic void xive_do_source_set_mask(struct xive_irq_data *xd, 4468c2ecf20Sopenharmony_ci bool mask) 4478c2ecf20Sopenharmony_ci{ 4488c2ecf20Sopenharmony_ci u64 val; 4498c2ecf20Sopenharmony_ci 4508c2ecf20Sopenharmony_ci /* 4518c2ecf20Sopenharmony_ci * If the interrupt had P set, it may be in a queue. 4528c2ecf20Sopenharmony_ci * 4538c2ecf20Sopenharmony_ci * We need to make sure we don't re-enable it until it 4548c2ecf20Sopenharmony_ci * has been fetched from that queue and EOId. We keep 4558c2ecf20Sopenharmony_ci * a copy of that P state and use it to restore the 4568c2ecf20Sopenharmony_ci * ESB accordingly on unmask. 
4578c2ecf20Sopenharmony_ci */ 4588c2ecf20Sopenharmony_ci if (mask) { 4598c2ecf20Sopenharmony_ci val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01); 4608c2ecf20Sopenharmony_ci if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P)) 4618c2ecf20Sopenharmony_ci xd->saved_p = true; 4628c2ecf20Sopenharmony_ci xd->stale_p = false; 4638c2ecf20Sopenharmony_ci } else if (xd->saved_p) { 4648c2ecf20Sopenharmony_ci xive_esb_read(xd, XIVE_ESB_SET_PQ_10); 4658c2ecf20Sopenharmony_ci xd->saved_p = false; 4668c2ecf20Sopenharmony_ci } else { 4678c2ecf20Sopenharmony_ci xive_esb_read(xd, XIVE_ESB_SET_PQ_00); 4688c2ecf20Sopenharmony_ci xd->stale_p = false; 4698c2ecf20Sopenharmony_ci } 4708c2ecf20Sopenharmony_ci} 4718c2ecf20Sopenharmony_ci 4728c2ecf20Sopenharmony_ci/* 4738c2ecf20Sopenharmony_ci * Try to chose "cpu" as a new interrupt target. Increments 4748c2ecf20Sopenharmony_ci * the queue accounting for that target if it's not already 4758c2ecf20Sopenharmony_ci * full. 4768c2ecf20Sopenharmony_ci */ 4778c2ecf20Sopenharmony_cistatic bool xive_try_pick_target(int cpu) 4788c2ecf20Sopenharmony_ci{ 4798c2ecf20Sopenharmony_ci struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 4808c2ecf20Sopenharmony_ci struct xive_q *q = &xc->queue[xive_irq_priority]; 4818c2ecf20Sopenharmony_ci int max; 4828c2ecf20Sopenharmony_ci 4838c2ecf20Sopenharmony_ci /* 4848c2ecf20Sopenharmony_ci * Calculate max number of interrupts in that queue. 4858c2ecf20Sopenharmony_ci * 4868c2ecf20Sopenharmony_ci * We leave a gap of 1 just in case... 4878c2ecf20Sopenharmony_ci */ 4888c2ecf20Sopenharmony_ci max = (q->msk + 1) - 1; 4898c2ecf20Sopenharmony_ci return !!atomic_add_unless(&q->count, 1, max); 4908c2ecf20Sopenharmony_ci} 4918c2ecf20Sopenharmony_ci 4928c2ecf20Sopenharmony_ci/* 4938c2ecf20Sopenharmony_ci * Un-account an interrupt for a target CPU. We don't directly 4948c2ecf20Sopenharmony_ci * decrement q->count since the interrupt might still be present 4958c2ecf20Sopenharmony_ci * in the queue. 
4968c2ecf20Sopenharmony_ci * 4978c2ecf20Sopenharmony_ci * Instead increment a separate counter "pending_count" which 4988c2ecf20Sopenharmony_ci * will be substracted from "count" later when that CPU observes 4998c2ecf20Sopenharmony_ci * the queue to be empty. 5008c2ecf20Sopenharmony_ci */ 5018c2ecf20Sopenharmony_cistatic void xive_dec_target_count(int cpu) 5028c2ecf20Sopenharmony_ci{ 5038c2ecf20Sopenharmony_ci struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 5048c2ecf20Sopenharmony_ci struct xive_q *q = &xc->queue[xive_irq_priority]; 5058c2ecf20Sopenharmony_ci 5068c2ecf20Sopenharmony_ci if (WARN_ON(cpu < 0 || !xc)) { 5078c2ecf20Sopenharmony_ci pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc); 5088c2ecf20Sopenharmony_ci return; 5098c2ecf20Sopenharmony_ci } 5108c2ecf20Sopenharmony_ci 5118c2ecf20Sopenharmony_ci /* 5128c2ecf20Sopenharmony_ci * We increment the "pending count" which will be used 5138c2ecf20Sopenharmony_ci * to decrement the target queue count whenever it's next 5148c2ecf20Sopenharmony_ci * processed and found empty. This ensure that we don't 5158c2ecf20Sopenharmony_ci * decrement while we still have the interrupt there 5168c2ecf20Sopenharmony_ci * occupying a slot. 
5178c2ecf20Sopenharmony_ci */ 5188c2ecf20Sopenharmony_ci atomic_inc(&q->pending_count); 5198c2ecf20Sopenharmony_ci} 5208c2ecf20Sopenharmony_ci 5218c2ecf20Sopenharmony_ci/* Find a tentative CPU target in a CPU mask */ 5228c2ecf20Sopenharmony_cistatic int xive_find_target_in_mask(const struct cpumask *mask, 5238c2ecf20Sopenharmony_ci unsigned int fuzz) 5248c2ecf20Sopenharmony_ci{ 5258c2ecf20Sopenharmony_ci int cpu, first, num, i; 5268c2ecf20Sopenharmony_ci 5278c2ecf20Sopenharmony_ci /* Pick up a starting point CPU in the mask based on fuzz */ 5288c2ecf20Sopenharmony_ci num = min_t(int, cpumask_weight(mask), nr_cpu_ids); 5298c2ecf20Sopenharmony_ci first = fuzz % num; 5308c2ecf20Sopenharmony_ci 5318c2ecf20Sopenharmony_ci /* Locate it */ 5328c2ecf20Sopenharmony_ci cpu = cpumask_first(mask); 5338c2ecf20Sopenharmony_ci for (i = 0; i < first && cpu < nr_cpu_ids; i++) 5348c2ecf20Sopenharmony_ci cpu = cpumask_next(cpu, mask); 5358c2ecf20Sopenharmony_ci 5368c2ecf20Sopenharmony_ci /* Sanity check */ 5378c2ecf20Sopenharmony_ci if (WARN_ON(cpu >= nr_cpu_ids)) 5388c2ecf20Sopenharmony_ci cpu = cpumask_first(cpu_online_mask); 5398c2ecf20Sopenharmony_ci 5408c2ecf20Sopenharmony_ci /* Remember first one to handle wrap-around */ 5418c2ecf20Sopenharmony_ci first = cpu; 5428c2ecf20Sopenharmony_ci 5438c2ecf20Sopenharmony_ci /* 5448c2ecf20Sopenharmony_ci * Now go through the entire mask until we find a valid 5458c2ecf20Sopenharmony_ci * target. 
5468c2ecf20Sopenharmony_ci */ 5478c2ecf20Sopenharmony_ci do { 5488c2ecf20Sopenharmony_ci /* 5498c2ecf20Sopenharmony_ci * We re-check online as the fallback case passes us 5508c2ecf20Sopenharmony_ci * an untested affinity mask 5518c2ecf20Sopenharmony_ci */ 5528c2ecf20Sopenharmony_ci if (cpu_online(cpu) && xive_try_pick_target(cpu)) 5538c2ecf20Sopenharmony_ci return cpu; 5548c2ecf20Sopenharmony_ci cpu = cpumask_next(cpu, mask); 5558c2ecf20Sopenharmony_ci /* Wrap around */ 5568c2ecf20Sopenharmony_ci if (cpu >= nr_cpu_ids) 5578c2ecf20Sopenharmony_ci cpu = cpumask_first(mask); 5588c2ecf20Sopenharmony_ci } while (cpu != first); 5598c2ecf20Sopenharmony_ci 5608c2ecf20Sopenharmony_ci return -1; 5618c2ecf20Sopenharmony_ci} 5628c2ecf20Sopenharmony_ci 5638c2ecf20Sopenharmony_ci/* 5648c2ecf20Sopenharmony_ci * Pick a target CPU for an interrupt. This is done at 5658c2ecf20Sopenharmony_ci * startup or if the affinity is changed in a way that 5668c2ecf20Sopenharmony_ci * invalidates the current target. 
 */
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	/* Rotating start point so successive picks spread across CPUs */
	static unsigned int fuzz;
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	cpumask_var_t mask;
	int cpu = -1;

	/*
	 * If we have chip IDs, first we try to build a mask of
	 * online CPUs matching the source's chip and find a target
	 * in there (keeps the interrupt local to its chip)
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);
		}
		/* Try to find a target */
		if (cpumask_empty(mask))
			cpu = -1;
		else
			cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);
		if (cpu >= 0)
			return cpu;
		/* Chip-local pick failed: undo the fuzz advance */
		fuzz--;
	}

	/* No chip IDs, fallback to using the affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
}

static unsigned int xive_irq_startup(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 6068c2ecf20Sopenharmony_ci unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 6078c2ecf20Sopenharmony_ci int target, rc; 6088c2ecf20Sopenharmony_ci 6098c2ecf20Sopenharmony_ci xd->saved_p = false; 6108c2ecf20Sopenharmony_ci xd->stale_p = false; 6118c2ecf20Sopenharmony_ci pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n", 6128c2ecf20Sopenharmony_ci d->irq, hw_irq, d); 6138c2ecf20Sopenharmony_ci 6148c2ecf20Sopenharmony_ci#ifdef CONFIG_PCI_MSI 6158c2ecf20Sopenharmony_ci /* 6168c2ecf20Sopenharmony_ci * The generic MSI code returns with the interrupt disabled on the 6178c2ecf20Sopenharmony_ci * card, using the MSI mask bits. Firmware doesn't appear to unmask 6188c2ecf20Sopenharmony_ci * at that level, so we do it here by hand. 6198c2ecf20Sopenharmony_ci */ 6208c2ecf20Sopenharmony_ci if (irq_data_get_msi_desc(d)) 6218c2ecf20Sopenharmony_ci pci_msi_unmask_irq(d); 6228c2ecf20Sopenharmony_ci#endif 6238c2ecf20Sopenharmony_ci 6248c2ecf20Sopenharmony_ci /* Pick a target */ 6258c2ecf20Sopenharmony_ci target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d)); 6268c2ecf20Sopenharmony_ci if (target == XIVE_INVALID_TARGET) { 6278c2ecf20Sopenharmony_ci /* Try again breaking affinity */ 6288c2ecf20Sopenharmony_ci target = xive_pick_irq_target(d, cpu_online_mask); 6298c2ecf20Sopenharmony_ci if (target == XIVE_INVALID_TARGET) 6308c2ecf20Sopenharmony_ci return -ENXIO; 6318c2ecf20Sopenharmony_ci pr_warn("irq %d started with broken affinity\n", d->irq); 6328c2ecf20Sopenharmony_ci } 6338c2ecf20Sopenharmony_ci 6348c2ecf20Sopenharmony_ci /* Sanity check */ 6358c2ecf20Sopenharmony_ci if (WARN_ON(target == XIVE_INVALID_TARGET || 6368c2ecf20Sopenharmony_ci target >= nr_cpu_ids)) 6378c2ecf20Sopenharmony_ci target = smp_processor_id(); 6388c2ecf20Sopenharmony_ci 6398c2ecf20Sopenharmony_ci xd->target = target; 6408c2ecf20Sopenharmony_ci 6418c2ecf20Sopenharmony_ci /* 6428c2ecf20Sopenharmony_ci * Configure the logical number to 
be the Linux IRQ number 6438c2ecf20Sopenharmony_ci * and set the target queue 6448c2ecf20Sopenharmony_ci */ 6458c2ecf20Sopenharmony_ci rc = xive_ops->configure_irq(hw_irq, 6468c2ecf20Sopenharmony_ci get_hard_smp_processor_id(target), 6478c2ecf20Sopenharmony_ci xive_irq_priority, d->irq); 6488c2ecf20Sopenharmony_ci if (rc) 6498c2ecf20Sopenharmony_ci return rc; 6508c2ecf20Sopenharmony_ci 6518c2ecf20Sopenharmony_ci /* Unmask the ESB */ 6528c2ecf20Sopenharmony_ci xive_do_source_set_mask(xd, false); 6538c2ecf20Sopenharmony_ci 6548c2ecf20Sopenharmony_ci return 0; 6558c2ecf20Sopenharmony_ci} 6568c2ecf20Sopenharmony_ci 6578c2ecf20Sopenharmony_ci/* called with irq descriptor lock held */ 6588c2ecf20Sopenharmony_cistatic void xive_irq_shutdown(struct irq_data *d) 6598c2ecf20Sopenharmony_ci{ 6608c2ecf20Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 6618c2ecf20Sopenharmony_ci unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 6628c2ecf20Sopenharmony_ci 6638c2ecf20Sopenharmony_ci pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n", 6648c2ecf20Sopenharmony_ci d->irq, hw_irq, d); 6658c2ecf20Sopenharmony_ci 6668c2ecf20Sopenharmony_ci if (WARN_ON(xd->target == XIVE_INVALID_TARGET)) 6678c2ecf20Sopenharmony_ci return; 6688c2ecf20Sopenharmony_ci 6698c2ecf20Sopenharmony_ci /* Mask the interrupt at the source */ 6708c2ecf20Sopenharmony_ci xive_do_source_set_mask(xd, true); 6718c2ecf20Sopenharmony_ci 6728c2ecf20Sopenharmony_ci /* 6738c2ecf20Sopenharmony_ci * Mask the interrupt in HW in the IVT/EAS and set the number 6748c2ecf20Sopenharmony_ci * to be the "bad" IRQ number 6758c2ecf20Sopenharmony_ci */ 6768c2ecf20Sopenharmony_ci xive_ops->configure_irq(hw_irq, 6778c2ecf20Sopenharmony_ci get_hard_smp_processor_id(xd->target), 6788c2ecf20Sopenharmony_ci 0xff, XIVE_BAD_IRQ); 6798c2ecf20Sopenharmony_ci 6808c2ecf20Sopenharmony_ci xive_dec_target_count(xd->target); 6818c2ecf20Sopenharmony_ci xd->target = XIVE_INVALID_TARGET; 6828c2ecf20Sopenharmony_ci} 

/*
 * irq_chip ->irq_unmask callback: re-enable delivery at the ESB source
 * (or via firmware for interrupts flagged XIVE_IRQ_FLAG_MASK_FW).
 */
static void xive_irq_unmask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

	/*
	 * This is a workaround for PCI LSI problems on P9, for
	 * these, we call FW to set the mask. The problems might
	 * be fixed by P9 DD2.0, if that is the case, firmware
	 * will no longer set that flag.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
		/* Unmask by restoring the normal priority in the EAS */
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					xive_irq_priority, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, false);
}

/*
 * irq_chip ->irq_mask callback: counterpart of xive_irq_unmask(),
 * masks at the ESB source or via firmware (priority 0xff) for the
 * XIVE_IRQ_FLAG_MASK_FW workaround case.
 */
static void xive_irq_mask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

	/*
	 * This is a workaround for PCI LSI problems on P9, for
	 * these, we call OPAL to set the mask. The problems might
	 * be fixed by P9 DD2.0, if that is the case, firmware
	 * will no longer set that flag.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
		/* Priority 0xff effectively masks the interrupt in the EAS */
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					0xff, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, true);
}

/*
 * irq_chip ->irq_set_affinity callback: retarget the interrupt to a
 * CPU from @cpumask. Keeps the current target when it is still valid,
 * and skips reprogramming the EAS while the interrupt is forwarded to
 * a KVM guest.
 */
static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u32 target, old_target;
	int rc = 0;

	pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);

	/* Is this valid ? */
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
		return -EINVAL;

	/* Don't do anything if the interrupt isn't started */
	if (!irqd_is_started(d))
		return IRQ_SET_MASK_OK;

	/*
	 * If existing target is already in the new mask, and is
	 * online then do nothing.
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	/* Sanity check */
	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through to
	 * a KVM guest
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);
	if (rc < 0) {
		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
		return rc;
	}

	pr_devel("  target: 0x%x\n", target);
	xd->target = target;

	/* Give up previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}

/*
 * irq_chip ->irq_set_type callback: record the trigger type and warn
 * if it disagrees with what firmware reported (LSI flag).
 */
static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support these. This has really no effect other than setting
	 * the corresponding descriptor bits mind you but those will in turn
	 * affect the resend function when re-enabling an edge interrupt.
	 *
	 * We set the default to edge as explained in map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	/*
	 * Double check it matches what the FW thinks
	 *
	 * NOTE: We don't know yet if the PAPR interface will provide
	 * the LSI vs MSI information apart from the device-tree so
	 * this check might have to move into an optional backend call
	 * that is specific to the native backend
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
	}

	return IRQ_SET_MASK_OK_NOCOPY;
}

/*
 * irq_chip ->irq_retrigger callback: re-fire an edge (MSI) interrupt
 * by forcing PQ=11 then EOIing, which causes a resend.
 */
static int xive_irq_retrigger(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return 0;

	/*
	 * To perform a retrigger, we first set the PQ bits to
	 * 11, then perform an EOI.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * Note: We pass "0" to the hw_irq argument in order to
	 * avoid calling into the backend EOI code which we don't
	 * want to do in the case of a re-trigger. Backends typically
	 * only do EOI for LSIs anyway.
	 */
	xive_do_source_eoi(0, xd);

	return 1;
}

/*
 * Caller holds the irq descriptor lock, so this won't be called
 * concurrently with xive_get_irqchip_state on the same interrupt.
 */
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u8 pq;

	/*
	 * We only support this on interrupts that do not require
	 * firmware calls for masking and unmasking
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
		return -EIO;

	/*
	 * This is called by KVM with state non-NULL for enabling
	 * pass-through or NULL for disabling it
	 */
	if (state) {
		irqd_set_forwarded_to_vcpu(d);

		/* Set it to PQ=10 state to prevent further sends */
		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		if (!xd->stale_p) {
			xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
			xd->stale_p = !xd->saved_p;
		}

		/* No target ? nothing to do */
		if (xd->target == XIVE_INVALID_TARGET) {
			/*
			 * An untargeted interrupt should have been
			 * also masked at the source
			 */
			WARN_ON(xd->saved_p);

			return 0;
		}

		/*
		 * If P was set, adjust state to PQ=11 to indicate
		 * that a resend is needed for the interrupt to reach
		 * the guest. Also remember the value of P.
		 *
		 * This also tells us that it's in flight to a host queue
		 * or has already been fetched but hasn't been EOIed yet
		 * by the host. Thus it's potentially using up a host
		 * queue slot. This is important to know because as long
		 * as this is the case, we must not hard-unmask it when
		 * "returning" that interrupt to the host.
		 *
		 * This saved_p is cleared by the host EOI, when we know
		 * for sure the queue slot is no longer in use.
		 */
		if (xd->saved_p) {
			xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

			/*
			 * Sync the XIVE source HW to ensure the interrupt
			 * has gone through the EAS before we change its
			 * target to the guest. That should guarantee us
			 * that we *will* eventually get an EOI for it on
			 * the host. Otherwise there would be a small window
			 * for P to be seen here but the interrupt going
			 * to the guest queue.
			 */
			if (xive_ops->sync_source)
				xive_ops->sync_source(hw_irq);
		}
	} else {
		irqd_clr_forwarded_to_vcpu(d);

		/* No host target ? hard mask and return */
		if (xd->target == XIVE_INVALID_TARGET) {
			xive_do_source_set_mask(xd, true);
			return 0;
		}

		/*
		 * Sync the XIVE source HW to ensure the interrupt
		 * has gone through the EAS before we change its
		 * target to the host.
		 */
		if (xive_ops->sync_source)
			xive_ops->sync_source(hw_irq);

		/*
		 * By convention we are called with the interrupt in
		 * a PQ=10 or PQ=11 state, ie, it won't fire and will
		 * have latched in Q whether there's a pending HW
		 * interrupt or not.
		 *
		 * First reconfigure the target.
		 */
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(xd->target),
					     xive_irq_priority, d->irq);
		if (rc)
			return rc;

		/*
		 * Then if saved_p is not set, effectively re-enable the
		 * interrupt with an EOI. If it is set, we know there is
		 * still a message in a host queue somewhere that will be
		 * EOId eventually.
		 *
		 * Note: We don't check irqd_irq_disabled(). Effectively,
		 * we *will* let the irq get through even if masked if the
		 * HW is still firing it in order to deal with the whole
		 * saved_p business properly. If the interrupt triggers
		 * while masked, the generic code will re-mask it anyway.
		 */
		if (!xd->saved_p)
			xive_do_source_eoi(hw_irq, xd);

	}
	return 0;
}

/* Called with irq descriptor lock held.
 */
static int xive_get_irqchip_state(struct irq_data *data,
				  enum irqchip_irq_state which, bool *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
	u8 pq;

	switch (which) {
	case IRQCHIP_STATE_ACTIVE:
		pq = xive_esb_read(xd, XIVE_ESB_GET);

		/*
		 * The esb value being all 1's means we couldn't get
		 * the PQ state of the interrupt through mmio. It may
		 * happen, for example when querying a PHB interrupt
		 * while the PHB is in an error state. We consider the
		 * interrupt to be inactive in that case.
		 */
		*state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
			(xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) &&
			 !irqd_irq_disabled(data)));
		return 0;
	default:
		return -EINVAL;
	}
}

/* irq_chip for all normal (non-IPI) XIVE interrupts */
static struct irq_chip xive_irq_chip = {
	.name = "XIVE-IRQ",
	.irq_startup = xive_irq_startup,
	.irq_shutdown = xive_irq_shutdown,
	.irq_eoi = xive_irq_eoi,
	.irq_mask = xive_irq_mask,
	.irq_unmask = xive_irq_unmask,
	.irq_set_affinity = xive_irq_set_affinity,
	.irq_set_type = xive_irq_set_type,
	.irq_retrigger = xive_irq_retrigger,
	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
	.irq_get_irqchip_state = xive_get_irqchip_state,
};

/* Returns true if @chip is the XIVE irq_chip (used e.g. by KVM) */
bool is_xive_irq(struct irq_chip *chip)
{
	return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);

/*
 * Tear down the ESB MMIO mappings of an interrupt. The EOI and trigger
 * pages may share a mapping, in which case it is only unmapped once.
 */
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
	if (xd->eoi_mmio) {
		unmap_kernel_range((unsigned long)xd->eoi_mmio,
				   1u << xd->esb_shift);
		iounmap(xd->eoi_mmio);
		if (xd->eoi_mmio == xd->trig_mmio)
			xd->trig_mmio = NULL;
		xd->eoi_mmio = NULL;
	}
	if (xd->trig_mmio) {
		unmap_kernel_range((unsigned long)xd->trig_mmio,
				   1u << xd->esb_shift);
		iounmap(xd->trig_mmio);
		xd->trig_mmio = NULL;
	}
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);

/*
 * Allocate and populate the per-interrupt XIVE data for @virq / @hw
 * and attach it as the irq handler data. Returns 0 or a negative errno.
 */
static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
	struct xive_irq_data *xd;
	int rc;

	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
	if (!xd)
		return -ENOMEM;
	rc = xive_ops->populate_irq_data(hw, xd);
	if (rc) {
		kfree(xd);
		return rc;
	}
	xd->target = XIVE_INVALID_TARGET;
	irq_set_handler_data(virq, xd);

	/*
	 * Turn OFF by default the interrupt being mapped. A side
	 * effect of this check is the mapping the ESB page of the
	 * interrupt in the Linux address space. This prevents page
	 * fault issues in the crash handler which masks all
	 * interrupts.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_01);

	return 0;
}

/* Counterpart of xive_irq_alloc_data(): detach and free the irq data */
static void xive_irq_free_data(unsigned int virq)
{
	struct xive_irq_data *xd = irq_get_handler_data(virq);

	if (!xd)
		return;
	irq_set_handler_data(virq, NULL);
	xive_cleanup_irq_data(xd);
	kfree(xd);
}

#ifdef CONFIG_SMP

/* Fire an IPI at @cpu by writing its per-cpu trigger MMIO page */
static void xive_cause_ipi(int cpu)
{
	struct xive_cpu *xc;
	struct xive_irq_data *xd;

	xc = per_cpu(xive_cpu, cpu);

	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
		    smp_processor_id(), cpu, xc->hw_ipi);

	xd = &xc->ipi_data;
	if (WARN_ON(!xd->trig_mmio))
		return;
	out_be64(xd->trig_mmio, 0);
}

/* Handler for the single muxed IPI virq: demultiplex to the IPI actions */
static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
	return smp_ipi_demux();
}

/* EOI the local CPU's IPI at the source and in the queue */
static void xive_ipi_eoi(struct irq_data *d)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Handle possible race with unplug and drop stale IPIs */
	if (!xc)
		return;

	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);

	xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
	xive_do_queue_eoi(xc);
}

static void xive_ipi_do_nothing(struct irq_data *d)
{
	/*
	 * Nothing to do, we never mask/unmask IPIs, but the callback
	 * has to exist for the struct irq_chip.
	 */
}

/* irq_chip used for the muxed per-cpu IPI; mask/unmask are no-ops */
static struct irq_chip xive_ipi_chip = {
	.name = "XIVE-IPI",
	.irq_eoi = xive_ipi_eoi,
	.irq_mask = xive_ipi_do_nothing,
	.irq_unmask = xive_ipi_do_nothing,
};

/* Map and request the single muxed IPI virq (HW number 0) */
static void __init xive_request_ipi(void)
{
	unsigned int virq;

	/*
	 * Initialization failed, move on, we might manage to
	 * reach the point where we display our errors before
	 * the system falls apart
	 */
	if (!xive_irq_domain)
		return;

	/* Initialize it */
	virq = irq_create_mapping(xive_irq_domain, 0);
	xive_ipi_irq = virq;

	WARN_ON(request_irq(virq, xive_muxed_ipi_action,
			    IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}

/*
 * Allocate, configure and unmask the per-cpu HW IPI for @cpu.
 * Idempotent: returns 0 immediately if the IPI is already set up.
 */
static int xive_setup_cpu_ipi(unsigned int cpu)
{
	struct xive_cpu *xc;
	int rc;

	pr_debug("Setting up IPI for CPU %d\n", cpu);

	xc = per_cpu(xive_cpu, cpu);

	/* Check if we are already setup */
	if (xc->hw_ipi != XIVE_BAD_IRQ)
		return 0;

	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))
		return -EIO;

	/*
	 * Populate the IRQ data in the xive_cpu structure and
	 * configure the HW / enable the IPIs.
	 */
	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
	if (rc) {
		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
		return -EIO;
	}
	rc = xive_ops->configure_irq(xc->hw_ipi,
				     get_hard_smp_processor_id(cpu),
				     xive_irq_priority, xive_ipi_irq);
	if (rc) {
		pr_err("Failed to map IPI CPU %d\n", cpu);
		return -EIO;
	}
	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
	    xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

	/* Unmask it */
	xive_do_source_set_mask(&xc->ipi_data, false);

	return 0;
}

static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	/* Disable the IPI and free the IRQ data */

	/* Already cleaned up ? */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	/* Mask the IPI */
	xive_do_source_set_mask(&xc->ipi_data, true);

	/*
	 * Note: We don't call xive_cleanup_irq_data() to free
	 * the mappings as this is called from an IPI on kexec
	 * which is not a safe environment to call iounmap()
	 */

	/* Deconfigure/mask in the backend */
	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
				0xff, xive_ipi_irq);

	/* Free the IPIs in the backend */
	xive_ops->put_ipi(cpu, xc);
}

/* Hook XIVE IPIs into smp_ops and set up the boot CPU's IPI */
void __init xive_smp_probe(void)
{
	smp_ops->cause_ipi = xive_cause_ipi;

	/* Register the IPI */
	xive_request_ipi();

	/* Allocate and setup IPI for the boot CPU */
	xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */

/*
 * irq_domain ->map callback: install the IPI chip for HW number 0,
 * otherwise allocate the XIVE irq data and install the normal chip
 * with the fasteoi flow handler.
 */
static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	int rc;

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. Will fix that up below if needed.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

#ifdef CONFIG_SMP
	/* IPIs are special and come up with HW number 0 */
	if (hw == 0) {
		/*
		 * IPIs are marked per-cpu. We use separate HW interrupts under
		 * the hood but associated with the same "linux" interrupt
		 */
		irq_set_chip_and_handler(virq, &xive_ipi_chip,
					 handle_percpu_irq);
		return 0;
	}
#endif

	rc = xive_irq_alloc_data(virq, hw);
	if (rc)
		return rc;

	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

	return 0;
}

/* irq_domain ->unmap callback: free the irq data (IPIs, hw 0, excluded) */
static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	struct irq_data *data = irq_get_irq_data(virq);
	unsigned int hw_irq;

	/* XXX Assign BAD number */
	if (!data)
		return;
	hw_irq = (unsigned int)irqd_to_hwirq(data);
	if (hw_irq)
		xive_irq_free_data(virq);
}

/*
 * irq_domain ->xlate callback: first cell is the HW irq number, the
 * optional second cell's LSB selects level (1) vs edge (0) trigger.
 */
static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	*out_hwirq = intspec[0];

	/*
	 * If intsize is at least 2, we look for the type in the second cell,
	 * we assume the LSB indicates a level interrupt.
	 */
	if (intsize > 1) {
		if (intspec[1] & 1)
			*out_flags = IRQ_TYPE_LEVEL_LOW;
		else
			*out_flags = IRQ_TYPE_EDGE_RISING;
	} else
		*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}

/* irq_domain ->match callback: defer to the active backend */
static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
				 enum irq_domain_bus_token bus_token)
{
	return xive_ops->match(node);
}

static const struct irq_domain_ops xive_irq_domain_ops = {
	.match = xive_irq_domain_match,
	.map = xive_irq_domain_map,
	.unmap = xive_irq_domain_unmap,
	.xlate = xive_irq_domain_xlate,
};

static void __init xive_init_host(void)
{
	xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
					       &xive_irq_domain_ops, NULL);
	if (WARN_ON(xive_irq_domain == NULL))
return; 13268c2ecf20Sopenharmony_ci irq_set_default_host(xive_irq_domain); 13278c2ecf20Sopenharmony_ci} 13288c2ecf20Sopenharmony_ci 13298c2ecf20Sopenharmony_cistatic void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) 13308c2ecf20Sopenharmony_ci{ 13318c2ecf20Sopenharmony_ci if (xc->queue[xive_irq_priority].qpage) 13328c2ecf20Sopenharmony_ci xive_ops->cleanup_queue(cpu, xc, xive_irq_priority); 13338c2ecf20Sopenharmony_ci} 13348c2ecf20Sopenharmony_ci 13358c2ecf20Sopenharmony_cistatic int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) 13368c2ecf20Sopenharmony_ci{ 13378c2ecf20Sopenharmony_ci int rc = 0; 13388c2ecf20Sopenharmony_ci 13398c2ecf20Sopenharmony_ci /* We setup 1 queues for now with a 64k page */ 13408c2ecf20Sopenharmony_ci if (!xc->queue[xive_irq_priority].qpage) 13418c2ecf20Sopenharmony_ci rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority); 13428c2ecf20Sopenharmony_ci 13438c2ecf20Sopenharmony_ci return rc; 13448c2ecf20Sopenharmony_ci} 13458c2ecf20Sopenharmony_ci 13468c2ecf20Sopenharmony_cistatic int xive_prepare_cpu(unsigned int cpu) 13478c2ecf20Sopenharmony_ci{ 13488c2ecf20Sopenharmony_ci struct xive_cpu *xc; 13498c2ecf20Sopenharmony_ci 13508c2ecf20Sopenharmony_ci xc = per_cpu(xive_cpu, cpu); 13518c2ecf20Sopenharmony_ci if (!xc) { 13528c2ecf20Sopenharmony_ci struct device_node *np; 13538c2ecf20Sopenharmony_ci 13548c2ecf20Sopenharmony_ci xc = kzalloc_node(sizeof(struct xive_cpu), 13558c2ecf20Sopenharmony_ci GFP_KERNEL, cpu_to_node(cpu)); 13568c2ecf20Sopenharmony_ci if (!xc) 13578c2ecf20Sopenharmony_ci return -ENOMEM; 13588c2ecf20Sopenharmony_ci np = of_get_cpu_node(cpu, NULL); 13598c2ecf20Sopenharmony_ci if (np) 13608c2ecf20Sopenharmony_ci xc->chip_id = of_get_ibm_chip_id(np); 13618c2ecf20Sopenharmony_ci of_node_put(np); 13628c2ecf20Sopenharmony_ci xc->hw_ipi = XIVE_BAD_IRQ; 13638c2ecf20Sopenharmony_ci 13648c2ecf20Sopenharmony_ci per_cpu(xive_cpu, cpu) = xc; 13658c2ecf20Sopenharmony_ci } 13668c2ecf20Sopenharmony_ci 
13678c2ecf20Sopenharmony_ci /* Setup EQs if not already */ 13688c2ecf20Sopenharmony_ci return xive_setup_cpu_queues(cpu, xc); 13698c2ecf20Sopenharmony_ci} 13708c2ecf20Sopenharmony_ci 13718c2ecf20Sopenharmony_cistatic void xive_setup_cpu(void) 13728c2ecf20Sopenharmony_ci{ 13738c2ecf20Sopenharmony_ci struct xive_cpu *xc = __this_cpu_read(xive_cpu); 13748c2ecf20Sopenharmony_ci 13758c2ecf20Sopenharmony_ci /* The backend might have additional things to do */ 13768c2ecf20Sopenharmony_ci if (xive_ops->setup_cpu) 13778c2ecf20Sopenharmony_ci xive_ops->setup_cpu(smp_processor_id(), xc); 13788c2ecf20Sopenharmony_ci 13798c2ecf20Sopenharmony_ci /* Set CPPR to 0xff to enable flow of interrupts */ 13808c2ecf20Sopenharmony_ci xc->cppr = 0xff; 13818c2ecf20Sopenharmony_ci out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); 13828c2ecf20Sopenharmony_ci} 13838c2ecf20Sopenharmony_ci 13848c2ecf20Sopenharmony_ci#ifdef CONFIG_SMP 13858c2ecf20Sopenharmony_civoid xive_smp_setup_cpu(void) 13868c2ecf20Sopenharmony_ci{ 13878c2ecf20Sopenharmony_ci pr_devel("SMP setup CPU %d\n", smp_processor_id()); 13888c2ecf20Sopenharmony_ci 13898c2ecf20Sopenharmony_ci /* This will have already been done on the boot CPU */ 13908c2ecf20Sopenharmony_ci if (smp_processor_id() != boot_cpuid) 13918c2ecf20Sopenharmony_ci xive_setup_cpu(); 13928c2ecf20Sopenharmony_ci 13938c2ecf20Sopenharmony_ci} 13948c2ecf20Sopenharmony_ci 13958c2ecf20Sopenharmony_ciint xive_smp_prepare_cpu(unsigned int cpu) 13968c2ecf20Sopenharmony_ci{ 13978c2ecf20Sopenharmony_ci int rc; 13988c2ecf20Sopenharmony_ci 13998c2ecf20Sopenharmony_ci /* Allocate per-CPU data and queues */ 14008c2ecf20Sopenharmony_ci rc = xive_prepare_cpu(cpu); 14018c2ecf20Sopenharmony_ci if (rc) 14028c2ecf20Sopenharmony_ci return rc; 14038c2ecf20Sopenharmony_ci 14048c2ecf20Sopenharmony_ci /* Allocate and setup IPI for the new CPU */ 14058c2ecf20Sopenharmony_ci return xive_setup_cpu_ipi(cpu); 14068c2ecf20Sopenharmony_ci} 14078c2ecf20Sopenharmony_ci 
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Drain interrupts still sitting in a (going-offline) CPU's event queue
 * and re-trigger each one so it is delivered to its new target CPU.
 * IPIs and non-XIVE interrupts found in the queue are simply dropped.
 * Caller must have local interrupts disabled.
 */
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
	u32 irq;

	/* We assume local irqs are disabled */
	WARN_ON(!irqs_disabled());

	/* Check what's already in the CPU queue */
	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
		/*
		 * We need to re-route that interrupt to its new destination.
		 * First get and lock the descriptor
		 */
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct xive_irq_data *xd;
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

		/*
		 * Ignore anything that isn't a XIVE irq and ignore
		 * IPIs, so they can just be dropped.
		 */
		if (d->domain != xive_irq_domain || hw_irq == 0)
			continue;

		/*
		 * The IRQ should have already been re-routed, it's just a
		 * stale in the old queue, so re-trigger it in order to make
		 * it reach its new destination.
		 */
#ifdef DEBUG_FLUSH
		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
			cpu, irq);
#endif
		raw_spin_lock(&desc->lock);
		xd = irq_desc_get_handler_data(desc);

		/*
		 * Clear saved_p to indicate that it's no longer pending
		 */
		xd->saved_p = false;

		/*
		 * For LSIs, we EOI, this will cause a resend if it's
		 * still asserted. Otherwise do an MSI retrigger.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_do_source_eoi(irqd_to_hwirq(d), xd);
		else
			xive_irq_retrigger(d);

		raw_spin_unlock(&desc->lock);
	}
}

/*
 * Called on the CPU being unplugged: migrate interrupts away, then
 * flush whatever was already queued with the CPPR gate closed, and
 * finally reopen CPPR (the CPU keeps running until fully offline).
 */
void xive_smp_disable_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Migrate interrupts away from the CPU */
	irq_migrate_all_off_this_cpu();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Flush everything still in the queue */
	xive_flush_cpu_queue(cpu, xc);

	/* Re-enable CPPR  */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Called if an interrupt occurs while the CPU is hot unplugged */
	xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */

/*
 * Full per-CPU teardown (e.g. for kexec): close the CPPR gate, let the
 * backend clean up, release the IPI and free the event queues.
 */
void xive_teardown_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
	/* Get rid of IPI */
	xive_cleanup_cpu_ipi(cpu, xc);
#endif

	/* Disable and free the queues */
	xive_cleanup_cpu_queues(cpu, xc);
}

/* Delegate controller shutdown entirely to the active backend */
void xive_shutdown(void)
{
	xive_ops->shutdown();
}

/*
 * Main entry point called by a backend (native or hv) once it has
 * probed the hardware: record the backend ops, TIMA mapping and the
 * single priority to use, create the irq domain and bring up the boot
 * CPU.  Always returns true (the boot-CPU prepare result is not
 * checked here).
 */
bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
			   u8 max_prio)
{
	xive_tima = area;
	xive_tima_offset = offset;
	xive_ops = ops;
	xive_irq_priority = max_prio;

	ppc_md.get_irq = xive_get_irq;
	__xive_enabled = true;

	pr_devel("Initializing host..\n");
	xive_init_host();

	pr_devel("Initializing boot CPU..\n");

	/* Allocate per-CPU data and queues */
	xive_prepare_cpu(smp_processor_id());

	/* Get ready for interrupts */
	xive_setup_cpu();

	pr_info("Interrupt handling initialized with %s backend\n",
		xive_ops->name);
	pr_info("Using priority %d for all interrupts\n", max_prio);

	return true;
}

/*
 * Allocate a zeroed event-queue buffer of (1 << queue_shift) bytes on
 * the NUMA node of @cpu.  Returns the page-backed buffer or
 * ERR_PTR(-ENOMEM).  Caller owns the pages.
 */
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
	unsigned int alloc_order;
	struct page *pages;
	__be32 *qpage;

	alloc_order = xive_alloc_order(queue_shift);
	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	qpage = (__be32 *)page_address(pages);
	memset(qpage, 0, 1 << queue_shift);

	return qpage;
}

/* "xive=off" command-line handler: fall back to the legacy interrupt mode */
static int __init xive_off(char *arg)
{
	xive_cmdline_disabled = true;
	return 0;
}
__setup("xive=off", xive_off);

/*
 * Dump one CPU's XIVE state into the debugfs seq_file: pending
 * priority, CPPR, IPI P/Q bits (SMP) and the head of its event queue.
 */
static void xive_debug_show_cpu(struct seq_file *m, int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	seq_printf(m, "CPU %d:", cpu);
	if (xc) {
		seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

#ifdef CONFIG_SMP
		{
			u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);

			seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
				   val & XIVE_ESB_VAL_P ? 'P' : '-',
				   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
		}
#endif
		{
			struct xive_q *q = &xc->queue[xive_irq_priority];
			u32 i0, i1, idx;

			if (q->qpage) {
				/* Peek at the next two queue entries */
				idx = q->idx;
				i0 = be32_to_cpup(q->qpage + idx);
				idx = (idx + 1) & q->msk;
				i1 = be32_to_cpup(q->qpage + idx);
				seq_printf(m, "EQ idx=%d T=%d %08x %08x ...",
					   q->idx, q->toggle, i0, i1);
			}
		}
	}
	seq_puts(m, "\n");
}

/*
 * Dump one XIVE interrupt's configuration (target/prio/lirq from the
 * backend) and its flags plus current ESB P/Q state.  Silently skips
 * interrupts not owned by a XIVE chip.
 */
static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;
	struct xive_irq_data *xd;
	u64 val;

	if (!is_xive_irq(chip))
		return;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return;
	}

	seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		   hw_irq, target, prio, lirq);

	xd = irq_data_get_irq_handler_data(d);
	val = xive_esb_read(xd, XIVE_ESB_GET);
	seq_printf(m, "flags=%c%c%c PQ=%c%c",
		   xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
		   xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
		   xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
		   val & XIVE_ESB_VAL_P ? 'P' : '-',
		   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	seq_puts(m, "\n");
}

/*
 * debugfs "xive" file body: backend-specific state first, then every
 * CPU, then every non-IPI interrupt descriptor.
 *
 * NOTE(review): the descriptor walk takes no sparse-irq locking here;
 * presumably this read cannot race with descriptor tear-down — verify.
 */
static int xive_core_debug_show(struct seq_file *m, void *private)
{
	unsigned int i;
	struct irq_desc *desc;
	int cpu;

	if (xive_ops->debug_show)
		xive_ops->debug_show(m, private);

	for_each_possible_cpu(cpu)
		xive_debug_show_cpu(m, cpu);

	for_each_irq_desc(i, desc) {
		struct irq_data *d = irq_desc_get_irq_data(desc);
		unsigned int hw_irq;

		if (!d)
			continue;

		hw_irq = (unsigned int)irqd_to_hwirq(d);

		/* IPIs are special (HW number 0) */
		if (hw_irq)
			xive_debug_show_irq(m, hw_irq, d);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(xive_core_debug);

/* Register the read-only "xive" debugfs file when XIVE is in use */
int xive_core_debug_init(void)
{
	if (xive_enabled())
		debugfs_create_file("xive", 0400, powerpc_debugfs_root,
				    NULL, &xive_core_debug_fops);
	return 0;
}