162306a36Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0-or-later 262306a36Sopenharmony_ci/* 362306a36Sopenharmony_ci * Copyright 2016,2017 IBM Corporation. 462306a36Sopenharmony_ci */ 562306a36Sopenharmony_ci 662306a36Sopenharmony_ci#define pr_fmt(fmt) "xive: " fmt 762306a36Sopenharmony_ci 862306a36Sopenharmony_ci#include <linux/types.h> 962306a36Sopenharmony_ci#include <linux/threads.h> 1062306a36Sopenharmony_ci#include <linux/kernel.h> 1162306a36Sopenharmony_ci#include <linux/irq.h> 1262306a36Sopenharmony_ci#include <linux/irqdomain.h> 1362306a36Sopenharmony_ci#include <linux/debugfs.h> 1462306a36Sopenharmony_ci#include <linux/smp.h> 1562306a36Sopenharmony_ci#include <linux/interrupt.h> 1662306a36Sopenharmony_ci#include <linux/seq_file.h> 1762306a36Sopenharmony_ci#include <linux/init.h> 1862306a36Sopenharmony_ci#include <linux/cpu.h> 1962306a36Sopenharmony_ci#include <linux/of.h> 2062306a36Sopenharmony_ci#include <linux/slab.h> 2162306a36Sopenharmony_ci#include <linux/spinlock.h> 2262306a36Sopenharmony_ci#include <linux/msi.h> 2362306a36Sopenharmony_ci#include <linux/vmalloc.h> 2462306a36Sopenharmony_ci 2562306a36Sopenharmony_ci#include <asm/io.h> 2662306a36Sopenharmony_ci#include <asm/smp.h> 2762306a36Sopenharmony_ci#include <asm/machdep.h> 2862306a36Sopenharmony_ci#include <asm/irq.h> 2962306a36Sopenharmony_ci#include <asm/errno.h> 3062306a36Sopenharmony_ci#include <asm/xive.h> 3162306a36Sopenharmony_ci#include <asm/xive-regs.h> 3262306a36Sopenharmony_ci#include <asm/xmon.h> 3362306a36Sopenharmony_ci 3462306a36Sopenharmony_ci#include "xive-internal.h" 3562306a36Sopenharmony_ci 3662306a36Sopenharmony_ci#undef DEBUG_FLUSH 3762306a36Sopenharmony_ci#undef DEBUG_ALL 3862306a36Sopenharmony_ci 3962306a36Sopenharmony_ci#ifdef DEBUG_ALL 4062306a36Sopenharmony_ci#define DBG_VERBOSE(fmt, ...) pr_devel("cpu %d - " fmt, \ 4162306a36Sopenharmony_ci smp_processor_id(), ## __VA_ARGS__) 4262306a36Sopenharmony_ci#else 4362306a36Sopenharmony_ci#define DBG_VERBOSE(fmt...) do { } while(0) 4462306a36Sopenharmony_ci#endif 4562306a36Sopenharmony_ci 4662306a36Sopenharmony_cibool __xive_enabled; 4762306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(__xive_enabled); 4862306a36Sopenharmony_cibool xive_cmdline_disabled; 4962306a36Sopenharmony_ci 5062306a36Sopenharmony_ci/* We use only one priority for now */ 5162306a36Sopenharmony_cistatic u8 xive_irq_priority; 5262306a36Sopenharmony_ci 5362306a36Sopenharmony_ci/* TIMA exported to KVM */ 5462306a36Sopenharmony_civoid __iomem *xive_tima; 5562306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(xive_tima); 5662306a36Sopenharmony_ciu32 xive_tima_offset; 5762306a36Sopenharmony_ci 5862306a36Sopenharmony_ci/* Backend ops */ 5962306a36Sopenharmony_cistatic const struct xive_ops *xive_ops; 6062306a36Sopenharmony_ci 6162306a36Sopenharmony_ci/* Our global interrupt domain */ 6262306a36Sopenharmony_cistatic struct irq_domain *xive_irq_domain; 6362306a36Sopenharmony_ci 6462306a36Sopenharmony_ci#ifdef CONFIG_SMP 6562306a36Sopenharmony_ci/* The IPIs use the same logical irq number when on the same chip */ 6662306a36Sopenharmony_cistatic struct xive_ipi_desc { 6762306a36Sopenharmony_ci unsigned int irq; 6862306a36Sopenharmony_ci char name[16]; 6962306a36Sopenharmony_ci atomic_t started; 7062306a36Sopenharmony_ci} *xive_ipis; 7162306a36Sopenharmony_ci 7262306a36Sopenharmony_ci/* 7362306a36Sopenharmony_ci * Use early_cpu_to_node() for hot-plugged CPUs 7462306a36Sopenharmony_ci */ 7562306a36Sopenharmony_cistatic unsigned int xive_ipi_cpu_to_irq(unsigned int cpu) 7662306a36Sopenharmony_ci{ 7762306a36Sopenharmony_ci return xive_ipis[early_cpu_to_node(cpu)].irq; 7862306a36Sopenharmony_ci} 7962306a36Sopenharmony_ci#endif 8062306a36Sopenharmony_ci 8162306a36Sopenharmony_ci/* Xive state for each CPU */ 8262306a36Sopenharmony_cistatic DEFINE_PER_CPU(struct xive_cpu *, xive_cpu); 8362306a36Sopenharmony_ci 8462306a36Sopenharmony_ci/* An invalid CPU target */ 8562306a36Sopenharmony_ci#define XIVE_INVALID_TARGET (-1) 8662306a36Sopenharmony_ci 8762306a36Sopenharmony_ci/* 8862306a36Sopenharmony_ci * Global toggle to switch on/off StoreEOI 8962306a36Sopenharmony_ci */ 9062306a36Sopenharmony_cistatic bool xive_store_eoi = true; 9162306a36Sopenharmony_ci 9262306a36Sopenharmony_cistatic bool xive_is_store_eoi(struct xive_irq_data *xd) 9362306a36Sopenharmony_ci{ 9462306a36Sopenharmony_ci return xd->flags & XIVE_IRQ_FLAG_STORE_EOI && xive_store_eoi; 9562306a36Sopenharmony_ci} 9662306a36Sopenharmony_ci 9762306a36Sopenharmony_ci/* 9862306a36Sopenharmony_ci * Read the next entry in a queue, return its content if it's valid 9962306a36Sopenharmony_ci * or 0 if there is no new entry. 10062306a36Sopenharmony_ci * 10162306a36Sopenharmony_ci * The queue pointer is moved forward unless "just_peek" is set 10262306a36Sopenharmony_ci */ 10362306a36Sopenharmony_cistatic u32 xive_read_eq(struct xive_q *q, bool just_peek) 10462306a36Sopenharmony_ci{ 10562306a36Sopenharmony_ci u32 cur; 10662306a36Sopenharmony_ci 10762306a36Sopenharmony_ci if (!q->qpage) 10862306a36Sopenharmony_ci return 0; 10962306a36Sopenharmony_ci cur = be32_to_cpup(q->qpage + q->idx); 11062306a36Sopenharmony_ci 11162306a36Sopenharmony_ci /* Check valid bit (31) vs current toggle polarity */ 11262306a36Sopenharmony_ci if ((cur >> 31) == q->toggle) 11362306a36Sopenharmony_ci return 0; 11462306a36Sopenharmony_ci 11562306a36Sopenharmony_ci /* If consuming from the queue ... */ 11662306a36Sopenharmony_ci if (!just_peek) { 11762306a36Sopenharmony_ci /* Next entry */ 11862306a36Sopenharmony_ci q->idx = (q->idx + 1) & q->msk; 11962306a36Sopenharmony_ci 12062306a36Sopenharmony_ci /* Wrap around: flip valid toggle */ 12162306a36Sopenharmony_ci if (q->idx == 0) 12262306a36Sopenharmony_ci q->toggle ^= 1; 12362306a36Sopenharmony_ci } 12462306a36Sopenharmony_ci /* Mask out the valid bit (31) */ 12562306a36Sopenharmony_ci return cur & 0x7fffffff; 12662306a36Sopenharmony_ci} 12762306a36Sopenharmony_ci 12862306a36Sopenharmony_ci/* 12962306a36Sopenharmony_ci * Scans all the queue that may have interrupts in them 13062306a36Sopenharmony_ci * (based on "pending_prio") in priority order until an 13162306a36Sopenharmony_ci * interrupt is found or all the queues are empty. 13262306a36Sopenharmony_ci * 13362306a36Sopenharmony_ci * Then updates the CPPR (Current Processor Priority 13462306a36Sopenharmony_ci * Register) based on the most favored interrupt found 13562306a36Sopenharmony_ci * (0xff if none) and return what was found (0 if none). 13662306a36Sopenharmony_ci * 13762306a36Sopenharmony_ci * If just_peek is set, return the most favored pending 13862306a36Sopenharmony_ci * interrupt if any but don't update the queue pointers. 13962306a36Sopenharmony_ci * 14062306a36Sopenharmony_ci * Note: This function can operate generically on any number 14162306a36Sopenharmony_ci * of queues (up to 8). The current implementation of the XIVE 14262306a36Sopenharmony_ci * driver only uses a single queue however. 14362306a36Sopenharmony_ci * 14462306a36Sopenharmony_ci * Note2: This will also "flush" "the pending_count" of a queue 14562306a36Sopenharmony_ci * into the "count" when that queue is observed to be empty. 14662306a36Sopenharmony_ci * This is used to keep track of the amount of interrupts 14762306a36Sopenharmony_ci * targetting a queue. When an interrupt is moved away from 14862306a36Sopenharmony_ci * a queue, we only decrement that queue count once the queue 14962306a36Sopenharmony_ci * has been observed empty to avoid races. 15062306a36Sopenharmony_ci */ 15162306a36Sopenharmony_cistatic u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek) 15262306a36Sopenharmony_ci{ 15362306a36Sopenharmony_ci u32 irq = 0; 15462306a36Sopenharmony_ci u8 prio = 0; 15562306a36Sopenharmony_ci 15662306a36Sopenharmony_ci /* Find highest pending priority */ 15762306a36Sopenharmony_ci while (xc->pending_prio != 0) { 15862306a36Sopenharmony_ci struct xive_q *q; 15962306a36Sopenharmony_ci 16062306a36Sopenharmony_ci prio = ffs(xc->pending_prio) - 1; 16162306a36Sopenharmony_ci DBG_VERBOSE("scan_irq: trying prio %d\n", prio); 16262306a36Sopenharmony_ci 16362306a36Sopenharmony_ci /* Try to fetch */ 16462306a36Sopenharmony_ci irq = xive_read_eq(&xc->queue[prio], just_peek); 16562306a36Sopenharmony_ci 16662306a36Sopenharmony_ci /* Found something ? That's it */ 16762306a36Sopenharmony_ci if (irq) { 16862306a36Sopenharmony_ci if (just_peek || irq_to_desc(irq)) 16962306a36Sopenharmony_ci break; 17062306a36Sopenharmony_ci /* 17162306a36Sopenharmony_ci * We should never get here; if we do then we must 17262306a36Sopenharmony_ci * have failed to synchronize the interrupt properly 17362306a36Sopenharmony_ci * when shutting it down. 17462306a36Sopenharmony_ci */ 17562306a36Sopenharmony_ci pr_crit("xive: got interrupt %d without descriptor, dropping\n", 17662306a36Sopenharmony_ci irq); 17762306a36Sopenharmony_ci WARN_ON(1); 17862306a36Sopenharmony_ci continue; 17962306a36Sopenharmony_ci } 18062306a36Sopenharmony_ci 18162306a36Sopenharmony_ci /* Clear pending bits */ 18262306a36Sopenharmony_ci xc->pending_prio &= ~(1 << prio); 18362306a36Sopenharmony_ci 18462306a36Sopenharmony_ci /* 18562306a36Sopenharmony_ci * Check if the queue count needs adjusting due to 18662306a36Sopenharmony_ci * interrupts being moved away. See description of 18762306a36Sopenharmony_ci * xive_dec_target_count() 18862306a36Sopenharmony_ci */ 18962306a36Sopenharmony_ci q = &xc->queue[prio]; 19062306a36Sopenharmony_ci if (atomic_read(&q->pending_count)) { 19162306a36Sopenharmony_ci int p = atomic_xchg(&q->pending_count, 0); 19262306a36Sopenharmony_ci if (p) { 19362306a36Sopenharmony_ci WARN_ON(p > atomic_read(&q->count)); 19462306a36Sopenharmony_ci atomic_sub(p, &q->count); 19562306a36Sopenharmony_ci } 19662306a36Sopenharmony_ci } 19762306a36Sopenharmony_ci } 19862306a36Sopenharmony_ci 19962306a36Sopenharmony_ci /* If nothing was found, set CPPR to 0xff */ 20062306a36Sopenharmony_ci if (irq == 0) 20162306a36Sopenharmony_ci prio = 0xff; 20262306a36Sopenharmony_ci 20362306a36Sopenharmony_ci /* Update HW CPPR to match if necessary */ 20462306a36Sopenharmony_ci if (prio != xc->cppr) { 20562306a36Sopenharmony_ci DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio); 20662306a36Sopenharmony_ci xc->cppr = prio; 20762306a36Sopenharmony_ci out_8(xive_tima + xive_tima_offset + TM_CPPR, prio); 20862306a36Sopenharmony_ci } 20962306a36Sopenharmony_ci 21062306a36Sopenharmony_ci return irq; 21162306a36Sopenharmony_ci} 21262306a36Sopenharmony_ci 21362306a36Sopenharmony_ci/* 21462306a36Sopenharmony_ci * This is used to perform the magic loads from an ESB 21562306a36Sopenharmony_ci * described in xive-regs.h 21662306a36Sopenharmony_ci */ 21762306a36Sopenharmony_cistatic notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset) 21862306a36Sopenharmony_ci{ 21962306a36Sopenharmony_ci u64 val; 22062306a36Sopenharmony_ci 22162306a36Sopenharmony_ci if (offset == XIVE_ESB_SET_PQ_10 && xive_is_store_eoi(xd)) 22262306a36Sopenharmony_ci offset |= XIVE_ESB_LD_ST_MO; 22362306a36Sopenharmony_ci 22462306a36Sopenharmony_ci if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) 22562306a36Sopenharmony_ci val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0); 22662306a36Sopenharmony_ci else 22762306a36Sopenharmony_ci val = in_be64(xd->eoi_mmio + offset); 22862306a36Sopenharmony_ci 22962306a36Sopenharmony_ci return (u8)val; 23062306a36Sopenharmony_ci} 23162306a36Sopenharmony_ci 23262306a36Sopenharmony_cistatic void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data) 23362306a36Sopenharmony_ci{ 23462306a36Sopenharmony_ci if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) 23562306a36Sopenharmony_ci xive_ops->esb_rw(xd->hw_irq, offset, data, 1); 23662306a36Sopenharmony_ci else 23762306a36Sopenharmony_ci out_be64(xd->eoi_mmio + offset, data); 23862306a36Sopenharmony_ci} 23962306a36Sopenharmony_ci 24062306a36Sopenharmony_ci#if defined(CONFIG_XMON) || defined(CONFIG_DEBUG_FS) 24162306a36Sopenharmony_cistatic void xive_irq_data_dump(struct xive_irq_data *xd, char *buffer, size_t size) 24262306a36Sopenharmony_ci{ 24362306a36Sopenharmony_ci u64 val = xive_esb_read(xd, XIVE_ESB_GET); 24462306a36Sopenharmony_ci 24562306a36Sopenharmony_ci snprintf(buffer, size, "flags=%c%c%c PQ=%c%c 0x%016llx 0x%016llx", 24662306a36Sopenharmony_ci xive_is_store_eoi(xd) ? 'S' : ' ', 24762306a36Sopenharmony_ci xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', 24862306a36Sopenharmony_ci xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', 24962306a36Sopenharmony_ci val & XIVE_ESB_VAL_P ? 'P' : '-', 25062306a36Sopenharmony_ci val & XIVE_ESB_VAL_Q ? 'Q' : '-', 25162306a36Sopenharmony_ci xd->trig_page, xd->eoi_page); 25262306a36Sopenharmony_ci} 25362306a36Sopenharmony_ci#endif 25462306a36Sopenharmony_ci 25562306a36Sopenharmony_ci#ifdef CONFIG_XMON 25662306a36Sopenharmony_cistatic notrace void xive_dump_eq(const char *name, struct xive_q *q) 25762306a36Sopenharmony_ci{ 25862306a36Sopenharmony_ci u32 i0, i1, idx; 25962306a36Sopenharmony_ci 26062306a36Sopenharmony_ci if (!q->qpage) 26162306a36Sopenharmony_ci return; 26262306a36Sopenharmony_ci idx = q->idx; 26362306a36Sopenharmony_ci i0 = be32_to_cpup(q->qpage + idx); 26462306a36Sopenharmony_ci idx = (idx + 1) & q->msk; 26562306a36Sopenharmony_ci i1 = be32_to_cpup(q->qpage + idx); 26662306a36Sopenharmony_ci xmon_printf("%s idx=%d T=%d %08x %08x ...", name, 26762306a36Sopenharmony_ci q->idx, q->toggle, i0, i1); 26862306a36Sopenharmony_ci} 26962306a36Sopenharmony_ci 27062306a36Sopenharmony_cinotrace void xmon_xive_do_dump(int cpu) 27162306a36Sopenharmony_ci{ 27262306a36Sopenharmony_ci struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 27362306a36Sopenharmony_ci 27462306a36Sopenharmony_ci xmon_printf("CPU %d:", cpu); 27562306a36Sopenharmony_ci if (xc) { 27662306a36Sopenharmony_ci xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); 27762306a36Sopenharmony_ci 27862306a36Sopenharmony_ci#ifdef CONFIG_SMP 27962306a36Sopenharmony_ci { 28062306a36Sopenharmony_ci char buffer[128]; 28162306a36Sopenharmony_ci 28262306a36Sopenharmony_ci xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer)); 28362306a36Sopenharmony_ci xmon_printf("IPI=0x%08x %s", xc->hw_ipi, buffer); 28462306a36Sopenharmony_ci } 28562306a36Sopenharmony_ci#endif 28662306a36Sopenharmony_ci xive_dump_eq("EQ", &xc->queue[xive_irq_priority]); 28762306a36Sopenharmony_ci } 28862306a36Sopenharmony_ci xmon_printf("\n"); 28962306a36Sopenharmony_ci} 29062306a36Sopenharmony_ci 29162306a36Sopenharmony_cistatic struct irq_data *xive_get_irq_data(u32 hw_irq) 29262306a36Sopenharmony_ci{ 29362306a36Sopenharmony_ci unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq); 29462306a36Sopenharmony_ci 29562306a36Sopenharmony_ci return irq ? irq_get_irq_data(irq) : NULL; 29662306a36Sopenharmony_ci} 29762306a36Sopenharmony_ci 29862306a36Sopenharmony_ciint xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d) 29962306a36Sopenharmony_ci{ 30062306a36Sopenharmony_ci int rc; 30162306a36Sopenharmony_ci u32 target; 30262306a36Sopenharmony_ci u8 prio; 30362306a36Sopenharmony_ci u32 lirq; 30462306a36Sopenharmony_ci 30562306a36Sopenharmony_ci rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); 30662306a36Sopenharmony_ci if (rc) { 30762306a36Sopenharmony_ci xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); 30862306a36Sopenharmony_ci return rc; 30962306a36Sopenharmony_ci } 31062306a36Sopenharmony_ci 31162306a36Sopenharmony_ci xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", 31262306a36Sopenharmony_ci hw_irq, target, prio, lirq); 31362306a36Sopenharmony_ci 31462306a36Sopenharmony_ci if (!d) 31562306a36Sopenharmony_ci d = xive_get_irq_data(hw_irq); 31662306a36Sopenharmony_ci 31762306a36Sopenharmony_ci if (d) { 31862306a36Sopenharmony_ci char buffer[128]; 31962306a36Sopenharmony_ci 32062306a36Sopenharmony_ci xive_irq_data_dump(irq_data_get_irq_handler_data(d), 32162306a36Sopenharmony_ci buffer, sizeof(buffer)); 32262306a36Sopenharmony_ci xmon_printf("%s", buffer); 32362306a36Sopenharmony_ci } 32462306a36Sopenharmony_ci 32562306a36Sopenharmony_ci xmon_printf("\n"); 32662306a36Sopenharmony_ci return 0; 32762306a36Sopenharmony_ci} 32862306a36Sopenharmony_ci 32962306a36Sopenharmony_civoid xmon_xive_get_irq_all(void) 33062306a36Sopenharmony_ci{ 33162306a36Sopenharmony_ci unsigned int i; 33262306a36Sopenharmony_ci struct irq_desc *desc; 33362306a36Sopenharmony_ci 33462306a36Sopenharmony_ci for_each_irq_desc(i, desc) { 33562306a36Sopenharmony_ci struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i); 33662306a36Sopenharmony_ci 33762306a36Sopenharmony_ci if (d) 33862306a36Sopenharmony_ci xmon_xive_get_irq_config(irqd_to_hwirq(d), d); 33962306a36Sopenharmony_ci } 34062306a36Sopenharmony_ci} 34162306a36Sopenharmony_ci 34262306a36Sopenharmony_ci#endif /* CONFIG_XMON */ 34362306a36Sopenharmony_ci 34462306a36Sopenharmony_cistatic unsigned int xive_get_irq(void) 34562306a36Sopenharmony_ci{ 34662306a36Sopenharmony_ci struct xive_cpu *xc = __this_cpu_read(xive_cpu); 34762306a36Sopenharmony_ci u32 irq; 34862306a36Sopenharmony_ci 34962306a36Sopenharmony_ci /* 35062306a36Sopenharmony_ci * This can be called either as a result of a HW interrupt or 35162306a36Sopenharmony_ci * as a "replay" because EOI decided there was still something 35262306a36Sopenharmony_ci * in one of the queues. 35362306a36Sopenharmony_ci * 35462306a36Sopenharmony_ci * First we perform an ACK cycle in order to update our mask 35562306a36Sopenharmony_ci * of pending priorities. This will also have the effect of 35662306a36Sopenharmony_ci * updating the CPPR to the most favored pending interrupts. 35762306a36Sopenharmony_ci * 35862306a36Sopenharmony_ci * In the future, if we have a way to differentiate a first 35962306a36Sopenharmony_ci * entry (on HW interrupt) from a replay triggered by EOI, 36062306a36Sopenharmony_ci * we could skip this on replays unless we soft-mask tells us 36162306a36Sopenharmony_ci * that a new HW interrupt occurred. 36262306a36Sopenharmony_ci */ 36362306a36Sopenharmony_ci xive_ops->update_pending(xc); 36462306a36Sopenharmony_ci 36562306a36Sopenharmony_ci DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio); 36662306a36Sopenharmony_ci 36762306a36Sopenharmony_ci /* Scan our queue(s) for interrupts */ 36862306a36Sopenharmony_ci irq = xive_scan_interrupts(xc, false); 36962306a36Sopenharmony_ci 37062306a36Sopenharmony_ci DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n", 37162306a36Sopenharmony_ci irq, xc->pending_prio); 37262306a36Sopenharmony_ci 37362306a36Sopenharmony_ci /* Return pending interrupt if any */ 37462306a36Sopenharmony_ci if (irq == XIVE_BAD_IRQ) 37562306a36Sopenharmony_ci return 0; 37662306a36Sopenharmony_ci return irq; 37762306a36Sopenharmony_ci} 37862306a36Sopenharmony_ci 37962306a36Sopenharmony_ci/* 38062306a36Sopenharmony_ci * After EOI'ing an interrupt, we need to re-check the queue 38162306a36Sopenharmony_ci * to see if another interrupt is pending since multiple 38262306a36Sopenharmony_ci * interrupts can coalesce into a single notification to the 38362306a36Sopenharmony_ci * CPU. 38462306a36Sopenharmony_ci * 38562306a36Sopenharmony_ci * If we find that there is indeed more in there, we call 38662306a36Sopenharmony_ci * force_external_irq_replay() to make Linux synthetize an 38762306a36Sopenharmony_ci * external interrupt on the next call to local_irq_restore(). 38862306a36Sopenharmony_ci */ 38962306a36Sopenharmony_cistatic void xive_do_queue_eoi(struct xive_cpu *xc) 39062306a36Sopenharmony_ci{ 39162306a36Sopenharmony_ci if (xive_scan_interrupts(xc, true) != 0) { 39262306a36Sopenharmony_ci DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio); 39362306a36Sopenharmony_ci force_external_irq_replay(); 39462306a36Sopenharmony_ci } 39562306a36Sopenharmony_ci} 39662306a36Sopenharmony_ci 39762306a36Sopenharmony_ci/* 39862306a36Sopenharmony_ci * EOI an interrupt at the source. There are several methods 39962306a36Sopenharmony_ci * to do this depending on the HW version and source type 40062306a36Sopenharmony_ci */ 40162306a36Sopenharmony_cistatic void xive_do_source_eoi(struct xive_irq_data *xd) 40262306a36Sopenharmony_ci{ 40362306a36Sopenharmony_ci u8 eoi_val; 40462306a36Sopenharmony_ci 40562306a36Sopenharmony_ci xd->stale_p = false; 40662306a36Sopenharmony_ci 40762306a36Sopenharmony_ci /* If the XIVE supports the new "store EOI facility, use it */ 40862306a36Sopenharmony_ci if (xive_is_store_eoi(xd)) { 40962306a36Sopenharmony_ci xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0); 41062306a36Sopenharmony_ci return; 41162306a36Sopenharmony_ci } 41262306a36Sopenharmony_ci 41362306a36Sopenharmony_ci /* 41462306a36Sopenharmony_ci * For LSIs, we use the "EOI cycle" special load rather than 41562306a36Sopenharmony_ci * PQ bits, as they are automatically re-triggered in HW when 41662306a36Sopenharmony_ci * still pending. 41762306a36Sopenharmony_ci */ 41862306a36Sopenharmony_ci if (xd->flags & XIVE_IRQ_FLAG_LSI) { 41962306a36Sopenharmony_ci xive_esb_read(xd, XIVE_ESB_LOAD_EOI); 42062306a36Sopenharmony_ci return; 42162306a36Sopenharmony_ci } 42262306a36Sopenharmony_ci 42362306a36Sopenharmony_ci /* 42462306a36Sopenharmony_ci * Otherwise, we use the special MMIO that does a clear of 42562306a36Sopenharmony_ci * both P and Q and returns the old Q. This allows us to then 42662306a36Sopenharmony_ci * do a re-trigger if Q was set rather than synthesizing an 42762306a36Sopenharmony_ci * interrupt in software 42862306a36Sopenharmony_ci */ 42962306a36Sopenharmony_ci eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00); 43062306a36Sopenharmony_ci DBG_VERBOSE("eoi_val=%x\n", eoi_val); 43162306a36Sopenharmony_ci 43262306a36Sopenharmony_ci /* Re-trigger if needed */ 43362306a36Sopenharmony_ci if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio) 43462306a36Sopenharmony_ci out_be64(xd->trig_mmio, 0); 43562306a36Sopenharmony_ci} 43662306a36Sopenharmony_ci 43762306a36Sopenharmony_ci/* irq_chip eoi callback, called with irq descriptor lock held */ 43862306a36Sopenharmony_cistatic void xive_irq_eoi(struct irq_data *d) 43962306a36Sopenharmony_ci{ 44062306a36Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 44162306a36Sopenharmony_ci struct xive_cpu *xc = __this_cpu_read(xive_cpu); 44262306a36Sopenharmony_ci 44362306a36Sopenharmony_ci DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n", 44462306a36Sopenharmony_ci d->irq, irqd_to_hwirq(d), xc->pending_prio); 44562306a36Sopenharmony_ci 44662306a36Sopenharmony_ci /* 44762306a36Sopenharmony_ci * EOI the source if it hasn't been disabled and hasn't 44862306a36Sopenharmony_ci * been passed-through to a KVM guest 44962306a36Sopenharmony_ci */ 45062306a36Sopenharmony_ci if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) && 45162306a36Sopenharmony_ci !(xd->flags & XIVE_IRQ_FLAG_NO_EOI)) 45262306a36Sopenharmony_ci xive_do_source_eoi(xd); 45362306a36Sopenharmony_ci else 45462306a36Sopenharmony_ci xd->stale_p = true; 45562306a36Sopenharmony_ci 45662306a36Sopenharmony_ci /* 45762306a36Sopenharmony_ci * Clear saved_p to indicate that it's no longer occupying 45862306a36Sopenharmony_ci * a queue slot on the target queue 45962306a36Sopenharmony_ci */ 46062306a36Sopenharmony_ci xd->saved_p = false; 46162306a36Sopenharmony_ci 46262306a36Sopenharmony_ci /* Check for more work in the queue */ 46362306a36Sopenharmony_ci xive_do_queue_eoi(xc); 46462306a36Sopenharmony_ci} 46562306a36Sopenharmony_ci 46662306a36Sopenharmony_ci/* 46762306a36Sopenharmony_ci * Helper used to mask and unmask an interrupt source. 46862306a36Sopenharmony_ci */ 46962306a36Sopenharmony_cistatic void xive_do_source_set_mask(struct xive_irq_data *xd, 47062306a36Sopenharmony_ci bool mask) 47162306a36Sopenharmony_ci{ 47262306a36Sopenharmony_ci u64 val; 47362306a36Sopenharmony_ci 47462306a36Sopenharmony_ci pr_debug("%s: HW 0x%x %smask\n", __func__, xd->hw_irq, mask ? "" : "un"); 47562306a36Sopenharmony_ci 47662306a36Sopenharmony_ci /* 47762306a36Sopenharmony_ci * If the interrupt had P set, it may be in a queue. 47862306a36Sopenharmony_ci * 47962306a36Sopenharmony_ci * We need to make sure we don't re-enable it until it 48062306a36Sopenharmony_ci * has been fetched from that queue and EOId. We keep 48162306a36Sopenharmony_ci * a copy of that P state and use it to restore the 48262306a36Sopenharmony_ci * ESB accordingly on unmask. 48362306a36Sopenharmony_ci */ 48462306a36Sopenharmony_ci if (mask) { 48562306a36Sopenharmony_ci val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01); 48662306a36Sopenharmony_ci if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P)) 48762306a36Sopenharmony_ci xd->saved_p = true; 48862306a36Sopenharmony_ci xd->stale_p = false; 48962306a36Sopenharmony_ci } else if (xd->saved_p) { 49062306a36Sopenharmony_ci xive_esb_read(xd, XIVE_ESB_SET_PQ_10); 49162306a36Sopenharmony_ci xd->saved_p = false; 49262306a36Sopenharmony_ci } else { 49362306a36Sopenharmony_ci xive_esb_read(xd, XIVE_ESB_SET_PQ_00); 49462306a36Sopenharmony_ci xd->stale_p = false; 49562306a36Sopenharmony_ci } 49662306a36Sopenharmony_ci} 49762306a36Sopenharmony_ci 49862306a36Sopenharmony_ci/* 49962306a36Sopenharmony_ci * Try to chose "cpu" as a new interrupt target. Increments 50062306a36Sopenharmony_ci * the queue accounting for that target if it's not already 50162306a36Sopenharmony_ci * full. 50262306a36Sopenharmony_ci */ 50362306a36Sopenharmony_cistatic bool xive_try_pick_target(int cpu) 50462306a36Sopenharmony_ci{ 50562306a36Sopenharmony_ci struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 50662306a36Sopenharmony_ci struct xive_q *q = &xc->queue[xive_irq_priority]; 50762306a36Sopenharmony_ci int max; 50862306a36Sopenharmony_ci 50962306a36Sopenharmony_ci /* 51062306a36Sopenharmony_ci * Calculate max number of interrupts in that queue. 51162306a36Sopenharmony_ci * 51262306a36Sopenharmony_ci * We leave a gap of 1 just in case... 51362306a36Sopenharmony_ci */ 51462306a36Sopenharmony_ci max = (q->msk + 1) - 1; 51562306a36Sopenharmony_ci return !!atomic_add_unless(&q->count, 1, max); 51662306a36Sopenharmony_ci} 51762306a36Sopenharmony_ci 51862306a36Sopenharmony_ci/* 51962306a36Sopenharmony_ci * Un-account an interrupt for a target CPU. We don't directly 52062306a36Sopenharmony_ci * decrement q->count since the interrupt might still be present 52162306a36Sopenharmony_ci * in the queue. 52262306a36Sopenharmony_ci * 52362306a36Sopenharmony_ci * Instead increment a separate counter "pending_count" which 52462306a36Sopenharmony_ci * will be substracted from "count" later when that CPU observes 52562306a36Sopenharmony_ci * the queue to be empty. 52662306a36Sopenharmony_ci */ 52762306a36Sopenharmony_cistatic void xive_dec_target_count(int cpu) 52862306a36Sopenharmony_ci{ 52962306a36Sopenharmony_ci struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 53062306a36Sopenharmony_ci struct xive_q *q = &xc->queue[xive_irq_priority]; 53162306a36Sopenharmony_ci 53262306a36Sopenharmony_ci if (WARN_ON(cpu < 0 || !xc)) { 53362306a36Sopenharmony_ci pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc); 53462306a36Sopenharmony_ci return; 53562306a36Sopenharmony_ci } 53662306a36Sopenharmony_ci 53762306a36Sopenharmony_ci /* 53862306a36Sopenharmony_ci * We increment the "pending count" which will be used 53962306a36Sopenharmony_ci * to decrement the target queue count whenever it's next 54062306a36Sopenharmony_ci * processed and found empty. This ensure that we don't 54162306a36Sopenharmony_ci * decrement while we still have the interrupt there 54262306a36Sopenharmony_ci * occupying a slot. 54362306a36Sopenharmony_ci */ 54462306a36Sopenharmony_ci atomic_inc(&q->pending_count); 54562306a36Sopenharmony_ci} 54662306a36Sopenharmony_ci 54762306a36Sopenharmony_ci/* Find a tentative CPU target in a CPU mask */ 54862306a36Sopenharmony_cistatic int xive_find_target_in_mask(const struct cpumask *mask, 54962306a36Sopenharmony_ci unsigned int fuzz) 55062306a36Sopenharmony_ci{ 55162306a36Sopenharmony_ci int cpu, first, num, i; 55262306a36Sopenharmony_ci 55362306a36Sopenharmony_ci /* Pick up a starting point CPU in the mask based on fuzz */ 55462306a36Sopenharmony_ci num = min_t(int, cpumask_weight(mask), nr_cpu_ids); 55562306a36Sopenharmony_ci first = fuzz % num; 55662306a36Sopenharmony_ci 55762306a36Sopenharmony_ci /* Locate it */ 55862306a36Sopenharmony_ci cpu = cpumask_first(mask); 55962306a36Sopenharmony_ci for (i = 0; i < first && cpu < nr_cpu_ids; i++) 56062306a36Sopenharmony_ci cpu = cpumask_next(cpu, mask); 56162306a36Sopenharmony_ci 56262306a36Sopenharmony_ci /* Sanity check */ 56362306a36Sopenharmony_ci if (WARN_ON(cpu >= nr_cpu_ids)) 56462306a36Sopenharmony_ci cpu = cpumask_first(cpu_online_mask); 56562306a36Sopenharmony_ci 56662306a36Sopenharmony_ci /* Remember first one to handle wrap-around */ 56762306a36Sopenharmony_ci first = cpu; 56862306a36Sopenharmony_ci 56962306a36Sopenharmony_ci /* 57062306a36Sopenharmony_ci * Now go through the entire mask until we find a valid 57162306a36Sopenharmony_ci * target. 57262306a36Sopenharmony_ci */ 57362306a36Sopenharmony_ci do { 57462306a36Sopenharmony_ci /* 57562306a36Sopenharmony_ci * We re-check online as the fallback case passes us 57662306a36Sopenharmony_ci * an untested affinity mask 57762306a36Sopenharmony_ci */ 57862306a36Sopenharmony_ci if (cpu_online(cpu) && xive_try_pick_target(cpu)) 57962306a36Sopenharmony_ci return cpu; 58062306a36Sopenharmony_ci cpu = cpumask_next(cpu, mask); 58162306a36Sopenharmony_ci /* Wrap around */ 58262306a36Sopenharmony_ci if (cpu >= nr_cpu_ids) 58362306a36Sopenharmony_ci cpu = cpumask_first(mask); 58462306a36Sopenharmony_ci } while (cpu != first); 58562306a36Sopenharmony_ci 58662306a36Sopenharmony_ci return -1; 58762306a36Sopenharmony_ci} 58862306a36Sopenharmony_ci 58962306a36Sopenharmony_ci/* 59062306a36Sopenharmony_ci * Pick a target CPU for an interrupt. This is done at 59162306a36Sopenharmony_ci * startup or if the affinity is changed in a way that 59262306a36Sopenharmony_ci * invalidates the current target. 59362306a36Sopenharmony_ci */ 59462306a36Sopenharmony_cistatic int xive_pick_irq_target(struct irq_data *d, 59562306a36Sopenharmony_ci const struct cpumask *affinity) 59662306a36Sopenharmony_ci{ 59762306a36Sopenharmony_ci static unsigned int fuzz; 59862306a36Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 59962306a36Sopenharmony_ci cpumask_var_t mask; 60062306a36Sopenharmony_ci int cpu = -1; 60162306a36Sopenharmony_ci 60262306a36Sopenharmony_ci /* 60362306a36Sopenharmony_ci * If we have chip IDs, first we try to build a mask of 60462306a36Sopenharmony_ci * CPUs matching the CPU and find a target in there 60562306a36Sopenharmony_ci */ 60662306a36Sopenharmony_ci if (xd->src_chip != XIVE_INVALID_CHIP_ID && 60762306a36Sopenharmony_ci zalloc_cpumask_var(&mask, GFP_ATOMIC)) { 60862306a36Sopenharmony_ci /* Build a mask of matching chip IDs */ 60962306a36Sopenharmony_ci for_each_cpu_and(cpu, affinity, cpu_online_mask) { 61062306a36Sopenharmony_ci struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 61162306a36Sopenharmony_ci if (xc->chip_id == xd->src_chip) 61262306a36Sopenharmony_ci cpumask_set_cpu(cpu, mask); 61362306a36Sopenharmony_ci } 61462306a36Sopenharmony_ci /* Try to find a target */ 61562306a36Sopenharmony_ci if (cpumask_empty(mask)) 61662306a36Sopenharmony_ci cpu = -1; 61762306a36Sopenharmony_ci else 61862306a36Sopenharmony_ci cpu = xive_find_target_in_mask(mask, fuzz++); 61962306a36Sopenharmony_ci free_cpumask_var(mask); 62062306a36Sopenharmony_ci if (cpu >= 0) 62162306a36Sopenharmony_ci return cpu; 62262306a36Sopenharmony_ci fuzz--; 62362306a36Sopenharmony_ci } 62462306a36Sopenharmony_ci 62562306a36Sopenharmony_ci /* No chip IDs, fallback to using the affinity mask */ 62662306a36Sopenharmony_ci return xive_find_target_in_mask(affinity, fuzz++); 62762306a36Sopenharmony_ci} 62862306a36Sopenharmony_ci 62962306a36Sopenharmony_cistatic unsigned int xive_irq_startup(struct irq_data *d) 63062306a36Sopenharmony_ci{ 63162306a36Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 63262306a36Sopenharmony_ci unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 63362306a36Sopenharmony_ci int target, rc; 63462306a36Sopenharmony_ci 63562306a36Sopenharmony_ci xd->saved_p = false; 63662306a36Sopenharmony_ci xd->stale_p = false; 63762306a36Sopenharmony_ci 63862306a36Sopenharmony_ci pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d); 63962306a36Sopenharmony_ci 64062306a36Sopenharmony_ci /* Pick a target */ 64162306a36Sopenharmony_ci target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d)); 64262306a36Sopenharmony_ci if (target == XIVE_INVALID_TARGET) { 64362306a36Sopenharmony_ci /* Try again breaking affinity */ 64462306a36Sopenharmony_ci target = xive_pick_irq_target(d, cpu_online_mask); 64562306a36Sopenharmony_ci if (target == XIVE_INVALID_TARGET) 64662306a36Sopenharmony_ci return -ENXIO; 64762306a36Sopenharmony_ci pr_warn("irq %d started with broken affinity\n", d->irq); 64862306a36Sopenharmony_ci } 64962306a36Sopenharmony_ci 65062306a36Sopenharmony_ci /* Sanity check */ 65162306a36Sopenharmony_ci if (WARN_ON(target == XIVE_INVALID_TARGET || 65262306a36Sopenharmony_ci target >= nr_cpu_ids)) 65362306a36Sopenharmony_ci target = smp_processor_id(); 65462306a36Sopenharmony_ci 65562306a36Sopenharmony_ci xd->target = target; 65662306a36Sopenharmony_ci 65762306a36Sopenharmony_ci /* 65862306a36Sopenharmony_ci * Configure the logical number to be the Linux IRQ number 65962306a36Sopenharmony_ci * and set the target queue 66062306a36Sopenharmony_ci */ 66162306a36Sopenharmony_ci rc = xive_ops->configure_irq(hw_irq, 66262306a36Sopenharmony_ci get_hard_smp_processor_id(target), 66362306a36Sopenharmony_ci xive_irq_priority, d->irq); 66462306a36Sopenharmony_ci if (rc) 66562306a36Sopenharmony_ci return rc; 66662306a36Sopenharmony_ci 66762306a36Sopenharmony_ci /* Unmask the ESB */ 66862306a36Sopenharmony_ci xive_do_source_set_mask(xd, false); 66962306a36Sopenharmony_ci 67062306a36Sopenharmony_ci return 0; 67162306a36Sopenharmony_ci} 67262306a36Sopenharmony_ci 67362306a36Sopenharmony_ci/* called with irq descriptor lock held */ 67462306a36Sopenharmony_cistatic void xive_irq_shutdown(struct irq_data *d) 67562306a36Sopenharmony_ci{ 67662306a36Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 67762306a36Sopenharmony_ci unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 67862306a36Sopenharmony_ci 67962306a36Sopenharmony_ci pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d); 68062306a36Sopenharmony_ci 68162306a36Sopenharmony_ci if (WARN_ON(xd->target == XIVE_INVALID_TARGET)) 68262306a36Sopenharmony_ci return; 68362306a36Sopenharmony_ci 68462306a36Sopenharmony_ci /* Mask the interrupt at the source */ 68562306a36Sopenharmony_ci xive_do_source_set_mask(xd, true); 68662306a36Sopenharmony_ci 68762306a36Sopenharmony_ci /* 68862306a36Sopenharmony_ci * Mask the interrupt in HW in the IVT/EAS and set the number 68962306a36Sopenharmony_ci * to be the "bad" IRQ number 69062306a36Sopenharmony_ci */ 69162306a36Sopenharmony_ci xive_ops->configure_irq(hw_irq, 69262306a36Sopenharmony_ci get_hard_smp_processor_id(xd->target), 69362306a36Sopenharmony_ci 0xff, XIVE_BAD_IRQ); 69462306a36Sopenharmony_ci 69562306a36Sopenharmony_ci xive_dec_target_count(xd->target); 69662306a36Sopenharmony_ci xd->target = XIVE_INVALID_TARGET; 69762306a36Sopenharmony_ci} 69862306a36Sopenharmony_ci 69962306a36Sopenharmony_cistatic void xive_irq_unmask(struct irq_data *d) 70062306a36Sopenharmony_ci{ 70162306a36Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 70262306a36Sopenharmony_ci 70362306a36Sopenharmony_ci pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd); 70462306a36Sopenharmony_ci 70562306a36Sopenharmony_ci xive_do_source_set_mask(xd, false); 70662306a36Sopenharmony_ci} 70762306a36Sopenharmony_ci 70862306a36Sopenharmony_cistatic void xive_irq_mask(struct irq_data *d) 70962306a36Sopenharmony_ci{ 71062306a36Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 71162306a36Sopenharmony_ci 71262306a36Sopenharmony_ci pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd); 71362306a36Sopenharmony_ci 71462306a36Sopenharmony_ci xive_do_source_set_mask(xd, true); 71562306a36Sopenharmony_ci} 71662306a36Sopenharmony_ci 71762306a36Sopenharmony_cistatic int xive_irq_set_affinity(struct irq_data *d, 71862306a36Sopenharmony_ci const struct cpumask *cpumask, 71962306a36Sopenharmony_ci bool force) 72062306a36Sopenharmony_ci{ 72162306a36Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 72262306a36Sopenharmony_ci unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 72362306a36Sopenharmony_ci u32 target, old_target; 72462306a36Sopenharmony_ci int rc = 0; 72562306a36Sopenharmony_ci 72662306a36Sopenharmony_ci pr_debug("%s: irq %d/0x%x\n", __func__, d->irq, hw_irq); 72762306a36Sopenharmony_ci 72862306a36Sopenharmony_ci /* Is this valid ? */ 72962306a36Sopenharmony_ci if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) 73062306a36Sopenharmony_ci return -EINVAL; 73162306a36Sopenharmony_ci 73262306a36Sopenharmony_ci /* 73362306a36Sopenharmony_ci * If existing target is already in the new mask, and is 73462306a36Sopenharmony_ci * online then do nothing. 73562306a36Sopenharmony_ci */ 73662306a36Sopenharmony_ci if (xd->target != XIVE_INVALID_TARGET && 73762306a36Sopenharmony_ci cpu_online(xd->target) && 73862306a36Sopenharmony_ci cpumask_test_cpu(xd->target, cpumask)) 73962306a36Sopenharmony_ci return IRQ_SET_MASK_OK; 74062306a36Sopenharmony_ci 74162306a36Sopenharmony_ci /* Pick a new target */ 74262306a36Sopenharmony_ci target = xive_pick_irq_target(d, cpumask); 74362306a36Sopenharmony_ci 74462306a36Sopenharmony_ci /* No target found */ 74562306a36Sopenharmony_ci if (target == XIVE_INVALID_TARGET) 74662306a36Sopenharmony_ci return -ENXIO; 74762306a36Sopenharmony_ci 74862306a36Sopenharmony_ci /* Sanity check */ 74962306a36Sopenharmony_ci if (WARN_ON(target >= nr_cpu_ids)) 75062306a36Sopenharmony_ci target = smp_processor_id(); 75162306a36Sopenharmony_ci 75262306a36Sopenharmony_ci old_target = xd->target; 75362306a36Sopenharmony_ci 75462306a36Sopenharmony_ci /* 75562306a36Sopenharmony_ci * Only configure the irq if it's not currently passed-through to 75662306a36Sopenharmony_ci * a KVM guest 75762306a36Sopenharmony_ci */ 75862306a36Sopenharmony_ci if (!irqd_is_forwarded_to_vcpu(d)) 75962306a36Sopenharmony_ci rc = xive_ops->configure_irq(hw_irq, 76062306a36Sopenharmony_ci get_hard_smp_processor_id(target), 76162306a36Sopenharmony_ci xive_irq_priority, d->irq); 76262306a36Sopenharmony_ci if (rc < 0) { 76362306a36Sopenharmony_ci pr_err("Error %d reconfiguring irq %d\n", rc, d->irq); 76462306a36Sopenharmony_ci return rc; 76562306a36Sopenharmony_ci } 76662306a36Sopenharmony_ci 76762306a36Sopenharmony_ci pr_debug(" target: 0x%x\n", target); 76862306a36Sopenharmony_ci xd->target = target; 76962306a36Sopenharmony_ci 77062306a36Sopenharmony_ci /* Give up previous target */ 77162306a36Sopenharmony_ci if (old_target != XIVE_INVALID_TARGET) 77262306a36Sopenharmony_ci xive_dec_target_count(old_target); 77362306a36Sopenharmony_ci 77462306a36Sopenharmony_ci return IRQ_SET_MASK_OK; 77562306a36Sopenharmony_ci} 77662306a36Sopenharmony_ci 77762306a36Sopenharmony_cistatic int xive_irq_set_type(struct irq_data *d, unsigned int flow_type) 77862306a36Sopenharmony_ci{ 77962306a36Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 78062306a36Sopenharmony_ci 78162306a36Sopenharmony_ci /* 78262306a36Sopenharmony_ci * We only support these. This has really no effect other than setting 78362306a36Sopenharmony_ci * the corresponding descriptor bits mind you but those will in turn 78462306a36Sopenharmony_ci * affect the resend function when re-enabling an edge interrupt. 78562306a36Sopenharmony_ci * 78662306a36Sopenharmony_ci * Set the default to edge as explained in map(). 78762306a36Sopenharmony_ci */ 78862306a36Sopenharmony_ci if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE) 78962306a36Sopenharmony_ci flow_type = IRQ_TYPE_EDGE_RISING; 79062306a36Sopenharmony_ci 79162306a36Sopenharmony_ci if (flow_type != IRQ_TYPE_EDGE_RISING && 79262306a36Sopenharmony_ci flow_type != IRQ_TYPE_LEVEL_LOW) 79362306a36Sopenharmony_ci return -EINVAL; 79462306a36Sopenharmony_ci 79562306a36Sopenharmony_ci irqd_set_trigger_type(d, flow_type); 79662306a36Sopenharmony_ci 79762306a36Sopenharmony_ci /* 79862306a36Sopenharmony_ci * Double check it matches what the FW thinks 79962306a36Sopenharmony_ci * 80062306a36Sopenharmony_ci * NOTE: We don't know yet if the PAPR interface will provide 80162306a36Sopenharmony_ci * the LSI vs MSI information apart from the device-tree so 80262306a36Sopenharmony_ci * this check might have to move into an optional backend call 80362306a36Sopenharmony_ci * that is specific to the native backend 80462306a36Sopenharmony_ci */ 80562306a36Sopenharmony_ci if ((flow_type == IRQ_TYPE_LEVEL_LOW) != 80662306a36Sopenharmony_ci !!(xd->flags & XIVE_IRQ_FLAG_LSI)) { 80762306a36Sopenharmony_ci pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n", 80862306a36Sopenharmony_ci d->irq, (u32)irqd_to_hwirq(d), 80962306a36Sopenharmony_ci (flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge", 81062306a36Sopenharmony_ci (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge"); 81162306a36Sopenharmony_ci } 81262306a36Sopenharmony_ci 81362306a36Sopenharmony_ci return IRQ_SET_MASK_OK_NOCOPY; 81462306a36Sopenharmony_ci} 81562306a36Sopenharmony_ci 81662306a36Sopenharmony_cistatic int xive_irq_retrigger(struct irq_data *d) 81762306a36Sopenharmony_ci{ 81862306a36Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 81962306a36Sopenharmony_ci 82062306a36Sopenharmony_ci /* This should be only for MSIs */ 82162306a36Sopenharmony_ci if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) 82262306a36Sopenharmony_ci return 0; 82362306a36Sopenharmony_ci 82462306a36Sopenharmony_ci /* 82562306a36Sopenharmony_ci * To perform a retrigger, we first set the PQ bits to 82662306a36Sopenharmony_ci * 11, then perform an EOI. 82762306a36Sopenharmony_ci */ 82862306a36Sopenharmony_ci xive_esb_read(xd, XIVE_ESB_SET_PQ_11); 82962306a36Sopenharmony_ci xive_do_source_eoi(xd); 83062306a36Sopenharmony_ci 83162306a36Sopenharmony_ci return 1; 83262306a36Sopenharmony_ci} 83362306a36Sopenharmony_ci 83462306a36Sopenharmony_ci/* 83562306a36Sopenharmony_ci * Caller holds the irq descriptor lock, so this won't be called 83662306a36Sopenharmony_ci * concurrently with xive_get_irqchip_state on the same interrupt. 83762306a36Sopenharmony_ci */ 83862306a36Sopenharmony_cistatic int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) 83962306a36Sopenharmony_ci{ 84062306a36Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 84162306a36Sopenharmony_ci unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 84262306a36Sopenharmony_ci int rc; 84362306a36Sopenharmony_ci u8 pq; 84462306a36Sopenharmony_ci 84562306a36Sopenharmony_ci /* 84662306a36Sopenharmony_ci * This is called by KVM with state non-NULL for enabling 84762306a36Sopenharmony_ci * pass-through or NULL for disabling it 84862306a36Sopenharmony_ci */ 84962306a36Sopenharmony_ci if (state) { 85062306a36Sopenharmony_ci irqd_set_forwarded_to_vcpu(d); 85162306a36Sopenharmony_ci 85262306a36Sopenharmony_ci /* Set it to PQ=10 state to prevent further sends */ 85362306a36Sopenharmony_ci pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10); 85462306a36Sopenharmony_ci if (!xd->stale_p) { 85562306a36Sopenharmony_ci xd->saved_p = !!(pq & XIVE_ESB_VAL_P); 85662306a36Sopenharmony_ci xd->stale_p = !xd->saved_p; 85762306a36Sopenharmony_ci } 85862306a36Sopenharmony_ci 85962306a36Sopenharmony_ci /* No target ? nothing to do */ 86062306a36Sopenharmony_ci if (xd->target == XIVE_INVALID_TARGET) { 86162306a36Sopenharmony_ci /* 86262306a36Sopenharmony_ci * An untargetted interrupt should have been 86362306a36Sopenharmony_ci * also masked at the source 86462306a36Sopenharmony_ci */ 86562306a36Sopenharmony_ci WARN_ON(xd->saved_p); 86662306a36Sopenharmony_ci 86762306a36Sopenharmony_ci return 0; 86862306a36Sopenharmony_ci } 86962306a36Sopenharmony_ci 87062306a36Sopenharmony_ci /* 87162306a36Sopenharmony_ci * If P was set, adjust state to PQ=11 to indicate 87262306a36Sopenharmony_ci * that a resend is needed for the interrupt to reach 87362306a36Sopenharmony_ci * the guest. Also remember the value of P. 87462306a36Sopenharmony_ci * 87562306a36Sopenharmony_ci * This also tells us that it's in flight to a host queue 87662306a36Sopenharmony_ci * or has already been fetched but hasn't been EOIed yet 87762306a36Sopenharmony_ci * by the host. This it's potentially using up a host 87862306a36Sopenharmony_ci * queue slot. This is important to know because as long 87962306a36Sopenharmony_ci * as this is the case, we must not hard-unmask it when 88062306a36Sopenharmony_ci * "returning" that interrupt to the host. 88162306a36Sopenharmony_ci * 88262306a36Sopenharmony_ci * This saved_p is cleared by the host EOI, when we know 88362306a36Sopenharmony_ci * for sure the queue slot is no longer in use. 88462306a36Sopenharmony_ci */ 88562306a36Sopenharmony_ci if (xd->saved_p) { 88662306a36Sopenharmony_ci xive_esb_read(xd, XIVE_ESB_SET_PQ_11); 88762306a36Sopenharmony_ci 88862306a36Sopenharmony_ci /* 88962306a36Sopenharmony_ci * Sync the XIVE source HW to ensure the interrupt 89062306a36Sopenharmony_ci * has gone through the EAS before we change its 89162306a36Sopenharmony_ci * target to the guest. That should guarantee us 89262306a36Sopenharmony_ci * that we *will* eventually get an EOI for it on 89362306a36Sopenharmony_ci * the host. Otherwise there would be a small window 89462306a36Sopenharmony_ci * for P to be seen here but the interrupt going 89562306a36Sopenharmony_ci * to the guest queue. 89662306a36Sopenharmony_ci */ 89762306a36Sopenharmony_ci if (xive_ops->sync_source) 89862306a36Sopenharmony_ci xive_ops->sync_source(hw_irq); 89962306a36Sopenharmony_ci } 90062306a36Sopenharmony_ci } else { 90162306a36Sopenharmony_ci irqd_clr_forwarded_to_vcpu(d); 90262306a36Sopenharmony_ci 90362306a36Sopenharmony_ci /* No host target ? hard mask and return */ 90462306a36Sopenharmony_ci if (xd->target == XIVE_INVALID_TARGET) { 90562306a36Sopenharmony_ci xive_do_source_set_mask(xd, true); 90662306a36Sopenharmony_ci return 0; 90762306a36Sopenharmony_ci } 90862306a36Sopenharmony_ci 90962306a36Sopenharmony_ci /* 91062306a36Sopenharmony_ci * Sync the XIVE source HW to ensure the interrupt 91162306a36Sopenharmony_ci * has gone through the EAS before we change its 91262306a36Sopenharmony_ci * target to the host. 91362306a36Sopenharmony_ci */ 91462306a36Sopenharmony_ci if (xive_ops->sync_source) 91562306a36Sopenharmony_ci xive_ops->sync_source(hw_irq); 91662306a36Sopenharmony_ci 91762306a36Sopenharmony_ci /* 91862306a36Sopenharmony_ci * By convention we are called with the interrupt in 91962306a36Sopenharmony_ci * a PQ=10 or PQ=11 state, ie, it won't fire and will 92062306a36Sopenharmony_ci * have latched in Q whether there's a pending HW 92162306a36Sopenharmony_ci * interrupt or not. 92262306a36Sopenharmony_ci * 92362306a36Sopenharmony_ci * First reconfigure the target. 92462306a36Sopenharmony_ci */ 92562306a36Sopenharmony_ci rc = xive_ops->configure_irq(hw_irq, 92662306a36Sopenharmony_ci get_hard_smp_processor_id(xd->target), 92762306a36Sopenharmony_ci xive_irq_priority, d->irq); 92862306a36Sopenharmony_ci if (rc) 92962306a36Sopenharmony_ci return rc; 93062306a36Sopenharmony_ci 93162306a36Sopenharmony_ci /* 93262306a36Sopenharmony_ci * Then if saved_p is not set, effectively re-enable the 93362306a36Sopenharmony_ci * interrupt with an EOI. If it is set, we know there is 93462306a36Sopenharmony_ci * still a message in a host queue somewhere that will be 93562306a36Sopenharmony_ci * EOId eventually. 93662306a36Sopenharmony_ci * 93762306a36Sopenharmony_ci * Note: We don't check irqd_irq_disabled(). Effectively, 93862306a36Sopenharmony_ci * we *will* let the irq get through even if masked if the 93962306a36Sopenharmony_ci * HW is still firing it in order to deal with the whole 94062306a36Sopenharmony_ci * saved_p business properly. If the interrupt triggers 94162306a36Sopenharmony_ci * while masked, the generic code will re-mask it anyway. 94262306a36Sopenharmony_ci */ 94362306a36Sopenharmony_ci if (!xd->saved_p) 94462306a36Sopenharmony_ci xive_do_source_eoi(xd); 94562306a36Sopenharmony_ci 94662306a36Sopenharmony_ci } 94762306a36Sopenharmony_ci return 0; 94862306a36Sopenharmony_ci} 94962306a36Sopenharmony_ci 95062306a36Sopenharmony_ci/* Called with irq descriptor lock held. */ 95162306a36Sopenharmony_cistatic int xive_get_irqchip_state(struct irq_data *data, 95262306a36Sopenharmony_ci enum irqchip_irq_state which, bool *state) 95362306a36Sopenharmony_ci{ 95462306a36Sopenharmony_ci struct xive_irq_data *xd = irq_data_get_irq_handler_data(data); 95562306a36Sopenharmony_ci u8 pq; 95662306a36Sopenharmony_ci 95762306a36Sopenharmony_ci switch (which) { 95862306a36Sopenharmony_ci case IRQCHIP_STATE_ACTIVE: 95962306a36Sopenharmony_ci pq = xive_esb_read(xd, XIVE_ESB_GET); 96062306a36Sopenharmony_ci 96162306a36Sopenharmony_ci /* 96262306a36Sopenharmony_ci * The esb value being all 1's means we couldn't get 96362306a36Sopenharmony_ci * the PQ state of the interrupt through mmio. It may 96462306a36Sopenharmony_ci * happen, for example when querying a PHB interrupt 96562306a36Sopenharmony_ci * while the PHB is in an error state. We consider the 96662306a36Sopenharmony_ci * interrupt to be inactive in that case. 96762306a36Sopenharmony_ci */ 96862306a36Sopenharmony_ci *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p && 96962306a36Sopenharmony_ci (xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) && 97062306a36Sopenharmony_ci !irqd_irq_disabled(data))); 97162306a36Sopenharmony_ci return 0; 97262306a36Sopenharmony_ci default: 97362306a36Sopenharmony_ci return -EINVAL; 97462306a36Sopenharmony_ci } 97562306a36Sopenharmony_ci} 97662306a36Sopenharmony_ci 97762306a36Sopenharmony_cistatic struct irq_chip xive_irq_chip = { 97862306a36Sopenharmony_ci .name = "XIVE-IRQ", 97962306a36Sopenharmony_ci .irq_startup = xive_irq_startup, 98062306a36Sopenharmony_ci .irq_shutdown = xive_irq_shutdown, 98162306a36Sopenharmony_ci .irq_eoi = xive_irq_eoi, 98262306a36Sopenharmony_ci .irq_mask = xive_irq_mask, 98362306a36Sopenharmony_ci .irq_unmask = xive_irq_unmask, 98462306a36Sopenharmony_ci .irq_set_affinity = xive_irq_set_affinity, 98562306a36Sopenharmony_ci .irq_set_type = xive_irq_set_type, 98662306a36Sopenharmony_ci .irq_retrigger = xive_irq_retrigger, 98762306a36Sopenharmony_ci .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity, 98862306a36Sopenharmony_ci .irq_get_irqchip_state = xive_get_irqchip_state, 98962306a36Sopenharmony_ci}; 99062306a36Sopenharmony_ci 99162306a36Sopenharmony_cibool is_xive_irq(struct irq_chip *chip) 99262306a36Sopenharmony_ci{ 99362306a36Sopenharmony_ci return chip == &xive_irq_chip; 99462306a36Sopenharmony_ci} 99562306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(is_xive_irq); 99662306a36Sopenharmony_ci 99762306a36Sopenharmony_civoid xive_cleanup_irq_data(struct xive_irq_data *xd) 99862306a36Sopenharmony_ci{ 99962306a36Sopenharmony_ci pr_debug("%s for HW 0x%x\n", __func__, xd->hw_irq); 100062306a36Sopenharmony_ci 100162306a36Sopenharmony_ci if (xd->eoi_mmio) { 100262306a36Sopenharmony_ci iounmap(xd->eoi_mmio); 100362306a36Sopenharmony_ci if (xd->eoi_mmio == xd->trig_mmio) 100462306a36Sopenharmony_ci xd->trig_mmio = NULL; 100562306a36Sopenharmony_ci xd->eoi_mmio = NULL; 100662306a36Sopenharmony_ci } 100762306a36Sopenharmony_ci if (xd->trig_mmio) { 100862306a36Sopenharmony_ci iounmap(xd->trig_mmio); 100962306a36Sopenharmony_ci xd->trig_mmio = NULL; 101062306a36Sopenharmony_ci } 101162306a36Sopenharmony_ci} 101262306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(xive_cleanup_irq_data); 101362306a36Sopenharmony_ci 101462306a36Sopenharmony_cistatic int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw) 101562306a36Sopenharmony_ci{ 101662306a36Sopenharmony_ci struct xive_irq_data *xd; 101762306a36Sopenharmony_ci int rc; 101862306a36Sopenharmony_ci 101962306a36Sopenharmony_ci xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL); 102062306a36Sopenharmony_ci if (!xd) 102162306a36Sopenharmony_ci return -ENOMEM; 102262306a36Sopenharmony_ci rc = xive_ops->populate_irq_data(hw, xd); 102362306a36Sopenharmony_ci if (rc) { 102462306a36Sopenharmony_ci kfree(xd); 102562306a36Sopenharmony_ci return rc; 102662306a36Sopenharmony_ci } 102762306a36Sopenharmony_ci xd->target = XIVE_INVALID_TARGET; 102862306a36Sopenharmony_ci irq_set_handler_data(virq, xd); 102962306a36Sopenharmony_ci 103062306a36Sopenharmony_ci /* 103162306a36Sopenharmony_ci * Turn OFF by default the interrupt being mapped. A side 103262306a36Sopenharmony_ci * effect of this check is the mapping the ESB page of the 103362306a36Sopenharmony_ci * interrupt in the Linux address space. This prevents page 103462306a36Sopenharmony_ci * fault issues in the crash handler which masks all 103562306a36Sopenharmony_ci * interrupts. 103662306a36Sopenharmony_ci */ 103762306a36Sopenharmony_ci xive_esb_read(xd, XIVE_ESB_SET_PQ_01); 103862306a36Sopenharmony_ci 103962306a36Sopenharmony_ci return 0; 104062306a36Sopenharmony_ci} 104162306a36Sopenharmony_ci 104262306a36Sopenharmony_civoid xive_irq_free_data(unsigned int virq) 104362306a36Sopenharmony_ci{ 104462306a36Sopenharmony_ci struct xive_irq_data *xd = irq_get_handler_data(virq); 104562306a36Sopenharmony_ci 104662306a36Sopenharmony_ci if (!xd) 104762306a36Sopenharmony_ci return; 104862306a36Sopenharmony_ci irq_set_handler_data(virq, NULL); 104962306a36Sopenharmony_ci xive_cleanup_irq_data(xd); 105062306a36Sopenharmony_ci kfree(xd); 105162306a36Sopenharmony_ci} 105262306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(xive_irq_free_data); 105362306a36Sopenharmony_ci 105462306a36Sopenharmony_ci#ifdef CONFIG_SMP 105562306a36Sopenharmony_ci 105662306a36Sopenharmony_cistatic void xive_cause_ipi(int cpu) 105762306a36Sopenharmony_ci{ 105862306a36Sopenharmony_ci struct xive_cpu *xc; 105962306a36Sopenharmony_ci struct xive_irq_data *xd; 106062306a36Sopenharmony_ci 106162306a36Sopenharmony_ci xc = per_cpu(xive_cpu, cpu); 106262306a36Sopenharmony_ci 106362306a36Sopenharmony_ci DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n", 106462306a36Sopenharmony_ci smp_processor_id(), cpu, xc->hw_ipi); 106562306a36Sopenharmony_ci 106662306a36Sopenharmony_ci xd = &xc->ipi_data; 106762306a36Sopenharmony_ci if (WARN_ON(!xd->trig_mmio)) 106862306a36Sopenharmony_ci return; 106962306a36Sopenharmony_ci out_be64(xd->trig_mmio, 0); 107062306a36Sopenharmony_ci} 107162306a36Sopenharmony_ci 107262306a36Sopenharmony_cistatic irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id) 107362306a36Sopenharmony_ci{ 107462306a36Sopenharmony_ci return smp_ipi_demux(); 107562306a36Sopenharmony_ci} 107662306a36Sopenharmony_ci 107762306a36Sopenharmony_cistatic void xive_ipi_eoi(struct irq_data *d) 107862306a36Sopenharmony_ci{ 107962306a36Sopenharmony_ci struct xive_cpu *xc = __this_cpu_read(xive_cpu); 108062306a36Sopenharmony_ci 108162306a36Sopenharmony_ci /* Handle possible race with unplug and drop stale IPIs */ 108262306a36Sopenharmony_ci if (!xc) 108362306a36Sopenharmony_ci return; 108462306a36Sopenharmony_ci 108562306a36Sopenharmony_ci DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", 108662306a36Sopenharmony_ci d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); 108762306a36Sopenharmony_ci 108862306a36Sopenharmony_ci xive_do_source_eoi(&xc->ipi_data); 108962306a36Sopenharmony_ci xive_do_queue_eoi(xc); 109062306a36Sopenharmony_ci} 109162306a36Sopenharmony_ci 109262306a36Sopenharmony_cistatic void xive_ipi_do_nothing(struct irq_data *d) 109362306a36Sopenharmony_ci{ 109462306a36Sopenharmony_ci /* 109562306a36Sopenharmony_ci * Nothing to do, we never mask/unmask IPIs, but the callback 109662306a36Sopenharmony_ci * has to exist for the struct irq_chip. 109762306a36Sopenharmony_ci */ 109862306a36Sopenharmony_ci} 109962306a36Sopenharmony_ci 110062306a36Sopenharmony_cistatic struct irq_chip xive_ipi_chip = { 110162306a36Sopenharmony_ci .name = "XIVE-IPI", 110262306a36Sopenharmony_ci .irq_eoi = xive_ipi_eoi, 110362306a36Sopenharmony_ci .irq_mask = xive_ipi_do_nothing, 110462306a36Sopenharmony_ci .irq_unmask = xive_ipi_do_nothing, 110562306a36Sopenharmony_ci}; 110662306a36Sopenharmony_ci 110762306a36Sopenharmony_ci/* 110862306a36Sopenharmony_ci * IPIs are marked per-cpu. We use separate HW interrupts under the 110962306a36Sopenharmony_ci * hood but associated with the same "linux" interrupt 111062306a36Sopenharmony_ci */ 111162306a36Sopenharmony_cistruct xive_ipi_alloc_info { 111262306a36Sopenharmony_ci irq_hw_number_t hwirq; 111362306a36Sopenharmony_ci}; 111462306a36Sopenharmony_ci 111562306a36Sopenharmony_cistatic int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 111662306a36Sopenharmony_ci unsigned int nr_irqs, void *arg) 111762306a36Sopenharmony_ci{ 111862306a36Sopenharmony_ci struct xive_ipi_alloc_info *info = arg; 111962306a36Sopenharmony_ci int i; 112062306a36Sopenharmony_ci 112162306a36Sopenharmony_ci for (i = 0; i < nr_irqs; i++) { 112262306a36Sopenharmony_ci irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip, 112362306a36Sopenharmony_ci domain->host_data, handle_percpu_irq, 112462306a36Sopenharmony_ci NULL, NULL); 112562306a36Sopenharmony_ci } 112662306a36Sopenharmony_ci return 0; 112762306a36Sopenharmony_ci} 112862306a36Sopenharmony_ci 112962306a36Sopenharmony_cistatic const struct irq_domain_ops xive_ipi_irq_domain_ops = { 113062306a36Sopenharmony_ci .alloc = xive_ipi_irq_domain_alloc, 113162306a36Sopenharmony_ci}; 113262306a36Sopenharmony_ci 113362306a36Sopenharmony_cistatic int __init xive_init_ipis(void) 113462306a36Sopenharmony_ci{ 113562306a36Sopenharmony_ci struct fwnode_handle *fwnode; 113662306a36Sopenharmony_ci struct irq_domain *ipi_domain; 113762306a36Sopenharmony_ci unsigned int node; 113862306a36Sopenharmony_ci int ret = -ENOMEM; 113962306a36Sopenharmony_ci 114062306a36Sopenharmony_ci fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI"); 114162306a36Sopenharmony_ci if (!fwnode) 114262306a36Sopenharmony_ci goto out; 114362306a36Sopenharmony_ci 114462306a36Sopenharmony_ci ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids, 114562306a36Sopenharmony_ci &xive_ipi_irq_domain_ops, NULL); 114662306a36Sopenharmony_ci if (!ipi_domain) 114762306a36Sopenharmony_ci goto out_free_fwnode; 114862306a36Sopenharmony_ci 114962306a36Sopenharmony_ci xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL); 115062306a36Sopenharmony_ci if (!xive_ipis) 115162306a36Sopenharmony_ci goto out_free_domain; 115262306a36Sopenharmony_ci 115362306a36Sopenharmony_ci for_each_node(node) { 115462306a36Sopenharmony_ci struct xive_ipi_desc *xid = &xive_ipis[node]; 115562306a36Sopenharmony_ci struct xive_ipi_alloc_info info = { node }; 115662306a36Sopenharmony_ci 115762306a36Sopenharmony_ci /* 115862306a36Sopenharmony_ci * Map one IPI interrupt per node for all cpus of that node. 115962306a36Sopenharmony_ci * Since the HW interrupt number doesn't have any meaning, 116062306a36Sopenharmony_ci * simply use the node number. 116162306a36Sopenharmony_ci */ 116262306a36Sopenharmony_ci ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info); 116362306a36Sopenharmony_ci if (ret < 0) 116462306a36Sopenharmony_ci goto out_free_xive_ipis; 116562306a36Sopenharmony_ci xid->irq = ret; 116662306a36Sopenharmony_ci 116762306a36Sopenharmony_ci snprintf(xid->name, sizeof(xid->name), "IPI-%d", node); 116862306a36Sopenharmony_ci } 116962306a36Sopenharmony_ci 117062306a36Sopenharmony_ci return ret; 117162306a36Sopenharmony_ci 117262306a36Sopenharmony_ciout_free_xive_ipis: 117362306a36Sopenharmony_ci kfree(xive_ipis); 117462306a36Sopenharmony_ciout_free_domain: 117562306a36Sopenharmony_ci irq_domain_remove(ipi_domain); 117662306a36Sopenharmony_ciout_free_fwnode: 117762306a36Sopenharmony_ci irq_domain_free_fwnode(fwnode); 117862306a36Sopenharmony_ciout: 117962306a36Sopenharmony_ci return ret; 118062306a36Sopenharmony_ci} 118162306a36Sopenharmony_ci 118262306a36Sopenharmony_cistatic int xive_request_ipi(unsigned int cpu) 118362306a36Sopenharmony_ci{ 118462306a36Sopenharmony_ci struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)]; 118562306a36Sopenharmony_ci int ret; 118662306a36Sopenharmony_ci 118762306a36Sopenharmony_ci if (atomic_inc_return(&xid->started) > 1) 118862306a36Sopenharmony_ci return 0; 118962306a36Sopenharmony_ci 119062306a36Sopenharmony_ci ret = request_irq(xid->irq, xive_muxed_ipi_action, 119162306a36Sopenharmony_ci IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD, 119262306a36Sopenharmony_ci xid->name, NULL); 119362306a36Sopenharmony_ci 119462306a36Sopenharmony_ci WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret); 119562306a36Sopenharmony_ci return ret; 119662306a36Sopenharmony_ci} 119762306a36Sopenharmony_ci 119862306a36Sopenharmony_cistatic int xive_setup_cpu_ipi(unsigned int cpu) 119962306a36Sopenharmony_ci{ 120062306a36Sopenharmony_ci unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); 120162306a36Sopenharmony_ci struct xive_cpu *xc; 120262306a36Sopenharmony_ci int rc; 120362306a36Sopenharmony_ci 120462306a36Sopenharmony_ci pr_debug("Setting up IPI for CPU %d\n", cpu); 120562306a36Sopenharmony_ci 120662306a36Sopenharmony_ci xc = per_cpu(xive_cpu, cpu); 120762306a36Sopenharmony_ci 120862306a36Sopenharmony_ci /* Check if we are already setup */ 120962306a36Sopenharmony_ci if (xc->hw_ipi != XIVE_BAD_IRQ) 121062306a36Sopenharmony_ci return 0; 121162306a36Sopenharmony_ci 121262306a36Sopenharmony_ci /* Register the IPI */ 121362306a36Sopenharmony_ci xive_request_ipi(cpu); 121462306a36Sopenharmony_ci 121562306a36Sopenharmony_ci /* Grab an IPI from the backend, this will populate xc->hw_ipi */ 121662306a36Sopenharmony_ci if (xive_ops->get_ipi(cpu, xc)) 121762306a36Sopenharmony_ci return -EIO; 121862306a36Sopenharmony_ci 121962306a36Sopenharmony_ci /* 122062306a36Sopenharmony_ci * Populate the IRQ data in the xive_cpu structure and 122162306a36Sopenharmony_ci * configure the HW / enable the IPIs. 122262306a36Sopenharmony_ci */ 122362306a36Sopenharmony_ci rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data); 122462306a36Sopenharmony_ci if (rc) { 122562306a36Sopenharmony_ci pr_err("Failed to populate IPI data on CPU %d\n", cpu); 122662306a36Sopenharmony_ci return -EIO; 122762306a36Sopenharmony_ci } 122862306a36Sopenharmony_ci rc = xive_ops->configure_irq(xc->hw_ipi, 122962306a36Sopenharmony_ci get_hard_smp_processor_id(cpu), 123062306a36Sopenharmony_ci xive_irq_priority, xive_ipi_irq); 123162306a36Sopenharmony_ci if (rc) { 123262306a36Sopenharmony_ci pr_err("Failed to map IPI CPU %d\n", cpu); 123362306a36Sopenharmony_ci return -EIO; 123462306a36Sopenharmony_ci } 123562306a36Sopenharmony_ci pr_debug("CPU %d HW IPI 0x%x, virq %d, trig_mmio=%p\n", cpu, 123662306a36Sopenharmony_ci xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio); 123762306a36Sopenharmony_ci 123862306a36Sopenharmony_ci /* Unmask it */ 123962306a36Sopenharmony_ci xive_do_source_set_mask(&xc->ipi_data, false); 124062306a36Sopenharmony_ci 124162306a36Sopenharmony_ci return 0; 124262306a36Sopenharmony_ci} 124362306a36Sopenharmony_ci 124462306a36Sopenharmony_cinoinstr static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) 124562306a36Sopenharmony_ci{ 124662306a36Sopenharmony_ci unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); 124762306a36Sopenharmony_ci 124862306a36Sopenharmony_ci /* Disable the IPI and free the IRQ data */ 124962306a36Sopenharmony_ci 125062306a36Sopenharmony_ci /* Already cleaned up ? */ 125162306a36Sopenharmony_ci if (xc->hw_ipi == XIVE_BAD_IRQ) 125262306a36Sopenharmony_ci return; 125362306a36Sopenharmony_ci 125462306a36Sopenharmony_ci /* TODO: clear IPI mapping */ 125562306a36Sopenharmony_ci 125662306a36Sopenharmony_ci /* Mask the IPI */ 125762306a36Sopenharmony_ci xive_do_source_set_mask(&xc->ipi_data, true); 125862306a36Sopenharmony_ci 125962306a36Sopenharmony_ci /* 126062306a36Sopenharmony_ci * Note: We don't call xive_cleanup_irq_data() to free 126162306a36Sopenharmony_ci * the mappings as this is called from an IPI on kexec 126262306a36Sopenharmony_ci * which is not a safe environment to call iounmap() 126362306a36Sopenharmony_ci */ 126462306a36Sopenharmony_ci 126562306a36Sopenharmony_ci /* Deconfigure/mask in the backend */ 126662306a36Sopenharmony_ci xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(), 126762306a36Sopenharmony_ci 0xff, xive_ipi_irq); 126862306a36Sopenharmony_ci 126962306a36Sopenharmony_ci /* Free the IPIs in the backend */ 127062306a36Sopenharmony_ci xive_ops->put_ipi(cpu, xc); 127162306a36Sopenharmony_ci} 127262306a36Sopenharmony_ci 127362306a36Sopenharmony_civoid __init xive_smp_probe(void) 127462306a36Sopenharmony_ci{ 127562306a36Sopenharmony_ci smp_ops->cause_ipi = xive_cause_ipi; 127662306a36Sopenharmony_ci 127762306a36Sopenharmony_ci /* Register the IPI */ 127862306a36Sopenharmony_ci xive_init_ipis(); 127962306a36Sopenharmony_ci 128062306a36Sopenharmony_ci /* Allocate and setup IPI for the boot CPU */ 128162306a36Sopenharmony_ci xive_setup_cpu_ipi(smp_processor_id()); 128262306a36Sopenharmony_ci} 128362306a36Sopenharmony_ci 128462306a36Sopenharmony_ci#endif /* CONFIG_SMP */ 128562306a36Sopenharmony_ci 128662306a36Sopenharmony_cistatic int xive_irq_domain_map(struct irq_domain *h, unsigned int virq, 128762306a36Sopenharmony_ci irq_hw_number_t hw) 128862306a36Sopenharmony_ci{ 128962306a36Sopenharmony_ci int rc; 129062306a36Sopenharmony_ci 129162306a36Sopenharmony_ci /* 129262306a36Sopenharmony_ci * Mark interrupts as edge sensitive by default so that resend 129362306a36Sopenharmony_ci * actually works. Will fix that up below if needed. 129462306a36Sopenharmony_ci */ 129562306a36Sopenharmony_ci irq_clear_status_flags(virq, IRQ_LEVEL); 129662306a36Sopenharmony_ci 129762306a36Sopenharmony_ci rc = xive_irq_alloc_data(virq, hw); 129862306a36Sopenharmony_ci if (rc) 129962306a36Sopenharmony_ci return rc; 130062306a36Sopenharmony_ci 130162306a36Sopenharmony_ci irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq); 130262306a36Sopenharmony_ci 130362306a36Sopenharmony_ci return 0; 130462306a36Sopenharmony_ci} 130562306a36Sopenharmony_ci 130662306a36Sopenharmony_cistatic void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq) 130762306a36Sopenharmony_ci{ 130862306a36Sopenharmony_ci xive_irq_free_data(virq); 130962306a36Sopenharmony_ci} 131062306a36Sopenharmony_ci 131162306a36Sopenharmony_cistatic int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct, 131262306a36Sopenharmony_ci const u32 *intspec, unsigned int intsize, 131362306a36Sopenharmony_ci irq_hw_number_t *out_hwirq, unsigned int *out_flags) 131462306a36Sopenharmony_ci 131562306a36Sopenharmony_ci{ 131662306a36Sopenharmony_ci *out_hwirq = intspec[0]; 131762306a36Sopenharmony_ci 131862306a36Sopenharmony_ci /* 131962306a36Sopenharmony_ci * If intsize is at least 2, we look for the type in the second cell, 132062306a36Sopenharmony_ci * we assume the LSB indicates a level interrupt. 132162306a36Sopenharmony_ci */ 132262306a36Sopenharmony_ci if (intsize > 1) { 132362306a36Sopenharmony_ci if (intspec[1] & 1) 132462306a36Sopenharmony_ci *out_flags = IRQ_TYPE_LEVEL_LOW; 132562306a36Sopenharmony_ci else 132662306a36Sopenharmony_ci *out_flags = IRQ_TYPE_EDGE_RISING; 132762306a36Sopenharmony_ci } else 132862306a36Sopenharmony_ci *out_flags = IRQ_TYPE_LEVEL_LOW; 132962306a36Sopenharmony_ci 133062306a36Sopenharmony_ci return 0; 133162306a36Sopenharmony_ci} 133262306a36Sopenharmony_ci 133362306a36Sopenharmony_cistatic int xive_irq_domain_match(struct irq_domain *h, struct device_node *node, 133462306a36Sopenharmony_ci enum irq_domain_bus_token bus_token) 133562306a36Sopenharmony_ci{ 133662306a36Sopenharmony_ci return xive_ops->match(node); 133762306a36Sopenharmony_ci} 133862306a36Sopenharmony_ci 133962306a36Sopenharmony_ci#ifdef CONFIG_GENERIC_IRQ_DEBUGFS 134062306a36Sopenharmony_cistatic const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" }; 134162306a36Sopenharmony_ci 134262306a36Sopenharmony_cistatic const struct { 134362306a36Sopenharmony_ci u64 mask; 134462306a36Sopenharmony_ci char *name; 134562306a36Sopenharmony_ci} xive_irq_flags[] = { 134662306a36Sopenharmony_ci { XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" }, 134762306a36Sopenharmony_ci { XIVE_IRQ_FLAG_LSI, "LSI" }, 134862306a36Sopenharmony_ci { XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" }, 134962306a36Sopenharmony_ci { XIVE_IRQ_FLAG_NO_EOI, "NO_EOI" }, 135062306a36Sopenharmony_ci}; 135162306a36Sopenharmony_ci 135262306a36Sopenharmony_cistatic void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d, 135362306a36Sopenharmony_ci struct irq_data *irqd, int ind) 135462306a36Sopenharmony_ci{ 135562306a36Sopenharmony_ci struct xive_irq_data *xd; 135662306a36Sopenharmony_ci u64 val; 135762306a36Sopenharmony_ci int i; 135862306a36Sopenharmony_ci 135962306a36Sopenharmony_ci /* No IRQ domain level information. To be done */ 136062306a36Sopenharmony_ci if (!irqd) 136162306a36Sopenharmony_ci return; 136262306a36Sopenharmony_ci 136362306a36Sopenharmony_ci if (!is_xive_irq(irq_data_get_irq_chip(irqd))) 136462306a36Sopenharmony_ci return; 136562306a36Sopenharmony_ci 136662306a36Sopenharmony_ci seq_printf(m, "%*sXIVE:\n", ind, ""); 136762306a36Sopenharmony_ci ind++; 136862306a36Sopenharmony_ci 136962306a36Sopenharmony_ci xd = irq_data_get_irq_handler_data(irqd); 137062306a36Sopenharmony_ci if (!xd) { 137162306a36Sopenharmony_ci seq_printf(m, "%*snot assigned\n", ind, ""); 137262306a36Sopenharmony_ci return; 137362306a36Sopenharmony_ci } 137462306a36Sopenharmony_ci 137562306a36Sopenharmony_ci val = xive_esb_read(xd, XIVE_ESB_GET); 137662306a36Sopenharmony_ci seq_printf(m, "%*sESB: %s\n", ind, "", esb_names[val & 0x3]); 137762306a36Sopenharmony_ci seq_printf(m, "%*sPstate: %s %s\n", ind, "", xd->stale_p ? "stale" : "", 137862306a36Sopenharmony_ci xd->saved_p ? "saved" : ""); 137962306a36Sopenharmony_ci seq_printf(m, "%*sTarget: %d\n", ind, "", xd->target); 138062306a36Sopenharmony_ci seq_printf(m, "%*sChip: %d\n", ind, "", xd->src_chip); 138162306a36Sopenharmony_ci seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page); 138262306a36Sopenharmony_ci seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page); 138362306a36Sopenharmony_ci seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags); 138462306a36Sopenharmony_ci for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) { 138562306a36Sopenharmony_ci if (xd->flags & xive_irq_flags[i].mask) 138662306a36Sopenharmony_ci seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name); 138762306a36Sopenharmony_ci } 138862306a36Sopenharmony_ci} 138962306a36Sopenharmony_ci#endif 139062306a36Sopenharmony_ci 139162306a36Sopenharmony_ci#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 139262306a36Sopenharmony_cistatic int xive_irq_domain_translate(struct irq_domain *d, 139362306a36Sopenharmony_ci struct irq_fwspec *fwspec, 139462306a36Sopenharmony_ci unsigned long *hwirq, 139562306a36Sopenharmony_ci unsigned int *type) 139662306a36Sopenharmony_ci{ 139762306a36Sopenharmony_ci return xive_irq_domain_xlate(d, to_of_node(fwspec->fwnode), 139862306a36Sopenharmony_ci fwspec->param, fwspec->param_count, 139962306a36Sopenharmony_ci hwirq, type); 140062306a36Sopenharmony_ci} 140162306a36Sopenharmony_ci 140262306a36Sopenharmony_cistatic int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 140362306a36Sopenharmony_ci unsigned int nr_irqs, void *arg) 140462306a36Sopenharmony_ci{ 140562306a36Sopenharmony_ci struct irq_fwspec *fwspec = arg; 140662306a36Sopenharmony_ci irq_hw_number_t hwirq; 140762306a36Sopenharmony_ci unsigned int type = IRQ_TYPE_NONE; 140862306a36Sopenharmony_ci int i, rc; 140962306a36Sopenharmony_ci 141062306a36Sopenharmony_ci rc = xive_irq_domain_translate(domain, fwspec, &hwirq, &type); 141162306a36Sopenharmony_ci if (rc) 141262306a36Sopenharmony_ci return rc; 141362306a36Sopenharmony_ci 141462306a36Sopenharmony_ci pr_debug("%s %d/0x%lx #%d\n", __func__, virq, hwirq, nr_irqs); 141562306a36Sopenharmony_ci 141662306a36Sopenharmony_ci for (i = 0; i < nr_irqs; i++) { 141762306a36Sopenharmony_ci /* TODO: call xive_irq_domain_map() */ 141862306a36Sopenharmony_ci 141962306a36Sopenharmony_ci /* 142062306a36Sopenharmony_ci * Mark interrupts as edge sensitive by default so that resend 142162306a36Sopenharmony_ci * actually works. Will fix that up below if needed. 142262306a36Sopenharmony_ci */ 142362306a36Sopenharmony_ci irq_clear_status_flags(virq, IRQ_LEVEL); 142462306a36Sopenharmony_ci 142562306a36Sopenharmony_ci /* allocates and sets handler data */ 142662306a36Sopenharmony_ci rc = xive_irq_alloc_data(virq + i, hwirq + i); 142762306a36Sopenharmony_ci if (rc) 142862306a36Sopenharmony_ci return rc; 142962306a36Sopenharmony_ci 143062306a36Sopenharmony_ci irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 143162306a36Sopenharmony_ci &xive_irq_chip, domain->host_data); 143262306a36Sopenharmony_ci irq_set_handler(virq + i, handle_fasteoi_irq); 143362306a36Sopenharmony_ci } 143462306a36Sopenharmony_ci 143562306a36Sopenharmony_ci return 0; 143662306a36Sopenharmony_ci} 143762306a36Sopenharmony_ci 143862306a36Sopenharmony_cistatic void xive_irq_domain_free(struct irq_domain *domain, 143962306a36Sopenharmony_ci unsigned int virq, unsigned int nr_irqs) 144062306a36Sopenharmony_ci{ 144162306a36Sopenharmony_ci int i; 144262306a36Sopenharmony_ci 144362306a36Sopenharmony_ci pr_debug("%s %d #%d\n", __func__, virq, nr_irqs); 144462306a36Sopenharmony_ci 144562306a36Sopenharmony_ci for (i = 0; i < nr_irqs; i++) 144662306a36Sopenharmony_ci xive_irq_free_data(virq + i); 144762306a36Sopenharmony_ci} 144862306a36Sopenharmony_ci#endif 144962306a36Sopenharmony_ci 145062306a36Sopenharmony_cistatic const struct irq_domain_ops xive_irq_domain_ops = { 145162306a36Sopenharmony_ci#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 145262306a36Sopenharmony_ci .alloc = xive_irq_domain_alloc, 145362306a36Sopenharmony_ci .free = xive_irq_domain_free, 145462306a36Sopenharmony_ci .translate = xive_irq_domain_translate, 145562306a36Sopenharmony_ci#endif 145662306a36Sopenharmony_ci .match = xive_irq_domain_match, 145762306a36Sopenharmony_ci .map = xive_irq_domain_map, 145862306a36Sopenharmony_ci .unmap = xive_irq_domain_unmap, 145962306a36Sopenharmony_ci .xlate = xive_irq_domain_xlate, 146062306a36Sopenharmony_ci#ifdef CONFIG_GENERIC_IRQ_DEBUGFS 146162306a36Sopenharmony_ci .debug_show = xive_irq_domain_debug_show, 146262306a36Sopenharmony_ci#endif 146362306a36Sopenharmony_ci}; 146462306a36Sopenharmony_ci 146562306a36Sopenharmony_cistatic void __init xive_init_host(struct device_node *np) 146662306a36Sopenharmony_ci{ 146762306a36Sopenharmony_ci xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL); 146862306a36Sopenharmony_ci if (WARN_ON(xive_irq_domain == NULL)) 146962306a36Sopenharmony_ci return; 147062306a36Sopenharmony_ci irq_set_default_host(xive_irq_domain); 147162306a36Sopenharmony_ci} 147262306a36Sopenharmony_ci 147362306a36Sopenharmony_cistatic void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) 147462306a36Sopenharmony_ci{ 147562306a36Sopenharmony_ci if (xc->queue[xive_irq_priority].qpage) 147662306a36Sopenharmony_ci xive_ops->cleanup_queue(cpu, xc, xive_irq_priority); 147762306a36Sopenharmony_ci} 147862306a36Sopenharmony_ci 147962306a36Sopenharmony_cistatic int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) 148062306a36Sopenharmony_ci{ 148162306a36Sopenharmony_ci int rc = 0; 148262306a36Sopenharmony_ci 148362306a36Sopenharmony_ci /* We setup 1 queues for now with a 64k page */ 148462306a36Sopenharmony_ci if (!xc->queue[xive_irq_priority].qpage) 148562306a36Sopenharmony_ci rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority); 148662306a36Sopenharmony_ci 148762306a36Sopenharmony_ci return rc; 148862306a36Sopenharmony_ci} 148962306a36Sopenharmony_ci 149062306a36Sopenharmony_cistatic int xive_prepare_cpu(unsigned int cpu) 149162306a36Sopenharmony_ci{ 149262306a36Sopenharmony_ci struct xive_cpu *xc; 149362306a36Sopenharmony_ci 149462306a36Sopenharmony_ci xc = per_cpu(xive_cpu, cpu); 149562306a36Sopenharmony_ci if (!xc) { 149662306a36Sopenharmony_ci xc = kzalloc_node(sizeof(struct xive_cpu), 149762306a36Sopenharmony_ci GFP_KERNEL, cpu_to_node(cpu)); 149862306a36Sopenharmony_ci if (!xc) 149962306a36Sopenharmony_ci return -ENOMEM; 150062306a36Sopenharmony_ci xc->hw_ipi = XIVE_BAD_IRQ; 150162306a36Sopenharmony_ci xc->chip_id = XIVE_INVALID_CHIP_ID; 150262306a36Sopenharmony_ci if (xive_ops->prepare_cpu) 150362306a36Sopenharmony_ci xive_ops->prepare_cpu(cpu, xc); 150462306a36Sopenharmony_ci 150562306a36Sopenharmony_ci per_cpu(xive_cpu, cpu) = xc; 150662306a36Sopenharmony_ci } 150762306a36Sopenharmony_ci 150862306a36Sopenharmony_ci /* Setup EQs if not already */ 150962306a36Sopenharmony_ci return xive_setup_cpu_queues(cpu, xc); 151062306a36Sopenharmony_ci} 151162306a36Sopenharmony_ci 151262306a36Sopenharmony_cistatic void xive_setup_cpu(void) 151362306a36Sopenharmony_ci{ 151462306a36Sopenharmony_ci struct xive_cpu *xc = __this_cpu_read(xive_cpu); 151562306a36Sopenharmony_ci 151662306a36Sopenharmony_ci /* The backend might have additional things to do */ 151762306a36Sopenharmony_ci if (xive_ops->setup_cpu) 151862306a36Sopenharmony_ci xive_ops->setup_cpu(smp_processor_id(), xc); 151962306a36Sopenharmony_ci 152062306a36Sopenharmony_ci /* Set CPPR to 0xff to enable flow of interrupts */ 152162306a36Sopenharmony_ci xc->cppr = 0xff; 152262306a36Sopenharmony_ci out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); 152362306a36Sopenharmony_ci} 152462306a36Sopenharmony_ci 152562306a36Sopenharmony_ci#ifdef CONFIG_SMP 152662306a36Sopenharmony_civoid xive_smp_setup_cpu(void) 152762306a36Sopenharmony_ci{ 152862306a36Sopenharmony_ci pr_debug("SMP setup CPU %d\n", smp_processor_id()); 152962306a36Sopenharmony_ci 153062306a36Sopenharmony_ci /* This will have already been done on the boot CPU */ 153162306a36Sopenharmony_ci if (smp_processor_id() != boot_cpuid) 153262306a36Sopenharmony_ci xive_setup_cpu(); 153362306a36Sopenharmony_ci 153462306a36Sopenharmony_ci} 153562306a36Sopenharmony_ci 153662306a36Sopenharmony_ciint xive_smp_prepare_cpu(unsigned int cpu) 153762306a36Sopenharmony_ci{ 153862306a36Sopenharmony_ci int rc; 153962306a36Sopenharmony_ci 154062306a36Sopenharmony_ci /* Allocate per-CPU data and queues */ 154162306a36Sopenharmony_ci rc = xive_prepare_cpu(cpu); 154262306a36Sopenharmony_ci if (rc) 154362306a36Sopenharmony_ci return rc; 154462306a36Sopenharmony_ci 154562306a36Sopenharmony_ci /* Allocate and setup IPI for the new CPU */ 154662306a36Sopenharmony_ci return xive_setup_cpu_ipi(cpu); 154762306a36Sopenharmony_ci} 154862306a36Sopenharmony_ci 154962306a36Sopenharmony_ci#ifdef CONFIG_HOTPLUG_CPU 155062306a36Sopenharmony_cistatic void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc) 155162306a36Sopenharmony_ci{ 155262306a36Sopenharmony_ci u32 irq; 155362306a36Sopenharmony_ci 155462306a36Sopenharmony_ci /* We assume local irqs are disabled */ 155562306a36Sopenharmony_ci WARN_ON(!irqs_disabled()); 155662306a36Sopenharmony_ci 155762306a36Sopenharmony_ci /* Check what's already in the CPU queue */ 155862306a36Sopenharmony_ci while ((irq = xive_scan_interrupts(xc, false)) != 0) { 155962306a36Sopenharmony_ci /* 156062306a36Sopenharmony_ci * We need to re-route that interrupt to its new destination. 156162306a36Sopenharmony_ci * First get and lock the descriptor 156262306a36Sopenharmony_ci */ 156362306a36Sopenharmony_ci struct irq_desc *desc = irq_to_desc(irq); 156462306a36Sopenharmony_ci struct irq_data *d = irq_desc_get_irq_data(desc); 156562306a36Sopenharmony_ci struct xive_irq_data *xd; 156662306a36Sopenharmony_ci 156762306a36Sopenharmony_ci /* 156862306a36Sopenharmony_ci * Ignore anything that isn't a XIVE irq and ignore 156962306a36Sopenharmony_ci * IPIs, so can just be dropped. 157062306a36Sopenharmony_ci */ 157162306a36Sopenharmony_ci if (d->domain != xive_irq_domain) 157262306a36Sopenharmony_ci continue; 157362306a36Sopenharmony_ci 157462306a36Sopenharmony_ci /* 157562306a36Sopenharmony_ci * The IRQ should have already been re-routed, it's just a 157662306a36Sopenharmony_ci * stale in the old queue, so re-trigger it in order to make 157762306a36Sopenharmony_ci * it reach is new destination. 157862306a36Sopenharmony_ci */ 157962306a36Sopenharmony_ci#ifdef DEBUG_FLUSH 158062306a36Sopenharmony_ci pr_info("CPU %d: Got irq %d while offline, re-sending...\n", 158162306a36Sopenharmony_ci cpu, irq); 158262306a36Sopenharmony_ci#endif 158362306a36Sopenharmony_ci raw_spin_lock(&desc->lock); 158462306a36Sopenharmony_ci xd = irq_desc_get_handler_data(desc); 158562306a36Sopenharmony_ci 158662306a36Sopenharmony_ci /* 158762306a36Sopenharmony_ci * Clear saved_p to indicate that it's no longer pending 158862306a36Sopenharmony_ci */ 158962306a36Sopenharmony_ci xd->saved_p = false; 159062306a36Sopenharmony_ci 159162306a36Sopenharmony_ci /* 159262306a36Sopenharmony_ci * For LSIs, we EOI, this will cause a resend if it's 159362306a36Sopenharmony_ci * still asserted. Otherwise do an MSI retrigger. 159462306a36Sopenharmony_ci */ 159562306a36Sopenharmony_ci if (xd->flags & XIVE_IRQ_FLAG_LSI) 159662306a36Sopenharmony_ci xive_do_source_eoi(xd); 159762306a36Sopenharmony_ci else 159862306a36Sopenharmony_ci xive_irq_retrigger(d); 159962306a36Sopenharmony_ci 160062306a36Sopenharmony_ci raw_spin_unlock(&desc->lock); 160162306a36Sopenharmony_ci } 160262306a36Sopenharmony_ci} 160362306a36Sopenharmony_ci 160462306a36Sopenharmony_civoid xive_smp_disable_cpu(void) 160562306a36Sopenharmony_ci{ 160662306a36Sopenharmony_ci struct xive_cpu *xc = __this_cpu_read(xive_cpu); 160762306a36Sopenharmony_ci unsigned int cpu = smp_processor_id(); 160862306a36Sopenharmony_ci 160962306a36Sopenharmony_ci /* Migrate interrupts away from the CPU */ 161062306a36Sopenharmony_ci irq_migrate_all_off_this_cpu(); 161162306a36Sopenharmony_ci 161262306a36Sopenharmony_ci /* Set CPPR to 0 to disable flow of interrupts */ 161362306a36Sopenharmony_ci xc->cppr = 0; 161462306a36Sopenharmony_ci out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); 161562306a36Sopenharmony_ci 161662306a36Sopenharmony_ci /* Flush everything still in the queue */ 161762306a36Sopenharmony_ci xive_flush_cpu_queue(cpu, xc); 161862306a36Sopenharmony_ci 161962306a36Sopenharmony_ci /* Re-enable CPPR */ 162062306a36Sopenharmony_ci xc->cppr = 0xff; 162162306a36Sopenharmony_ci out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); 162262306a36Sopenharmony_ci} 162362306a36Sopenharmony_ci 162462306a36Sopenharmony_civoid xive_flush_interrupt(void) 162562306a36Sopenharmony_ci{ 162662306a36Sopenharmony_ci struct xive_cpu *xc = __this_cpu_read(xive_cpu); 162762306a36Sopenharmony_ci unsigned int cpu = smp_processor_id(); 162862306a36Sopenharmony_ci 162962306a36Sopenharmony_ci /* Called if an interrupt occurs while the CPU is hot unplugged */ 163062306a36Sopenharmony_ci xive_flush_cpu_queue(cpu, xc); 163162306a36Sopenharmony_ci} 163262306a36Sopenharmony_ci 163362306a36Sopenharmony_ci#endif /* CONFIG_HOTPLUG_CPU */ 163462306a36Sopenharmony_ci 163562306a36Sopenharmony_ci#endif /* CONFIG_SMP */ 163662306a36Sopenharmony_ci 163762306a36Sopenharmony_cinoinstr void xive_teardown_cpu(void) 163862306a36Sopenharmony_ci{ 163962306a36Sopenharmony_ci struct xive_cpu *xc = __this_cpu_read(xive_cpu); 164062306a36Sopenharmony_ci unsigned int cpu = smp_processor_id(); 164162306a36Sopenharmony_ci 164262306a36Sopenharmony_ci /* Set CPPR to 0 to disable flow of interrupts */ 164362306a36Sopenharmony_ci xc->cppr = 0; 164462306a36Sopenharmony_ci out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); 164562306a36Sopenharmony_ci 164662306a36Sopenharmony_ci if (xive_ops->teardown_cpu) 164762306a36Sopenharmony_ci xive_ops->teardown_cpu(cpu, xc); 164862306a36Sopenharmony_ci 164962306a36Sopenharmony_ci#ifdef CONFIG_SMP 165062306a36Sopenharmony_ci /* Get rid of IPI */ 165162306a36Sopenharmony_ci xive_cleanup_cpu_ipi(cpu, xc); 165262306a36Sopenharmony_ci#endif 165362306a36Sopenharmony_ci 165462306a36Sopenharmony_ci /* Disable and free the queues */ 165562306a36Sopenharmony_ci xive_cleanup_cpu_queues(cpu, xc); 165662306a36Sopenharmony_ci} 165762306a36Sopenharmony_ci 165862306a36Sopenharmony_civoid xive_shutdown(void) 165962306a36Sopenharmony_ci{ 166062306a36Sopenharmony_ci xive_ops->shutdown(); 166162306a36Sopenharmony_ci} 166262306a36Sopenharmony_ci 166362306a36Sopenharmony_cibool __init xive_core_init(struct device_node *np, const struct xive_ops *ops, 166462306a36Sopenharmony_ci void __iomem *area, u32 offset, u8 max_prio) 166562306a36Sopenharmony_ci{ 166662306a36Sopenharmony_ci xive_tima = area; 166762306a36Sopenharmony_ci xive_tima_offset = offset; 166862306a36Sopenharmony_ci xive_ops = ops; 166962306a36Sopenharmony_ci xive_irq_priority = max_prio; 167062306a36Sopenharmony_ci 167162306a36Sopenharmony_ci ppc_md.get_irq = xive_get_irq; 167262306a36Sopenharmony_ci __xive_enabled = true; 167362306a36Sopenharmony_ci 167462306a36Sopenharmony_ci pr_debug("Initializing host..\n"); 167562306a36Sopenharmony_ci xive_init_host(np); 167662306a36Sopenharmony_ci 167762306a36Sopenharmony_ci pr_debug("Initializing boot CPU..\n"); 167862306a36Sopenharmony_ci 167962306a36Sopenharmony_ci /* Allocate per-CPU data and queues */ 168062306a36Sopenharmony_ci xive_prepare_cpu(smp_processor_id()); 168162306a36Sopenharmony_ci 168262306a36Sopenharmony_ci /* Get ready for interrupts */ 168362306a36Sopenharmony_ci xive_setup_cpu(); 168462306a36Sopenharmony_ci 168562306a36Sopenharmony_ci pr_info("Interrupt handling initialized with %s backend\n", 168662306a36Sopenharmony_ci xive_ops->name); 168762306a36Sopenharmony_ci pr_info("Using priority %d for all interrupts\n", max_prio); 168862306a36Sopenharmony_ci 168962306a36Sopenharmony_ci return true; 169062306a36Sopenharmony_ci} 169162306a36Sopenharmony_ci 169262306a36Sopenharmony_ci__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift) 169362306a36Sopenharmony_ci{ 169462306a36Sopenharmony_ci unsigned int alloc_order; 169562306a36Sopenharmony_ci struct page *pages; 169662306a36Sopenharmony_ci __be32 *qpage; 169762306a36Sopenharmony_ci 169862306a36Sopenharmony_ci alloc_order = xive_alloc_order(queue_shift); 169962306a36Sopenharmony_ci pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order); 170062306a36Sopenharmony_ci if (!pages) 170162306a36Sopenharmony_ci return ERR_PTR(-ENOMEM); 170262306a36Sopenharmony_ci qpage = (__be32 *)page_address(pages); 170362306a36Sopenharmony_ci memset(qpage, 0, 1 << queue_shift); 170462306a36Sopenharmony_ci 170562306a36Sopenharmony_ci return qpage; 170662306a36Sopenharmony_ci} 170762306a36Sopenharmony_ci 170862306a36Sopenharmony_cistatic int __init xive_off(char *arg) 170962306a36Sopenharmony_ci{ 171062306a36Sopenharmony_ci xive_cmdline_disabled = true; 171162306a36Sopenharmony_ci return 1; 171262306a36Sopenharmony_ci} 171362306a36Sopenharmony_ci__setup("xive=off", xive_off); 171462306a36Sopenharmony_ci 171562306a36Sopenharmony_cistatic int __init xive_store_eoi_cmdline(char *arg) 171662306a36Sopenharmony_ci{ 171762306a36Sopenharmony_ci if (!arg) 171862306a36Sopenharmony_ci return 1; 171962306a36Sopenharmony_ci 172062306a36Sopenharmony_ci if (strncmp(arg, "off", 3) == 0) { 172162306a36Sopenharmony_ci pr_info("StoreEOI disabled on kernel command line\n"); 172262306a36Sopenharmony_ci xive_store_eoi = false; 172362306a36Sopenharmony_ci } 172462306a36Sopenharmony_ci return 1; 172562306a36Sopenharmony_ci} 172662306a36Sopenharmony_ci__setup("xive.store-eoi=", xive_store_eoi_cmdline); 172762306a36Sopenharmony_ci 172862306a36Sopenharmony_ci#ifdef CONFIG_DEBUG_FS 172962306a36Sopenharmony_cistatic void xive_debug_show_ipi(struct seq_file *m, int cpu) 173062306a36Sopenharmony_ci{ 173162306a36Sopenharmony_ci struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 173262306a36Sopenharmony_ci 173362306a36Sopenharmony_ci seq_printf(m, "CPU %d: ", cpu); 173462306a36Sopenharmony_ci if (xc) { 173562306a36Sopenharmony_ci seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); 173662306a36Sopenharmony_ci 173762306a36Sopenharmony_ci#ifdef CONFIG_SMP 173862306a36Sopenharmony_ci { 173962306a36Sopenharmony_ci char buffer[128]; 174062306a36Sopenharmony_ci 174162306a36Sopenharmony_ci xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer)); 174262306a36Sopenharmony_ci seq_printf(m, "IPI=0x%08x %s", xc->hw_ipi, buffer); 174362306a36Sopenharmony_ci } 174462306a36Sopenharmony_ci#endif 174562306a36Sopenharmony_ci } 174662306a36Sopenharmony_ci seq_puts(m, "\n"); 174762306a36Sopenharmony_ci} 174862306a36Sopenharmony_ci 174962306a36Sopenharmony_cistatic void xive_debug_show_irq(struct seq_file *m, struct irq_data *d) 175062306a36Sopenharmony_ci{ 175162306a36Sopenharmony_ci unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 175262306a36Sopenharmony_ci int rc; 175362306a36Sopenharmony_ci u32 target; 175462306a36Sopenharmony_ci u8 prio; 175562306a36Sopenharmony_ci u32 lirq; 175662306a36Sopenharmony_ci char buffer[128]; 175762306a36Sopenharmony_ci 175862306a36Sopenharmony_ci rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); 175962306a36Sopenharmony_ci if (rc) { 176062306a36Sopenharmony_ci seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); 176162306a36Sopenharmony_ci return; 176262306a36Sopenharmony_ci } 176362306a36Sopenharmony_ci 176462306a36Sopenharmony_ci seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", 176562306a36Sopenharmony_ci hw_irq, target, prio, lirq); 176662306a36Sopenharmony_ci 176762306a36Sopenharmony_ci xive_irq_data_dump(irq_data_get_irq_handler_data(d), buffer, sizeof(buffer)); 176862306a36Sopenharmony_ci seq_puts(m, buffer); 176962306a36Sopenharmony_ci seq_puts(m, "\n"); 177062306a36Sopenharmony_ci} 177162306a36Sopenharmony_ci 177262306a36Sopenharmony_cistatic int xive_irq_debug_show(struct seq_file *m, void *private) 177362306a36Sopenharmony_ci{ 177462306a36Sopenharmony_ci unsigned int i; 177562306a36Sopenharmony_ci struct irq_desc *desc; 177662306a36Sopenharmony_ci 177762306a36Sopenharmony_ci for_each_irq_desc(i, desc) { 177862306a36Sopenharmony_ci struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i); 177962306a36Sopenharmony_ci 178062306a36Sopenharmony_ci if (d) 178162306a36Sopenharmony_ci xive_debug_show_irq(m, d); 178262306a36Sopenharmony_ci } 178362306a36Sopenharmony_ci return 0; 178462306a36Sopenharmony_ci} 178562306a36Sopenharmony_ciDEFINE_SHOW_ATTRIBUTE(xive_irq_debug); 178662306a36Sopenharmony_ci 178762306a36Sopenharmony_cistatic int xive_ipi_debug_show(struct seq_file *m, void *private) 178862306a36Sopenharmony_ci{ 178962306a36Sopenharmony_ci int cpu; 179062306a36Sopenharmony_ci 179162306a36Sopenharmony_ci if (xive_ops->debug_show) 179262306a36Sopenharmony_ci xive_ops->debug_show(m, private); 179362306a36Sopenharmony_ci 179462306a36Sopenharmony_ci for_each_online_cpu(cpu) 179562306a36Sopenharmony_ci xive_debug_show_ipi(m, cpu); 179662306a36Sopenharmony_ci return 0; 179762306a36Sopenharmony_ci} 179862306a36Sopenharmony_ciDEFINE_SHOW_ATTRIBUTE(xive_ipi_debug); 179962306a36Sopenharmony_ci 180062306a36Sopenharmony_cistatic void xive_eq_debug_show_one(struct seq_file *m, struct xive_q *q, u8 prio) 180162306a36Sopenharmony_ci{ 180262306a36Sopenharmony_ci int i; 180362306a36Sopenharmony_ci 180462306a36Sopenharmony_ci seq_printf(m, "EQ%d idx=%d T=%d\n", prio, q->idx, q->toggle); 180562306a36Sopenharmony_ci if (q->qpage) { 180662306a36Sopenharmony_ci for (i = 0; i < q->msk + 1; i++) { 180762306a36Sopenharmony_ci if (!(i % 8)) 180862306a36Sopenharmony_ci seq_printf(m, "%05d ", i); 180962306a36Sopenharmony_ci seq_printf(m, "%08x%s", be32_to_cpup(q->qpage + i), 181062306a36Sopenharmony_ci (i + 1) % 8 ? " " : "\n"); 181162306a36Sopenharmony_ci } 181262306a36Sopenharmony_ci } 181362306a36Sopenharmony_ci seq_puts(m, "\n"); 181462306a36Sopenharmony_ci} 181562306a36Sopenharmony_ci 181662306a36Sopenharmony_cistatic int xive_eq_debug_show(struct seq_file *m, void *private) 181762306a36Sopenharmony_ci{ 181862306a36Sopenharmony_ci int cpu = (long)m->private; 181962306a36Sopenharmony_ci struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 182062306a36Sopenharmony_ci 182162306a36Sopenharmony_ci if (xc) 182262306a36Sopenharmony_ci xive_eq_debug_show_one(m, &xc->queue[xive_irq_priority], 182362306a36Sopenharmony_ci xive_irq_priority); 182462306a36Sopenharmony_ci return 0; 182562306a36Sopenharmony_ci} 182662306a36Sopenharmony_ciDEFINE_SHOW_ATTRIBUTE(xive_eq_debug); 182762306a36Sopenharmony_ci 182862306a36Sopenharmony_cistatic void xive_core_debugfs_create(void) 182962306a36Sopenharmony_ci{ 183062306a36Sopenharmony_ci struct dentry *xive_dir; 183162306a36Sopenharmony_ci struct dentry *xive_eq_dir; 183262306a36Sopenharmony_ci long cpu; 183362306a36Sopenharmony_ci char name[16]; 183462306a36Sopenharmony_ci 183562306a36Sopenharmony_ci xive_dir = debugfs_create_dir("xive", arch_debugfs_dir); 183662306a36Sopenharmony_ci if (IS_ERR(xive_dir)) 183762306a36Sopenharmony_ci return; 183862306a36Sopenharmony_ci 183962306a36Sopenharmony_ci debugfs_create_file("ipis", 0400, xive_dir, 184062306a36Sopenharmony_ci NULL, &xive_ipi_debug_fops); 184162306a36Sopenharmony_ci debugfs_create_file("interrupts", 0400, xive_dir, 184262306a36Sopenharmony_ci NULL, &xive_irq_debug_fops); 184362306a36Sopenharmony_ci xive_eq_dir = debugfs_create_dir("eqs", xive_dir); 184462306a36Sopenharmony_ci for_each_possible_cpu(cpu) { 184562306a36Sopenharmony_ci snprintf(name, sizeof(name), "cpu%ld", cpu); 184662306a36Sopenharmony_ci debugfs_create_file(name, 0400, xive_eq_dir, (void *)cpu, 184762306a36Sopenharmony_ci &xive_eq_debug_fops); 184862306a36Sopenharmony_ci } 184962306a36Sopenharmony_ci debugfs_create_bool("store-eoi", 0600, xive_dir, &xive_store_eoi); 185062306a36Sopenharmony_ci 185162306a36Sopenharmony_ci if (xive_ops->debug_create) 185262306a36Sopenharmony_ci xive_ops->debug_create(xive_dir); 185362306a36Sopenharmony_ci} 185462306a36Sopenharmony_ci#else 185562306a36Sopenharmony_cistatic inline void xive_core_debugfs_create(void) { } 185662306a36Sopenharmony_ci#endif /* CONFIG_DEBUG_FS */ 185762306a36Sopenharmony_ci 185862306a36Sopenharmony_ciint xive_core_debug_init(void) 185962306a36Sopenharmony_ci{ 186062306a36Sopenharmony_ci if (xive_enabled() && IS_ENABLED(CONFIG_DEBUG_FS)) 186162306a36Sopenharmony_ci xive_core_debugfs_create(); 186262306a36Sopenharmony_ci 186362306a36Sopenharmony_ci return 0; 186462306a36Sopenharmony_ci} 1865