// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Jianmin Lv <lvjianmin@loongson.cn>
 * Loongson Extend I/O Interrupt Vector support
 */

#define pr_fmt(fmt) "eiointc: " fmt

#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/syscore_ops.h>

#define EIOINTC_REG_NODEMAP	0x14a0
#define EIOINTC_REG_IPMAP	0x14c0
#define EIOINTC_REG_ENABLE	0x1600
#define EIOINTC_REG_BOUNCE	0x1680
#define EIOINTC_REG_ISR		0x1800
#define EIOINTC_REG_ROUTE	0x1c00

#define VEC_REG_COUNT		4
#define VEC_COUNT_PER_REG	64
#define VEC_COUNT		(VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)	((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE	0xffffffff

#define MAX_EIO_NODES		(NR_CPUS / CORES_PER_EIO_NODE)

static int nr_pics;

struct eiointc_priv {
	u32			node;
	nodemask_t		node_map;
	cpumask_t		cpuspan_map;
	struct fwnode_handle	*domain_handle;
	struct irq_domain	*eiointc_domain;
};

static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];

int eiointc_get_node(int id)
{
	return eiointc_priv[id]->node;
}

static int cpu_to_eio_node(int cpu)
{
	return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
}

/* Return the idx-th (zero-based) CPU set in *srcp; idx must be in range */
static unsigned int cpumask_nth(unsigned int idx, const struct cpumask *srcp)
{
	int cpu;

	for_each_cpu(cpu, srcp)
		if (idx-- == 0)
			return cpu;

	BUG();
}

static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
	int i, node, cpu_node, route_node;
	unsigned char coremap[MAX_EIO_NODES];
	uint32_t pos_off, data, data_byte, data_mask;

	pos_off = pos & ~3;
	data_byte = pos & 3;
	data_mask = ~BIT_MASK(data_byte) & 0xf;

	memset(coremap, 0, sizeof(unsigned char) * MAX_EIO_NODES);

	/* Calculate node and coremap of target irq */
	cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	coremap[cpu_node] |= BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

	for_each_online_cpu(i) {
		node = cpu_to_eio_node(i);
		if (!node_isset(node, *node_map))
			continue;

		/* EIO node 0 is in charge of inter-node interrupt dispatch */
		route_node = (node == mnode) ? cpu_node : node;
		data = ((coremap[node] | (route_node << 4)) << (data_byte * 8));
		csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
	}
}

#ifdef CONFIG_LOONGARCH
static void virt_eiointc_set_irq_route(int pos, unsigned int cpu)
{
	iocsr_write8(cpu_logical_map(cpu), EIOINTC_REG_ROUTE + pos);
}
#endif

static DEFINE_RAW_SPINLOCK(affinity_lock);

static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
	unsigned int cpu;
	unsigned long flags;
	uint32_t vector, regaddr;
	struct cpumask online_affinity;
	struct cpumask intersect_affinity;
	struct eiointc_priv *priv = d->domain->host_data;

	if (!IS_ENABLED(CONFIG_SMP))
		return -EPERM;

	raw_spin_lock_irqsave(&affinity_lock, flags);

	cpumask_and(&online_affinity, affinity, cpu_online_mask);
	if (cpumask_empty(&online_affinity)) {
		raw_spin_unlock_irqrestore(&affinity_lock, flags);
		return -EINVAL;
	}
	cpumask_and(&intersect_affinity, &online_affinity, &priv->cpuspan_map);

	if (!cpumask_empty(&intersect_affinity)) {
		cpu = cpumask_first(&intersect_affinity);
	} else {
		int c, idx = 0;
		struct cpumask complement_map;
		struct cpumask cpuspan_online_map;

		/*
		 * The requested CPUs are all outside this controller's span:
		 * map the first requested CPU's position among the off-span
		 * CPUs onto the online CPUs of the span.
		 */
		cpu = cpumask_first(&online_affinity);
		cpumask_complement(&complement_map, &priv->cpuspan_map);
		cpumask_and(&cpuspan_online_map, &priv->cpuspan_map, cpu_online_mask);

		for_each_cpu(c, &complement_map) {
			if (c == cpu)
				break;
			idx++;
		}

		idx = idx % cpumask_weight(&cpuspan_online_map);
		cpu = cpumask_nth(idx, &cpuspan_online_map);
	}

	if (!d->parent_data)
		vector = d->hwirq;
	else
		vector = d->parent_data->hwirq;

	regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);

	if (!cpu_has_hypervisor) {
		/* Mask target vector */
		csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
			     0x0, priv->node * CORES_PER_EIO_NODE);
		/* Set route for target vector */
		eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
		/* Unmask target vector */
		csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
			     0x0, priv->node * CORES_PER_EIO_NODE);
	} else {
		iocsr_write32(EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), regaddr);
		virt_eiointc_set_irq_route(vector, cpu);
		iocsr_write32(EIOINTC_ALL_ENABLE, regaddr);
	}

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	raw_spin_unlock_irqrestore(&affinity_lock, flags);

	return IRQ_SET_MASK_OK;
}

static int eiointc_index(int node)
{
	int i;

	for (i = 0; i < nr_pics; i++) {
		if (node_isset(node, eiointc_priv[i]->node_map))
			return i;
	}

	return -1;
}

static int eiointc_router_init(unsigned int cpu)
{
	int i, bit;
	int node = cpu_to_eio_node(cpu);
	int index = eiointc_index(node);
	uint32_t data;

	if (index < 0) {
		pr_err("Error: invalid nodemap!\n");
		return -1;
	}

	if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
		eiointc_enable();

		for (i = 0; i < VEC_COUNT / 32; i++) {
			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
			bit = BIT(1 + index); /* Route to IP[1 + index] */
			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 4; i++) {
			/* Route to Node-0 Core-0 */
			if (index == 0)
				bit = BIT(cpu_logical_map(0));
			else
				bit = (eiointc_priv[index]->node << 4) | 1;
			if (cpu_has_hypervisor)
				bit = cpu_logical_map(0);

			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 32; i++) {
			data = EIOINTC_ALL_ENABLE;
			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
		}
	}

	return 0;
}

static void eiointc_irq_dispatch(struct irq_desc *desc)
{
	int i;
	u64 pending;
	bool handled = false;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct eiointc_priv *priv = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);

	for (i = 0; i < VEC_REG_COUNT; i++) {
		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
		/* Write back to clear the pending bits just read */
		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
		while (pending) {
			int bit = __ffs(pending);
			int virq = irq_linear_revmap(priv->eiointc_domain, bit + VEC_COUNT_PER_REG * i);

			generic_handle_irq(virq);
			pending &= ~BIT(bit);
			handled = true;
		}
	}

	if (!handled)
		spurious_interrupt();

	chained_irq_exit(chip, desc);
}

static void eiointc_ack_irq(struct irq_data *d)
{
	if (d->parent_data)
		irq_chip_ack_parent(d);
}

static void eiointc_mask_irq(struct irq_data *d)
{
	if (d->parent_data)
		irq_chip_mask_parent(d);
}

static void eiointc_unmask_irq(struct irq_data *d)
{
	if (d->parent_data)
		irq_chip_unmask_parent(d);
}

static struct irq_chip eiointc_irq_chip = {
	.name			= "EIOINTC",
	.irq_ack		= eiointc_ack_irq,
	.irq_mask		= eiointc_mask_irq,
	.irq_unmask		= eiointc_unmask_irq,
	.irq_set_affinity	= eiointc_set_irq_affinity,
};

static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int ret;
	unsigned int i, type;
	unsigned long hwirq = 0;
	struct eiointc_priv *priv = domain->host_data;

	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
	if (ret < 0)
		return -EINVAL;

	if (hwirq >= VEC_COUNT)
		return -EINVAL;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
				    priv, handle_edge_irq, NULL, NULL);
	}

	return 0;
}

static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops eiointc_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= eiointc_domain_alloc,
	.free		= eiointc_domain_free,
};

static int eiointc_suspend(void)
{
	return 0;
}

static void eiointc_resume(void)
{
	int i, j;
	struct irq_desc *desc;
	struct irq_data *irq_data;

	eiointc_router_init(0);

	for (i = 0; i < nr_pics; i++) {
		for (j = 0; j < VEC_COUNT; j++) {
			desc = irq_to_desc(irq_find_mapping(eiointc_priv[i]->eiointc_domain, j));
			if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
				raw_spin_lock(&desc->lock);
				irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc));
				if (irq_data)
					eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
				raw_spin_unlock(&desc->lock);
			}
		}
	}
}

static struct syscore_ops eiointc_syscore_ops = {
	.suspend	= eiointc_suspend,
	.resume		= eiointc_resume,
};

struct irq_domain *eiointc_acpi_init(struct irq_domain *parent,
				     struct acpi_madt_eio_pic *acpi_eiointc)
{
	int i, parent_irq;
	unsigned long node_map;
	struct eiointc_priv *priv;

	if (!acpi_eiointc)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;

	priv->domain_handle = irq_domain_alloc_fwnode((phys_addr_t *)acpi_eiointc);
	if (!priv->domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto out_free_priv;
	}

	priv->node = acpi_eiointc->node;
	node_map = acpi_eiointc->node_map ? : -1ULL;

	for_each_possible_cpu(i) {
		if (node_map & (1ULL << cpu_to_eio_node(i))) {
			node_set(cpu_to_eio_node(i), priv->node_map);
			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
		}
	}

	/* Setup IRQ domain */
	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
							&eiointc_domain_ops, priv);
	if (!priv->eiointc_domain) {
		pr_err("loongson-eiointc: cannot add IRQ domain\n");
		goto out_free_handle;
	}

	eiointc_priv[nr_pics++] = priv;

	eiointc_router_init(0);

	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);

	register_syscore_ops(&eiointc_syscore_ops);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
				  "irqchip/loongarch/intc:starting",
				  eiointc_router_init, NULL);

	return irq_find_matching_fwnode(priv->domain_handle, DOMAIN_BUS_ANY);

out_free_handle:
	/* Also release the fwnode, or it leaks when domain creation fails */
	irq_domain_free_fwnode(priv->domain_handle);
	priv->domain_handle = NULL;
out_free_priv:
	kfree(priv);

	return NULL;
}