// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for NWL PCIe Bridge
 * Based on pcie-xilinx.c, pci-tegra.c
 *
 * (C) Copyright 2014 - 2015, Xilinx, Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>

#include "../pci.h"

/* Bridge core config registers */
#define BRCFG_PCIE_RX0			0x00000000
#define BRCFG_INTERRUPT			0x00000010
#define BRCFG_PCIE_RX_MSG_FILTER	0x00000020

/* Egress - Bridge translation registers */
#define E_BREG_CAPABILITIES		0x00000200
#define E_BREG_CONTROL			0x00000208
#define E_BREG_BASE_LO			0x00000210
#define E_BREG_BASE_HI			0x00000214
#define E_ECAM_CAPABILITIES		0x00000220
#define E_ECAM_CONTROL			0x00000228
#define E_ECAM_BASE_LO			0x00000230
#define E_ECAM_BASE_HI			0x00000234

/* Ingress - address translations */
#define I_MSII_CAPABILITIES		0x00000300
#define I_MSII_CONTROL			0x00000308
#define I_MSII_BASE_LO			0x00000310
#define I_MSII_BASE_HI			0x00000314

#define I_ISUB_CONTROL			0x000003E8
#define SET_ISUB_CONTROL		BIT(0)
/* Rxed msg fifo - Interrupt status registers */
#define MSGF_MISC_STATUS		0x00000400
#define MSGF_MISC_MASK			0x00000404
#define MSGF_LEG_STATUS			0x00000420
#define MSGF_LEG_MASK			0x00000424
#define MSGF_MSI_STATUS_LO		0x00000440
#define MSGF_MSI_STATUS_HI		0x00000444
#define MSGF_MSI_MASK_LO		0x00000448
#define MSGF_MSI_MASK_HI		0x0000044C

/* Msg filter mask bits */
#define CFG_ENABLE_PM_MSG_FWD		BIT(1)
#define CFG_ENABLE_INT_MSG_FWD		BIT(2)
#define CFG_ENABLE_ERR_MSG_FWD		BIT(3)
#define CFG_ENABLE_MSG_FILTER_MASK	(CFG_ENABLE_PM_MSG_FWD | \
					CFG_ENABLE_INT_MSG_FWD | \
					CFG_ENABLE_ERR_MSG_FWD)

/* Misc interrupt status mask bits */
#define MSGF_MISC_SR_RXMSG_AVAIL	BIT(0)
#define MSGF_MISC_SR_RXMSG_OVER		BIT(1)
#define MSGF_MISC_SR_SLAVE_ERR		BIT(4)
#define MSGF_MISC_SR_MASTER_ERR		BIT(5)
#define MSGF_MISC_SR_I_ADDR_ERR		BIT(6)
#define MSGF_MISC_SR_E_ADDR_ERR		BIT(7)
#define MSGF_MISC_SR_FATAL_AER		BIT(16)
#define MSGF_MISC_SR_NON_FATAL_AER	BIT(17)
#define MSGF_MISC_SR_CORR_AER		BIT(18)
#define MSGF_MISC_SR_UR_DETECT		BIT(20)
#define MSGF_MISC_SR_NON_FATAL_DEV	BIT(22)
#define MSGF_MISC_SR_FATAL_DEV		BIT(23)
#define MSGF_MISC_SR_LINK_DOWN		BIT(24)
#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH	BIT(25)
#define MSGF_MSIC_SR_LINK_BWIDTH	BIT(26)

#define MSGF_MISC_SR_MASKALL		(MSGF_MISC_SR_RXMSG_AVAIL | \
					MSGF_MISC_SR_RXMSG_OVER | \
					MSGF_MISC_SR_SLAVE_ERR | \
					MSGF_MISC_SR_MASTER_ERR | \
					MSGF_MISC_SR_I_ADDR_ERR | \
					MSGF_MISC_SR_E_ADDR_ERR | \
					MSGF_MISC_SR_FATAL_AER | \
					MSGF_MISC_SR_NON_FATAL_AER | \
					MSGF_MISC_SR_CORR_AER | \
					MSGF_MISC_SR_UR_DETECT | \
					MSGF_MISC_SR_NON_FATAL_DEV | \
					MSGF_MISC_SR_FATAL_DEV | \
					MSGF_MISC_SR_LINK_DOWN | \
					MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
					MSGF_MSIC_SR_LINK_BWIDTH)

/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA		BIT(0)
#define MSGF_LEG_SR_INTB		BIT(1)
#define MSGF_LEG_SR_INTC		BIT(2)
#define MSGF_LEG_SR_INTD		BIT(3)
#define MSGF_LEG_SR_MASKALL		(MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \
					MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)
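
/*
 * Illustrative sketch only, not driver logic: a latched MSGF_LEG_STATUS
 * word decodes into the four INTx lines bit by bit, which is exactly how
 * nwl_pcie_leg_handler() below walks it with for_each_set_bit(). For
 * example, a status of 0x5 (INTA | INTC) dispatches hwirqs 0 and 2:
 *
 *	u32 status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
 *		     MSGF_LEG_SR_MASKALL;
 *	if (status & MSGF_LEG_SR_INTA)	// bit 0 -> INTx hwirq 0
 *		...
 *	if (status & MSGF_LEG_SR_INTC)	// bit 2 -> INTx hwirq 2
 *		...
 */
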
/* MSI interrupt status mask bits */
#define MSGF_MSI_SR_LO_MASK		GENMASK(31, 0)
#define MSGF_MSI_SR_HI_MASK		GENMASK(31, 0)

#define MSII_PRESENT			BIT(0)
#define MSII_ENABLE			BIT(0)
#define MSII_STATUS_ENABLE		BIT(15)

/* Bridge config interrupt mask */
#define BRCFG_INTERRUPT_MASK		BIT(0)
#define BREG_PRESENT			BIT(0)
#define BREG_ENABLE			BIT(0)
#define BREG_ENABLE_FORCE		BIT(1)

/* E_ECAM status mask bits */
#define E_ECAM_PRESENT			BIT(0)
#define E_ECAM_CR_ENABLE		BIT(0)
#define E_ECAM_SIZE_LOC			GENMASK(20, 16)
#define E_ECAM_SIZE_SHIFT		16
#define ECAM_BUS_LOC_SHIFT		20
#define ECAM_DEV_LOC_SHIFT		12
#define NWL_ECAM_VALUE_DEFAULT		12

#define CFG_DMA_REG_BAR			GENMASK(2, 0)

#define INT_PCI_MSI_NR			(2 * 32)

/* Reading the PS_LINKUP */
#define PS_LINKUP_OFFSET		0x00000238
#define PCIE_PHY_LINKUP_BIT		BIT(0)
#define PHY_RDY_LINKUP_BIT		BIT(1)

/* Parameters for waiting for the link to come up */
#define LINK_WAIT_MAX_RETRIES		10
#define LINK_WAIT_USLEEP_MIN		90000
#define LINK_WAIT_USLEEP_MAX		100000

struct nwl_msi {			/* MSI information */
	struct irq_domain *msi_domain;
	unsigned long *bitmap;
	struct irq_domain *dev_domain;
	struct mutex lock;		/* protect bitmap variable */
	int irq_msi0;
	int irq_msi1;
};

struct nwl_pcie {
	struct device *dev;
	void __iomem *breg_base;
	void __iomem *pcireg_base;
	void __iomem *ecam_base;
	phys_addr_t phys_breg_base;	/* Physical Bridge Register Base */
	phys_addr_t phys_pcie_reg_base;	/* Physical PCIe Controller Base */
	phys_addr_t phys_ecam_base;	/* Physical Configuration Base */
	u32 breg_size;
	u32 pcie_reg_size;
	u32 ecam_size;
	int irq_intx;
	int irq_misc;
	u32 ecam_value;
	u8 last_busno;
	struct nwl_msi msi;
	struct irq_domain *legacy_irq_domain;
	struct clk *clk;
	raw_spinlock_t leg_mask_lock;
};

static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
{
	return readl(pcie->breg_base + off);
}

static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off)
{
	writel(val, pcie->breg_base + off);
}

static bool nwl_pcie_link_up(struct nwl_pcie *pcie)
{
	if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT)
		return true;
	return false;
}

static bool nwl_phy_link_up(struct nwl_pcie *pcie)
{
	if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT)
		return true;
	return false;
}

static int nwl_wait_for_link(struct nwl_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (nwl_phy_link_up(pcie))
			return 0;
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(dev, "PHY link never came up\n");
	return -ETIMEDOUT;
}

static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
	struct nwl_pcie *pcie = bus->sysdata;

	/* Check link before accessing downstream ports */
	if (!pci_is_root_bus(bus)) {
		if (!nwl_pcie_link_up(pcie))
			return false;
	} else if (devfn > 0) {
		/* Only one device down on each root port */
		return false;
	}

	return true;
}
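
/*
 * Worked example (illustrative only): the ECAM offset computed in
 * nwl_pcie_map_bus() below packs bus/devfn/register as
 * (bus << 20) | (devfn << 12) | where. For bus 1, device 0, function 0,
 * config register 0x10, the access lands at ecam_base + 0x00100010:
 *
 *	int off = (1 << ECAM_BUS_LOC_SHIFT) |	// bus 1
 *		  (0 << ECAM_DEV_LOC_SHIFT) |	// devfn 0
 *		  0x10;				// register offset
 */
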
/**
 * nwl_pcie_map_bus - Get configuration base
 *
 * @bus: Bus structure of current bus
 * @devfn: Device/function
 * @where: Offset from base
 *
 * Return: Base address of the requested configuration space
 */
static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct nwl_pcie *pcie = bus->sysdata;
	int relbus;

	if (!nwl_pcie_valid_device(bus, devfn))
		return NULL;

	relbus = (bus->number << ECAM_BUS_LOC_SHIFT) |
		 (devfn << ECAM_DEV_LOC_SHIFT);

	return pcie->ecam_base + relbus + where;
}

/* PCIe operations */
static struct pci_ops nwl_pcie_ops = {
	.map_bus = nwl_pcie_map_bus,
	.read  = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
{
	struct nwl_pcie *pcie = data;
	struct device *dev = pcie->dev;
	u32 misc_stat;

	/* Checking for misc interrupts */
	misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
		    MSGF_MISC_SR_MASKALL;
	if (!misc_stat)
		return IRQ_NONE;

	if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
		dev_err(dev, "Received Message FIFO Overflow\n");

	if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
		dev_err(dev, "Slave error\n");

	if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
		dev_err(dev, "Master error\n");

	if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
		dev_err(dev, "In Misc Ingress address translation error\n");

	if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
		dev_err(dev, "In Misc Egress address translation error\n");

	if (misc_stat & MSGF_MISC_SR_FATAL_AER)
		dev_err(dev, "Fatal Error in AER Capability\n");

	if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
		dev_err(dev, "Non-Fatal Error in AER Capability\n");

	if (misc_stat & MSGF_MISC_SR_CORR_AER)
		dev_err(dev, "Correctable Error in AER Capability\n");

	if (misc_stat & MSGF_MISC_SR_UR_DETECT)
		dev_err(dev, "Unsupported request Detected\n");

	if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
		dev_err(dev, "Non-Fatal Error Detected\n");

	if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
		dev_err(dev, "Fatal Error Detected\n");

	if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
		dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");

	if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
		dev_info(dev, "Link Bandwidth Management Status bit set\n");

	/* Clear misc interrupt status */
	nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);

	return IRQ_HANDLED;
}

static void nwl_pcie_leg_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie;
	unsigned long status;
	u32 bit;
	u32 virq;

	chained_irq_enter(chip, desc);
	pcie = irq_desc_get_handler_data(desc);

	while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
				MSGF_LEG_SR_MASKALL) != 0) {
		for_each_set_bit(bit, &status, PCI_NUM_INTX) {
			virq = irq_find_mapping(pcie->legacy_irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}

	chained_irq_exit(chip, desc);
}

static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
{
	struct nwl_msi *msi;
	unsigned long status;
	u32 bit;
	u32 virq;

	msi = &pcie->msi;

	while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
		for_each_set_bit(bit, &status, 32) {
			nwl_bridge_writel(pcie, 1 << bit, status_reg);
			virq = irq_find_mapping(msi->dev_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
	}
}
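
/*
 * Descriptive note on the loop above: the MSGF_MSI_STATUS_* registers
 * behave as write-one-to-clear (each vector is acked by writing its bit
 * back before it is dispatched), and the outer while re-reads the
 * register until it returns 0 so that MSIs raised while earlier vectors
 * were being serviced are picked up before the chained handler exits.
 */
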
static void nwl_pcie_msi_handler_high(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);
	nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI);
	chained_irq_exit(chip, desc);
}

static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);
	nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO);
	chained_irq_exit(chip, desc);
}

static void nwl_mask_leg_irq(struct irq_data *data)
{
	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask;
	u32 val;

	/* hwirq is 0-based (see pci_irqd_intx_xlate()), as are the mask bits */
	mask = 1 << data->hwirq;
	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
	nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}

static void nwl_unmask_leg_irq(struct irq_data *data)
{
	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask;
	u32 val;

	mask = 1 << data->hwirq;
	raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
	val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
	nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
	raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}

static struct irq_chip nwl_leg_irq_chip = {
	.name = "nwl_pcie:legacy",
	.irq_enable = nwl_unmask_leg_irq,
	.irq_disable = nwl_mask_leg_irq,
	.irq_mask = nwl_mask_leg_irq,
	.irq_unmask = nwl_unmask_leg_irq,
};

static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
			  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

static const struct irq_domain_ops legacy_domain_ops = {
	.map = nwl_legacy_map,
	.xlate = pci_irqd_intx_xlate,
};

#ifdef CONFIG_PCI_MSI
static struct irq_chip nwl_msi_irq_chip = {
	.name = "nwl_pcie:msi",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info nwl_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_MULTI_PCI_MSI),
	.chip = &nwl_msi_irq_chip,
};
#endif

static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
	phys_addr_t msi_addr = pcie->phys_pcie_reg_base;

	msg->address_lo = lower_32_bits(msi_addr);
	msg->address_hi = upper_32_bits(msi_addr);
	msg->data = data->hwirq;
}

static int nwl_msi_set_affinity(struct irq_data *irq_data,
				const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip nwl_irq_chip = {
	.name = "Xilinx MSI",
	.irq_compose_msi_msg = nwl_compose_msi_msg,
	.irq_set_affinity = nwl_msi_set_affinity,
};
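
/*
 * Worked example (illustrative only) for the allocator below:
 * MSI_FLAG_MULTI_PCI_MSI lets a device request a power-of-two block of
 * vectors whose MSI data values differ only in the low log2(nr_irqs)
 * bits, so the base hwirq must be naturally aligned.
 * bitmap_find_free_region() guarantees exactly that:
 *
 *	nr_irqs = 4  ->  get_count_order(4) = 2
 *	bitmap_find_free_region(bitmap, INT_PCI_MSI_NR, 2)
 *		-> first free hwirq that is a multiple of 4
 */
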
static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	struct nwl_pcie *pcie = domain->host_data;
	struct nwl_msi *msi = &pcie->msi;
	int bit;
	int i;

	mutex_lock(&msi->lock);
	bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
				      get_count_order(nr_irqs));
	if (bit < 0) {
		mutex_unlock(&msi->lock);
		return -ENOSPC;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}
	mutex_unlock(&msi->lock);
	return 0;
}

static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct nwl_msi *msi = &pcie->msi;

	mutex_lock(&msi->lock);
	bitmap_release_region(msi->bitmap, data->hwirq,
			      get_count_order(nr_irqs));
	mutex_unlock(&msi->lock);
}

static const struct irq_domain_ops dev_msi_domain_ops = {
	.alloc = nwl_irq_domain_alloc,
	.free = nwl_irq_domain_free,
};

static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
	struct device *dev = pcie->dev;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct nwl_msi *msi = &pcie->msi;

	msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
						&dev_msi_domain_ops, pcie);
	if (!msi->dev_domain) {
		dev_err(dev, "failed to create dev IRQ domain\n");
		return -ENOMEM;
	}
	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &nwl_msi_domain_info,
						    msi->dev_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create msi IRQ domain\n");
		irq_domain_remove(msi->dev_domain);
		return -ENOMEM;
	}
#endif
	return 0;
}

static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node;
	struct device_node *legacy_intc_node;

	legacy_intc_node = of_get_next_child(node, NULL);
	if (!legacy_intc_node) {
		dev_err(dev, "No legacy intc node found\n");
		return -EINVAL;
	}

	pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
							PCI_NUM_INTX,
							&legacy_domain_ops,
							pcie);
	of_node_put(legacy_intc_node);
	if (!pcie->legacy_irq_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	raw_spin_lock_init(&pcie->leg_mask_lock);
	/* Propagate MSI domain setup failures instead of ignoring them */
	return nwl_pcie_init_msi_irq_domain(pcie);
}
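
/*
 * Descriptive sketch of the MSI datapath this driver sets up: an
 * endpoint writes data = hwirq to address = pcie->phys_pcie_reg_base,
 * the address programmed into I_MSII_BASE_* by nwl_pcie_enable_msi()
 * below and advertised to endpoints by nwl_compose_msi_msg() above.
 * The bridge latches a per-vector bit in MSGF_MSI_STATUS_LO/HI, and the
 * chained msi0/msi1 handlers then dispatch the corresponding Linux IRQ.
 */
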
static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct nwl_msi *msi = &pcie->msi;
	unsigned long base;
	int ret;
	int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long);

	mutex_init(&msi->lock);

	msi->bitmap = kzalloc(size, GFP_KERNEL);
	if (!msi->bitmap)
		return -ENOMEM;

	/* Get msi_1 IRQ number */
	msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
	if (msi->irq_msi1 < 0) {
		ret = -EINVAL;
		goto err;
	}

	irq_set_chained_handler_and_data(msi->irq_msi1,
					 nwl_pcie_msi_handler_high, pcie);

	/* Get msi_0 IRQ number */
	msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
	if (msi->irq_msi0 < 0) {
		ret = -EINVAL;
		goto err;
	}

	irq_set_chained_handler_and_data(msi->irq_msi0,
					 nwl_pcie_msi_handler_low, pcie);

	/* Check for msii_present bit */
	ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
	if (!ret) {
		dev_err(dev, "MSI not present\n");
		ret = -EIO;
		goto err;
	}

	/* Enable MSII */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
			  MSII_ENABLE, I_MSII_CONTROL);

	/* Enable MSII status */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
			  MSII_STATUS_ENABLE, I_MSII_CONTROL);

	/* setup AFI/FPCI range */
	base = pcie->phys_pcie_reg_base;
	nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO);
	nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI);

	/*
	 * For high range MSI interrupts: disable, clear any pending,
	 * and enable
	 */
	nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_HI);

	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_HI) &
			  MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI);

	nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);

	/*
	 * For low range MSI interrupts: disable, clear any pending,
	 * and enable
	 */
	nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_LO);

	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) &
			  MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO);

	nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);

	return 0;
err:
	kfree(msi->bitmap);
	msi->bitmap = NULL;
	return ret;
}

static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	u32 breg_val, ecam_val, first_busno = 0;
	int err;

	breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
	if (!breg_val) {
		dev_err(dev, "BREG is not present\n");
		return -ENODEV;	/* was "return breg_val" (0), masking the error */
	}

	/* Write bridge_off to breg base */
	nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base),
			  E_BREG_BASE_LO);
	nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base),
			  E_BREG_BASE_HI);

	/* Enable BREG */
	nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE,
			  E_BREG_CONTROL);

	/* Disable DMA channel registers */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) |
			  CFG_DMA_REG_BAR, BRCFG_PCIE_RX0);

	/* Enable Ingress subtractive decode translation */
	nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL);

	/* Enable msg filtering details */
	nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK,
			  BRCFG_PCIE_RX_MSG_FILTER);

	err = nwl_wait_for_link(pcie);
	if (err)
		return err;

	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
	if (!ecam_val) {
		dev_err(dev, "ECAM is not present\n");
		return -ENODEV;	/* was "return ecam_val" (0), masking the error */
	}

	/* Enable ECAM */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
			  E_ECAM_CR_ENABLE, E_ECAM_CONTROL);

	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
			  (pcie->ecam_value << E_ECAM_SIZE_SHIFT),
			  E_ECAM_CONTROL);

	nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
			  E_ECAM_BASE_LO);
	nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base),
			  E_ECAM_BASE_HI);

	/* Get bus range */
	ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
	pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT;
	/* Write primary, secondary and subordinate bus numbers */
	ecam_val = first_busno;
	ecam_val |= (first_busno + 1) << 8;
	ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT);
	writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));

	if (nwl_pcie_link_up(pcie))
		dev_info(dev, "Link is UP\n");
	else
		dev_info(dev, "Link is DOWN\n");

	/* Get misc IRQ number */
	pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
	if (pcie->irq_misc < 0)
		return -EINVAL;

	err = devm_request_irq(dev, pcie->irq_misc,
			       nwl_pcie_misc_handler, IRQF_SHARED,
			       "nwl_pcie:misc", pcie);
	if (err) {
		dev_err(dev, "failed to register misc IRQ#%d\n",
			pcie->irq_misc);
		return err;
	}

	/* Disable all misc interrupts */
	nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);

	/* Clear pending misc interrupts */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
			  MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS);

	/* Enable all misc interrupts */
	nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);

	/* Disable all legacy interrupts */
	nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);

	/* Clear pending legacy interrupts */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
			  MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);

	/* Enable all legacy interrupts */
	nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);

	/* Enable the bridge config interrupt */
	nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) |
			  BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT);

	return 0;
}
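
/*
 * Resource naming assumed by nwl_pcie_parse_dt() below: three named MEM
 * regions ("breg" for bridge registers, "pcireg" for the PCIe
 * controller, "cfg" for ECAM configuration space) plus the named "intx"
 * IRQ. nwl_pcie_bridge_init() and nwl_pcie_enable_msi() additionally
 * look up the "misc", "msi0" and "msi1" IRQs by name.
 */
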
static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
			     struct platform_device *pdev)
{
	struct device *dev = pcie->dev;
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
	pcie->breg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->breg_base))
		return PTR_ERR(pcie->breg_base);
	pcie->phys_breg_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
	pcie->pcireg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->pcireg_base))
		return PTR_ERR(pcie->pcireg_base);
	pcie->phys_pcie_reg_base = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(pcie->ecam_base))
		return PTR_ERR(pcie->ecam_base);
	pcie->phys_ecam_base = res->start;

	/* Get intx IRQ number */
	pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
	if (pcie->irq_intx < 0)
		return pcie->irq_intx;

	irq_set_chained_handler_and_data(pcie->irq_intx,
					 nwl_pcie_leg_handler, pcie);

	return 0;
}

static const struct of_device_id nwl_pcie_of_match[] = {
	{ .compatible = "xlnx,nwl-pcie-2.11", },
	{}
};
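
/*
 * Sketch of a matching devicetree node (illustrative only; the unit
 * address, reg values and interrupt specifiers are placeholders, not
 * taken from any particular board):
 *
 *	pcie: pcie@... {
 *		compatible = "xlnx,nwl-pcie-2.11";
 *		reg = <...>, <...>, <...>;
 *		reg-names = "breg", "pcireg", "cfg";
 *		interrupts = <...>;
 *		interrupt-names = "misc", "intx", "msi0", "msi1";
 *		...
 *		pcie_intc: legacy-interrupt-controller {
 *			interrupt-controller;
 *			#interrupt-cells = <1>;
 *		};
 *	};
 *
 * The child node is required: nwl_pcie_init_irq_domain() takes the
 * first child as the INTx interrupt controller.
 */
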
static int nwl_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct nwl_pcie *pcie;
	struct pci_host_bridge *bridge;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENODEV;

	pcie = pci_host_bridge_priv(bridge);

	pcie->dev = dev;
	pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;

	err = nwl_pcie_parse_dt(pcie, pdev);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		return err;
	}

	pcie->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(pcie->clk))
		return PTR_ERR(pcie->clk);

	err = clk_prepare_enable(pcie->clk);
	if (err) {
		dev_err(dev, "can't enable PCIe ref clock\n");
		return err;
	}

	err = nwl_pcie_bridge_init(pcie);
	if (err) {
		dev_err(dev, "HW Initialization failed\n");
		return err;
	}

	err = nwl_pcie_init_irq_domain(pcie);
	if (err) {
		dev_err(dev, "Failed creating IRQ Domain\n");
		return err;
	}

	bridge->sysdata = pcie;
	bridge->ops = &nwl_pcie_ops;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = nwl_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(dev, "failed to enable MSI support: %d\n", err);
			return err;
		}
	}

	return pci_host_probe(bridge);
}

static struct platform_driver nwl_pcie_driver = {
	.driver = {
		.name = "nwl-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = nwl_pcie_of_match,
	},
	.probe = nwl_pcie_probe,
};
builtin_platform_driver(nwl_pcie_driver);