/*
 * PCI / PCI-X / PCI-Express support for 4xx parts
 *
 * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 *
 * Most of the PCI Express code comes from Stefan Roese's implementation for
 * arch/ppc in the DENX tree, slightly reworked by me.
 *
 * Copyright 2007 DENX Software Engineering, Stefan Roese <sr@denx.de>
 *
 * Some of that in turn comes from an earlier implementation for the 440SPe
 * only, by Roland Dreier:
 *
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Roland Dreier <rolandd@cisco.com>
 *
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <mm/mmu_decl.h>

#include "pci.h"

static int dma_offset_set;

#define U64_TO_U32_LOW(val)	((u32)((val) & 0x00000000ffffffffULL))
#define U64_TO_U32_HIGH(val)	((u32)((val) >> 32))

#define RES_TO_U32_LOW(val)	\
	((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
#define RES_TO_U32_HIGH(val)	\
	((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
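
/*
 * For example, with a 64-bit resource_size_t a PLB address such as
 * 0x180000000ull splits into a high word of 0x1 and a low word of
 * 0x80000000; with a 32-bit resource_size_t the high word is simply
 * forced to 0.
 */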

static inline int ppc440spe_revA(void)
{
	/* Catch both 440SPe variants, with and without RAID6 support */
	if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
		return 1;
	else
		return 0;
}

static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
{
	struct pci_controller *hose;
	struct resource *r;

	if (dev->devfn != 0 || dev->bus->self != NULL)
		return;

	hose = pci_bus_to_host(dev->bus);
	if (hose == NULL)
		return;

	if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
	    !of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
	    !of_device_is_compatible(hose->dn, "ibm,plb-pci"))
		return;

	if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") ||
		of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) {
		hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM;
	}

	/* Hide the PCI host BARs from the kernel as their content doesn't
	 * fit well in the resource management
	 */
	pci_dev_for_each_resource(dev, r) {
		r->start = r->end = 0;
		r->flags = 0;
	}

	printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
	       pci_name(dev));
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);

static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
					  void __iomem *reg,
					  struct resource *res)
{
	u64 size;
	const u32 *ranges;
	int rlen;
	int pna = of_n_addr_cells(hose->dn);
	int np = pna + 5;
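
	/* Each dma-ranges entry is np cells laid out as
	 *   <pci_space:1> <pci_addr:2> <cpu_addr:pna> <size:2>.
	 * As an illustration only (not taken from a real board file), a 1:1
	 * mapping of the first 512MB of RAM with pna == 2 would read:
	 *   dma-ranges = <0x02000000 0x0 0x0  0x0 0x0  0x0 0x20000000>;
	 */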

	/* Default */
	res->start = 0;
	size = 0x80000000;
	res->end = size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;

	/* Get dma-ranges property */
	ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
	if (ranges == NULL)
		goto out;

	/* Walk it */
	while ((rlen -= np * 4) >= 0) {
		u32 pci_space = ranges[0];
		u64 pci_addr = of_read_number(ranges + 1, 2);
		u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
		size = of_read_number(ranges + pna + 3, 2);
		ranges += np;
		if (cpu_addr == OF_BAD_ADDR || size == 0)
			continue;

		/* We only care about memory */
		if ((pci_space & 0x03000000) != 0x02000000)
			continue;

		/* We currently only support memory at 0, and pci_addr
		 * within 32 bits space
		 */
		if (cpu_addr != 0 || pci_addr > 0xffffffff) {
			printk(KERN_WARNING "%pOF: Ignored unsupported dma range"
			       " 0x%016llx...0x%016llx -> 0x%016llx\n",
			       hose->dn,
			       pci_addr, pci_addr + size - 1, cpu_addr);
			continue;
		}

		/* Check if not prefetchable */
		if (!(pci_space & 0x40000000))
			res->flags &= ~IORESOURCE_PREFETCH;

		/* Use that */
		res->start = pci_addr;
		/* Beware of 32 bits resources */
		if (sizeof(resource_size_t) == sizeof(u32) &&
		    (pci_addr + size) > 0x100000000ull)
			res->end = 0xffffffff;
		else
			res->end = res->start + size - 1;
		break;
	}

	/* We only support one global DMA offset */
	if (dma_offset_set && pci_dram_offset != res->start) {
		printk(KERN_ERR "%pOF: dma-ranges(s) mismatch\n", hose->dn);
		return -ENXIO;
	}

	/* Check that we can fit all of memory as we don't support
	 * DMA bounce buffers
	 */
	if (size < total_memory) {
		printk(KERN_ERR "%pOF: dma-ranges too small "
		       "(size=%llx total_memory=%llx)\n",
		       hose->dn, size, (u64)total_memory);
		return -ENXIO;
	}

	/* Check we are a power of 2 size and that base is a multiple of size */
	if ((size & (size - 1)) != 0 ||
	    (res->start & (size - 1)) != 0) {
		printk(KERN_ERR "%pOF: dma-ranges unaligned\n", hose->dn);
		return -ENXIO;
	}

	/* Check that we are fully contained within 32 bits space if we are not
	 * running on a 460sx or 476fpe which have 64 bit bus addresses.
	 */
	if (res->end > 0xffffffff &&
	    !(of_device_is_compatible(hose->dn, "ibm,plb-pciex-460sx")
	      || of_device_is_compatible(hose->dn, "ibm,plb-pciex-476fpe"))) {
		printk(KERN_ERR "%pOF: dma-ranges outside of 32 bits space\n",
		       hose->dn);
		return -ENXIO;
	}
 out:
	dma_offset_set = 1;
	pci_dram_offset = res->start;
	hose->dma_window_base_cur = res->start;
	hose->dma_window_size = resource_size(res);

	printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
	       pci_dram_offset);
	printk(KERN_INFO "4xx PCI DMA window base to 0x%016llx\n",
	       (unsigned long long)hose->dma_window_base_cur);
	printk(KERN_INFO "DMA window size 0x%016llx\n",
	       (unsigned long long)hose->dma_window_size);
	return 0;
}

/*
 * 4xx PCI 2.x part
 */

static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller	*hose,
					   void __iomem			*reg,
					   u64				plb_addr,
					   u64				pci_addr,
					   u64				size,
					   unsigned int			flags,
					   int				index)
{
	u32 ma, pcila, pciha;

	/* Hack warning ! The "old" PCI 2.x cell only lets us configure the low
	 * 32 bits of incoming PLB addresses. The top 4 bits of the 36-bit
	 * address are actually hard wired to a value that appears to depend
	 * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
	 *
	 * The trick here is we just crop those top bits and ignore them when
	 * programming the chip. That means the device-tree has to be right
	 * for the specific part used (we don't print a warning if it's wrong
	 * but on the other hand, you'll crash quickly enough), but at least
	 * this code should work whatever the hard coded value is
	 */
	plb_addr &= 0xffffffffull;

	/* Note: Due to the above hack, the test below doesn't actually test
	 * whether your address is above 4G, but rather that the address and
	 * (address + size) are both contained in the same 4G
	 */
	if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
	    size < 0x1000 || (plb_addr & (size - 1)) != 0) {
		printk(KERN_WARNING "%pOF: Resource out of range\n", hose->dn);
		return -1;
	}
	ma = (0xffffffffu << ilog2(size)) | 1;
	if (flags & IORESOURCE_PREFETCH)
		ma |= 2;
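	/* For example, a 256MB window gives ma = 0xf0000001: the size mask in
	 * the top bits, bit 1 set for prefetchable windows and what is
	 * presumably the window enable in bit 0 (the probe code disables the
	 * PMMs by writing 0 here).
	 */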

	pciha = RES_TO_U32_HIGH(pci_addr);
	pcila = RES_TO_U32_LOW(pci_addr);

	writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
	writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
	writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
	writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));

	return 0;
}

static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
					     void __iomem *reg)
{
	int i, j, found_isa_hole = 0;

	/* Setup outbound memory windows */
	for (i = j = 0; i < 3; i++) {
		struct resource *res = &hose->mem_resources[i];
		resource_size_t offset = hose->mem_offset[i];

		/* we only care about memory windows */
		if (!(res->flags & IORESOURCE_MEM))
			continue;
		if (j > 2) {
			printk(KERN_WARNING "%pOF: Too many ranges\n", hose->dn);
			break;
		}

		/* Configure the resource */
		if (ppc4xx_setup_one_pci_PMM(hose, reg,
					     res->start,
					     res->start - offset,
					     resource_size(res),
					     res->flags,
					     j) == 0) {
			j++;

			/* If the resource PCI address is 0 then we have our
			 * ISA memory hole
			 */
			if (res->start == offset)
				found_isa_hole = 1;
		}
	}

	/* Handle ISA memory hole if not already covered */
	if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
		if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
					     hose->isa_mem_size, 0, j) == 0)
			printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
			       hose->dn);
}

static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
					     void __iomem *reg,
					     const struct resource *res)
{
	resource_size_t size = resource_size(res);
	u32 sa;

	/* Calculate window size */
	sa = (0xffffffffu << ilog2(size)) | 1;
	sa |= 0x1;

	/* RAM is always at 0 local for now */
	writel(0, reg + PCIL0_PTM1LA);
	writel(sa, reg + PCIL0_PTM1MS);
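	/* As for the outbound PMMs, sa encodes the window size as an inverted
	 * mask plus a low enable bit, e.g. 0xf0000001 for a 256MB inbound
	 * window.
	 */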

	/* Map on PCI side */
	early_write_config_dword(hose, hose->first_busno, 0,
				 PCI_BASE_ADDRESS_1, res->start);
	early_write_config_dword(hose, hose->first_busno, 0,
				 PCI_BASE_ADDRESS_2, 0x00000000);
	early_write_config_word(hose, hose->first_busno, 0,
				PCI_COMMAND, 0x0006);
}

static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
{
	/* NYI */
	struct resource rsrc_cfg;
	struct resource rsrc_reg;
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	void __iomem *reg = NULL;
	const int *bus_range;
	int primary = 0;

	/* Check if device is enabled */
	if (!of_device_is_available(np)) {
		printk(KERN_INFO "%pOF: Port disabled via device-tree\n", np);
		return;
	}

	/* Fetch config space registers address */
	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
		printk(KERN_ERR "%pOF: Can't get PCI config register base !",
		       np);
		return;
	}
	/* Fetch host bridge internal registers address */
	if (of_address_to_resource(np, 3, &rsrc_reg)) {
		printk(KERN_ERR "%pOF: Can't get PCI internal register base !",
		       np);
		return;
	}

	/* Check if primary bridge */
	if (of_property_read_bool(np, "primary"))
		primary = 1;

	/* Get bus range if any */
	bus_range = of_get_property(np, "bus-range", NULL);

	/* Map registers */
	reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
	if (reg == NULL) {
		printk(KERN_ERR "%pOF: Can't map registers !", np);
		goto fail;
	}

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(np);
	if (!hose)
		goto fail;

	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Setup config space */
	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);

	/* Disable all windows */
	writel(0, reg + PCIL0_PMM0MA);
	writel(0, reg + PCIL0_PMM1MA);
	writel(0, reg + PCIL0_PMM2MA);
	writel(0, reg + PCIL0_PTM1MS);
	writel(0, reg + PCIL0_PTM2MS);

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, np, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pci_PMMs(hose, reg);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);

	/* We don't need the registers anymore */
	iounmap(reg);
	return;

 fail:
	if (hose)
		pcibios_free_controller(hose);
	if (reg)
		iounmap(reg);
}

/*
 * 4xx PCI-X part
 */

static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller	*hose,
					    void __iomem		*reg,
					    u64				plb_addr,
					    u64				pci_addr,
					    u64				size,
					    unsigned int		flags,
					    int				index)
{
	u32 lah, lal, pciah, pcial, sa;

	if (!is_power_of_2(size) || size < 0x1000 ||
	    (plb_addr & (size - 1)) != 0) {
		printk(KERN_WARNING "%pOF: Resource out of range\n",
		       hose->dn);
		return -1;
	}

	/* Calculate register values */
	lah = RES_TO_U32_HIGH(plb_addr);
	lal = RES_TO_U32_LOW(plb_addr);
	pciah = RES_TO_U32_HIGH(pci_addr);
	pcial = RES_TO_U32_LOW(pci_addr);
	sa = (0xffffffffu << ilog2(size)) | 0x1;

	/* Program register values */
	if (index == 0) {
		writel(lah, reg + PCIX0_POM0LAH);
		writel(lal, reg + PCIX0_POM0LAL);
		writel(pciah, reg + PCIX0_POM0PCIAH);
		writel(pcial, reg + PCIX0_POM0PCIAL);
		writel(sa, reg + PCIX0_POM0SA);
	} else {
		writel(lah, reg + PCIX0_POM1LAH);
		writel(lal, reg + PCIX0_POM1LAL);
		writel(pciah, reg + PCIX0_POM1PCIAH);
		writel(pcial, reg + PCIX0_POM1PCIAL);
		writel(sa, reg + PCIX0_POM1SA);
	}

	return 0;
}

static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
					      void __iomem *reg)
{
	int i, j, found_isa_hole = 0;

	/* Setup outbound memory windows */
	for (i = j = 0; i < 3; i++) {
		struct resource *res = &hose->mem_resources[i];
		resource_size_t offset = hose->mem_offset[i];

		/* we only care about memory windows */
		if (!(res->flags & IORESOURCE_MEM))
			continue;
		if (j > 1) {
			printk(KERN_WARNING "%pOF: Too many ranges\n", hose->dn);
			break;
		}

		/* Configure the resource */
		if (ppc4xx_setup_one_pcix_POM(hose, reg,
					      res->start,
					      res->start - offset,
					      resource_size(res),
					      res->flags,
					      j) == 0) {
			j++;

			/* If the resource PCI address is 0 then we have our
			 * ISA memory hole
			 */
			if (res->start == offset)
				found_isa_hole = 1;
		}
	}

	/* Handle ISA memory hole if not already covered */
	if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
		if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
					      hose->isa_mem_size, 0, j) == 0)
			printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
			       hose->dn);
}

static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
					      void __iomem *reg,
					      const struct resource *res,
					      int big_pim,
					      int enable_msi_hole)
{
	resource_size_t size = resource_size(res);
	u32 sa;

	/* RAM is always at 0 */
	writel(0x00000000, reg + PCIX0_PIM0LAH);
	writel(0x00000000, reg + PCIX0_PIM0LAL);

	/* Calculate window size */
	sa = (0xffffffffu << ilog2(size)) | 1;
	sa |= 0x1;
	if (res->flags & IORESOURCE_PREFETCH)
		sa |= 0x2;
	if (enable_msi_hole)
		sa |= 0x4;
	writel(sa, reg + PCIX0_PIM0SA);
	if (big_pim)
		writel(0xffffffff, reg + PCIX0_PIM0SAH);

	/* Map on PCI side */
	writel(0x00000000, reg + PCIX0_BAR0H);
	writel(res->start, reg + PCIX0_BAR0L);
	writew(0x0006, reg + PCIX0_COMMAND);
}

static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
{
	struct resource rsrc_cfg;
	struct resource rsrc_reg;
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	void __iomem *reg = NULL;
	const int *bus_range;
	int big_pim, msi, primary;

	/* Fetch config space registers address */
	if (of_address_to_resource(np, 0, &rsrc_cfg)) {
		printk(KERN_ERR "%pOF: Can't get PCI-X config register base !",
		       np);
		return;
	}
	/* Fetch host bridge internal registers address */
	if (of_address_to_resource(np, 3, &rsrc_reg)) {
		printk(KERN_ERR "%pOF: Can't get PCI-X internal register base !",
		       np);
		return;
	}

	/* Check if it supports large PIMs (440GX) */
	big_pim = of_property_read_bool(np, "large-inbound-windows");

	/* Check if we should enable MSIs inbound hole */
	msi = of_property_read_bool(np, "enable-msi-hole");

	/* Check if primary bridge */
	primary = of_property_read_bool(np, "primary");

	/* Get bus range if any */
	bus_range = of_get_property(np, "bus-range", NULL);

	/* Map registers */
	reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
	if (reg == NULL) {
		printk(KERN_ERR "%pOF: Can't map registers !", np);
		goto fail;
	}

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(np);
	if (!hose)
		goto fail;

	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Setup config space */
	setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4,
					PPC_INDIRECT_TYPE_SET_CFG_TYPE);

	/* Disable all windows */
	writel(0, reg + PCIX0_POM0SA);
	writel(0, reg + PCIX0_POM1SA);
	writel(0, reg + PCIX0_POM2SA);
	writel(0, reg + PCIX0_PIM0SA);
	writel(0, reg + PCIX0_PIM1SA);
	writel(0, reg + PCIX0_PIM2SA);
	if (big_pim) {
		writel(0, reg + PCIX0_PIM0SAH);
		writel(0, reg + PCIX0_PIM2SAH);
	}

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, np, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pcix_POMs(hose, reg);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);

	/* We don't need the registers anymore */
	iounmap(reg);
	return;

 fail:
	if (hose)
		pcibios_free_controller(hose);
	if (reg)
		iounmap(reg);
}

#ifdef CONFIG_PPC4xx_PCI_EXPRESS

/*
 * 4xx PCI-Express part
 *
 * The supported parts are selected based on the compatible property;
 * currently handled are:
 *
 * ibm,plb-pciex-440spe
 * ibm,plb-pciex-405ex
 * ibm,plb-pciex-460ex
 * ibm,plb-pciex-460sx
 * ibm,plb-pciex-apm821xx
 * ibm,plb-pciex-476fpe / ibm,plb-pciex-476gtr
 *
 * Anything else will be rejected for now as the parts are all subtly
 * different, unfortunately.
 *
 */

#define MAX_PCIE_BUS_MAPPED	0x40
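
/* Each mapped bus takes a 1MB slice of the remapped config space (see
 * ppc4xx_pciex_get_config_base(), which shifts the bus number by 20 bits),
 * so 0x40 buses correspond to 64MB of the config window per port.
 */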

struct ppc4xx_pciex_port
{
	struct pci_controller	*hose;
	struct device_node	*node;
	unsigned int		index;
	int			endpoint;
	int			link;
	int			has_ibpre;
	unsigned int		sdr_base;
	dcr_host_t		dcrs;
	struct resource		cfg_space;
	struct resource		utl_regs;
	void __iomem		*utl_base;
};

static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
static unsigned int ppc4xx_pciex_port_count;

struct ppc4xx_pciex_hwops
{
	bool want_sdr;
	int (*core_init)(struct device_node *np);
	int (*port_init_hw)(struct ppc4xx_pciex_port *port);
	int (*setup_utl)(struct ppc4xx_pciex_port *port);
	void (*check_link)(struct ppc4xx_pciex_port *port);
};
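
/* The hwops instance actually used is selected in
 * ppc4xx_pciex_check_core_init() below, based on the host bridge node's
 * compatible property, and stored in the pointer that follows.
 */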

static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;

static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
					   unsigned int sdr_offset,
					   unsigned int mask,
					   unsigned int value,
					   int timeout_ms)
{
	u32 val;

	while (timeout_ms--) {
		val = mfdcri(SDR0, port->sdr_base + sdr_offset);
		if ((val & mask) == value) {
			pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
				 port->index, sdr_offset, timeout_ms, val);
			return 0;
		}
		msleep(1);
	}
	return -1;
}

static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
{
	/* Wait for reset to complete */
	if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
		printk(KERN_WARNING "PCIE%d: PGRST failed\n",
		       port->index);
		return -1;
	}
	return 0;
}

static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
{
	printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);

	/* Check for card presence detect if supported, if not, just wait for
	 * link unconditionally.
	 *
	 * note that we don't fail if there is no link, we just filter out
	 * config space accesses. That way, it will be easier to implement
	 * hotplug later on.
	 */
	if (!port->has_ibpre ||
	    !ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
				      1 << 28, 1 << 28, 100)) {
		printk(KERN_INFO
		       "PCIE%d: Device detected, waiting for link...\n",
		       port->index);
		if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
					     0x1000, 0x1000, 2000))
			printk(KERN_WARNING
			       "PCIE%d: Link up failed\n", port->index);
		else {
			printk(KERN_INFO
			       "PCIE%d: link is up !\n", port->index);
			port->link = 1;
		}
	} else
		printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
}

#ifdef CONFIG_44x

/* Check various reset bits of the 440SPe PCIe core */
static int __init ppc440spe_pciex_check_reset(struct device_node *np)
{
	u32 valPE0, valPE1, valPE2;
	int err = 0;

	/* SDR0_PEGPLLLCT1 reset */
	if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
		/*
		 * the PCIe core was probably already initialised
		 * by firmware - let's re-reset RCSSET regs
		 *
		 * -- Shouldn't we also re-reset the whole thing ? -- BenH
		 */
		pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
		mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
		mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
		mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
	}

	valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
	valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
	valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);

	/* SDR0_PExRCSSET rstgu */
	if (!(valPE0 & 0x01000000) ||
	    !(valPE1 & 0x01000000) ||
	    !(valPE2 & 0x01000000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET rstdl */
	if (!(valPE0 & 0x00010000) ||
	    !(valPE1 & 0x00010000) ||
	    !(valPE2 & 0x00010000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET rstpyn */
	if ((valPE0 & 0x00001000) ||
	    (valPE1 & 0x00001000) ||
	    (valPE2 & 0x00001000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET hldplb */
	if ((valPE0 & 0x10000000) ||
	    (valPE1 & 0x10000000) ||
	    (valPE2 & 0x10000000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET rdy */
	if ((valPE0 & 0x00100000) ||
	    (valPE1 & 0x00100000) ||
	    (valPE2 & 0x00100000)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
		err = -1;
	}

	/* SDR0_PExRCSSET shutdown */
	if ((valPE0 & 0x00000100) ||
	    (valPE1 & 0x00000100) ||
	    (valPE2 & 0x00000100)) {
		printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
		err = -1;
	}

	return err;
}

/* Global PCIe core initializations for 440SPe core */
static int __init ppc440spe_pciex_core_init(struct device_node *np)
{
	int time_out = 20;

	/* Set PLL clock receiver to LVPECL */
	dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28);

	/* Shouldn't we do all the calibration stuff etc... here ? */
	if (ppc440spe_pciex_check_reset(np))
		return -ENXIO;

	if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
		printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
		       "failed (0x%08x)\n",
		       mfdcri(SDR0, PESDR0_PLLLCT2));
		return -1;
	}

	/* De-assert reset of PCIe PLL, wait for lock */
	dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0);
	udelay(3);

	while (time_out) {
		if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
			time_out--;
			udelay(1);
		} else
			break;
	}
	if (!time_out) {
		printk(KERN_INFO "PCIE: VCO output not locked\n");
		return -1;
	}

	pr_debug("PCIE initialization OK\n");

	return 3;
}

static int __init ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	u32 val = 1 << 24;

	if (port->endpoint)
		val = PTYPE_LEGACY_ENDPOINT << 20;
	else
		val = PTYPE_ROOT_PORT << 20;

	if (port->index == 0)
		val |= LNKW_X8 << 12;
	else
		val |= LNKW_X4 << 12;

	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
	if (ppc440spe_revA())
		mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
	mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
	if (port->index == 0) {
		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
		       0x35000000);
		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
		       0x35000000);
		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
		       0x35000000);
		mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
		       0x35000000);
	}
	dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
			(1 << 24) | (1 << 16), 1 << 12);

	return ppc4xx_pciex_port_reset_sdr(port);
}

static int __init ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	return ppc440spe_pciex_init_port_hw(port);
}

static int __init ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	int rc = ppc440spe_pciex_init_port_hw(port);

	port->has_ibpre = 1;

	return rc;
}

static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	/* XXX Check what that value means... I hate magic */
	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);

	/*
	 * Set buffer allocations and then assert VRB and TXE.
	 */
	out_be32(port->utl_base + PEUTL_OUTTR,   0x08000000);
	out_be32(port->utl_base + PEUTL_INTR,    0x02000000);
	out_be32(port->utl_base + PEUTL_OPDBSZ,  0x10000000);
	out_be32(port->utl_base + PEUTL_PBBSZ,   0x53000000);
	out_be32(port->utl_base + PEUTL_IPHBSZ,  0x08000000);
	out_be32(port->utl_base + PEUTL_IPDBSZ,  0x10000000);
	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
	out_be32(port->utl_base + PEUTL_PCTL,    0x80800066);

	return 0;
}

static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	/* Report CRS to the operating system */
	out_be32(port->utl_base + PEUTL_PBCTL,    0x08000000);

	return 0;
}

static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
{
	.want_sdr	= true,
	.core_init	= ppc440spe_pciex_core_init,
	.port_init_hw	= ppc440speA_pciex_init_port_hw,
	.setup_utl	= ppc440speA_pciex_init_utl,
	.check_link	= ppc4xx_pciex_check_link_sdr,
};

static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
{
	.want_sdr	= true,
	.core_init	= ppc440spe_pciex_core_init,
	.port_init_hw	= ppc440speB_pciex_init_port_hw,
	.setup_utl	= ppc440speB_pciex_init_utl,
	.check_link	= ppc4xx_pciex_check_link_sdr,
};

static int __init ppc460ex_pciex_core_init(struct device_node *np)
{
	/* Nothing to do, return 2 ports */
	return 2;
}

static int __init ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	u32 val;
	u32 utlset1;

	if (port->endpoint)
		val = PTYPE_LEGACY_ENDPOINT << 20;
	else
		val = PTYPE_ROOT_PORT << 20;

	if (port->index == 0) {
		val |= LNKW_X1 << 12;
		utlset1 = 0x20000000;
	} else {
		val |= LNKW_X4 << 12;
		utlset1 = 0x20101101;
	}

	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000);

	switch (port->index) {
	case 0:
		mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
		mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);

		mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
		break;

	case 1:
		mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230);
		mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130);
		mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006);
		mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006);
		mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006);
		mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006);

		mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST, 0x10000000);
		break;
	}

	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
	       mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
	       (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));

	/* Poll for PHY reset */
	/* XXX FIXME add timeout */
	switch (port->index) {
	case 0:
		while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1))
			udelay(10);
		break;
	case 1:
		while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1))
			udelay(10);
		break;
	}

	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
	       (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
		~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
	       PESDRx_RCSSET_RSTPYN);

	port->has_ibpre = 1;

	return ppc4xx_pciex_port_reset_sdr(port);
}

static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);

	/*
	 * Set buffer allocations and then assert VRB and TXE.
	 */
	out_be32(port->utl_base + PEUTL_PBCTL,	 0x0800000c);
	out_be32(port->utl_base + PEUTL_OUTTR,	 0x08000000);
	out_be32(port->utl_base + PEUTL_INTR,	 0x02000000);
	out_be32(port->utl_base + PEUTL_OPDBSZ,	 0x04000000);
	out_be32(port->utl_base + PEUTL_PBBSZ,	 0x00000000);
	out_be32(port->utl_base + PEUTL_IPHBSZ,	 0x02000000);
	out_be32(port->utl_base + PEUTL_IPDBSZ,	 0x04000000);
	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
	out_be32(port->utl_base + PEUTL_PCTL,	 0x80800066);

	return 0;
}

static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
{
	.want_sdr	= true,
	.core_init	= ppc460ex_pciex_core_init,
	.port_init_hw	= ppc460ex_pciex_init_port_hw,
	.setup_utl	= ppc460ex_pciex_init_utl,
	.check_link	= ppc4xx_pciex_check_link_sdr,
};

static int __init apm821xx_pciex_core_init(struct device_node *np)
{
	/* Return the number of PCIe ports */
	return 1;
}

static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	u32 val;

	/*
	 * Do a software reset on the PCIe ports.
	 * This works around the issue that the PCI core doesn't re-assign
	 * bus numbers to PCIe devices after U-Boot has already scanned and
	 * configured all the buses (e.g. the Intel Pro/1000 PT quad-port
	 * PCIe NIC, SAS LSI 1064E).
	 */

	mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0);
	mdelay(10);

	if (port->endpoint)
		val = PTYPE_LEGACY_ENDPOINT << 20;
	else
		val = PTYPE_ROOT_PORT << 20;

	val |= LNKW_X1 << 12;

	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);

	mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
	mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
	mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);

	mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
	mdelay(50);
	mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000);

	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
		mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
		(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));

	/* Poll for PHY reset */
	val = PESDR0_460EX_RSTSTA - port->sdr_base;
	if (ppc4xx_pciex_wait_on_sdr(port, val, 0x1, 1, 100)) {
		printk(KERN_WARNING "%s: PCIE: Can't reset PHY\n", __func__);
		return -EBUSY;
	} else {
		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
			(mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
			~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
			PESDRx_RCSSET_RSTPYN);

		port->has_ibpre = 1;
		return 0;
	}
}

static struct ppc4xx_pciex_hwops apm821xx_pcie_hwops __initdata = {
	.want_sdr	= true,
	.core_init	= apm821xx_pciex_core_init,
	.port_init_hw	= apm821xx_pciex_init_port_hw,
	.setup_utl	= ppc460ex_pciex_init_utl,
	.check_link	= ppc4xx_pciex_check_link_sdr,
};

static int __init ppc460sx_pciex_core_init(struct device_node *np)
{
	/* HSS drive amplitude */
	mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211);

	mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211);

	mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211);
	mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211);

	/* HSS TX pre-emphasis */
	mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987);

	mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987);

	mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987);
	mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987);

	/* HSS TX calibration control */
	mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222);
	mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000);
	mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000);

	/* HSS TX slew control */
	mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF);
	mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000);
	mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000);

	/* Set HSS PRBS enabled */
	mtdcri(SDR0, PESDR0_460SX_HSSCTLSET, 0x00001130);
	mtdcri(SDR0, PESDR2_460SX_HSSCTLSET, 0x00001130);

	udelay(100);

	/* De-assert PLLRESET */
	dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0);

	/* Reset DL, UTL, GPL before configuration */
	mtdcri(SDR0, PESDR0_460SX_RCSSET,
			PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
	mtdcri(SDR0, PESDR1_460SX_RCSSET,
			PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
	mtdcri(SDR0, PESDR2_460SX_RCSSET,
			PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);

	udelay(100);

	/*
	 * If bifurcation is not enabled, u-boot would have disabled the
	 * third PCIe port
	 */
	if ((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) ==
	    0x00000001) {
		printk(KERN_INFO "PCI: PCIE bifurcation set up successfully.\n");
		printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n");
		return 3;
	}

	printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n");
	return 2;
}

static int __init ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	if (port->endpoint)
		dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
				0x01000000, 0);
	else
		dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
				0, 0x01000000);

	dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
			(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL),
			PESDRx_RCSSET_RSTPYN);

	port->has_ibpre = 1;

	return ppc4xx_pciex_port_reset_sdr(port);
}

static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	/* Max 128 Bytes */
	out_be32(port->utl_base + PEUTL_PBBSZ,   0x00000000);
	/* Assert VRB and TXE - per datasheet turn off addr validation */
	out_be32(port->utl_base + PEUTL_PCTL,  0x80800000);
	return 0;
}

static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
{
	void __iomem *mbase;
	int attempt = 50;

	port->link = 0;

	mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
	if (mbase == NULL) {
		printk(KERN_ERR "%pOF: Can't map internal config space !",
			port->node);
		return;
	}

	while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
			& PECFG_460SX_DLLSTA_LINKUP))) {
		attempt--;
		mdelay(10);
	}
	if (attempt)
		port->link = 1;
	iounmap(mbase);
}

static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
	.want_sdr	= true,
	.core_init	= ppc460sx_pciex_core_init,
	.port_init_hw	= ppc460sx_pciex_init_port_hw,
	.setup_utl	= ppc460sx_pciex_init_utl,
	.check_link	= ppc460sx_pciex_check_link,
};

#endif /* CONFIG_44x */

#ifdef CONFIG_40x

static int __init ppc405ex_pciex_core_init(struct device_node *np)
{
	/* Nothing to do, return 2 ports */
	return 2;
}

static void __init ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
{
	/* Assert the PE0_PHY reset */
	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
	msleep(1);

	/* deassert the PE0_hotreset */
	if (port->endpoint)
		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
	else
		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);

	/* poll for phy !reset */
	/* XXX FIXME add timeout */
	while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
		;

	/* deassert the PE0_gpl_utl_reset */
	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
}

static int __init ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	u32 val;

	if (port->endpoint)
		val = PTYPE_LEGACY_ENDPOINT;
	else
		val = PTYPE_ROOT_PORT;

	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
	       1 << 24 | val << 20 | LNKW_X1 << 12);

	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
	mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
	mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);

	/*
	 * Only reset the PHY when no link is currently established.
	 * This is for the Atheros PCIe board which has problems to establish
	 * the link (again) after this PHY reset. All other currently tested
	 * PCIe boards don't show this problem.
	 * This has to be re-tested and fixed in a later release!
	 */
	val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
	if (!(val & 0x00001000))
		ppc405ex_pcie_phy_reset(port);

	dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000);  /* guarded on */

	port->has_ibpre = 1;

	return ppc4xx_pciex_port_reset_sdr(port);
}

static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
	dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);

	/*
	 * Set buffer allocations and then assert VRB and TXE.
	 */
	out_be32(port->utl_base + PEUTL_OUTTR,   0x02000000);
	out_be32(port->utl_base + PEUTL_INTR,    0x02000000);
	out_be32(port->utl_base + PEUTL_OPDBSZ,  0x04000000);
	out_be32(port->utl_base + PEUTL_PBBSZ,   0x21000000);
	out_be32(port->utl_base + PEUTL_IPHBSZ,  0x02000000);
	out_be32(port->utl_base + PEUTL_IPDBSZ,  0x04000000);
	out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
	out_be32(port->utl_base + PEUTL_PCTL,    0x80800066);

	out_be32(port->utl_base + PEUTL_PBCTL,   0x08000000);

	return 0;
}

static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
{
	.want_sdr	= true,
	.core_init	= ppc405ex_pciex_core_init,
	.port_init_hw	= ppc405ex_pciex_init_port_hw,
	.setup_utl	= ppc405ex_pciex_init_utl,
	.check_link	= ppc4xx_pciex_check_link_sdr,
};

#endif /* CONFIG_40x */

#ifdef CONFIG_476FPE
static int __init ppc_476fpe_pciex_core_init(struct device_node *np)
{
	return 4;
}

static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port)
{
	u32 timeout_ms = 20;
	u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT);
	void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000,
	                              0x1000);

	printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);

	if (mbase == NULL) {
		printk(KERN_WARNING "PCIE%d: failed to get cfg space\n",
		                    port->index);
		return;
	}

	while (timeout_ms--) {
		val = in_le32(mbase + PECFG_TLDLP);

		if ((val & mask) == mask)
			break;
		msleep(10);
	}

	if (val & PECFG_TLDLP_PRESENT) {
		printk(KERN_INFO "PCIE%d: link is up !\n", port->index);
		port->link = 1;
	} else
		printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index);

	iounmap(mbase);
}

static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata =
{
	.core_init	= ppc_476fpe_pciex_core_init,
	.check_link	= ppc_476fpe_pciex_check_link,
};
#endif /* CONFIG_476FPE */

/* Check that the core has been initialized and if not, do it */
static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
{
	static int core_init;
	int count = -ENODEV;

	if (core_init++)
		return 0;

#ifdef CONFIG_44x
	if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
		if (ppc440spe_revA())
			ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
		else
			ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
	}
	if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
		ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
	if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
		ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
	if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx"))
		ppc4xx_pciex_hwops = &apm821xx_pcie_hwops;
#endif /* CONFIG_44x */
#ifdef CONFIG_40x
	if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
		ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
#endif
#ifdef CONFIG_476FPE
	if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe")
		|| of_device_is_compatible(np, "ibm,plb-pciex-476gtr"))
		ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops;
#endif
	if (ppc4xx_pciex_hwops == NULL) {
		printk(KERN_WARNING "PCIE: unknown host type %pOF\n", np);
		return -ENODEV;
	}

	count = ppc4xx_pciex_hwops->core_init(np);
	if (count > 0) {
		ppc4xx_pciex_ports =
		       kcalloc(count, sizeof(struct ppc4xx_pciex_port),
			       GFP_KERNEL);
		if (ppc4xx_pciex_ports) {
			ppc4xx_pciex_port_count = count;
			return 0;
		}
		printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
		return -ENOMEM;
	}
	return -ENODEV;
}

static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
{
	/* We map PCI Express configuration based on the reg property */
	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
		  RES_TO_U32_HIGH(port->cfg_space.start));
	dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
		  RES_TO_U32_LOW(port->cfg_space.start));

	/* XXX FIXME: Use size from reg property. For now, map 512M */
	dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);
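	/* 0xe0000001 presumably means a 0xe0000000 address mask (i.e. the
	 * 512MB region mentioned above) with the low bit acting as the
	 * window enable, matching the other PEGPL mask registers.
	 */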

	/* We map UTL registers based on the reg property */
	dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
		  RES_TO_U32_HIGH(port->utl_regs.start));
	dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
		  RES_TO_U32_LOW(port->utl_regs.start));

	/* XXX FIXME: Use size from reg property */
	dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);

	/* Disable all other outbound windows */
	dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
	dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
	dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
	dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
}

static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
{
	int rc = 0;

	/* Init HW */
	if (ppc4xx_pciex_hwops->port_init_hw)
		rc = ppc4xx_pciex_hwops->port_init_hw(port);
	if (rc != 0)
		return rc;

	/*
	 * Initialize mapping: disable all regions and configure
	 * CFG and REG regions based on resources in the device tree
	 */
	ppc4xx_pciex_port_init_mapping(port);

	if (ppc4xx_pciex_hwops->check_link)
		ppc4xx_pciex_hwops->check_link(port);

	/*
	 * Map UTL
	 */
	port->utl_base = ioremap(port->utl_regs.start, 0x100);
	BUG_ON(port->utl_base == NULL);

	/*
	 * Setup UTL registers --BenH.
	 */
	if (ppc4xx_pciex_hwops->setup_utl)
		ppc4xx_pciex_hwops->setup_utl(port);

	/*
	 * Check for VC0 active or PLL Locked and assert RDY.
	 */
	if (port->sdr_base) {
		if (of_device_is_compatible(port->node,
				"ibm,plb-pciex-460sx")) {
			if (port->link && ppc4xx_pciex_wait_on_sdr(port,
					PESDRn_RCSSTS,
					1 << 12, 1 << 12, 5000)) {
				printk(KERN_INFO "PCIE%d: PLL not locked\n",
						port->index);
				port->link = 0;
			}
		} else if (port->link &&
			ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
				1 << 16, 1 << 16, 5000)) {
			printk(KERN_INFO "PCIE%d: VC0 not active\n",
					port->index);
			port->link = 0;
		}

		dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
	}

	msleep(100);

	return 0;
}

static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
				     struct pci_bus *bus,
				     unsigned int devfn)
{
	static int message;

	/* An endpoint cannot generate upstream (remote) config cycles */
	if (port->endpoint && bus->number != port->hose->first_busno)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check we are within the mapped range */
	if (bus->number > port->hose->last_busno) {
		if (!message) {
			printk(KERN_WARNING "Warning! Probing bus %u"
			       " out of range !\n", bus->number);
			message++;
		}
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/* The root complex has only one device / function */
	if (bus->number == port->hose->first_busno && devfn != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* The other side of the RC has only one device as well */
	if (bus->number == (port->hose->first_busno + 1) &&
	    PCI_SLOT(devfn) != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check if we have a link */
	if ((bus->number != port->hose->first_busno) && !port->link)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return 0;
}

static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
						  struct pci_bus *bus,
						  unsigned int devfn)
{
	int relbus;

	/* Remove the casts when we finally remove the stupid volatile
	 * in struct pci_controller
	 */
	if (bus->number == port->hose->first_busno)
		return (void __iomem *)port->hose->cfg_addr;

	relbus = bus->number - (port->hose->first_busno + 1);
	return (void __iomem *)port->hose->cfg_data +
		((relbus << 20) | (devfn << 12));
}
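
/* In other words, each downstream bus gets a 1MB slice of the remapped
 * config space and each devfn a 4KB slice within it, so bus B, device D,
 * function F is reached at cfg_data + ((B - first_busno - 1) << 20) +
 * (devfn << 12).
 */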

static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
				    int offset, int len, u32 *val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct ppc4xx_pciex_port *port =
		&ppc4xx_pciex_ports[hose->indirect_type];
	void __iomem *addr;
	u32 gpl_cfg;

	BUG_ON(hose != port->hose);

	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);

	/*
	 * Reading from configuration space of non-existing device can
	 * generate transaction errors. For the read duration we suppress
	 * assertion of machine check exceptions to avoid those.
	 */
	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);

	/* Make sure no CRS is recorded */
	out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);

	switch (len) {
	case 1:
		*val = in_8((u8 *)(addr + offset));
		break;
	case 2:
		*val = in_le16((u16 *)(addr + offset));
		break;
	default:
		*val = in_le32((u32 *)(addr + offset));
		break;
	}

	pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
		 bus->number, hose->first_busno, hose->last_busno,
		 devfn, offset, len, addr + offset, *val);

	/* Check for CRS (440SPe rev B does that for us but heh ..) */
	if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
		pr_debug("Got CRS !\n");
		if (len != 4 || offset != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;
		*val = 0xffff0001;
	}

	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);

	return PCIBIOS_SUCCESSFUL;
}

static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
				     int offset, int len, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct ppc4xx_pciex_port *port =
		&ppc4xx_pciex_ports[hose->indirect_type];
	void __iomem *addr;
	u32 gpl_cfg;

	if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = ppc4xx_pciex_get_config_base(port, bus, devfn);

	/*
	 * Writing to the configuration space of a non-existing device can
	 * generate transaction errors. For the write duration we suppress
	 * assertion of machine check exceptions to avoid those.
	 */
	gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);

	pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
		 " offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
		 bus->number, hose->first_busno, hose->last_busno,
		 devfn, offset, len, addr + offset, val);

	switch (len) {
	case 1:
		out_8((u8 *)(addr + offset), val);
		break;
	case 2:
		out_le16((u16 *)(addr + offset), val);
		break;
	default:
		out_le32((u32 *)(addr + offset), val);
		break;
	}

	dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops ppc4xx_pciex_pci_ops =
{
	.read  = ppc4xx_pciex_read_config,
	.write = ppc4xx_pciex_write_config,
};

static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port	*port,
					     struct pci_controller	*hose,
					     void __iomem		*mbase,
					     u64			plb_addr,
					     u64			pci_addr,
					     u64			size,
					     unsigned int		flags,
					     int			index)
{
	u32 lah, lal, pciah, pcial, sa;

	if (!is_power_of_2(size) ||
	    (index < 2 && size < 0x100000) ||
	    (index == 2 && size < 0x100) ||
	    (plb_addr & (size - 1)) != 0) {
		printk(KERN_WARNING "%pOF: Resource out of range\n", hose->dn);
		return -1;
	}

	/* Calculate register values */
	lah = RES_TO_U32_HIGH(plb_addr);
	lal = RES_TO_U32_LOW(plb_addr);
	pciah = RES_TO_U32_HIGH(pci_addr);
	pcial = RES_TO_U32_LOW(pci_addr);
	sa = (0xffffffffu << ilog2(size)) | 0x1;

	/* Program register values */
	switch (index) {
	case 0:
		out_le32(mbase + PECFG_POM0LAH, pciah);
		out_le32(mbase + PECFG_POM0LAL, pcial);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
		/* Enabled and single region */
		if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
				sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT
					| DCRO_PEGPL_OMRxMSKL_VAL);
		else if (of_device_is_compatible(
				port->node, "ibm,plb-pciex-476fpe") ||
			of_device_is_compatible(
				port->node, "ibm,plb-pciex-476gtr"))
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
				sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT
					| DCRO_PEGPL_OMRxMSKL_VAL);
		else
			dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
				sa | DCRO_PEGPL_OMR1MSKL_UOT
					| DCRO_PEGPL_OMRxMSKL_VAL);
		break;
	case 1:
		out_le32(mbase + PECFG_POM1LAH, pciah);
		out_le32(mbase + PECFG_POM1LAL, pcial);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL,
				sa | DCRO_PEGPL_OMRxMSKL_VAL);
		break;
	case 2:
		out_le32(mbase + PECFG_POM2LAH, pciah);
		out_le32(mbase + PECFG_POM2LAL, pcial);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
		/* Note that 3 here means enabled | IO space !!! */
		dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL,
				sa | DCRO_PEGPL_OMR3MSKL_IO
					| DCRO_PEGPL_OMRxMSKL_VAL);
		break;
	}

	return 0;
}

static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
					       struct pci_controller *hose,
					       void __iomem *mbase)
{
	int i, j, found_isa_hole = 0;

	/* Setup outbound memory windows */
	for (i = j = 0; i < 3; i++) {
		struct resource *res = &hose->mem_resources[i];
		resource_size_t offset = hose->mem_offset[i];

		/* we only care about memory windows */
		if (!(res->flags & IORESOURCE_MEM))
			continue;
		if (j > 1) {
			printk(KERN_WARNING "%pOF: Too many ranges\n",
			       port->node);
			break;
		}

		/* Configure the resource */
		if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
					       res->start,
					       res->start - offset,
					       resource_size(res),
					       res->flags,
					       j) == 0) {
			j++;

			/* If the resource PCI address is 0 then we have our
			 * ISA memory hole
			 */
			if (res->start == offset)
				found_isa_hole = 1;
		}
	}

	/* Handle ISA memory hole if not already covered */
	if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
		if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
					       hose->isa_mem_phys, 0,
					       hose->isa_mem_size, 0, j) == 0)
			printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
			       hose->dn);

	/* Configure IO, always 64K starting at 0. We hard wire it to 64K !
	 * Note also that it -has- to be region index 2 on this HW
	 */
	if (hose->io_resource.flags & IORESOURCE_IO)
		ppc4xx_setup_one_pciex_POM(port, hose, mbase,
					   hose->io_base_phys, 0,
					   0x10000, IORESOURCE_IO, 2);
}
1835
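/* Program the PCIe inbound mapping registers (PIMs). In endpoint mode a
 * fixed window is exposed through BAR0 and mapped to PLB address 0; in
 * root-complex mode the window is sized from the dma-ranges derived
 * resource passed in by the caller.
 */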
static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
					       struct pci_controller *hose,
					       void __iomem *mbase,
					       struct resource *res)
{
	resource_size_t size = resource_size(res);
	u64 sa;

	if (port->endpoint) {
		resource_size_t ep_addr = 0;
		resource_size_t ep_size = 32 << 20;

		/* Currently we map a fixed 32MByte window to PLB address
		 * 0 (SDRAM). This should probably be configurable via a dts
		 * property.
		 */

		/* Calculate window size */
		sa = (0xffffffffffffffffull << ilog2(ep_size));
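		/* sa now has 1s in every address bit above log2(ep_size);
		 * written into the BAR0 mask and PIM size registers below,
		 * it determines the size of the inbound window.
		 */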

		/* Setup BAR0 */
		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
		out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
			 PCI_BASE_ADDRESS_MEM_TYPE_64);

		/* Disable BAR1 & BAR2 */
		out_le32(mbase + PECFG_BAR1MPA, 0);
		out_le32(mbase + PECFG_BAR2HMPA, 0);
		out_le32(mbase + PECFG_BAR2LMPA, 0);

		out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
		out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));

		out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
		out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
	} else {
		/* Calculate window size */
		sa = (0xffffffffffffffffull << ilog2(size));
		if (res->flags & IORESOURCE_PREFETCH)
			sa |= PCI_BASE_ADDRESS_MEM_PREFETCH;

		if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") ||
		    of_device_is_compatible(
			    port->node, "ibm,plb-pciex-476fpe") ||
		    of_device_is_compatible(
			    port->node, "ibm,plb-pciex-476gtr"))
			sa |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
		out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));

		/* The setup of the split looks weird to me ... let's see
		 * if it works
		 */
		out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
		out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
		out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
		out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
		out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
		out_le32(mbase + PECFG_PIM01SAL, 0x00000000);

		out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
		out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
	}

	/* Enable inbound mapping */
	out_le32(mbase + PECFG_PIMEN, 0x1);

	/* Enable I/O, Mem, and Busmaster cycles */
	out_le16(mbase + PCI_COMMAND,
		 in_le16(mbase + PCI_COMMAND) |
		 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
}

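/* Build the Linux host bridge (hose) for one PCIe port: map the internal
 * and, for root complexes, the external config space, program bus numbers
 * and outbound/inbound windows, and set the vendor/device IDs and class
 * code according to root-complex or endpoint mode.
 */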
static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
{
	struct resource dma_window;
	struct pci_controller *hose = NULL;
	const int *bus_range;
	int primary, busses;
	void __iomem *mbase = NULL, *cfg_data = NULL;
	const u32 *pval;
	u32 val;

	/* Check if primary bridge */
	primary = of_property_read_bool(port->node, "primary");

	/* Get bus range if any */
	bus_range = of_get_property(port->node, "bus-range", NULL);

	/* Allocate the host controller data structure */
	hose = pcibios_alloc_controller(port->node);
	if (!hose)
		goto fail;

	/* We stick the port number in "indirect_type" so the config space
	 * ops can retrieve the port data structure easily
	 */
	hose->indirect_type = port->index;

	/* Get bus range */
	hose->first_busno = bus_range ? bus_range[0] : 0x0;
	hose->last_busno = bus_range ? bus_range[1] : 0xff;

	/* Because the config space mapping is large (1M per bus), we limit
	 * how many busses we support. In the long run, we could replace
	 * that with something akin to kmap_atomic instead. We set aside 1 bus
	 * for the host itself too.
	 */
	busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
	if (busses > MAX_PCIE_BUS_MAPPED) {
		busses = MAX_PCIE_BUS_MAPPED;
		hose->last_busno = hose->first_busno + busses;
	}

	if (!port->endpoint) {
		/* Only map the external config space in cfg_data for
		 * PCIe root-complexes. External space is 1M per bus
		 */
		cfg_data = ioremap(port->cfg_space.start +
				   (hose->first_busno + 1) * 0x100000,
				   busses * 0x100000);
		if (cfg_data == NULL) {
			printk(KERN_ERR "%pOF: Can't map external config space !\n",
			       port->node);
			goto fail;
		}
		hose->cfg_data = cfg_data;
	}

	/* Always map the host config space in cfg_addr.
	 * Internal space is 4K
	 */
	mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
	if (mbase == NULL) {
		printk(KERN_ERR "%pOF: Can't map internal config space !\n",
		       port->node);
		goto fail;
	}
	hose->cfg_addr = mbase;

	pr_debug("PCIE %pOF, bus %d..%d\n", port->node,
		 hose->first_busno, hose->last_busno);
	pr_debug("     config space mapped at: root @0x%p, other @0x%p\n",
		 hose->cfg_addr, hose->cfg_data);

	/* Setup config space */
	hose->ops = &ppc4xx_pciex_pci_ops;
	port->hose = hose;
	mbase = (void __iomem *)hose->cfg_addr;

	if (!port->endpoint) {
		/*
		 * Set bus numbers on our root port
		 */
		out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
		out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
		out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
	}

	/*
	 * OMRs are already reset, also disable PIMs
	 */
	out_le32(mbase + PECFG_PIMEN, 0);

	/* Parse outbound mapping resources */
	pci_process_bridge_OF_ranges(hose, port->node, primary);

	/* Parse inbound mapping resources */
	if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
		goto fail;

	/* Configure outbound ranges POMs */
	ppc4xx_configure_pciex_POMs(port, hose, mbase);

	/* Configure inbound ranges PIMs */
	ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);

	/* The root complex doesn't show up if we don't set some vendor
	 * and device IDs into it. The defaults below are the same bogus
	 * ones that the initial code in arch/ppc had. They can be
	 * overridden by setting the "vendor-id"/"device-id" properties
	 * in the pciex node.
	 */

	/* Get the (optional) vendor-/device-id from the device-tree */
	pval = of_get_property(port->node, "vendor-id", NULL);
	if (pval) {
		val = *pval;
	} else {
		if (!port->endpoint)
			val = 0xaaa0 + port->index;
		else
			val = 0xeee0 + port->index;
	}
	out_le16(mbase + 0x200, val);

	pval = of_get_property(port->node, "device-id", NULL);
	if (pval) {
		val = *pval;
	} else {
		if (!port->endpoint)
			val = 0xbed0 + port->index;
		else
			val = 0xfed0 + port->index;
	}
	out_le16(mbase + 0x202, val);

	/* Enable bus master, memory, and I/O space */
	if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
		out_le16(mbase + 0x204, 0x7);

	if (!port->endpoint) {
		/* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
		out_le32(mbase + 0x208, 0x06040001);

		printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
		       port->index);
	} else {
		/* Set Class Code to Processor/PPC */
		out_le32(mbase + 0x208, 0x0b200001);

		printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
		       port->index);
	}

	return;
 fail:
	if (hose)
		pcibios_free_controller(hose);
	if (cfg_data)
		iounmap(cfg_data);
	if (mbase)
		iounmap(mbase);
}

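/* Probe one PCIe bridge node from the device-tree: initialize the (shared)
 * core if needed, read the port number, resources and DCRs, determine
 * root-complex vs endpoint mode from device_type, then run the port and
 * hose setup.
 */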
static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
{
	struct ppc4xx_pciex_port *port;
	const u32 *pval;
	int portno;
	unsigned int dcrs;

	/* First, proceed to core initialization as we assume there's
	 * only one PCIe core in the system
	 */
	if (ppc4xx_pciex_check_core_init(np))
		return;

	/* Get the port number from the device-tree */
	pval = of_get_property(np, "port", NULL);
	if (pval == NULL) {
		printk(KERN_ERR "PCIE: Can't find port number for %pOF\n", np);
		return;
	}
	portno = *pval;
	if (portno >= ppc4xx_pciex_port_count) {
		printk(KERN_ERR "PCIE: port number out of range for %pOF\n",
		       np);
		return;
	}
	port = &ppc4xx_pciex_ports[portno];
	port->index = portno;

	/*
	 * Check if device is enabled
	 */
	if (!of_device_is_available(np)) {
		printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", port->index);
		return;
	}

	port->node = of_node_get(np);
	if (ppc4xx_pciex_hwops->want_sdr) {
		pval = of_get_property(np, "sdr-base", NULL);
		if (pval == NULL) {
			printk(KERN_ERR "PCIE: missing sdr-base for %pOF\n",
			       np);
			return;
		}
		port->sdr_base = *pval;
	}

	/* Check whether the device_type property is "pci" or "pci-endpoint".
	 * Depending on this, the PCIe port will be configured as a
	 * root-complex or as an endpoint.
	 */
	if (of_node_is_type(port->node, "pci-endpoint")) {
		port->endpoint = 1;
	} else if (of_node_is_type(port->node, "pci")) {
		port->endpoint = 0;
	} else {
		printk(KERN_ERR "PCIE: missing or incorrect device_type for %pOF\n",
		       np);
		return;
	}

	/* Fetch config space registers address */
	if (of_address_to_resource(np, 0, &port->cfg_space)) {
		printk(KERN_ERR "%pOF: Can't get PCI-E config space !\n", np);
		return;
	}
	/* Fetch host bridge internal registers address */
	if (of_address_to_resource(np, 1, &port->utl_regs)) {
		printk(KERN_ERR "%pOF: Can't get UTL register base !\n", np);
		return;
	}

	/* Map DCRs */
	dcrs = dcr_resource_start(np, 0);
	if (dcrs == 0) {
		printk(KERN_ERR "%pOF: Can't get DCR register base !\n", np);
		return;
	}
	port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));

	/* Initialize the port specific registers */
	if (ppc4xx_pciex_port_init(port)) {
		printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
		return;
	}

	/* Setup the linux hose data structure */
	ppc4xx_pciex_port_setup_hose(port);
}

#endif /* CONFIG_PPC4xx_PCI_EXPRESS */

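/* Top-level probe: walk the device-tree and set up every supported 4xx
 * PCI, PCI-X and PCI Express host bridge.
 */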
static int __init ppc4xx_pci_find_bridges(void)
{
	struct device_node *np;

	pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);

#ifdef CONFIG_PPC4xx_PCI_EXPRESS
	for_each_compatible_node(np, NULL, "ibm,plb-pciex")
		ppc4xx_probe_pciex_bridge(np);
#endif
	for_each_compatible_node(np, NULL, "ibm,plb-pcix")
		ppc4xx_probe_pcix_bridge(np);
	for_each_compatible_node(np, NULL, "ibm,plb-pci")
		ppc4xx_probe_pci_bridge(np);

	return 0;
}
arch_initcall(ppc4xx_pci_find_bridges);