// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for Rockchip SoCs
 *
 * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
 *		http://www.rock-chips.com
 *
 * Author: Simon Xue <xxm@rock-chips.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/phy/pcie.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/rfkill-wlan.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/pci-epf.h>

#include "pcie-designware.h"
#include "../../pci.h"
#include "../rockchip-pcie-dma.h"

enum rk_pcie_device_mode {
	RK_PCIE_EP_TYPE,
	RK_PCIE_RC_TYPE,
};

struct reset_bulk_data {
	const char *id;
	struct reset_control *rst;
};

#define PCIE_DMA_OFFSET			0x380000

#define PCIE_DMA_WR_ENB			0xc
#define PCIE_DMA_WR_CTRL_LO		0x200
#define PCIE_DMA_WR_CTRL_HI		0x204
#define PCIE_DMA_WR_XFERSIZE		0x208
#define PCIE_DMA_WR_SAR_PTR_LO		0x20c
#define PCIE_DMA_WR_SAR_PTR_HI		0x210
#define PCIE_DMA_WR_DAR_PTR_LO		0x214
#define PCIE_DMA_WR_DAR_PTR_HI		0x218
#define PCIE_DMA_WR_WEILO		0x18
#define PCIE_DMA_WR_WEIHI		0x1c
#define PCIE_DMA_WR_DOORBELL		0x10
#define PCIE_DMA_WR_INT_STATUS		0x4c
#define PCIE_DMA_WR_INT_MASK		0x54
#define PCIE_DMA_WR_INT_CLEAR		0x58

#define PCIE_DMA_RD_ENB			0x2c
#define PCIE_DMA_RD_CTRL_LO		0x300
#define PCIE_DMA_RD_CTRL_HI		0x304
#define PCIE_DMA_RD_XFERSIZE		0x308
#define PCIE_DMA_RD_SAR_PTR_LO		0x30c
#define PCIE_DMA_RD_SAR_PTR_HI		0x310
#define PCIE_DMA_RD_DAR_PTR_LO		0x314
#define PCIE_DMA_RD_DAR_PTR_HI		0x318
#define PCIE_DMA_RD_WEILO		0x38
#define PCIE_DMA_RD_WEIHI		0x3c
#define PCIE_DMA_RD_DOORBELL		0x30
#define PCIE_DMA_RD_INT_STATUS		0xa0
#define PCIE_DMA_RD_INT_MASK		0xa8
#define PCIE_DMA_RD_INT_CLEAR		0xac
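
/*
 * The registers above follow the DesignWare embedded-DMA (eDMA) layout
 * inside the DBI space, based at PCIE_DMA_OFFSET. The enable, doorbell
 * and interrupt registers are global per direction, while each channel's
 * context registers (CTRL/XFERSIZE/SAR/DAR) sit in a per-channel block:
 * rk_pcie_start_dma_dwc() computes that block's base as
 * PCIE_DMA_OFFSET + chn * 0x200.
 */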

/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_IATU_MIN		9000
#define LINK_WAIT_IATU_MAX		10000

#define PCIE_DIRECT_SPEED_CHANGE	(0x1 << 17)

#define PCIE_TYPE0_STATUS_COMMAND_REG	0x4
#define PCIE_TYPE0_BAR0_REG		0x10

#define PCIE_CAP_LINK_CONTROL2_LINK_STATUS	0xa0

#define PCIE_CLIENT_INTR_STATUS_LEGACY	0x08
#define PCIE_CLIENT_INTR_STATUS_MISC	0x10
#define PCIE_CLIENT_INTR_MASK_LEGACY	0x1c
#define UNMASK_ALL_LEGACY_INT		0xffff0000
#define MASK_LEGACY_INT(x)		(0x00110011 << x)
#define UNMASK_LEGACY_INT(x)		(0x00110000 << x)
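/*
 * As with most Rockchip APB/GRF registers, bits [31:16] of the legacy
 * mask register are a write-enable mask for bits [15:0]: only masked
 * bits take the written value. MASK_LEGACY_INT() therefore sets (and
 * write-enables) the mask bits for one INTx line, while
 * UNMASK_LEGACY_INT() write-enables the same bits with a zero value.
 */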
#define PCIE_CLIENT_INTR_MASK		0x24
#define PCIE_CLIENT_GENERAL_DEBUG	0x104
#define PCIE_CLIENT_HOT_RESET_CTRL	0x180
#define PCIE_LTSSM_ENABLE_ENHANCE	BIT(4)
#define PCIE_CLIENT_LTSSM_STATUS	0x300
#define SMLH_LINKUP			BIT(16)
#define RDLH_LINKUP			BIT(17)
#define PCIE_CLIENT_DBG_FIFO_MODE_CON	0x310
#define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0	0x320
#define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1	0x324
#define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0	0x328
#define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1	0x32c
#define PCIE_CLIENT_DBG_FIFO_STATUS	0x350
#define PCIE_CLIENT_DBG_TRANSITION_DATA	0xffff0000
#define PCIE_CLIENT_DBF_EN		0xffff0003
#define RK_PCIE_DBG			0

#define PCIE_PHY_LINKUP			BIT(0)
#define PCIE_DATA_LINKUP		BIT(1)

#define PCIE_SB_BAR0_MASK_REG		0x100010
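/*
 * Mask register for the shadow copy of BAR0: 0x10 is the BAR0 offset,
 * presumably within a DBI2 ("shadow") register block assumed to sit at
 * +0x100000 in this address map.
 */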

#define PCIE_PL_ORDER_RULE_CTRL_OFF	0x8B4

struct rk_pcie {
	struct dw_pcie			*pci;
	enum rk_pcie_device_mode	mode;
	enum phy_mode			phy_mode;
	int				phy_sub_mode;
	unsigned char			bar_to_atu[6];
	phys_addr_t			*outbound_addr;
	unsigned long			*ib_window_map;
	unsigned long			*ob_window_map;
	unsigned int			num_ib_windows;
	unsigned int			num_ob_windows;
	void __iomem			*dbi_base;
	void __iomem			*apb_base;
	struct phy			*phy;
	struct clk_bulk_data		*clks;
	unsigned int			clk_cnt;
	struct reset_bulk_data		*rsts;
	struct gpio_desc		*rst_gpio;
	phys_addr_t			mem_start;
	size_t				mem_size;
	struct pcie_port		pp;
	struct regmap			*usb_pcie_grf;
	struct regmap			*pmu_grf;
	struct dma_trx_obj		*dma_obj;
	bool				in_suspend;
	bool				skip_scan_in_resume;
	bool				is_rk1808;
	bool				is_signal_test;
	bool				bifurcation;
	struct regulator		*vpcie3v3;
	struct irq_domain		*irq_domain;
	raw_spinlock_t			intx_lock;
};

struct rk_pcie_of_data {
	enum rk_pcie_device_mode mode;
};

#define to_rk_pcie(x)	dev_get_drvdata((x)->dev)

static int rk_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int rk_pcie_write(void __iomem *addr, int size, u32 val)
{
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

static u32 __rk_pcie_read_apb(struct rk_pcie *rk_pcie, void __iomem *base,
			      u32 reg, size_t size)
{
	int ret;
	u32 val;

	ret = rk_pcie_read(base + reg, size, &val);
	if (ret)
		dev_err(rk_pcie->pci->dev, "Read APB address failed\n");

	return val;
}

static void __rk_pcie_write_apb(struct rk_pcie *rk_pcie, void __iomem *base,
				u32 reg, size_t size, u32 val)
{
	int ret;

	ret = rk_pcie_write(base + reg, size, val);
	if (ret)
		dev_err(rk_pcie->pci->dev, "Write APB address failed\n");
}

static inline u32 rk_pcie_readl_apb(struct rk_pcie *rk_pcie, u32 reg)
{
	return __rk_pcie_read_apb(rk_pcie, rk_pcie->apb_base, reg, 0x4);
}

static inline void rk_pcie_writel_apb(struct rk_pcie *rk_pcie, u32 reg,
				      u32 val)
{
	__rk_pcie_write_apb(rk_pcie, rk_pcie->apb_base, reg, 0x4, val);
}

static u8 rk_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}
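/*
 * DWC cores with the "unrolled" iATU have no viewport register; it reads
 * back as all ones, which is what the probe above relies on. This mirrors
 * the detection done by dw_pcie_iatu_unroll_enabled() in pcie-designware.c.
 */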

static void rk_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
	int ret;

	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
		return;
	}

	ret = dw_pcie_write(pci->atu_base + reg, 4, val);
	if (ret)
		dev_err(pci->dev, "Write ATU address failed\n");
}

static void rk_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	rk_pcie_writel_atu(pci, offset + reg, val);
}

static u32 rk_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
{
	int ret;
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);

	ret = dw_pcie_read(pci->atu_base + reg, 4, &val);
	if (ret)
		dev_err(pci->dev, "Read ATU address failed\n");

	return val;
}

static u32 rk_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);

	return rk_pcie_readl_atu(pci, offset + reg);
}

static int rk_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
					   int index, int bar, u64 cpu_addr,
					   enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
				 PCIE_ATU_FUNC_NUM(func_no));
	rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_FUNC_NUM_MATCH_EN |
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = rk_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

static int rk_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
				    int bar, u64 cpu_addr,
				    enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	if (pci->iatu_unroll_enabled)
		return rk_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
						       cpu_addr, as_type);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
			   index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
			   PCIE_ATU_FUNC_NUM(func_no));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
			   PCIE_ATU_FUNC_NUM_MATCH_EN |
			   PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}

static int rk_pcie_ep_inbound_atu(struct rk_pcie *rk_pcie,
				  enum pci_barno bar, dma_addr_t cpu_addr,
				  enum dw_pcie_as_type as_type)
{
	int ret;
	u32 free_win;
	u8 func_no = 0x0;

	if (rk_pcie->in_suspend) {
		free_win = rk_pcie->bar_to_atu[bar];
	} else {
		free_win = find_first_zero_bit(rk_pcie->ib_window_map,
					       rk_pcie->num_ib_windows);
		if (free_win >= rk_pcie->num_ib_windows) {
			dev_err(rk_pcie->pci->dev, "No free inbound window\n");
			return -EINVAL;
		}
	}

	ret = rk_pcie_prog_inbound_atu(rk_pcie->pci, func_no, free_win, bar,
				       cpu_addr, as_type);
	if (ret < 0) {
		dev_err(rk_pcie->pci->dev, "Failed to program IB window\n");
		return ret;
	}

	if (rk_pcie->in_suspend)
		return 0;

	rk_pcie->bar_to_atu[bar] = free_win;
	set_bit(free_win, rk_pcie->ib_window_map);

	return 0;
}

static void rk_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	rk_pcie_writel_atu(pci, offset + reg, val);
}

static u32 rk_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return rk_pcie_readl_atu(pci, offset + reg);
}

static void rk_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
					     int index, int type,
					     u64 cpu_addr, u64 pci_addr,
					     u32 size)
{
	u32 retries, val;
	u64 limit_addr = cpu_addr + size - 1;

	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
				 lower_32_bits(limit_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
				 upper_32_bits(limit_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type | PCIE_ATU_FUNC_NUM(func_no));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = rk_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

static void rk_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
				      int type, u64 cpu_addr, u64 pci_addr,
				      u32 size)
{
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		rk_pcie_prog_outbound_atu_unroll(pci, 0x0, index, type,
						 cpu_addr, pci_addr, size);
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
			   PCIE_ATU_FUNC_NUM(0x0));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}

static int rk_pcie_ep_outbound_atu(struct rk_pcie *rk_pcie,
				   phys_addr_t phys_addr, u64 pci_addr,
				   size_t size)
{
	u32 free_win;

	if (rk_pcie->in_suspend) {
		free_win = find_first_bit(rk_pcie->ob_window_map,
					  rk_pcie->num_ob_windows);
	} else {
		free_win = find_first_zero_bit(rk_pcie->ob_window_map,
					       rk_pcie->num_ob_windows);
		if (free_win >= rk_pcie->num_ob_windows) {
			dev_err(rk_pcie->pci->dev, "No free outbound window\n");
			return -EINVAL;
		}
	}

	rk_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM,
				  phys_addr, pci_addr, size);

	if (rk_pcie->in_suspend)
		return 0;

	set_bit(free_win, rk_pcie->ob_window_map);
	rk_pcie->outbound_addr[free_win] = phys_addr;

	return 0;
}

static void __rk_pcie_ep_reset_bar(struct rk_pcie *rk_pcie,
				   enum pci_barno bar, int flags)
{
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	dw_pcie_writel_dbi(rk_pcie->pci, reg, 0x0);
	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
		dw_pcie_writel_dbi(rk_pcie->pci, reg + 4, 0x0);
}

static void rk_pcie_ep_reset_bar(struct rk_pcie *rk_pcie, enum pci_barno bar)
{
	__rk_pcie_ep_reset_bar(rk_pcie, bar, 0);
}

static int rk_pcie_ep_atu_init(struct rk_pcie *rk_pcie)
{
	int ret;
	enum pci_barno bar;
	enum dw_pcie_as_type as_type;
	dma_addr_t cpu_addr;
	phys_addr_t phys_addr;
	u64 pci_addr;
	size_t size;

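	/*
	 * Map BAR0 inbound onto the reserved memory region, then open a
	 * single 2 GB outbound window with an identity CPU-to-bus
	 * translation starting at address 0.
	 */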
	for (bar = BAR_0; bar <= BAR_5; bar++)
		rk_pcie_ep_reset_bar(rk_pcie, bar);

	cpu_addr = rk_pcie->mem_start;
	as_type = DW_PCIE_AS_MEM;
	ret = rk_pcie_ep_inbound_atu(rk_pcie, BAR_0, cpu_addr, as_type);
	if (ret)
		return ret;

	phys_addr = 0x0;
	pci_addr = 0x0;
	size = SZ_2G;
	ret = rk_pcie_ep_outbound_atu(rk_pcie, phys_addr, pci_addr, size);
	if (ret)
		return ret;

	return 0;
}

static inline void rk_pcie_set_mode(struct rk_pcie *rk_pcie)
{
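	/*
	 * PCIE_CLIENT_GENERAL_CON (APB offset 0x0): the high half of the
	 * written value is the write-enable mask for the low half, and
	 * bits [7:4] select the device type, so 0xf00000 programs EP mode
	 * and 0xf00040 RC mode. The exact field layout is assumed from
	 * the SoC TRM rather than visible in this file.
	 */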
	switch (rk_pcie->mode) {
	case RK_PCIE_EP_TYPE:
		rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00000);
		break;
	case RK_PCIE_RC_TYPE:
		rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00040);
		/*
		 * Disable the ordering rule so a CPL can't pass a halted
		 * P queue. The producer-consumer model still needs to be
		 * checked. RK1808 platform only.
		 */
		if (rk_pcie->is_rk1808)
			dw_pcie_writel_dbi(rk_pcie->pci,
					   PCIE_PL_ORDER_RULE_CTRL_OFF,
					   0xff00);
		break;
	}
}

static inline void rk_pcie_link_status_clear(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG, 0x0);
}

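/*
 * The LTSSM enable also lives at APB offset 0x0: both writes below carry
 * a write mask of bits [3:2] in the high half. 0xc000c sets bit 2
 * (assumed to be app_ltssm_enable) and 0xc0008 clears it, with bit 3
 * kept set either way; the bit assignment is taken from the TRM, not
 * from this file.
 */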
static inline void rk_pcie_disable_ltssm(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, 0x0, 0xc0008);
}

static inline void rk_pcie_enable_ltssm(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, 0x0, 0xc000c);
}

static int rk_pcie_link_up(struct dw_pcie *pci)
{
	struct rk_pcie *rk_pcie = to_rk_pcie(pci);
	u32 val;

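	/* On DWC cores an LTSSM state of 0x11 is L0, i.e. link fully up. */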
	if (rk_pcie->is_rk1808) {
		val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG);
		if ((val & (PCIE_PHY_LINKUP | PCIE_DATA_LINKUP)) == 0x3 &&
		    ((val & GENMASK(15, 10)) >> 10) == 0x11)
			return 1;
	} else {
		val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS);
		if ((val & (RDLH_LINKUP | SMLH_LINKUP)) == 0x30000)
			return 1;
	}

	return 0;
}

static void rk_pcie_enable_debug(struct rk_pcie *rk_pcie)
{
#if RK_PCIE_DBG
	if (rk_pcie->is_rk1808)
		return;

	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_MODE_CON,
			   PCIE_CLIENT_DBF_EN);
#endif
}

static void rk_pcie_debug_dump(struct rk_pcie *rk_pcie)
{
#if RK_PCIE_DBG
	u32 loop;
	struct dw_pcie *pci = rk_pcie->pci;

	dev_info(pci->dev, "ltssm = 0x%x\n",
		 rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
	for (loop = 0; loop < 64; loop++)
		dev_info(pci->dev, "fifo_status = 0x%x\n",
			 rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_STATUS));
#endif
}

static int rk_pcie_establish_link(struct dw_pcie *pci)
{
	int retries, power;
	struct rk_pcie *rk_pcie = to_rk_pcie(pci);
	bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;

	/*
	 * For a standard RC, even if the link has been set up by firmware,
	 * we still need to reset it: all resource info, for instance the
	 * BARs, must be removed from the devices since it wasn't assigned
	 * by the kernel.
	 */
	if (dw_pcie_link_up(pci) && !std_rc) {
		dev_err(pci->dev, "link is already up\n");
		return 0;
	}

	/* Reset the device */
	gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0);

	rk_pcie_disable_ltssm(rk_pcie);
	rk_pcie_link_status_clear(rk_pcie);
	rk_pcie_enable_debug(rk_pcie);

	/* Enable client reset or link down interrupt */
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0x40000);

	/* Enable LTSSM */
	rk_pcie_enable_ltssm(rk_pcie);

	/*
	 * In the resume path, function drivers must resume after the
	 * controller has. Some devices, such as Wi-Fi modules, need special
	 * I/O settings before they can finish training, so waiting here
	 * would only time out: such devices are rescanned by their own
	 * driver when used, and there is no need to waste time waiting for
	 * training to pass.
	 */
	if (rk_pcie->in_suspend && rk_pcie->skip_scan_in_resume) {
		rfkill_get_wifi_power_state(&power);
		if (!power) {
			gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
			return 0;
		}
	}

	/*
	 * PCIe requires the refclk to be stable for 100µs prior to releasing
	 * PERST#, and T_PVPERL (power stable to PERST# inactive) must be a
	 * minimum of 100ms; see table 2-4 in section 2.6.2 of the PCI Express
	 * Card Electromechanical Specification 3.0. So 100ms in total is the
	 * minimum requirement here; we use 200ms in the hope that everything
	 * works fine.
	 */
	msleep(200);
	gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);

	/*
	 * Add this 1ms delay: we observe that the link is reliably up after
	 * it, and it saves about 20ms when scanning devices.
	 */
	usleep_range(1000, 1100);

	for (retries = 0; retries < 100; retries++) {
		if (dw_pcie_link_up(pci)) {
			/*
			 * We may get here with L0 in Gen1. If the EP is
			 * capable of Gen2 or Gen3, the speed change may
			 * happen right now, and we would keep accessing
			 * devices over an unstable link. Given that the
			 * LTSSM maximum timeout is 24ms per period, wait
			 * a bit longer for the speed change to settle.
			 */
			msleep(50);
			dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n",
				 rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
			rk_pcie_debug_dump(rk_pcie);
			return 0;
		}

		dev_info_ratelimited(pci->dev, "PCIe Linking... LTSSM is 0x%x\n",
				     rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
		rk_pcie_debug_dump(rk_pcie);
		msleep(20);
	}

	dev_err(pci->dev, "PCIe Link Fail\n");

	return rk_pcie->is_signal_test ? 0 : -EINVAL;
}

static int rk_pcie_host_init_dma_trx(struct rk_pcie *rk_pcie)
{
	rk_pcie->dma_obj = rk_pcie_dma_obj_probe(rk_pcie->pci->dev);
	if (IS_ERR(rk_pcie->dma_obj)) {
		dev_err(rk_pcie->pci->dev, "failed to prepare dma object\n");
		return -EINVAL;
	}

	/* Enable client write and read interrupt */
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);

	/* Enable core write interrupt */
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK,
			   0x0);
	/* Enable core read interrupt */
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK,
			   0x0);
	return 0;
}

static int rk_pci_find_resbar_capability(struct rk_pcie *rk_pcie)
{
	u32 header;
	int ttl;
	int start = 0;
	int pos = PCI_CFG_SPACE_SIZE;
	int cap = PCI_EXT_CAP_ID_REBAR;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	header = dw_pcie_readl_dbi(rk_pcie->pci, pos);

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
		if (!header)
			break;
	}

	return 0;
}

static void rk_pcie_ep_setup(struct rk_pcie *rk_pcie)
{
	int ret;
	u32 val;
	u32 lanes;
	struct device *dev = rk_pcie->pci->dev;
	struct device_node *np = dev->of_node;
	int resbar_base;
	int bar;

	/* Enable client write and read interrupt */
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);

	/* Enable core write interrupt */
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK,
			   0x0);
	/* Enable core read interrupt */
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK,
			   0x0);

	ret = of_property_read_u32(np, "num-lanes", &lanes);
	if (ret)
		lanes = 0;

	/* Set the number of lanes */
	val = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(dev, "num-lanes %u: invalid value\n", lanes);
		return;
	}

	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}

	val |= PCIE_DIRECT_SPEED_CHANGE;

	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	/* Enable bus master and memory space */
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_STATUS_COMMAND_REG, 0x6);

	resbar_base = rk_pci_find_resbar_capability(rk_pcie);
	if (!resbar_base) {
		dev_warn(dev, "failed to find resizable BAR capability\n");
	} else {
		/* Resize BAR0 to support 512GB */
		dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4, 0xfffff0);
		/* Bits [13:8] set to 19 mean 2^19 MB (512GB) */
		dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8, 0x13c0);
		/* Resize BAR1 to BAR5 to 64MB */
		for (bar = 1; bar < 6; bar++) {
			dw_pcie_writel_dbi(rk_pcie->pci, resbar_base +
					   0x4 + bar * 0x8, 0xfffff0);
			dw_pcie_writel_dbi(rk_pcie->pci, resbar_base +
					   0x8 + bar * 0x8, 0x6c0);
		}
	}

	/* Device ID and class ID are needed for requesting BAR addresses */
	dw_pcie_writew_dbi(rk_pcie->pci, PCI_DEVICE_ID, 0x356a);
	dw_pcie_writew_dbi(rk_pcie->pci, PCI_CLASS_DEVICE, 0x0580);

	/* Set shadow BAR0 */
	if (rk_pcie->is_rk1808) {
		val = rk_pcie->mem_size - 1;
		dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val);
	}
}

static int rk_pcie_ep_win_parse(struct rk_pcie *rk_pcie)
{
	int ret;
	void *addr;
	struct device *dev = rk_pcie->pci->dev;
	struct device_node *np = dev->of_node;

	ret = of_property_read_u32(np, "num-ib-windows",
				   &rk_pcie->num_ib_windows);
	if (ret < 0) {
		dev_err(dev, "unable to read *num-ib-windows* property\n");
		return ret;
	}

	if (rk_pcie->num_ib_windows > MAX_IATU_IN) {
		dev_err(dev, "Invalid *num-ib-windows*\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(np, "num-ob-windows",
				   &rk_pcie->num_ob_windows);
	if (ret < 0) {
		dev_err(dev, "Unable to read *num-ob-windows* property\n");
		return ret;
	}

	if (rk_pcie->num_ob_windows > MAX_IATU_OUT) {
		dev_err(dev, "Invalid *num-ob-windows*\n");
		return -EINVAL;
	}

	rk_pcie->ib_window_map = devm_kcalloc(dev,
					      BITS_TO_LONGS(rk_pcie->num_ib_windows),
					      sizeof(long), GFP_KERNEL);
	if (!rk_pcie->ib_window_map)
		return -ENOMEM;

	rk_pcie->ob_window_map = devm_kcalloc(dev,
					      BITS_TO_LONGS(rk_pcie->num_ob_windows),
					      sizeof(long), GFP_KERNEL);
	if (!rk_pcie->ob_window_map)
		return -ENOMEM;

	addr = devm_kcalloc(dev, rk_pcie->num_ob_windows, sizeof(phys_addr_t),
			    GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	rk_pcie->outbound_addr = addr;

	return 0;
}

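/*
 * MSIs are not handled by the DWC core in this driver: supplying a stub
 * .msi_host_init keeps dw_pcie_host_init() from setting up its own MSI
 * machinery (on these SoCs MSIs are presumably delivered through an
 * external interrupt controller instead).
 */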
static int rk_pcie_msi_host_init(struct pcie_port *pp)
{
	return 0;
}

static int rk_pcie_host_init(struct pcie_port *pp)
{
	int ret;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup_rc(pp);

	ret = rk_pcie_establish_link(pci);

	return ret;
}

static const struct dw_pcie_host_ops rk_pcie_host_ops = {
	.host_init = rk_pcie_host_init,
	.msi_host_init = rk_pcie_msi_host_init,
};

static int rk_add_pcie_port(struct rk_pcie *rk_pcie)
{
	int ret;
	struct dw_pcie *pci = rk_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;

	pp->ops = &rk_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	ret = rk_pcie_host_init_dma_trx(rk_pcie);
	if (ret) {
		dev_err(dev, "failed to init host dma trx\n");
		return ret;
	}
	return 0;
}

static int rk_pcie_add_ep(struct rk_pcie *rk_pcie)
{
	int ret;
	struct device *dev = rk_pcie->pci->dev;
	struct device_node *np = dev->of_node;
	struct device_node *mem;
	struct resource reg;

	mem = of_parse_phandle(np, "memory-region", 0);
	if (!mem) {
		dev_err(dev, "missing \"memory-region\" property\n");
		return -ENODEV;
	}

	ret = of_address_to_resource(mem, 0, &reg);
	if (ret < 0) {
		dev_err(dev, "missing \"reg\" property\n");
		return ret;
	}

	rk_pcie->mem_start = reg.start;
	rk_pcie->mem_size = resource_size(&reg);

	ret = rk_pcie_ep_win_parse(rk_pcie);
	if (ret) {
		dev_err(dev, "failed to parse ep dts\n");
		return ret;
	}

	rk_pcie->pci->atu_base = rk_pcie->pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
	rk_pcie->pci->iatu_unroll_enabled = rk_pcie_iatu_unroll_enabled(rk_pcie->pci);

	ret = rk_pcie_ep_atu_init(rk_pcie);
	if (ret) {
		dev_err(dev, "failed to init ep device\n");
		return ret;
	}

	rk_pcie_ep_setup(rk_pcie);

	ret = rk_pcie_establish_link(rk_pcie->pci);
	if (ret) {
		dev_err(dev, "failed to establish pcie link\n");
		return ret;
	}

	rk_pcie->dma_obj = rk_pcie_dma_obj_probe(dev);
	if (IS_ERR(rk_pcie->dma_obj)) {
		dev_err(dev, "failed to prepare dma object\n");
		return -EINVAL;
	}

	return 0;
}

static void rk_pcie_clk_deinit(struct rk_pcie *rk_pcie)
{
	clk_bulk_disable(rk_pcie->clk_cnt, rk_pcie->clks);
	clk_bulk_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
}

static int rk_pcie_clk_init(struct rk_pcie *rk_pcie)
{
	struct device *dev = rk_pcie->pci->dev;
	struct property *prop;
	const char *name;
	int i = 0, ret, count;

	count = of_property_count_strings(dev->of_node, "clock-names");
	if (count < 1)
		return -ENODEV;

	rk_pcie->clks = devm_kcalloc(dev, count,
				     sizeof(struct clk_bulk_data),
				     GFP_KERNEL);
	if (!rk_pcie->clks)
		return -ENOMEM;

	rk_pcie->clk_cnt = count;

	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
		rk_pcie->clks[i].id = name;
		if (!rk_pcie->clks[i].id)
			return -ENOMEM;
		i++;
	}

	ret = devm_clk_bulk_get(dev, count, rk_pcie->clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare(count, rk_pcie->clks);
	if (ret)
		return ret;

	ret = clk_bulk_enable(count, rk_pcie->clks);
	if (ret) {
		clk_bulk_unprepare(count, rk_pcie->clks);
		return ret;
	}

	return 0;
}

static int rk_pcie_resource_get(struct platform_device *pdev,
				struct rk_pcie *rk_pcie)
{
	struct resource *dbi_base;
	struct resource *apb_base;

	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"pcie-dbi");
	if (!dbi_base) {
		dev_err(&pdev->dev, "get pcie-dbi failed\n");
		return -ENODEV;
	}

	rk_pcie->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
	if (IS_ERR(rk_pcie->dbi_base))
		return PTR_ERR(rk_pcie->dbi_base);

	rk_pcie->pci->dbi_base = rk_pcie->dbi_base;

	apb_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"pcie-apb");
	if (!apb_base) {
		dev_err(&pdev->dev, "get pcie-apb failed\n");
		return -ENODEV;
	}
	rk_pcie->apb_base = devm_ioremap_resource(&pdev->dev, apb_base);
	if (IS_ERR(rk_pcie->apb_base))
		return PTR_ERR(rk_pcie->apb_base);

	/*
	 * Reset the device before enabling its power, because some
	 * platforms feed an external refclk input from a 100MHz OSC chip
	 * that shares a power rail with the slot. On those boards the
	 * refclk becomes available as soon as the slot is powered, which
	 * doesn't quite follow the spec, so make sure the device is held
	 * in reset until everything is ready.
	 */
	rk_pcie->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
						    GPIOD_OUT_LOW);
	if (IS_ERR(rk_pcie->rst_gpio)) {
		dev_err(&pdev->dev, "invalid reset-gpios property in node\n");
		return PTR_ERR(rk_pcie->rst_gpio);
	}

	return 0;
}

static int rk_pcie_phy_init(struct rk_pcie *rk_pcie)
{
	int ret;
	struct device *dev = rk_pcie->pci->dev;

	rk_pcie->phy = devm_phy_get(dev, "pcie-phy");
	if (IS_ERR(rk_pcie->phy)) {
		if (PTR_ERR(rk_pcie->phy) != -EPROBE_DEFER)
			dev_info(dev, "missing phy\n");
		return PTR_ERR(rk_pcie->phy);
	}

	switch (rk_pcie->mode) {
	case RK_PCIE_RC_TYPE:
		rk_pcie->phy_mode = PHY_MODE_PCIE; /* same either way; only the sub-mode differs */
		rk_pcie->phy_sub_mode = PHY_MODE_PCIE_RC;
		break;
	case RK_PCIE_EP_TYPE:
		rk_pcie->phy_mode = PHY_MODE_PCIE;
		rk_pcie->phy_sub_mode = PHY_MODE_PCIE_EP;
		break;
	}

	ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
			       rk_pcie->phy_sub_mode);
	if (ret) {
		dev_err(dev, "fail to set phy to mode %s, err %d\n",
			(rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
			ret);
		return ret;
	}

	if (rk_pcie->bifurcation)
		ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
				       PHY_MODE_PCIE_BIFURCATION);

	ret = phy_init(rk_pcie->phy);
	if (ret < 0) {
		dev_err(dev, "fail to init phy, err %d\n", ret);
		return ret;
	}

	phy_power_on(rk_pcie->phy);

	return 0;
}

static int rk_pcie_reset_control_release(struct rk_pcie *rk_pcie)
{
	struct device *dev = rk_pcie->pci->dev;
	struct property *prop;
	const char *name;
	int ret, count, i = 0;

	count = of_property_count_strings(dev->of_node, "reset-names");
	if (count < 1)
		return -ENODEV;

	rk_pcie->rsts = devm_kcalloc(dev, count,
				     sizeof(struct reset_bulk_data),
				     GFP_KERNEL);
	if (!rk_pcie->rsts)
		return -ENOMEM;

	of_property_for_each_string(dev->of_node, "reset-names",
				    prop, name) {
		rk_pcie->rsts[i].id = name;
		if (!rk_pcie->rsts[i].id)
			return -ENOMEM;
		i++;
	}

	for (i = 0; i < count; i++) {
		rk_pcie->rsts[i].rst = devm_reset_control_get_exclusive(dev,
							rk_pcie->rsts[i].id);
		if (IS_ERR_OR_NULL(rk_pcie->rsts[i].rst)) {
			dev_err(dev, "failed to get %s\n",
				rk_pcie->rsts[i].id);
			return rk_pcie->rsts[i].rst ?
			       PTR_ERR(rk_pcie->rsts[i].rst) : -ENOENT;
		}
	}

	for (i = 0; i < count; i++) {
		ret = reset_control_deassert(rk_pcie->rsts[i].rst);
		if (ret) {
			dev_err(dev, "failed to release %s\n",
				rk_pcie->rsts[i].id);
			return ret;
		}
	}

	return 0;
}

static int rk_pcie_reset_grant_ctrl(struct rk_pcie *rk_pcie,
				    bool enable)
{
	int ret;
	u32 val = (0x1 << 18); /* Write mask bit */

	if (enable)
		val |= (0x1 << 2);

	ret = regmap_write(rk_pcie->usb_pcie_grf, 0x0, val);
	return ret;
}

static void rk_pcie_start_dma_rd(struct dma_trx_obj *obj, int ctr_off)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(obj->dev);
	struct dma_table *cur = obj->cur;

	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_ENB,
			   cur->enb.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_CTRL_LO,
			   cur->ctx_reg.ctrllo.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_CTRL_HI,
			   cur->ctx_reg.ctrlhi.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_XFERSIZE,
			   cur->ctx_reg.xfersize);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_SAR_PTR_LO,
			   cur->ctx_reg.sarptrlo);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_SAR_PTR_HI,
			   cur->ctx_reg.sarptrhi);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_DAR_PTR_LO,
			   cur->ctx_reg.darptrlo);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_DAR_PTR_HI,
			   cur->ctx_reg.darptrhi);
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_DOORBELL,
			   cur->start.asdword);
}

static void rk_pcie_start_dma_wr(struct dma_trx_obj *obj, int ctr_off)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(obj->dev);
	struct dma_table *cur = obj->cur;

	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_ENB,
			   cur->enb.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_CTRL_LO,
			   cur->ctx_reg.ctrllo.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_CTRL_HI,
			   cur->ctx_reg.ctrlhi.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_XFERSIZE,
			   cur->ctx_reg.xfersize);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_SAR_PTR_LO,
			   cur->ctx_reg.sarptrlo);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_SAR_PTR_HI,
			   cur->ctx_reg.sarptrhi);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_DAR_PTR_LO,
			   cur->ctx_reg.darptrlo);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_DAR_PTR_HI,
			   cur->ctx_reg.darptrhi);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_WEILO,
			   cur->weilo.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_DOORBELL,
			   cur->start.asdword);
}

static void rk_pcie_start_dma_dwc(struct dma_trx_obj *obj)
{
	int dir = obj->cur->dir;
	int chn = obj->cur->chn;

	int ctr_off = PCIE_DMA_OFFSET + chn * 0x200;

	if (dir == DMA_FROM_BUS)
		rk_pcie_start_dma_rd(obj, ctr_off);
	else if (dir == DMA_TO_BUS)
		rk_pcie_start_dma_wr(obj, ctr_off);
}

static void rk_pcie_config_dma_dwc(struct dma_table *table)
{
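	/*
	 * Single-block transfer: LIE enables the local "done" interrupt
	 * that rk_pcie_sys_irq_handler() consumes, RIE keeps the remote
	 * interrupt off, and TD is set as in the vendor BSP (assumed to
	 * be the TLP digest bit of the eDMA channel-control register).
	 */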
	table->enb.enb = 0x1;
	table->ctx_reg.ctrllo.lie = 0x1;
	table->ctx_reg.ctrllo.rie = 0x0;
	table->ctx_reg.ctrllo.td = 0x1;
	table->ctx_reg.ctrlhi.asdword = 0x0;
	table->ctx_reg.xfersize = table->buf_size;
	if (table->dir == DMA_FROM_BUS) {
		table->ctx_reg.sarptrlo = (u32)(table->bus & 0xffffffff);
		table->ctx_reg.sarptrhi = (u32)(table->bus >> 32);
		table->ctx_reg.darptrlo = (u32)(table->local & 0xffffffff);
		table->ctx_reg.darptrhi = (u32)(table->local >> 32);
	} else if (table->dir == DMA_TO_BUS) {
		table->ctx_reg.sarptrlo = (u32)(table->local & 0xffffffff);
		table->ctx_reg.sarptrhi = (u32)(table->local >> 32);
		table->ctx_reg.darptrlo = (u32)(table->bus & 0xffffffff);
		table->ctx_reg.darptrhi = (u32)(table->bus >> 32);
	}
	table->weilo.weight0 = 0x0;
	table->start.stop = 0x0;
	table->start.chnl = table->chn;
}

static inline void
rk_pcie_handle_dma_interrupt(struct rk_pcie *rk_pcie)
{
	struct dma_trx_obj *obj = rk_pcie->dma_obj;
	struct dma_table *cur;

	if (!obj)
		return;

	cur = obj->cur;
	if (!cur) {
		pr_err("no pcie dma table\n");
		return;
	}

	obj->dma_free = true;
	obj->irq_num++;

	if (cur->dir == DMA_TO_BUS) {
		if (list_empty(&obj->tbl_list)) {
			if (obj->dma_free &&
			    obj->loop_count >= obj->loop_count_threshold)
				complete(&obj->done);
		}
	}
}

static irqreturn_t rk_pcie_sys_irq_handler(int irq, void *arg)
{
	struct rk_pcie *rk_pcie = arg;
	u32 chn = 0;
	union int_status status;
	union int_clear clears;
	u32 reg, val;

	status.asdword = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_WR_INT_STATUS);

	if (rk_pcie->dma_obj && rk_pcie->dma_obj->cur)
		chn = rk_pcie->dma_obj->cur->chn;

	if (status.donesta & BIT(chn)) {
		clears.doneclr = 0x1 << chn;
		dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
				   PCIE_DMA_WR_INT_CLEAR, clears.asdword);
		rk_pcie_handle_dma_interrupt(rk_pcie);
	}

	if (status.abortsta & BIT(chn)) {
		dev_err(rk_pcie->pci->dev, "%s, abort\n", __func__);
		clears.abortclr = 0x1 << chn;
		dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
				   PCIE_DMA_WR_INT_CLEAR, clears.asdword);
	}

	status.asdword = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_RD_INT_STATUS);

	if (status.donesta & BIT(chn)) {
		clears.doneclr = 0x1 << chn;
		dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
				   PCIE_DMA_RD_INT_CLEAR, clears.asdword);
		rk_pcie_handle_dma_interrupt(rk_pcie);
	}

	if (status.abortsta & BIT(chn)) {
		dev_err(rk_pcie->pci->dev, "%s, abort\n", __func__);
		clears.abortclr = 0x1 << chn;
		dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
				   PCIE_DMA_RD_INT_CLEAR, clears.asdword);
	}

	reg = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MISC);
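	/*
	 * Bit 2 of the misc status is the hot-reset/link-down event
	 * unmasked in rk_pcie_establish_link(); reprogram the command
	 * register, since a reset from the far end clears it.
	 */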
	if (reg & BIT(2)) {
		/* Setup command register */
		val = dw_pcie_readl_dbi(rk_pcie->pci, PCI_COMMAND);
		val &= 0xffff0000;
		val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
		dw_pcie_writel_dbi(rk_pcie->pci, PCI_COMMAND, val);
	}

	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MISC, reg);

	return IRQ_HANDLED;
}

static int rk_pcie_request_sys_irq(struct rk_pcie *rk_pcie,
				   struct platform_device *pdev)
{
	int irq;
	int ret;

	irq = platform_get_irq_byname(pdev, "sys");
	if (irq < 0) {
		dev_err(rk_pcie->pci->dev, "missing sys IRQ resource\n");
		return -EINVAL;
	}

	ret = devm_request_irq(rk_pcie->pci->dev, irq, rk_pcie_sys_irq_handler,
			       IRQF_SHARED, "pcie-sys", rk_pcie);
	if (ret) {
		dev_err(rk_pcie->pci->dev, "failed to request PCIe subsystem IRQ\n");
		return ret;
	}

	return 0;
}

static const struct rk_pcie_of_data rk_pcie_rc_of_data = {
	.mode = RK_PCIE_RC_TYPE,
};

static const struct rk_pcie_of_data rk_pcie_ep_of_data = {
	.mode = RK_PCIE_EP_TYPE,
};

static const struct of_device_id rk_pcie_of_match[] = {
	{
		.compatible = "rockchip,rk1808-pcie",
		.data = &rk_pcie_rc_of_data,
	},
	{
		.compatible = "rockchip,rk1808-pcie-ep",
		.data = &rk_pcie_ep_of_data,
	},
	{
		.compatible = "rockchip,rk3568-pcie",
		.data = &rk_pcie_rc_of_data,
	},
	{
		.compatible = "rockchip,rk3568-pcie-ep",
		.data = &rk_pcie_ep_of_data,
	},
	{
		.compatible = "rockchip,rk3588-pcie",
		.data = &rk_pcie_rc_of_data,
	},
	{
		.compatible = "rockchip,rk3588-pcie-ep",
		.data = &rk_pcie_ep_of_data,
	},
	{},
};

MODULE_DEVICE_TABLE(of, rk_pcie_of_match);

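/*
 * For reference, a minimal (hypothetical) devicetree fragment with the
 * properties this driver parses -- "pcie-dbi"/"pcie-apb" reg names,
 * "sys"/"legacy" interrupt names, the reset GPIO and the optional
 * vpcie3v3 supply; all addresses and phandles below are illustrative
 * only:
 *
 *	pcie@fe260000 {
 *		compatible = "rockchip,rk3568-pcie";
 *		reg = <0x3c000000 0x400000>, <0xfe260000 0x10000>;
 *		reg-names = "pcie-dbi", "pcie-apb";
 *		interrupt-names = "sys", "legacy";
 *		reset-gpios = <&gpio3 1 GPIO_ACTIVE_HIGH>;
 *		vpcie3v3-supply = <&vcc3v3_pcie>;
 *		num-lanes = <2>;
 *	};
 */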
static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link = rk_pcie_establish_link,
	.link_up = rk_pcie_link_up,
};

static int rk1808_pcie_fixup(struct rk_pcie *rk_pcie, struct device_node *np)
{
	int ret;
	struct device *dev = rk_pcie->pci->dev;

	rk_pcie->usb_pcie_grf = syscon_regmap_lookup_by_phandle(np,
						"rockchip,usbpciegrf");
	if (IS_ERR(rk_pcie->usb_pcie_grf)) {
		dev_err(dev, "failed to find usb_pcie_grf regmap\n");
		return PTR_ERR(rk_pcie->usb_pcie_grf);
	}

	rk_pcie->pmu_grf = syscon_regmap_lookup_by_phandle(np,
							   "rockchip,pmugrf");
	if (IS_ERR(rk_pcie->pmu_grf)) {
		dev_err(dev, "failed to find pmugrf regmap\n");
		return PTR_ERR(rk_pcie->pmu_grf);
	}

	/* Workaround for pcie, switch to PCIe_PRSTNm0 */
	ret = regmap_write(rk_pcie->pmu_grf, 0x100, 0x01000100);
	if (ret)
		return ret;

	ret = regmap_write(rk_pcie->pmu_grf, 0x0, 0x0c000000);
	if (ret)
		return ret;

	/* release link reset grant */
	ret = rk_pcie_reset_grant_ctrl(rk_pcie, true);
	return ret;
}

static void rk_pcie_fast_link_setup(struct rk_pcie *rk_pcie)
{
	u32 val;

	/* LTSSM EN ctrl mode */
	val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL);
	val |= PCIE_LTSSM_ENABLE_ENHANCE | (PCIE_LTSSM_ENABLE_ENHANCE << 16);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL, val);
}

static void rk_pcie_legacy_irq_mask(struct irq_data *d)
{
	struct rk_pcie *rk_pcie = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&rk_pcie->intx_lock, flags);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
			   MASK_LEGACY_INT(d->hwirq));
	raw_spin_unlock_irqrestore(&rk_pcie->intx_lock, flags);
}

static void rk_pcie_legacy_irq_unmask(struct irq_data *d)
{
	struct rk_pcie *rk_pcie = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&rk_pcie->intx_lock, flags);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
			   UNMASK_LEGACY_INT(d->hwirq));
	raw_spin_unlock_irqrestore(&rk_pcie->intx_lock, flags);
}

static struct irq_chip rk_pcie_legacy_irq_chip = {
	.name		= "rk-pcie-legacy-int",
	.irq_enable	= rk_pcie_legacy_irq_unmask,
	.irq_disable	= rk_pcie_legacy_irq_mask,
	.irq_mask	= rk_pcie_legacy_irq_mask,
	.irq_unmask	= rk_pcie_legacy_irq_unmask,
	.flags		= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};

static int rk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			    irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &rk_pcie_legacy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = rk_pcie_intx_map,
};

static void rk_pcie_legacy_int_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct rk_pcie *rockchip = irq_desc_get_handler_data(desc);
	struct device *dev = rockchip->pci->dev;
	u32 reg;
	u32 hwirq;
	u32 virq;

	chained_irq_enter(chip, desc);

	reg = rk_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_LEGACY);
	reg = reg & 0xf;

	while (reg) {
		hwirq = ffs(reg) - 1;
		reg &= ~BIT(hwirq);

		virq = irq_find_mapping(rockchip->irq_domain, hwirq);
		if (virq)
			generic_handle_irq(virq);
		else
			dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
	}

	chained_irq_exit(chip, desc);
}

static int rk_pcie_init_irq_domain(struct rk_pcie *rockchip)
{
	struct device *dev = rockchip->pci->dev;
	struct device_node *intc = of_get_next_child(dev->of_node, NULL);

	if (!intc) {
		dev_err(dev, "missing child interrupt-controller node\n");
		return -EINVAL;
	}

	raw_spin_lock_init(&rockchip->intx_lock);
	rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
						     &intx_domain_ops, rockchip);
	if (!rockchip->irq_domain) {
		dev_err(dev, "failed to get a INTx IRQ domain\n");
		return -EINVAL;
	}

	return 0;
}

static int rk_pcie_enable_power(struct rk_pcie *rk_pcie)
{
	int ret = 0;
	struct device *dev = rk_pcie->pci->dev;

	if (IS_ERR(rk_pcie->vpcie3v3))
		return ret;

	ret = regulator_enable(rk_pcie->vpcie3v3);
	if (ret)
		dev_err(dev, "fail to enable vpcie3v3 regulator\n");

	return ret;
}

static int rk_pcie_disable_power(struct rk_pcie *rk_pcie)
{
	int ret = 0;
	struct device *dev = rk_pcie->pci->dev;

	if (IS_ERR(rk_pcie->vpcie3v3))
		return ret;

	ret = regulator_disable(rk_pcie->vpcie3v3);
	if (ret)
		dev_err(dev, "fail to disable vpcie3v3 regulator\n");

	return ret;
}

static int rk_pcie_really_probe(void *p)
{
	struct platform_device *pdev = p;
	struct device *dev = &pdev->dev;
	struct rk_pcie *rk_pcie;
	struct dw_pcie *pci;
	int ret;
	const struct of_device_id *match;
	const struct rk_pcie_of_data *data;
	enum rk_pcie_device_mode mode;
	struct device_node *np = pdev->dev.of_node;
	u32 val;
	int irq;

	match = of_match_device(rk_pcie_of_match, dev);
	if (!match)
		return -EINVAL;

	data = (struct rk_pcie_of_data *)match->data;
	mode = (enum rk_pcie_device_mode)data->mode;

	rk_pcie = devm_kzalloc(dev, sizeof(*rk_pcie), GFP_KERNEL);
	if (!rk_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	rk_pcie->mode = mode;
	rk_pcie->pci = pci;

	if (of_device_is_compatible(np, "rockchip,rk1808-pcie") ||
	    of_device_is_compatible(np, "rockchip,rk1808-pcie-ep"))
		rk_pcie->is_rk1808 = true;
	else
		rk_pcie->is_rk1808 = false;

	if (device_property_read_bool(dev, "rockchip,bifurcation"))
		rk_pcie->bifurcation = true;

	ret = rk_pcie_resource_get(pdev, rk_pcie);
	if (ret) {
		dev_err(dev, "resource init failed\n");
		return ret;
	}

	/* DON'T MOVE ME: must be enabled before PHY init */
	rk_pcie->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
	if (IS_ERR(rk_pcie->vpcie3v3)) {
		if (PTR_ERR(rk_pcie->vpcie3v3) != -ENODEV)
			return PTR_ERR(rk_pcie->vpcie3v3);
		dev_info(dev, "no vpcie3v3 regulator found\n");
	}

	ret = rk_pcie_enable_power(rk_pcie);
	if (ret)
		return ret;

	ret = rk_pcie_phy_init(rk_pcie);
	if (ret) {
		dev_err(dev, "phy init failed\n");
		goto disable_vpcie3v3;
	}

	ret = rk_pcie_reset_control_release(rk_pcie);
	if (ret) {
		dev_err(dev, "reset control init failed\n");
		goto disable_phy;
	}

	ret = rk_pcie_request_sys_irq(rk_pcie, pdev);
	if (ret) {
		dev_err(dev, "pcie irq init failed\n");
		goto disable_phy;
	}

	platform_set_drvdata(pdev, rk_pcie);

	ret = rk_pcie_clk_init(rk_pcie);
	if (ret) {
		dev_err(dev, "clock init failed\n");
		goto disable_phy;
	}

	dw_pcie_dbi_ro_wr_en(pci);

	if (rk_pcie->is_rk1808) {
		ret = rk1808_pcie_fixup(rk_pcie, np);
		if (ret)
			goto deinit_clk;
	} else {
		rk_pcie_fast_link_setup(rk_pcie);
	}

	/* Legacy interrupt is optional */
	ret = rk_pcie_init_irq_domain(rk_pcie);
	if (!ret) {
		irq = platform_get_irq_byname(pdev, "legacy");
		if (irq >= 0) {
			irq_set_chained_handler_and_data(irq, rk_pcie_legacy_int_handler,
							 rk_pcie);
			/* Unmask all legacy interrupts, INTA to INTD */
			rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
					   UNMASK_ALL_LEGACY_INT);
		} else {
			dev_info(dev, "missing legacy IRQ resource\n");
		}
	}

	/* Set PCIe mode */
	rk_pcie_set_mode(rk_pcie);

	/* Force into loopback master mode */
	if (device_property_read_bool(dev, "rockchip,lpbk-master")) {
		val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
		val |= PORT_LINK_LPBK_ENABLE;
		dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
		rk_pcie->is_signal_test = true;
	}

	/* Force into compliance mode */
	if (device_property_read_bool(dev, "rockchip,compliance-mode")) {
		val = dw_pcie_readl_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS);
		val |= BIT(4);
		dw_pcie_writel_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS, val);
		rk_pcie->is_signal_test = true;
	}

	/* Skip waiting for training to pass in the system PM routine */
	if (device_property_read_bool(dev, "rockchip,skip-scan-in-resume"))
		rk_pcie->skip_scan_in_resume = true;

	switch (rk_pcie->mode) {
	case RK_PCIE_RC_TYPE:
		ret = rk_add_pcie_port(rk_pcie);
		break;
	case RK_PCIE_EP_TYPE:
		ret = rk_pcie_add_ep(rk_pcie);
		break;
	}

	if (rk_pcie->is_signal_test)
		return 0;

	if (ret)
		goto remove_irq_domain;

	if (rk_pcie->dma_obj) {
		rk_pcie->dma_obj->start_dma_func = rk_pcie_start_dma_dwc;
		rk_pcie->dma_obj->config_dma_func = rk_pcie_config_dma_dwc;
	}

	if (rk_pcie->is_rk1808) {
		/* hold link reset grant after link-up */
		ret = rk_pcie_reset_grant_ctrl(rk_pcie, false);
		if (ret)
			goto remove_irq_domain;
	}

	dw_pcie_dbi_ro_wr_dis(pci);

	device_init_wakeup(dev, true);

	/* Enable async system PM for multi-port SoCs */
	device_enable_async_suspend(dev);

	return 0;

remove_irq_domain:
	if (rk_pcie->irq_domain)
		irq_domain_remove(rk_pcie->irq_domain);
disable_phy:
	phy_power_off(rk_pcie->phy);
	phy_exit(rk_pcie->phy);
deinit_clk:
	rk_pcie_clk_deinit(rk_pcie);
disable_vpcie3v3:
	rk_pcie_disable_power(rk_pcie);

	device_release_driver(dev);

	return ret;
}

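/*
 * Probing runs in a kthread because rk_pcie_establish_link() sleeps for
 * hundreds of milliseconds around PERST# and link training; doing that
 * asynchronously keeps boot from serializing on it. On failure the
 * thread unbinds the device itself via device_release_driver().
 */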
static int rk_pcie_probe(struct platform_device *pdev)
{
	struct task_struct *tsk;

	tsk = kthread_run(rk_pcie_really_probe, pdev, "rk-pcie");
	if (IS_ERR(tsk)) {
		dev_err(&pdev->dev, "start rk-pcie thread failed\n");
		return PTR_ERR(tsk);
	}
	return 0;
}

static int __maybe_unused rockchip_dw_pcie_suspend(struct device *dev)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
	int ret;

	rk_pcie_link_status_clear(rk_pcie);
	rk_pcie_disable_ltssm(rk_pcie);

	/* make sure the PHY assert has taken effect */
	usleep_range(200, 300);

	phy_power_off(rk_pcie->phy);
	phy_exit(rk_pcie->phy);

	clk_bulk_disable(rk_pcie->clk_cnt, rk_pcie->clks);

	rk_pcie->in_suspend = true;

	gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0);
	ret = rk_pcie_disable_power(rk_pcie);

	return ret;
}

static int __maybe_unused rockchip_dw_pcie_resume(struct device *dev)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
	bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;
	int ret;

	ret = rk_pcie_enable_power(rk_pcie);
	if (ret)
		return ret;

	ret = clk_bulk_enable(rk_pcie->clk_cnt, rk_pcie->clks);
	if (ret) {
		clk_bulk_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
		return ret;
	}

	ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
			       rk_pcie->phy_sub_mode);
	if (ret) {
		dev_err(dev, "fail to set phy to mode %s, err %d\n",
			(rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
			ret);
		return ret;
	}

	ret = phy_init(rk_pcie->phy);
	if (ret < 0) {
		dev_err(dev, "fail to init phy, err %d\n", ret);
		return ret;
	}

	phy_power_on(rk_pcie->phy);

	dw_pcie_dbi_ro_wr_en(rk_pcie->pci);

	if (rk_pcie->is_rk1808) {
		/* release link reset grant */
		ret = rk_pcie_reset_grant_ctrl(rk_pcie, true);
		if (ret)
			return ret;
	} else {
		rk_pcie_fast_link_setup(rk_pcie);
	}

	/* Set PCIe mode */
	rk_pcie_set_mode(rk_pcie);

	if (std_rc)
		dw_pcie_setup_rc(&rk_pcie->pci->pp);

	ret = rk_pcie_establish_link(rk_pcie->pci);
	if (ret) {
		dev_err(dev, "failed to establish pcie link\n");
		goto err;
	}

	if (std_rc)
		goto std_rc_done;

	ret = rk_pcie_ep_atu_init(rk_pcie);
	if (ret) {
		dev_err(dev, "failed to init ep device\n");
		goto err;
	}

	rk_pcie_ep_setup(rk_pcie);

	rk_pcie->in_suspend = false;

std_rc_done:
	dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
	/* hold link reset grant after link-up */
	if (rk_pcie->is_rk1808) {
		ret = rk_pcie_reset_grant_ctrl(rk_pcie, false);
		if (ret)
			goto err;
	}

	return 0;
err:
	rk_pcie_disable_power(rk_pcie);

	return ret;
}

static const struct dev_pm_ops rockchip_dw_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_dw_pcie_suspend,
				      rockchip_dw_pcie_resume)
};

static struct platform_driver rk_plat_pcie_driver = {
	.driver = {
		.name = "rk-pcie",
		.of_match_table = rk_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &rockchip_dw_pcie_pm_ops,
	},
	.probe = rk_pcie_probe,
};

module_platform_driver(rk_plat_pcie_driver);

MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip PCIe Controller driver");
MODULE_LICENSE("GPL v2");