// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * AppliedMicro X-Gene SoC SATA Host Controller Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Author: Loc Ho <lho@apm.com>
 *         Tuan Phan <tphan@apm.com>
 *         Suman Tripathi <stripathi@apm.com>
 *
 * NOTE: PM support is not currently available.
 */
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ahci_platform.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include "ahci.h"

#define DRV_NAME "xgene-ahci"

/* Max # of disks per controller */
#define MAX_AHCI_CHN_PERCTR		2

/* MUX CSR */
#define SATA_ENET_CONFIG_REG		0x00000000
#define  CFG_SATA_ENET_SELECT_MASK	0x00000001

/* SATA core host controller CSR */
#define SLVRDERRATTRIBUTES		0x00000000
#define SLVWRERRATTRIBUTES		0x00000004
#define MSTRDERRATTRIBUTES		0x00000008
#define MSTWRERRATTRIBUTES		0x0000000c
#define BUSCTLREG			0x00000014
#define IOFMSTRWAUX			0x00000018
#define INTSTATUSMASK			0x0000002c
#define ERRINTSTATUS			0x00000030
#define ERRINTSTATUSMASK		0x00000034

/* SATA host AHCI CSR */
#define PORTCFG				0x000000a4
#define  PORTADDR_SET(dst, src) \
		(((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
#define PORTPHY1CFG		0x000000a8
#define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
		(((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
#define PORTPHY2CFG			0x000000ac
#define PORTPHY3CFG			0x000000b0
#define PORTPHY4CFG			0x000000b4
#define PORTPHY5CFG			0x000000b8
#define SCTL0				0x0000012C
#define PORTPHY5CFG_RTCHG_SET(dst, src) \
		(((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
#define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
		(((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
#define PORTAXICFG			0x000000bc
#define PORTAXICFG_OUTTRANS_SET(dst, src) \
		(((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
#define PORTRANSCFG			0x000000c8
#define PORTRANSCFG_RXWM_SET(dst, src)		\
		(((dst) & ~0x0000007f) | (((u32)(src)) & 0x0000007f))

/* SATA host controller AXI CSR */
#define INT_SLV_TMOMASK			0x00000010

/* SATA diagnostic CSR */
#define CFG_MEM_RAM_SHUTDOWN		0x00000070
#define BLOCK_MEM_RDY			0x00000074

/* Max retry for link down */
#define MAX_LINK_DOWN_RETRY 3

enum xgene_ahci_version {
	XGENE_AHCI_V1 = 1,
	XGENE_AHCI_V2,
};

struct xgene_ahci_context {
	struct ahci_host_priv *hpriv;
	struct device *dev;
	u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued */
	u32 class[MAX_AHCI_CHN_PERCTR]; /* tracking the class of device */
	void __iomem *csr_core;		/* Core CSR address of IP */
	void __iomem *csr_diag;		/* Diag CSR address of IP */
	void __iomem *csr_axi;		/* AXI CSR address of IP */
	void __iomem *csr_mux;		/* MUX CSR address of IP */
};

static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
{
	dev_dbg(ctx->dev, "Release memory from shutdown\n");
	writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
	readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
	msleep(1);	/* reset may take up to 1ms */
	if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
		dev_err(ctx->dev, "failed to release memory from shutdown\n");
		return -ENODEV;
	}
	return 0;
}

/**
 * xgene_ahci_poll_reg_val - Poll a register for a specific value.
 * @ap : ATA port of interest.
 * @reg : Register of interest.
 * @val : Value to be attained.
 * @interval : waiting interval for polling.
 * @timeout : timeout for achieving the value.
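 *
 * Return: the last value read from @reg; on success this equals @val,
 * any other value means the poll timed out before @val was observed.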
 */
static int xgene_ahci_poll_reg_val(struct ata_port *ap,
				   void __iomem *reg, unsigned int val,
				   unsigned int interval, unsigned int timeout)
{
	unsigned long deadline;
	unsigned int tmp;

	tmp = ioread32(reg);
	deadline = ata_deadline(jiffies, timeout);

	while (tmp != val && time_before(jiffies, deadline)) {
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

	return tmp;
}

/**
 * xgene_ahci_restart_engine - Restart the DMA engine.
 * @ap : ATA port of interest
 *
 * Waits for completion of outstanding commands and restarts
 * the DMA engine inside the controller.
 */
static int xgene_ahci_restart_engine(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;

	/*
	 * In case of PMP, multiple IDENTIFY DEVICE commands can be
	 * issued inside PxCI. We need to poll PxCI for the
	 * completion of outstanding IDENTIFY DEVICE commands before
	 * we restart the DMA engine.
	 */
	if (xgene_ahci_poll_reg_val(ap, port_mmio +
				    PORT_CMD_ISSUE, 0x0, 1, 100))
		return -EBUSY;

	hpriv->stop_engine(ap);
	ahci_start_fis_rx(ap);

	/*
	 * Re-enable the PxFBS.FBS_EN bit as it
	 * gets cleared due to stopping the engine.
	 */
	if (pp->fbs_supported) {
		fbs = readl(port_mmio + PORT_FBS);
		writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
		fbs = readl(port_mmio + PORT_FBS);
	}

	hpriv->start_engine(ap);

	return 0;
}

/**
 * xgene_ahci_qc_issue - Issue commands to the device
 * @qc: Command to issue
 *
 * Due to a hardware errata for the IDENTIFY DEVICE command, the controller
 * cannot clear the BSY bit after receiving the PIO setup FIS. This causes the
 * DMA state machine to go into the CMFatalErrorUpdate state and lock up.
 * Restarting the DMA engine brings the controller out of the lockup state.
 *
 * Due to H/W errata, the controller is unable to save the PMP
 * field fetched from the command header before sending the H2D FIS.
 * When the device returns the PMP port field in the D2H FIS, there is
 * a mismatch, which results in command completion failure. The
 * workaround is to write the pmp value to the PxFBS.DEV field before issuing
 * any command to the PMP.
 */
static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	int rc = 0;
	u32 port_fbs;
	void __iomem *port_mmio = ahci_port_base(ap);

	/*
	 * Write the pmp value to PxFBS.DEV
	 * for the case of a Port Multiplier.
	 */
	if (ctx->class[ap->port_no] == ATA_DEV_PMP) {
		port_fbs = readl(port_mmio + PORT_FBS);
		port_fbs &= ~PORT_FBS_DEV_MASK;
		port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
		writel(port_fbs, port_mmio + PORT_FBS);
	}

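	/*
	 * If the previous command on this port was one affected by the
	 * BSY-clear errata described above (IDENTIFY DEVICE, PACKET or
	 * SMART), restart the DMA engine before issuing the next command.
	 */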
	if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
	    (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) ||
	    (ctx->last_cmd[ap->port_no] == ATA_CMD_SMART)))
		xgene_ahci_restart_engine(ap);

	rc = ahci_qc_issue(qc);

	/* Save the last command issued */
	ctx->last_cmd[ap->port_no] = qc->tf.command;

	return rc;
}

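/**
 * xgene_ahci_is_memram_inited - Check whether the IP RAM is already initialized.
 * @ctx: Host context.
 *
 * Returns true if the RAM shutdown register has already been cleared and the
 * memory reported ready, in which case the probe path skips the clock and
 * PHY initialization.
 */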
static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
{
	void __iomem *diagcsr = ctx->csr_diag;

	return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
	        readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
}

/**
 * xgene_ahci_read_id - Read ID data from the specified device
 * @dev: device
 * @tf: proposed taskfile
 * @id: data buffer
 *
 * This custom read ID function is required because the HW
 * does not support DEVSLP.
 */
static unsigned int xgene_ahci_read_id(struct ata_device *dev,
				       struct ata_taskfile *tf, __le16 *id)
{
	u32 err_mask;

	err_mask = ata_do_dev_read_id(dev, tf, id);
	if (err_mask)
		return err_mask;

	/*
	 * Mask the reserved area. Word 78 (Link Power Management):
	 * bit15-8: reserved
	 * bit7: NCQ autosense
	 * bit6: Software settings preservation supported
	 * bit5: reserved
	 * bit4: In-order SATA delivery supported
	 * bit3: DIPM requests supported
	 * bit2: DMA Setup FIS Auto-Activate optimization supported
	 * bit1: DMA Setup FIS non-zero buffer offsets supported
	 * bit0: Reserved
	 *
	 * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
	 */
	id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));

	return 0;
}

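/**
 * xgene_ahci_set_phy_cfg - Configure the port and PHY CSRs for a channel.
 * @ctx: Host context.
 * @channel: SATA channel (0 or 1) to configure.
 *
 * Selects the port in PORTCFG and programs the PORTPHY and PORTAXICFG
 * registers with the controller-specific settings used by this driver.
 */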
static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
{
	void __iomem *mmio = ctx->hpriv->mmio;
	u32 val;

	dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
		mmio, channel);
	val = readl(mmio + PORTCFG);
	val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
	writel(val, mmio + PORTCFG);
	readl(mmio + PORTCFG);  /* Force a barrier */
	/* Disable fixed rate */
	writel(0x0001fffe, mmio + PORTPHY1CFG);
	readl(mmio + PORTPHY1CFG); /* Force a barrier */
	writel(0x28183219, mmio + PORTPHY2CFG);
	readl(mmio + PORTPHY2CFG); /* Force a barrier */
	writel(0x13081008, mmio + PORTPHY3CFG);
	readl(mmio + PORTPHY3CFG); /* Force a barrier */
	writel(0x00480815, mmio + PORTPHY4CFG);
	readl(mmio + PORTPHY4CFG); /* Force a barrier */
	/* Set window negotiation */
	val = readl(mmio + PORTPHY5CFG);
	val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
	writel(val, mmio + PORTPHY5CFG);
	readl(mmio + PORTPHY5CFG); /* Force a barrier */
	val = readl(mmio + PORTAXICFG);
	val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
	val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
	writel(val, mmio + PORTAXICFG);
	readl(mmio + PORTAXICFG); /* Force a barrier */
	/* Set the watermark threshold of the receive FIFO */
	val = readl(mmio + PORTRANSCFG);
	val = PORTRANSCFG_RXWM_SET(val, 0x30);
	writel(val, mmio + PORTRANSCFG);
}

/**
 * xgene_ahci_do_hardreset - Issue the actual COMRESET
 * @link: link to reset
 * @deadline: deadline jiffies for the operation
 * @online: Return value to indicate if device online
 *
 * Due to the limitation of the hardware PHY, a different set of settings is
 * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
 * and Gen1 (1.5Gbps). Otherwise, during long IO stress tests, the PHY will
 * report disparity errors and the like. In addition, during COMRESET, errors
 * can be reported in the PORT_SCR_ERR register. For SERR_DISPARITY and
 * SERR_10B_8B_ERR, the PHY receiver line must be reset. Also, during long
 * reboot cycle regression, the PHY sometimes reports link down even if the
 * device is present because of speed negotiation failure, so the COMRESET
 * needs to be retried to get the link up. The following algorithm is used to
 * properly configure the hardware PHY during COMRESET:
 *
 * Alg Part 1:
 * 1. Start the PHY at Gen3 speed (default setting)
 * 2. Issue the COMRESET
 * 3. If no link, go to Alg Part 3
 * 4. If link up, determine if the negotiated speed matches the PHY
 *    configured speed
 * 5. If they match, go to Alg Part 2
 * 6. If they do not match and this is the first attempt, configure the PHY
 *    for the linked up disk speed and repeat step 2
 * 7. Go to Alg Part 2
 *
 * Alg Part 2:
 * 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR errors
 *    reported in the register PORT_SCR_ERR, then reset the PHY receiver line
 * 2. Go to Alg Part 4
 *
 * Alg Part 3:
 * 1. Check PORT_SCR_STAT to see whether device presence was detected but PHY
 *    communication establishment failed; if so, and the number of link down
 *    attempts is less than the maximum (3), go to Alg Part 1.
 * 2. Go to Alg Part 4.
 *
 * Alg Part 4:
 * 1. Clear any pending errors from the register PORT_SCR_ERR.
 *
 * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition,
 *       until the underlying PHY supports a method to reset the receiver
 *       line, a warning message will be printed on detection of
 *       SERR_DISPARITY or SERR_10B_8B_ERR errors.
 */
static int xgene_ahci_do_hardreset(struct ata_link *link,
				   unsigned long deadline, bool *online)
{
	const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_taskfile tf;
	int link_down_retry = 0;
	int rc;
	u32 val, sstatus;

	do {
		/* clear D2H reception area to properly wait for D2H FIS */
		ata_tf_init(link->device, &tf);
		tf.status = ATA_BUSY;
		ata_tf_to_fis(&tf, 0, 0, d2h_fis);
		rc = sata_link_hardreset(link, timing, deadline, online,
				 ahci_check_ready);
		if (*online) {
			val = readl(port_mmio + PORT_SCR_ERR);
			if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
				dev_warn(ctx->dev, "link has error\n");
			break;
		}

		sata_scr_read(link, SCR_STATUS, &sstatus);
	} while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
		 (sstatus & 0xff) == 0x1);

	/* clear all errors if any pending */
	val = readl(port_mmio + PORT_SCR_ERR);
	writel(val, port_mmio + PORT_SCR_ERR);

	return rc;
}

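/**
 * xgene_ahci_hardreset - Perform a COMRESET while preserving the port CSRs.
 * @link: link to reset
 * @class: Return value to indicate class of device
 * @deadline: deadline jiffies for the operation
 *
 * The controller hardreset clears PORT_CMD and the command list and FIS
 * receive address registers, so they are saved beforehand and restored
 * once the reset completes.
 */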
static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	bool online;
	int rc;
	u32 portcmd_saved;
	u32 portclb_saved;
	u32 portclbhi_saved;
	u32 portrxfis_saved;
	u32 portrxfishi_saved;

	/* As hardreset resets these CSRs, save them to restore later */
	portcmd_saved = readl(port_mmio + PORT_CMD);
	portclb_saved = readl(port_mmio + PORT_LST_ADDR);
	portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
	portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
	portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);

	hpriv->stop_engine(ap);

	rc = xgene_ahci_do_hardreset(link, deadline, &online);

	/* As controller hardreset clears them, restore them */
	writel(portcmd_saved, port_mmio + PORT_CMD);
	writel(portclb_saved, port_mmio + PORT_LST_ADDR);
	writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
	writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
	writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);

	hpriv->start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	return rc;
}

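/* Called on host teardown: disable the AHCI platform resources. */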
static void xgene_ahci_host_stop(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;

	ahci_platform_disable_resources(hpriv);
}

/**
 * xgene_ahci_pmp_softreset - Issue the softreset to the drives connected
 *                            to a Port Multiplier.
 * @link: link to reset
 * @class: Return value to indicate class of device
 * @deadline: deadline jiffies for the operation
 *
 * Due to H/W errata, the controller is unable to save the PMP
 * field fetched from the command header before sending the H2D FIS.
 * When the device returns the PMP port field in the D2H FIS, there is
 * a mismatch, which results in command completion failure. The workaround
 * is to write the pmp value to the PxFBS.DEV field before issuing any command
 * to the PMP.
 */
static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	int pmp = sata_srst_pmp(link);
	struct ata_port *ap = link->ap;
	u32 rc;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 port_fbs;

	/*
	 * Set PxFBS.DEV field with pmp
	 * value.
	 */
	port_fbs = readl(port_mmio + PORT_FBS);
	port_fbs &= ~PORT_FBS_DEV_MASK;
	port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
	writel(port_fbs, port_mmio + PORT_FBS);

	rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);

	return rc;
}

/**
 * xgene_ahci_softreset - Issue the softreset to the drive.
 * @link: link to reset
 * @class: Return value to indicate class of device
 * @deadline: deadline jiffies for the operation
 *
 * Due to H/W errata, the controller is unable to save the PMP
 * field fetched from the command header before sending the H2D FIS.
 * When the device returns the PMP port field in the D2H FIS, there is
 * a mismatch, which results in command completion failure. The workaround
 * is to write the pmp value to the PxFBS.DEV field before issuing any command
 * to the PMP. Here is the algorithm to detect a PMP:
 *
 * 1. Save the PxFBS value
 * 2. Program PxFBS.DEV with the pmp value sent by the framework. The
 *    framework sends 0xF for both PMP/NON-PMP initially
 * 3. Issue softreset
 * 4. If signature class is PMP goto 6
 * 5. Restore the original PxFBS and goto 3
 * 6. return
 */
static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	int pmp = sata_srst_pmp(link);
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 port_fbs;
	u32 port_fbs_save;
	u32 retry = 1;
	u32 rc;

	port_fbs_save = readl(port_mmio + PORT_FBS);

	/*
	 * Set PxFBS.DEV field with pmp
	 * value.
	 */
	port_fbs = readl(port_mmio + PORT_FBS);
	port_fbs &= ~PORT_FBS_DEV_MASK;
	port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
	writel(port_fbs, port_mmio + PORT_FBS);

softreset_retry:
	rc = ahci_do_softreset(link, class, pmp,
			       deadline, ahci_check_ready);

	ctx->class[ap->port_no] = *class;
	if (*class != ATA_DEV_PMP) {
		/*
		 * Retry for normal drives without
		 * setting PxFBS.DEV field with pmp value.
		 */
		if (retry--) {
			writel(port_fbs_save, port_mmio + PORT_FBS);
			goto softreset_retry;
		}
	}

	return rc;
}

/**
 * xgene_ahci_handle_broken_edge_irq - Handle the broken edge-triggered irq.
 * @host: Host that received the irq
 * @irq_masked: HOST_IRQ_STAT value
 *
 * For hardware with a broken edge-triggered latch,
 * the HOST_IRQ_STAT register misses the edge interrupt
 * when the clearing of the HOST_IRQ_STAT register and the hardware
 * reporting of the PORT_IRQ_STAT register happen in the
 * same clock cycle.
 * As such, the algorithm below outlines the workaround.
 *
 * 1. Read HOST_IRQ_STAT register and save the state.
 * 2. Clear the HOST_IRQ_STAT register.
 * 3. Read back the HOST_IRQ_STAT register.
 * 4. If the HOST_IRQ_STAT register equals zero, then
 *    traverse the remaining ports' PORT_IRQ_STAT registers
 *    to check if an interrupt is triggered at that point, else
 *    go to step 6.
 * 5. If the PORT_IRQ_STAT register of any remaining port is not equal to
 *    zero, then update the state of HOST_IRQ_STAT saved in step 1.
 * 6. Handle port interrupts.
 * 7. Exit
 */
static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
					     u32 irq_masked)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *port_mmio;
	int i;

	if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
		for (i = 0; i < host->n_ports; i++) {
			if (irq_masked & (1 << i))
				continue;

			port_mmio = ahci_port_base(host->ports[i]);
			if (readl(port_mmio + PORT_IRQ_STAT))
				irq_masked |= (1 << i);
		}
	}

	return ahci_handle_port_intr(host, irq_masked);
}

static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int rc = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	hpriv = host->private_data;
	mmio = hpriv->mmio;

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	/*
	 * HOST_IRQ_STAT behaves as an edge triggered latch, meaning that
	 * it should be cleared before all the port events are cleared.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(rc);
}

static struct ata_port_operations xgene_ahci_v1_ops = {
	.inherits = &ahci_ops,
	.host_stop = xgene_ahci_host_stop,
	.hardreset = xgene_ahci_hardreset,
	.read_id = xgene_ahci_read_id,
	.qc_issue = xgene_ahci_qc_issue,
	.softreset = xgene_ahci_softreset,
	.pmp_softreset = xgene_ahci_pmp_softreset
};

static const struct ata_port_info xgene_ahci_v1_port_info = {
	.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
	.pio_mask = ATA_PIO4,
	.udma_mask = ATA_UDMA6,
	.port_ops = &xgene_ahci_v1_ops,
};

static struct ata_port_operations xgene_ahci_v2_ops = {
	.inherits = &ahci_ops,
	.host_stop = xgene_ahci_host_stop,
	.hardreset = xgene_ahci_hardreset,
	.read_id = xgene_ahci_read_id,
};

static const struct ata_port_info xgene_ahci_v2_port_info = {
	.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
	.pio_mask = ATA_PIO4,
	.udma_mask = ATA_UDMA6,
	.port_ops = &xgene_ahci_v2_ops,
};

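/**
 * xgene_ahci_hw_init - One-time hardware initialization of the controller.
 * @hpriv: AHCI host private data.
 *
 * Releases the IP RAM from shutdown, applies the PHY/port configuration for
 * each channel, unmasks the controller interrupts and enables AXI read/write
 * coherency.
 */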
static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv)
{
	struct xgene_ahci_context *ctx = hpriv->plat_data;
	int i;
	int rc;
	u32 val;

	/* Remove IP RAM out of shutdown */
	rc = xgene_ahci_init_memram(ctx);
	if (rc)
		return rc;

	for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++)
		xgene_ahci_set_phy_cfg(ctx, i);

	/* AXI disable Mask */
	writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT);
	readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */
	writel(0, ctx->csr_core + INTSTATUSMASK);
	val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */
	dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n",
		INTSTATUSMASK, val);

	writel(0x0, ctx->csr_core + ERRINTSTATUSMASK);
	readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */
	writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK);
	readl(ctx->csr_axi + INT_SLV_TMOMASK);

	/* Enable AXI Interrupt */
	writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES);
	writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES);
	writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES);
	writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES);

	/* Enable coherency */
	val = readl(ctx->csr_core + BUSCTLREG);
	val &= ~0x00000002;     /* Enable write coherency */
	val &= ~0x00000001;     /* Enable read coherency */
	writel(val, ctx->csr_core + BUSCTLREG);

	val = readl(ctx->csr_core + IOFMSTRWAUX);
	val |= (1 << 3);        /* Enable read coherency */
	val |= (1 << 9);        /* Enable write coherency */
	writel(val, ctx->csr_core + IOFMSTRWAUX);
	val = readl(ctx->csr_core + IOFMSTRWAUX);
	dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n",
		IOFMSTRWAUX, val);

	return rc;
}

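/**
 * xgene_ahci_mux_select - Select the SATA function on the SATA/ENET mux.
 * @ctx: Host context.
 *
 * If the optional mux CSR resource is present, clear the
 * CFG_SATA_ENET_SELECT_MASK bit to route the shared interface to SATA.
 * Returns 0 on success or if no mux is present, -1 if the selection did
 * not take effect.
 */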
static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
{
	u32 val;

	/* Check for optional MUX resource */
	if (!ctx->csr_mux)
		return 0;

	val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
	val &= ~CFG_SATA_ENET_SELECT_MASK;
	writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG);
	val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
	return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
}

static const struct scsi_host_template ahci_platform_sht = {
	AHCI_SHT(DRV_NAME),
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_ahci_acpi_match[] = {
	{ "APMC0D0D", XGENE_AHCI_V1},
	{ "APMC0D32", XGENE_AHCI_V2},
	{},
};
MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
#endif

static const struct of_device_id xgene_ahci_of_match[] = {
	{.compatible = "apm,xgene-ahci", .data = (void *) XGENE_AHCI_V1},
	{.compatible = "apm,xgene-ahci-v2", .data = (void *) XGENE_AHCI_V2},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);

static int xgene_ahci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct xgene_ahci_context *ctx;
	struct resource *res;
	const struct of_device_id *of_devid;
	enum xgene_ahci_version version = XGENE_AHCI_V1;
	const struct ata_port_info *ppi[] = { &xgene_ahci_v1_port_info,
					      &xgene_ahci_v2_port_info };
	int rc;

	hpriv = ahci_platform_get_resources(pdev, 0);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	hpriv->plat_data = ctx;
	ctx->hpriv = hpriv;
	ctx->dev = dev;

	/* Retrieve the IP core resource */
	ctx->csr_core = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(ctx->csr_core))
		return PTR_ERR(ctx->csr_core);

	/* Retrieve the IP diagnostic resource */
	ctx->csr_diag = devm_platform_ioremap_resource(pdev, 2);
	if (IS_ERR(ctx->csr_diag))
		return PTR_ERR(ctx->csr_diag);

	/* Retrieve the IP AXI resource */
	ctx->csr_axi = devm_platform_ioremap_resource(pdev, 3);
	if (IS_ERR(ctx->csr_axi))
		return PTR_ERR(ctx->csr_axi);

	/* Retrieve the optional IP mux resource */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
	if (res) {
		void __iomem *csr = devm_ioremap_resource(dev, res);

		if (IS_ERR(csr))
			return PTR_ERR(csr);

		ctx->csr_mux = csr;
	}

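	/* Determine the controller version from the OF or ACPI match data. */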
	of_devid = of_match_device(xgene_ahci_of_match, dev);
	if (of_devid) {
		if (of_devid->data)
			version = (unsigned long) of_devid->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;
		struct acpi_device_info *info;
		acpi_status status;

		acpi_id = acpi_match_device(xgene_ahci_acpi_match, &pdev->dev);
		if (!acpi_id) {
			dev_warn(&pdev->dev, "No node entry in ACPI table. Assume version1\n");
			version = XGENE_AHCI_V1;
		} else if (acpi_id->driver_data) {
			version = (enum xgene_ahci_version) acpi_id->driver_data;
			status = acpi_get_object_info(ACPI_HANDLE(&pdev->dev), &info);
			if (ACPI_FAILURE(status)) {
				dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
					__func__);
				version = XGENE_AHCI_V1;
			} else {
				if (info->valid & ACPI_VALID_CID)
					version = XGENE_AHCI_V2;
				kfree(info);
			}
		}
	}
#endif

	dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core,
		hpriv->mmio);

	/* Select ATA */
	rc = xgene_ahci_mux_select(ctx);
	if (rc) {
		dev_err(dev, "SATA mux selection failed error %d\n", rc);
		return -ENODEV;
	}

	if (xgene_ahci_is_memram_inited(ctx)) {
		dev_info(dev, "skip clock and PHY initialization\n");
		goto skip_clk_phy;
	}

	/* Due to errata, HW requires full toggle transition */
	rc = ahci_platform_enable_clks(hpriv);
	if (rc)
		goto disable_resources;
	ahci_platform_disable_clks(hpriv);

	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		goto disable_resources;

	/* Configure the host controller */
	xgene_ahci_hw_init(hpriv);
skip_clk_phy:

	switch (version) {
	case XGENE_AHCI_V1:
		hpriv->flags = AHCI_HFLAG_NO_NCQ;
		break;
	case XGENE_AHCI_V2:
		hpriv->flags |= AHCI_HFLAG_YES_FBS;
		hpriv->irq_handler = xgene_ahci_irq_intr;
		break;
	default:
		break;
	}

	rc = ahci_platform_init_host(pdev, hpriv, ppi[version - 1],
				     &ahci_platform_sht);
	if (rc)
		goto disable_resources;

	dev_dbg(dev, "X-Gene SATA host controller initialized\n");
	return 0;

disable_resources:
	ahci_platform_disable_resources(hpriv);
	return rc;
}

static struct platform_driver xgene_ahci_driver = {
	.probe = xgene_ahci_probe,
	.remove_new = ata_platform_remove_one,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = xgene_ahci_of_match,
		.acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match),
	},
};

module_platform_driver(xgene_ahci_driver);

MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver");
MODULE_AUTHOR("Loc Ho <lho@apm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.4");