xref: /kernel/linux/linux-5.10/drivers/ata/sata_mv.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * sata_mv.c - Marvell SATA support
4 *
5 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
6 * Copyright 2005: EMC Corporation, all rights reserved.
7 * Copyright 2005 Red Hat, Inc.  All rights reserved.
8 *
9 * Originally written by Brett Russ.
10 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
11 *
12 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
13 */
14
15/*
16 * sata_mv TODO list:
17 *
18 * --> Develop a low-power-consumption strategy, and implement it.
19 *
20 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
21 *
22 * --> [Experiment, Marvell value added] Is it possible to use target
23 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
24 *       creating LibATA target mode support would be very interesting.
25 *
26 *       Target mode, for those without docs, is the ability to directly
27 *       connect two SATA ports.
28 */
29
30/*
31 * 80x1-B2 errata PCI#11:
32 *
33 * Users of the 6041/6081 Rev.B2 chips (current is C0)
34 * should be careful to insert those cards only onto PCI-X bus #0,
35 * and only in device slots 0..7, not higher.  The chips may not
36 * work correctly otherwise  (note: this is a pretty rare condition).
37 */
38
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
46#include <linux/dmapool.h>
47#include <linux/dma-mapping.h>
48#include <linux/device.h>
49#include <linux/clk.h>
50#include <linux/phy/phy.h>
51#include <linux/platform_device.h>
52#include <linux/ata_platform.h>
53#include <linux/mbus.h>
54#include <linux/bitops.h>
55#include <linux/gfp.h>
56#include <linux/of.h>
57#include <linux/of_irq.h>
58#include <scsi/scsi_host.h>
59#include <scsi/scsi_cmnd.h>
60#include <scsi/scsi_device.h>
61#include <linux/libata.h>
62
63#define DRV_NAME	"sata_mv"
64#define DRV_VERSION	"1.28"
65
66/*
67 * module options
68 */
69
70#ifdef CONFIG_PCI
71static int msi;
72module_param(msi, int, S_IRUGO);
73MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
74#endif
75
76static int irq_coalescing_io_count;
77module_param(irq_coalescing_io_count, int, S_IRUGO);
78MODULE_PARM_DESC(irq_coalescing_io_count,
79		 "IRQ coalescing I/O count threshold (0..255)");
80
81static int irq_coalescing_usecs;
82module_param(irq_coalescing_usecs, int, S_IRUGO);
83MODULE_PARM_DESC(irq_coalescing_usecs,
84		 "IRQ coalescing time threshold in usecs");
85
86enum {
87	/* BAR's are enumerated in terms of pci_resource_start() terms */
88	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
89	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
90	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
91
92	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
93	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
94
95	/* For use with both IRQ coalescing methods ("all ports" or "per-HC" */
96	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
97	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
98	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */
99
100	MV_PCI_REG_BASE		= 0,
101
102	/*
103	 * Per-chip ("all ports") interrupt coalescing feature.
104	 * This is only for GEN_II / GEN_IIE hardware.
105	 *
106	 * Coalescing defers the interrupt until either the IO_THRESHOLD
107	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
108	 */
109	COAL_REG_BASE		= 0x18000,
110	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
111	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */
112
113	IRQ_COAL_IO_THRESHOLD   = (COAL_REG_BASE + 0xcc),
114	IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
115
116	/*
117	 * Registers for the (unused here) transaction coalescing feature:
118	 */
119	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
120	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),
121
122	SATAHC0_REG_BASE	= 0x20000,
123	FLASH_CTL		= 0x1046c,
124	GPIO_PORT_CTL		= 0x104f0,
125	RESET_CFG		= 0x180d8,
126
127	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
128	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
129	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
130	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
131
132	MV_MAX_Q_DEPTH		= 32,
133	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
134
135	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
136	 * CRPB needs alignment on a 256B boundary. Size == 256B
137	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
138	 */
139	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
140	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
141	MV_MAX_SG_CT		= 256,
142	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
143
144	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
145	MV_PORT_HC_SHIFT	= 2,
146	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
147	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
148	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */
149
150	/* Host Flags */
151	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
152
153	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
154
155	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
156
157	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
158				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
159
160	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,
161
162	CRQB_FLAG_READ		= (1 << 0),
163	CRQB_TAG_SHIFT		= 1,
164	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
165	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
166	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
167	CRQB_CMD_ADDR_SHIFT	= 8,
168	CRQB_CMD_CS		= (0x2 << 11),
169	CRQB_CMD_LAST		= (1 << 15),
170
171	CRPB_FLAG_STATUS_SHIFT	= 8,
172	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
173	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
174
175	EPRD_FLAG_END_OF_TBL	= (1 << 31),
176
177	/* PCI interface registers */
178
179	MV_PCI_COMMAND		= 0xc00,
180	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
181	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */
182
183	PCI_MAIN_CMD_STS	= 0xd30,
184	STOP_PCI_MASTER		= (1 << 2),
185	PCI_MASTER_EMPTY	= (1 << 3),
186	GLOB_SFT_RST		= (1 << 4),
187
188	MV_PCI_MODE		= 0xd00,
189	MV_PCI_MODE_MASK	= 0x30,
190
191	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
192	MV_PCI_DISC_TIMER	= 0xd04,
193	MV_PCI_MSI_TRIGGER	= 0xc38,
194	MV_PCI_SERR_MASK	= 0xc28,
195	MV_PCI_XBAR_TMOUT	= 0x1d04,
196	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
197	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
198	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
199	MV_PCI_ERR_COMMAND	= 0x1d50,
200
201	PCI_IRQ_CAUSE		= 0x1d58,
202	PCI_IRQ_MASK		= 0x1d5c,
203	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
204
205	PCIE_IRQ_CAUSE		= 0x1900,
206	PCIE_IRQ_MASK		= 0x1910,
207	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
208
209	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
210	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
211	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
212	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
213	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
214	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
215	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
216	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
217	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
218	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
219	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
220	PCI_ERR			= (1 << 18),
221	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
222	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
223	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
224	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
225	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
226	GPIO_INT		= (1 << 22),
227	SELF_INT		= (1 << 23),
228	TWSI_INT		= (1 << 24),
229	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
230	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
231	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
232
233	/* SATAHC registers */
234	HC_CFG			= 0x00,
235
236	HC_IRQ_CAUSE		= 0x14,
237	DMA_IRQ			= (1 << 0),	/* shift by port # */
238	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
239	DEV_IRQ			= (1 << 8),	/* shift by port # */
240
241	/*
242	 * Per-HC (Host-Controller) interrupt coalescing feature.
243	 * This is present on all chip generations.
244	 *
245	 * Coalescing defers the interrupt until either the IO_THRESHOLD
246	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
247	 */
248	HC_IRQ_COAL_IO_THRESHOLD	= 0x000c,
249	HC_IRQ_COAL_TIME_THRESHOLD	= 0x0010,
250
251	SOC_LED_CTRL		= 0x2c,
252	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
253	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
254						/*  with dev activity LED */
255
256	/* Shadow block registers */
257	SHD_BLK			= 0x100,
258	SHD_CTL_AST		= 0x20,		/* ofs from SHD_BLK */
259
260	/* SATA registers */
261	SATA_STATUS		= 0x300,  /* ctrl, err regs follow status */
262	SATA_ACTIVE		= 0x350,
263	FIS_IRQ_CAUSE		= 0x364,
264	FIS_IRQ_CAUSE_AN	= (1 << 9),	/* async notification */
265
266	LTMODE			= 0x30c,	/* requires read-after-write */
267	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
268
269	PHY_MODE2		= 0x330,
270	PHY_MODE3		= 0x310,
271
272	PHY_MODE4		= 0x314,	/* requires read-after-write */
273	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
274	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
275	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
276	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */
277
278	SATA_IFCTL		= 0x344,
279	SATA_TESTCTL		= 0x348,
280	SATA_IFSTAT		= 0x34c,
281	VENDOR_UNIQUE_FIS	= 0x35c,
282
283	FISCFG			= 0x360,
284	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
285	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
286
287	PHY_MODE9_GEN2		= 0x398,
288	PHY_MODE9_GEN1		= 0x39c,
289	PHYCFG_OFS		= 0x3a0,	/* only in 65n devices */
290
291	MV5_PHY_MODE		= 0x74,
292	MV5_LTMODE		= 0x30,
293	MV5_PHY_CTL		= 0x0C,
294	SATA_IFCFG		= 0x050,
295	LP_PHY_CTL		= 0x058,
296	LP_PHY_CTL_PIN_PU_PLL   = (1 << 0),
297	LP_PHY_CTL_PIN_PU_RX    = (1 << 1),
298	LP_PHY_CTL_PIN_PU_TX    = (1 << 2),
299	LP_PHY_CTL_GEN_TX_3G    = (1 << 5),
300	LP_PHY_CTL_GEN_RX_3G    = (1 << 9),
301
302	MV_M2_PREAMP_MASK	= 0x7e0,
303
304	/* Port registers */
305	EDMA_CFG		= 0,
306	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
307	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
308	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
309	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
310	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
311	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
312	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
313
314	EDMA_ERR_IRQ_CAUSE	= 0x8,
315	EDMA_ERR_IRQ_MASK	= 0xc,
316	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
317	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
318	EDMA_ERR_DEV		= (1 << 2),	/* device error */
319	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
320	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
321	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
322	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
323	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
324	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
325	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
326	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
327	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
328	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
329	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
330
331	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
332	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
333	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
334	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
335	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */
336
337	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
338
339	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
340	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
341	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
342	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
343	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
344	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */
345
346	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
347
348	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
349	EDMA_ERR_OVERRUN_5	= (1 << 5),
350	EDMA_ERR_UNDERRUN_5	= (1 << 6),
351
352	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
353				  EDMA_ERR_LNK_CTRL_RX_1 |
354				  EDMA_ERR_LNK_CTRL_RX_3 |
355				  EDMA_ERR_LNK_CTRL_TX,
356
357	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
358				  EDMA_ERR_PRD_PAR |
359				  EDMA_ERR_DEV_DCON |
360				  EDMA_ERR_DEV_CON |
361				  EDMA_ERR_SERR |
362				  EDMA_ERR_SELF_DIS |
363				  EDMA_ERR_CRQB_PAR |
364				  EDMA_ERR_CRPB_PAR |
365				  EDMA_ERR_INTRL_PAR |
366				  EDMA_ERR_IORDY |
367				  EDMA_ERR_LNK_CTRL_RX_2 |
368				  EDMA_ERR_LNK_DATA_RX |
369				  EDMA_ERR_LNK_DATA_TX |
370				  EDMA_ERR_TRANS_PROTO,
371
372	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
373				  EDMA_ERR_PRD_PAR |
374				  EDMA_ERR_DEV_DCON |
375				  EDMA_ERR_DEV_CON |
376				  EDMA_ERR_OVERRUN_5 |
377				  EDMA_ERR_UNDERRUN_5 |
378				  EDMA_ERR_SELF_DIS_5 |
379				  EDMA_ERR_CRQB_PAR |
380				  EDMA_ERR_CRPB_PAR |
381				  EDMA_ERR_INTRL_PAR |
382				  EDMA_ERR_IORDY,
383
384	EDMA_REQ_Q_BASE_HI	= 0x10,
385	EDMA_REQ_Q_IN_PTR	= 0x14,		/* also contains BASE_LO */
386
387	EDMA_REQ_Q_OUT_PTR	= 0x18,
388	EDMA_REQ_Q_PTR_SHIFT	= 5,
389
390	EDMA_RSP_Q_BASE_HI	= 0x1c,
391	EDMA_RSP_Q_IN_PTR	= 0x20,
392	EDMA_RSP_Q_OUT_PTR	= 0x24,		/* also contains BASE_LO */
393	EDMA_RSP_Q_PTR_SHIFT	= 3,
394
395	EDMA_CMD		= 0x28,		/* EDMA command register */
396	EDMA_EN			= (1 << 0),	/* enable EDMA */
397	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
398	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */
399
400	EDMA_STATUS		= 0x30,		/* EDMA engine status */
401	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
402	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */
403
404	EDMA_IORDY_TMOUT	= 0x34,
405	EDMA_ARB_CFG		= 0x38,
406
407	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
408	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */
409
410	BMDMA_CMD		= 0x224,	/* bmdma command register */
411	BMDMA_STATUS		= 0x228,	/* bmdma status register */
412	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
413	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */
414
415	/* Host private flags (hp_flags) */
416	MV_HP_FLAG_MSI		= (1 << 0),
417	MV_HP_ERRATA_50XXB0	= (1 << 1),
418	MV_HP_ERRATA_50XXB2	= (1 << 2),
419	MV_HP_ERRATA_60X1B2	= (1 << 3),
420	MV_HP_ERRATA_60X1C0	= (1 << 4),
421	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
422	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
423	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
424	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
425	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
426	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
427	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */
428	MV_HP_FIX_LP_PHY_CTL	= (1 << 13),	/* fix speed in LP_PHY_CTL ? */
429
430	/* Port private flags (pp_flags) */
431	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
432	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
433	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
434	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
435	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
436};
437
438#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
439#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
440#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
441#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
442#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
443
444#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
445#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
446
447enum {
448	/* DMA boundary 0xffff is required by the s/g splitting
449	 * we need on /length/ in mv_fill-sg().
450	 */
451	MV_DMA_BOUNDARY		= 0xffffU,
452
453	/* mask of register bits containing lower 32 bits
454	 * of EDMA request queue DMA address
455	 */
456	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
457
458	/* ditto, for response queue */
459	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
460};
461
462enum chip_type {
463	chip_504x,
464	chip_508x,
465	chip_5080,
466	chip_604x,
467	chip_608x,
468	chip_6042,
469	chip_7042,
470	chip_soc,
471};
472
473/* Command ReQuest Block: 32B */
474struct mv_crqb {
475	__le32			sg_addr;
476	__le32			sg_addr_hi;
477	__le16			ctrl_flags;
478	__le16			ata_cmd[11];
479};
480
481struct mv_crqb_iie {
482	__le32			addr;
483	__le32			addr_hi;
484	__le32			flags;
485	__le32			len;
486	__le32			ata_cmd[4];
487};
488
489/* Command ResPonse Block: 8B */
490struct mv_crpb {
491	__le16			id;
492	__le16			flags;
493	__le32			tmstmp;
494};
495
496/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
497struct mv_sg {
498	__le32			addr;
499	__le32			flags_size;
500	__le32			addr_hi;
501	__le32			reserved;
502};
503
504/*
505 * We keep a local cache of a few frequently accessed port
506 * registers here, to avoid having to read them (very slow)
507 * when switching between EDMA and non-EDMA modes.
508 */
509struct mv_cached_regs {
510	u32			fiscfg;
511	u32			ltmode;
512	u32			haltcond;
513	u32			unknown_rsvd;
514};
515
516struct mv_port_priv {
517	struct mv_crqb		*crqb;
518	dma_addr_t		crqb_dma;
519	struct mv_crpb		*crpb;
520	dma_addr_t		crpb_dma;
521	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
522	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];
523
524	unsigned int		req_idx;
525	unsigned int		resp_idx;
526
527	u32			pp_flags;
528	struct mv_cached_regs	cached;
529	unsigned int		delayed_eh_pmp_map;
530};
531
532struct mv_port_signal {
533	u32			amps;
534	u32			pre;
535};
536
537struct mv_host_priv {
538	u32			hp_flags;
539	unsigned int 		board_idx;
540	u32			main_irq_mask;
541	struct mv_port_signal	signal[8];
542	const struct mv_hw_ops	*ops;
543	int			n_ports;
544	void __iomem		*base;
545	void __iomem		*main_irq_cause_addr;
546	void __iomem		*main_irq_mask_addr;
547	u32			irq_cause_offset;
548	u32			irq_mask_offset;
549	u32			unmask_all_irqs;
550
551	/*
552	 * Needed on some devices that require their clocks to be enabled.
553	 * These are optional: if the platform device does not have any
554	 * clocks, they won't be used.  Also, if the underlying hardware
555	 * does not support the common clock framework (CONFIG_HAVE_CLK=n),
556	 * all the clock operations become no-ops (see clk.h).
557	 */
558	struct clk		*clk;
559	struct clk              **port_clks;
560	/*
561	 * Some devices have a SATA PHY which can be enabled/disabled
562	 * in order to save power. These are optional: if the platform
563	 * devices does not have any phy, they won't be used.
564	 */
565	struct phy		**port_phys;
566	/*
567	 * These consistent DMA memory pools give us guaranteed
568	 * alignment for hardware-accessed data structures,
569	 * and less memory waste in accomplishing the alignment.
570	 */
571	struct dma_pool		*crqb_pool;
572	struct dma_pool		*crpb_pool;
573	struct dma_pool		*sg_tbl_pool;
574};
575
576struct mv_hw_ops {
577	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
578			   unsigned int port);
579	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
580	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
581			   void __iomem *mmio);
582	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
583			unsigned int n_hc);
584	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
585	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
586};
587
588static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
589static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
590static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
591static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
592static int mv_port_start(struct ata_port *ap);
593static void mv_port_stop(struct ata_port *ap);
594static int mv_qc_defer(struct ata_queued_cmd *qc);
595static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
596static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
597static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
598static int mv_hardreset(struct ata_link *link, unsigned int *class,
599			unsigned long deadline);
600static void mv_eh_freeze(struct ata_port *ap);
601static void mv_eh_thaw(struct ata_port *ap);
602static void mv6_dev_config(struct ata_device *dev);
603
604static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
605			   unsigned int port);
606static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
607static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
608			   void __iomem *mmio);
609static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
610			unsigned int n_hc);
611static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
612static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
613
614static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
615			   unsigned int port);
616static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
617static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
618			   void __iomem *mmio);
619static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
620			unsigned int n_hc);
621static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
622static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
623				      void __iomem *mmio);
624static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
625				      void __iomem *mmio);
626static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
627				  void __iomem *mmio, unsigned int n_hc);
628static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
629				      void __iomem *mmio);
630static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
631static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
632				  void __iomem *mmio, unsigned int port);
633static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
634static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
635			     unsigned int port_no);
636static int mv_stop_edma(struct ata_port *ap);
637static int mv_stop_edma_engine(void __iomem *port_mmio);
638static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
639
640static void mv_pmp_select(struct ata_port *ap, int pmp);
641static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
642				unsigned long deadline);
643static int  mv_softreset(struct ata_link *link, unsigned int *class,
644				unsigned long deadline);
645static void mv_pmp_error_handler(struct ata_port *ap);
646static void mv_process_crpb_entries(struct ata_port *ap,
647					struct mv_port_priv *pp);
648
649static void mv_sff_irq_clear(struct ata_port *ap);
650static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
651static void mv_bmdma_setup(struct ata_queued_cmd *qc);
652static void mv_bmdma_start(struct ata_queued_cmd *qc);
653static void mv_bmdma_stop(struct ata_queued_cmd *qc);
654static u8   mv_bmdma_status(struct ata_port *ap);
655static u8 mv_sff_check_status(struct ata_port *ap);
656
657/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
658 * because we have to allow room for worst case splitting of
659 * PRDs for 64K boundaries in mv_fill_sg().
660 */
661#ifdef CONFIG_PCI
662static struct scsi_host_template mv5_sht = {
663	ATA_BASE_SHT(DRV_NAME),
664	.sg_tablesize		= MV_MAX_SG_CT / 2,
665	.dma_boundary		= MV_DMA_BOUNDARY,
666};
667#endif
668static struct scsi_host_template mv6_sht = {
669	ATA_NCQ_SHT(DRV_NAME),
670	.can_queue		= MV_MAX_Q_DEPTH - 1,
671	.sg_tablesize		= MV_MAX_SG_CT / 2,
672	.dma_boundary		= MV_DMA_BOUNDARY,
673};
674
675static struct ata_port_operations mv5_ops = {
676	.inherits		= &ata_sff_port_ops,
677
678	.lost_interrupt		= ATA_OP_NULL,
679
680	.qc_defer		= mv_qc_defer,
681	.qc_prep		= mv_qc_prep,
682	.qc_issue		= mv_qc_issue,
683
684	.freeze			= mv_eh_freeze,
685	.thaw			= mv_eh_thaw,
686	.hardreset		= mv_hardreset,
687
688	.scr_read		= mv5_scr_read,
689	.scr_write		= mv5_scr_write,
690
691	.port_start		= mv_port_start,
692	.port_stop		= mv_port_stop,
693};
694
695static struct ata_port_operations mv6_ops = {
696	.inherits		= &ata_bmdma_port_ops,
697
698	.lost_interrupt		= ATA_OP_NULL,
699
700	.qc_defer		= mv_qc_defer,
701	.qc_prep		= mv_qc_prep,
702	.qc_issue		= mv_qc_issue,
703
704	.dev_config             = mv6_dev_config,
705
706	.freeze			= mv_eh_freeze,
707	.thaw			= mv_eh_thaw,
708	.hardreset		= mv_hardreset,
709	.softreset		= mv_softreset,
710	.pmp_hardreset		= mv_pmp_hardreset,
711	.pmp_softreset		= mv_softreset,
712	.error_handler		= mv_pmp_error_handler,
713
714	.scr_read		= mv_scr_read,
715	.scr_write		= mv_scr_write,
716
717	.sff_check_status	= mv_sff_check_status,
718	.sff_irq_clear		= mv_sff_irq_clear,
719	.check_atapi_dma	= mv_check_atapi_dma,
720	.bmdma_setup		= mv_bmdma_setup,
721	.bmdma_start		= mv_bmdma_start,
722	.bmdma_stop		= mv_bmdma_stop,
723	.bmdma_status		= mv_bmdma_status,
724
725	.port_start		= mv_port_start,
726	.port_stop		= mv_port_stop,
727};
728
729static struct ata_port_operations mv_iie_ops = {
730	.inherits		= &mv6_ops,
731	.dev_config		= ATA_OP_NULL,
732	.qc_prep		= mv_qc_prep_iie,
733};
734
735static const struct ata_port_info mv_port_info[] = {
736	{  /* chip_504x */
737		.flags		= MV_GEN_I_FLAGS,
738		.pio_mask	= ATA_PIO4,
739		.udma_mask	= ATA_UDMA6,
740		.port_ops	= &mv5_ops,
741	},
742	{  /* chip_508x */
743		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
744		.pio_mask	= ATA_PIO4,
745		.udma_mask	= ATA_UDMA6,
746		.port_ops	= &mv5_ops,
747	},
748	{  /* chip_5080 */
749		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
750		.pio_mask	= ATA_PIO4,
751		.udma_mask	= ATA_UDMA6,
752		.port_ops	= &mv5_ops,
753	},
754	{  /* chip_604x */
755		.flags		= MV_GEN_II_FLAGS,
756		.pio_mask	= ATA_PIO4,
757		.udma_mask	= ATA_UDMA6,
758		.port_ops	= &mv6_ops,
759	},
760	{  /* chip_608x */
761		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
762		.pio_mask	= ATA_PIO4,
763		.udma_mask	= ATA_UDMA6,
764		.port_ops	= &mv6_ops,
765	},
766	{  /* chip_6042 */
767		.flags		= MV_GEN_IIE_FLAGS,
768		.pio_mask	= ATA_PIO4,
769		.udma_mask	= ATA_UDMA6,
770		.port_ops	= &mv_iie_ops,
771	},
772	{  /* chip_7042 */
773		.flags		= MV_GEN_IIE_FLAGS,
774		.pio_mask	= ATA_PIO4,
775		.udma_mask	= ATA_UDMA6,
776		.port_ops	= &mv_iie_ops,
777	},
778	{  /* chip_soc */
779		.flags		= MV_GEN_IIE_FLAGS,
780		.pio_mask	= ATA_PIO4,
781		.udma_mask	= ATA_UDMA6,
782		.port_ops	= &mv_iie_ops,
783	},
784};
785
786static const struct pci_device_id mv_pci_tbl[] = {
787	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
788	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
789	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
790	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
791	/* RocketRAID 1720/174x have different identifiers */
792	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
793	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
794	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
795
796	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
797	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
798	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
799	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
800	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
801
802	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
803
804	/* Adaptec 1430SA */
805	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
806
807	/* Marvell 7042 support */
808	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
809
810	/* Highpoint RocketRAID PCIe series */
811	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
812	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
813
814	{ }			/* terminate list */
815};
816
817static const struct mv_hw_ops mv5xxx_ops = {
818	.phy_errata		= mv5_phy_errata,
819	.enable_leds		= mv5_enable_leds,
820	.read_preamp		= mv5_read_preamp,
821	.reset_hc		= mv5_reset_hc,
822	.reset_flash		= mv5_reset_flash,
823	.reset_bus		= mv5_reset_bus,
824};
825
826static const struct mv_hw_ops mv6xxx_ops = {
827	.phy_errata		= mv6_phy_errata,
828	.enable_leds		= mv6_enable_leds,
829	.read_preamp		= mv6_read_preamp,
830	.reset_hc		= mv6_reset_hc,
831	.reset_flash		= mv6_reset_flash,
832	.reset_bus		= mv_reset_pci_bus,
833};
834
835static const struct mv_hw_ops mv_soc_ops = {
836	.phy_errata		= mv6_phy_errata,
837	.enable_leds		= mv_soc_enable_leds,
838	.read_preamp		= mv_soc_read_preamp,
839	.reset_hc		= mv_soc_reset_hc,
840	.reset_flash		= mv_soc_reset_flash,
841	.reset_bus		= mv_soc_reset_bus,
842};
843
844static const struct mv_hw_ops mv_soc_65n_ops = {
845	.phy_errata		= mv_soc_65n_phy_errata,
846	.enable_leds		= mv_soc_enable_leds,
847	.reset_hc		= mv_soc_reset_hc,
848	.reset_flash		= mv_soc_reset_flash,
849	.reset_bus		= mv_soc_reset_bus,
850};
851
852/*
853 * Functions
854 */
855
856static inline void writelfl(unsigned long data, void __iomem *addr)
857{
858	writel(data, addr);
859	(void) readl(addr);	/* flush to avoid PCI posted write */
860}
861
862static inline unsigned int mv_hc_from_port(unsigned int port)
863{
864	return port >> MV_PORT_HC_SHIFT;
865}
866
867static inline unsigned int mv_hardport_from_port(unsigned int port)
868{
869	return port & MV_PORT_MASK;
870}
871
872/*
873 * Consolidate some rather tricky bit shift calculations.
874 * This is hot-path stuff, so not a function.
875 * Simple code, with two return values, so macro rather than inline.
876 *
877 * port is the sole input, in range 0..7.
878 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
879 * hardport is the other output, in range 0..3.
880 *
881 * Note that port and hardport may be the same variable in some cases.
882 */
883#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
884{								\
885	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
886	hardport = mv_hardport_from_port(port);			\
887	shift   += hardport * 2;				\
888}
889
890static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
891{
892	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
893}
894
895static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
896						 unsigned int port)
897{
898	return mv_hc_base(base, mv_hc_from_port(port));
899}
900
901static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
902{
903	return  mv_hc_base_from_port(base, port) +
904		MV_SATAHC_ARBTR_REG_SZ +
905		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
906}
907
908static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
909{
910	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
911	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
912
913	return hc_mmio + ofs;
914}
915
916static inline void __iomem *mv_host_base(struct ata_host *host)
917{
918	struct mv_host_priv *hpriv = host->private_data;
919	return hpriv->base;
920}
921
922static inline void __iomem *mv_ap_base(struct ata_port *ap)
923{
924	return mv_port_base(mv_host_base(ap->host), ap->port_no);
925}
926
927static inline int mv_get_hc_count(unsigned long port_flags)
928{
929	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
930}
931
932/**
933 *      mv_save_cached_regs - (re-)initialize cached port registers
934 *      @ap: the port whose registers we are caching
935 *
936 *	Initialize the local cache of port registers,
937 *	so that reading them over and over again can
938 *	be avoided on the hotter paths of this driver.
939 *	This saves a few microseconds each time we switch
940 *	to/from EDMA mode to perform (eg.) a drive cache flush.
941 */
942static void mv_save_cached_regs(struct ata_port *ap)
943{
944	void __iomem *port_mmio = mv_ap_base(ap);
945	struct mv_port_priv *pp = ap->private_data;
946
947	pp->cached.fiscfg = readl(port_mmio + FISCFG);
948	pp->cached.ltmode = readl(port_mmio + LTMODE);
949	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
950	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
951}
952
953/**
954 *      mv_write_cached_reg - write to a cached port register
955 *      @addr: hardware address of the register
956 *      @old: pointer to cached value of the register
957 *      @new: new value for the register
958 *
959 *	Write a new value to a cached register,
960 *	but only if the value is different from before.
961 */
962static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
963{
964	if (new != *old) {
965		unsigned long laddr;
966		*old = new;
967		/*
968		 * Workaround for 88SX60x1-B2 FEr SATA#13:
969		 * Read-after-write is needed to prevent generating 64-bit
970		 * write cycles on the PCI bus for SATA interface registers
971		 * at offsets ending in 0x4 or 0xc.
972		 *
973		 * Looks like a lot of fuss, but it avoids an unnecessary
974		 * +1 usec read-after-write delay for unaffected registers.
975		 */
976		laddr = (unsigned long)addr & 0xffff;
977		if (laddr >= 0x300 && laddr <= 0x33c) {
978			laddr &= 0x000f;
979			if (laddr == 0x4 || laddr == 0xc) {
980				writelfl(new, addr); /* read after write */
981				return;
982			}
983		}
984		writel(new, addr); /* unaffected by the errata */
985	}
986}
987
988static void mv_set_edma_ptrs(void __iomem *port_mmio,
989			     struct mv_host_priv *hpriv,
990			     struct mv_port_priv *pp)
991{
992	u32 index;
993
994	/*
995	 * initialize request queue
996	 */
997	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
998	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
999
1000	WARN_ON(pp->crqb_dma & 0x3ff);
1001	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
1002	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
1003		 port_mmio + EDMA_REQ_Q_IN_PTR);
1004	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
1005
1006	/*
1007	 * initialize response queue
1008	 */
1009	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
1010	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
1011
1012	WARN_ON(pp->crpb_dma & 0xff);
1013	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
1014	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
1015	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
1016		 port_mmio + EDMA_RSP_Q_OUT_PTR);
1017}
1018
1019static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
1020{
1021	/*
1022	 * When writing to the main_irq_mask in hardware,
1023	 * we must ensure exclusivity between the interrupt coalescing bits
1024	 * and the corresponding individual port DONE_IRQ bits.
1025	 *
1026	 * Note that this register is really an "IRQ enable" register,
1027	 * not an "IRQ mask" register as Marvell's naming might suggest.
1028	 */
1029	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
1030		mask &= ~DONE_IRQ_0_3;
1031	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
1032		mask &= ~DONE_IRQ_4_7;
1033	writelfl(mask, hpriv->main_irq_mask_addr);
1034}
1035
1036static void mv_set_main_irq_mask(struct ata_host *host,
1037				 u32 disable_bits, u32 enable_bits)
1038{
1039	struct mv_host_priv *hpriv = host->private_data;
1040	u32 old_mask, new_mask;
1041
1042	old_mask = hpriv->main_irq_mask;
1043	new_mask = (old_mask & ~disable_bits) | enable_bits;
1044	if (new_mask != old_mask) {
1045		hpriv->main_irq_mask = new_mask;
1046		mv_write_main_irq_mask(new_mask, hpriv);
1047	}
1048}
1049
1050static void mv_enable_port_irqs(struct ata_port *ap,
1051				     unsigned int port_bits)
1052{
1053	unsigned int shift, hardport, port = ap->port_no;
1054	u32 disable_bits, enable_bits;
1055
1056	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1057
1058	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
1059	enable_bits  = port_bits << shift;
1060	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
1061}
1062
1063static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
1064					  void __iomem *port_mmio,
1065					  unsigned int port_irqs)
1066{
1067	struct mv_host_priv *hpriv = ap->host->private_data;
1068	int hardport = mv_hardport_from_port(ap->port_no);
1069	void __iomem *hc_mmio = mv_hc_base_from_port(
1070				mv_host_base(ap->host), ap->port_no);
1071	u32 hc_irq_cause;
1072
1073	/* clear EDMA event indicators, if any */
1074	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
1075
1076	/* clear pending irq events */
1077	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
1078	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
1079
1080	/* clear FIS IRQ Cause */
1081	if (IS_GEN_IIE(hpriv))
1082		writelfl(0, port_mmio + FIS_IRQ_CAUSE);
1083
1084	mv_enable_port_irqs(ap, port_irqs);
1085}
1086
1087static void mv_set_irq_coalescing(struct ata_host *host,
1088				  unsigned int count, unsigned int usecs)
1089{
1090	struct mv_host_priv *hpriv = host->private_data;
1091	void __iomem *mmio = hpriv->base, *hc_mmio;
1092	u32 coal_enable = 0;
1093	unsigned long flags;
1094	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
1095	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
1096							ALL_PORTS_COAL_DONE;
1097
1098	/* Disable IRQ coalescing if either threshold is zero */
1099	if (!usecs || !count) {
1100		clks = count = 0;
1101	} else {
1102		/* Respect maximum limits of the hardware */
1103		clks = usecs * COAL_CLOCKS_PER_USEC;
1104		if (clks > MAX_COAL_TIME_THRESHOLD)
1105			clks = MAX_COAL_TIME_THRESHOLD;
1106		if (count > MAX_COAL_IO_COUNT)
1107			count = MAX_COAL_IO_COUNT;
1108	}
1109
1110	spin_lock_irqsave(&host->lock, flags);
1111	mv_set_main_irq_mask(host, coal_disable, 0);
1112
1113	if (is_dual_hc && !IS_GEN_I(hpriv)) {
1114		/*
1115		 * GEN_II/GEN_IIE with dual host controllers:
1116		 * one set of global thresholds for the entire chip.
1117		 */
1118		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
1119		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
1120		/* clear leftover coal IRQ bit */
1121		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
1122		if (count)
1123			coal_enable = ALL_PORTS_COAL_DONE;
1124		clks = count = 0; /* force clearing of regular regs below */
1125	}
1126
1127	/*
1128	 * All chips: independent thresholds for each HC on the chip.
1129	 */
1130	hc_mmio = mv_hc_base_from_port(mmio, 0);
1131	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1132	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1133	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1134	if (count)
1135		coal_enable |= PORTS_0_3_COAL_DONE;
1136	if (is_dual_hc) {
1137		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
1138		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1139		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1140		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1141		if (count)
1142			coal_enable |= PORTS_4_7_COAL_DONE;
1143	}
1144
1145	mv_set_main_irq_mask(host, 0, coal_enable);
1146	spin_unlock_irqrestore(&host->lock, flags);
1147}
1148
1149/**
1150 *      mv_start_edma - Enable eDMA engine
1151 *      @base: port base address
1152 *      @pp: port private data
1153 *
1154 *      Verify the local cache of the eDMA state is accurate with a
1155 *      WARN_ON.
1156 *
1157 *      LOCKING:
1158 *      Inherited from caller.
1159 */
1160static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
1161			 struct mv_port_priv *pp, u8 protocol)
1162{
1163	int want_ncq = (protocol == ATA_PROT_NCQ);
1164
1165	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1166		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
1167		if (want_ncq != using_ncq)
1168			mv_stop_edma(ap);
1169	}
1170	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1171		struct mv_host_priv *hpriv = ap->host->private_data;
1172
1173		mv_edma_cfg(ap, want_ncq, 1);
1174
1175		mv_set_edma_ptrs(port_mmio, hpriv, pp);
1176		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
1177
1178		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
1179		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
1180	}
1181}
1182
1183static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
1184{
1185	void __iomem *port_mmio = mv_ap_base(ap);
1186	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
1187	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
1188	int i;
1189
1190	/*
1191	 * Wait for the EDMA engine to finish transactions in progress.
1192	 * No idea what a good "timeout" value might be, but measurements
1193	 * indicate that it often requires hundreds of microseconds
1194	 * with two drives in-use.  So we use the 15msec value above
1195	 * as a rough guess at what even more drives might require.
1196	 */
1197	for (i = 0; i < timeout; ++i) {
1198		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
1199		if ((edma_stat & empty_idle) == empty_idle)
1200			break;
1201		udelay(per_loop);
1202	}
1203	/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
1204}
1205
1206/**
1207 *      mv_stop_edma_engine - Disable eDMA engine
1208 *      @port_mmio: io base address
1209 *
1210 *      LOCKING:
1211 *      Inherited from caller.
1212 */
1213static int mv_stop_edma_engine(void __iomem *port_mmio)
1214{
1215	int i;
1216
1217	/* Disable eDMA.  The disable bit auto clears. */
1218	writelfl(EDMA_DS, port_mmio + EDMA_CMD);
1219
1220	/* Wait for the chip to confirm eDMA is off. */
1221	for (i = 10000; i > 0; i--) {
1222		u32 reg = readl(port_mmio + EDMA_CMD);
1223		if (!(reg & EDMA_EN))
1224			return 0;
1225		udelay(10);
1226	}
1227	return -EIO;
1228}
1229
1230static int mv_stop_edma(struct ata_port *ap)
1231{
1232	void __iomem *port_mmio = mv_ap_base(ap);
1233	struct mv_port_priv *pp = ap->private_data;
1234	int err = 0;
1235
1236	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1237		return 0;
1238	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1239	mv_wait_for_edma_empty_idle(ap);
1240	if (mv_stop_edma_engine(port_mmio)) {
1241		ata_port_err(ap, "Unable to stop eDMA\n");
1242		err = -EIO;
1243	}
1244	mv_edma_cfg(ap, 0, 0);
1245	return err;
1246}
1247
1248#ifdef ATA_DEBUG
1249static void mv_dump_mem(void __iomem *start, unsigned bytes)
1250{
1251	int b, w;
1252	for (b = 0; b < bytes; ) {
1253		DPRINTK("%p: ", start + b);
1254		for (w = 0; b < bytes && w < 4; w++) {
1255			printk("%08x ", readl(start + b));
1256			b += sizeof(u32);
1257		}
1258		printk("\n");
1259	}
1260}
1261#endif
1262#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
1263static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1264{
1265#ifdef ATA_DEBUG
1266	int b, w;
1267	u32 dw;
1268	for (b = 0; b < bytes; ) {
1269		DPRINTK("%02x: ", b);
1270		for (w = 0; b < bytes && w < 4; w++) {
1271			(void) pci_read_config_dword(pdev, b, &dw);
1272			printk("%08x ", dw);
1273			b += sizeof(u32);
1274		}
1275		printk("\n");
1276	}
1277#endif
1278}
1279#endif
1280static void mv_dump_all_regs(void __iomem *mmio_base, int port,
1281			     struct pci_dev *pdev)
1282{
1283#ifdef ATA_DEBUG
1284	void __iomem *hc_base = mv_hc_base(mmio_base,
1285					   port >> MV_PORT_HC_SHIFT);
1286	void __iomem *port_base;
1287	int start_port, num_ports, p, start_hc, num_hcs, hc;
1288
1289	if (0 > port) {
1290		start_hc = start_port = 0;
1291		num_ports = 8;		/* shld be benign for 4 port devs */
1292		num_hcs = 2;
1293	} else {
1294		start_hc = port >> MV_PORT_HC_SHIFT;
1295		start_port = port;
1296		num_ports = num_hcs = 1;
1297	}
1298	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1299		num_ports > 1 ? num_ports - 1 : start_port);
1300
1301	if (NULL != pdev) {
1302		DPRINTK("PCI config space regs:\n");
1303		mv_dump_pci_cfg(pdev, 0x68);
1304	}
1305	DPRINTK("PCI regs:\n");
1306	mv_dump_mem(mmio_base+0xc00, 0x3c);
1307	mv_dump_mem(mmio_base+0xd00, 0x34);
1308	mv_dump_mem(mmio_base+0xf00, 0x4);
1309	mv_dump_mem(mmio_base+0x1d00, 0x6c);
1310	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1311		hc_base = mv_hc_base(mmio_base, hc);
1312		DPRINTK("HC regs (HC %i):\n", hc);
1313		mv_dump_mem(hc_base, 0x1c);
1314	}
1315	for (p = start_port; p < start_port + num_ports; p++) {
1316		port_base = mv_port_base(mmio_base, p);
1317		DPRINTK("EDMA regs (port %i):\n", p);
1318		mv_dump_mem(port_base, 0x54);
1319		DPRINTK("SATA regs (port %i):\n", p);
1320		mv_dump_mem(port_base+0x300, 0x60);
1321	}
1322#endif
1323}
1324
1325static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1326{
1327	unsigned int ofs;
1328
1329	switch (sc_reg_in) {
1330	case SCR_STATUS:
1331	case SCR_CONTROL:
1332	case SCR_ERROR:
1333		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
1334		break;
1335	case SCR_ACTIVE:
1336		ofs = SATA_ACTIVE;   /* active is not with the others */
1337		break;
1338	default:
1339		ofs = 0xffffffffU;
1340		break;
1341	}
1342	return ofs;
1343}
1344
1345static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1346{
1347	unsigned int ofs = mv_scr_offset(sc_reg_in);
1348
1349	if (ofs != 0xffffffffU) {
1350		*val = readl(mv_ap_base(link->ap) + ofs);
1351		return 0;
1352	} else
1353		return -EINVAL;
1354}
1355
1356static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1357{
1358	unsigned int ofs = mv_scr_offset(sc_reg_in);
1359
1360	if (ofs != 0xffffffffU) {
1361		void __iomem *addr = mv_ap_base(link->ap) + ofs;
1362		struct mv_host_priv *hpriv = link->ap->host->private_data;
1363		if (sc_reg_in == SCR_CONTROL) {
1364			/*
1365			 * Workaround for 88SX60x1 FEr SATA#26:
1366			 *
1367			 * COMRESETs have to take care not to accidentally
1368			 * put the drive to sleep when writing SCR_CONTROL.
1369			 * Setting bits 12..15 prevents this problem.
1370			 *
1371			 * So if we see an outbound COMMRESET, set those bits.
1372			 * Ditto for the followup write that clears the reset.
1373			 *
1374			 * The proprietary driver does this for
1375			 * all chip versions, and so do we.
1376			 */
1377			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1378				val |= 0xf000;
1379
1380			if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
1381				void __iomem *lp_phy_addr =
1382					mv_ap_base(link->ap) + LP_PHY_CTL;
1383				/*
1384				 * Set PHY speed according to SControl speed.
1385				 */
1386				u32 lp_phy_val =
1387					LP_PHY_CTL_PIN_PU_PLL |
1388					LP_PHY_CTL_PIN_PU_RX  |
1389					LP_PHY_CTL_PIN_PU_TX;
1390
1391				if ((val & 0xf0) != 0x10)
1392					lp_phy_val |=
1393						LP_PHY_CTL_GEN_TX_3G |
1394						LP_PHY_CTL_GEN_RX_3G;
1395
1396				writelfl(lp_phy_val, lp_phy_addr);
1397			}
1398		}
1399		writelfl(val, addr);
1400		return 0;
1401	} else
1402		return -EINVAL;
1403}
1404
1405static void mv6_dev_config(struct ata_device *adev)
1406{
1407	/*
1408	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1409	 *
1410	 * Gen-II does not support NCQ over a port multiplier
1411	 *  (no FIS-based switching).
1412	 */
1413	if (adev->flags & ATA_DFLAG_NCQ) {
1414		if (sata_pmp_attached(adev->link->ap)) {
1415			adev->flags &= ~ATA_DFLAG_NCQ;
1416			ata_dev_info(adev,
1417				"NCQ disabled for command-based switching\n");
1418		}
1419	}
1420}
1421
1422static int mv_qc_defer(struct ata_queued_cmd *qc)
1423{
1424	struct ata_link *link = qc->dev->link;
1425	struct ata_port *ap = link->ap;
1426	struct mv_port_priv *pp = ap->private_data;
1427
1428	/*
1429	 * Don't allow new commands if we're in a delayed EH state
1430	 * for NCQ and/or FIS-based switching.
1431	 */
1432	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1433		return ATA_DEFER_PORT;
1434
1435	/* PIO commands need exclusive link: no other commands [DMA or PIO]
1436	 * can run concurrently.
1437	 * set excl_link when we want to send a PIO command in DMA mode
1438	 * or a non-NCQ command in NCQ mode.
1439	 * When we receive a command from that link, and there are no
1440	 * outstanding commands, mark a flag to clear excl_link and let
1441	 * the command go through.
1442	 */
1443	if (unlikely(ap->excl_link)) {
1444		if (link == ap->excl_link) {
1445			if (ap->nr_active_links)
1446				return ATA_DEFER_PORT;
1447			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
1448			return 0;
1449		} else
1450			return ATA_DEFER_PORT;
1451	}
1452
1453	/*
1454	 * If the port is completely idle, then allow the new qc.
1455	 */
1456	if (ap->nr_active_links == 0)
1457		return 0;
1458
1459	/*
1460	 * The port is operating in host queuing mode (EDMA) with NCQ
1461	 * enabled, allow multiple NCQ commands.  EDMA also allows
1462	 * queueing multiple DMA commands but libata core currently
1463	 * doesn't allow it.
1464	 */
1465	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1466	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1467		if (ata_is_ncq(qc->tf.protocol))
1468			return 0;
1469		else {
1470			ap->excl_link = link;
1471			return ATA_DEFER_PORT;
1472		}
1473	}
1474
1475	return ATA_DEFER_PORT;
1476}
1477
1478static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
1479{
1480	struct mv_port_priv *pp = ap->private_data;
1481	void __iomem *port_mmio;
1482
1483	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
1484	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
1485	u32 haltcond, *old_haltcond = &pp->cached.haltcond;
1486
1487	ltmode   = *old_ltmode & ~LTMODE_BIT8;
1488	haltcond = *old_haltcond | EDMA_ERR_DEV;
1489
1490	if (want_fbs) {
1491		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1492		ltmode = *old_ltmode | LTMODE_BIT8;
1493		if (want_ncq)
1494			haltcond &= ~EDMA_ERR_DEV;
1495		else
1496			fiscfg |=  FISCFG_WAIT_DEV_ERR;
1497	} else {
1498		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1499	}
1500
1501	port_mmio = mv_ap_base(ap);
1502	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1503	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1504	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1505}
1506
1507static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1508{
1509	struct mv_host_priv *hpriv = ap->host->private_data;
1510	u32 old, new;
1511
1512	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1513	old = readl(hpriv->base + GPIO_PORT_CTL);
1514	if (want_ncq)
1515		new = old | (1 << 22);
1516	else
1517		new = old & ~(1 << 22);
1518	if (new != old)
1519		writel(new, hpriv->base + GPIO_PORT_CTL);
1520}
1521
1522/**
1523 *	mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
1524 *	@ap: Port being initialized
1525 *
1526 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
1527 *
1528 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
1529 *	of basic DMA on the GEN_IIE versions of the chips.
1530 *
1531 *	This bit survives EDMA resets, and must be set for basic DMA
1532 *	to function, and should be cleared when EDMA is active.
1533 */
1534static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1535{
1536	struct mv_port_priv *pp = ap->private_data;
1537	u32 new, *old = &pp->cached.unknown_rsvd;
1538
1539	if (enable_bmdma)
1540		new = *old | 1;
1541	else
1542		new = *old & ~1;
1543	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1544}
1545
1546/*
1547 * SOC chips have an issue whereby the HDD LEDs don't always blink
1548 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1549 * of the SOC takes care of it, generating a steady blink rate when
1550 * any drive on the chip is active.
1551 *
1552 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1553 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1554 *
1555 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1556 * LED operation works then, and provides better (more accurate) feedback.
1557 *
1558 * Note that this code assumes that an SOC never has more than one HC onboard.
1559 */
1560static void mv_soc_led_blink_enable(struct ata_port *ap)
1561{
1562	struct ata_host *host = ap->host;
1563	struct mv_host_priv *hpriv = host->private_data;
1564	void __iomem *hc_mmio;
1565	u32 led_ctrl;
1566
1567	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1568		return;
1569	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1570	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1571	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1572	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1573}
1574
1575static void mv_soc_led_blink_disable(struct ata_port *ap)
1576{
1577	struct ata_host *host = ap->host;
1578	struct mv_host_priv *hpriv = host->private_data;
1579	void __iomem *hc_mmio;
1580	u32 led_ctrl;
1581	unsigned int port;
1582
1583	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1584		return;
1585
1586	/* disable led-blink only if no ports are using NCQ */
1587	for (port = 0; port < hpriv->n_ports; port++) {
1588		struct ata_port *this_ap = host->ports[port];
1589		struct mv_port_priv *pp = this_ap->private_data;
1590
1591		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1592			return;
1593	}
1594
1595	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1596	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1597	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1598	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1599}
1600
1601static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1602{
1603	u32 cfg;
1604	struct mv_port_priv *pp    = ap->private_data;
1605	struct mv_host_priv *hpriv = ap->host->private_data;
1606	void __iomem *port_mmio    = mv_ap_base(ap);
1607
1608	/* set up non-NCQ EDMA configuration */
1609	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
1610	pp->pp_flags &=
1611	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
1612
1613	if (IS_GEN_I(hpriv))
1614		cfg |= (1 << 8);	/* enab config burst size mask */
1615
1616	else if (IS_GEN_II(hpriv)) {
1617		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1618		mv_60x1_errata_sata25(ap, want_ncq);
1619
1620	} else if (IS_GEN_IIE(hpriv)) {
1621		int want_fbs = sata_pmp_attached(ap);
1622		/*
1623		 * Possible future enhancement:
1624		 *
1625		 * The chip can use FBS with non-NCQ, if we allow it,
1626		 * But first we need to have the error handling in place
1627		 * for this mode (datasheet section 7.3.15.4.2.3).
1628		 * So disallow non-NCQ FBS for now.
1629		 */
1630		want_fbs &= want_ncq;
1631
1632		mv_config_fbs(ap, want_ncq, want_fbs);
1633
1634		if (want_fbs) {
1635			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1636			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1637		}
1638
1639		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
1640		if (want_edma) {
1641			cfg |= (1 << 22); /* enab 4-entry host queue cache */
1642			if (!IS_SOC(hpriv))
1643				cfg |= (1 << 18); /* enab early completion */
1644		}
1645		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1646			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1647		mv_bmdma_enable_iie(ap, !want_edma);
1648
1649		if (IS_SOC(hpriv)) {
1650			if (want_ncq)
1651				mv_soc_led_blink_enable(ap);
1652			else
1653				mv_soc_led_blink_disable(ap);
1654		}
1655	}
1656
1657	if (want_ncq) {
1658		cfg |= EDMA_CFG_NCQ;
1659		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
1660	}
1661
1662	writelfl(cfg, port_mmio + EDMA_CFG);
1663}
1664
1665static void mv_port_free_dma_mem(struct ata_port *ap)
1666{
1667	struct mv_host_priv *hpriv = ap->host->private_data;
1668	struct mv_port_priv *pp = ap->private_data;
1669	int tag;
1670
1671	if (pp->crqb) {
1672		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1673		pp->crqb = NULL;
1674	}
1675	if (pp->crpb) {
1676		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1677		pp->crpb = NULL;
1678	}
1679	/*
1680	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1681	 * For later hardware, we have one unique sg_tbl per NCQ tag.
1682	 */
1683	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1684		if (pp->sg_tbl[tag]) {
1685			if (tag == 0 || !IS_GEN_I(hpriv))
1686				dma_pool_free(hpriv->sg_tbl_pool,
1687					      pp->sg_tbl[tag],
1688					      pp->sg_tbl_dma[tag]);
1689			pp->sg_tbl[tag] = NULL;
1690		}
1691	}
1692}
1693
1694/**
1695 *      mv_port_start - Port specific init/start routine.
1696 *      @ap: ATA channel to manipulate
1697 *
1698 *      Allocate and point to DMA memory, init port private memory,
1699 *      zero indices.
1700 *
1701 *      LOCKING:
1702 *      Inherited from caller.
1703 */
1704static int mv_port_start(struct ata_port *ap)
1705{
1706	struct device *dev = ap->host->dev;
1707	struct mv_host_priv *hpriv = ap->host->private_data;
1708	struct mv_port_priv *pp;
1709	unsigned long flags;
1710	int tag;
1711
1712	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1713	if (!pp)
1714		return -ENOMEM;
1715	ap->private_data = pp;
1716
1717	pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1718	if (!pp->crqb)
1719		return -ENOMEM;
1720
1721	pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1722	if (!pp->crpb)
1723		goto out_port_free_dma_mem;
1724
1725	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1726	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1727		ap->flags |= ATA_FLAG_AN;
1728	/*
1729	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1730	 * For later hardware, we need one unique sg_tbl per NCQ tag.
1731	 */
1732	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1733		if (tag == 0 || !IS_GEN_I(hpriv)) {
1734			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1735					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1736			if (!pp->sg_tbl[tag])
1737				goto out_port_free_dma_mem;
1738		} else {
1739			pp->sg_tbl[tag]     = pp->sg_tbl[0];
1740			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1741		}
1742	}
1743
1744	spin_lock_irqsave(ap->lock, flags);
1745	mv_save_cached_regs(ap);
1746	mv_edma_cfg(ap, 0, 0);
1747	spin_unlock_irqrestore(ap->lock, flags);
1748
1749	return 0;
1750
1751out_port_free_dma_mem:
1752	mv_port_free_dma_mem(ap);
1753	return -ENOMEM;
1754}
1755
1756/**
1757 *      mv_port_stop - Port specific cleanup/stop routine.
1758 *      @ap: ATA channel to manipulate
1759 *
1760 *      Stop DMA, cleanup port memory.
1761 *
1762 *      LOCKING:
1763 *      This routine uses the host lock to protect the DMA stop.
1764 */
1765static void mv_port_stop(struct ata_port *ap)
1766{
1767	unsigned long flags;
1768
1769	spin_lock_irqsave(ap->lock, flags);
1770	mv_stop_edma(ap);
1771	mv_enable_port_irqs(ap, 0);
1772	spin_unlock_irqrestore(ap->lock, flags);
1773	mv_port_free_dma_mem(ap);
1774}
1775
1776/**
1777 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1778 *      @qc: queued command whose SG list to source from
1779 *
1780 *      Populate the SG list and mark the last entry.
1781 *
1782 *      LOCKING:
1783 *      Inherited from caller.
1784 */
1785static void mv_fill_sg(struct ata_queued_cmd *qc)
1786{
1787	struct mv_port_priv *pp = qc->ap->private_data;
1788	struct scatterlist *sg;
1789	struct mv_sg *mv_sg, *last_sg = NULL;
1790	unsigned int si;
1791
1792	mv_sg = pp->sg_tbl[qc->hw_tag];
1793	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1794		dma_addr_t addr = sg_dma_address(sg);
1795		u32 sg_len = sg_dma_len(sg);
1796
1797		while (sg_len) {
1798			u32 offset = addr & 0xffff;
1799			u32 len = sg_len;
1800
1801			if (offset + len > 0x10000)
1802				len = 0x10000 - offset;
1803
1804			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1805			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1806			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1807			mv_sg->reserved = 0;
1808
1809			sg_len -= len;
1810			addr += len;
1811
1812			last_sg = mv_sg;
1813			mv_sg++;
1814		}
1815	}
1816
1817	if (likely(last_sg))
1818		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1819	mb(); /* ensure data structure is visible to the chipset */
1820}
1821
1822static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1823{
1824	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1825		(last ? CRQB_CMD_LAST : 0);
1826	*cmdw = cpu_to_le16(tmp);
1827}
1828
1829/**
1830 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
1831 *	@ap: Port associated with this ATA transaction.
1832 *
1833 *	We need this only for ATAPI bmdma transactions,
1834 *	as otherwise we experience spurious interrupts
1835 *	after libata-sff handles the bmdma interrupts.
1836 */
1837static void mv_sff_irq_clear(struct ata_port *ap)
1838{
1839	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1840}
1841
1842/**
1843 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1844 *	@qc: queued command to check for chipset/DMA compatibility.
1845 *
1846 *	The bmdma engines cannot handle speculative data sizes
1847 *	(bytecount under/over flow).  So only allow DMA for
1848 *	data transfer commands with known data sizes.
1849 *
1850 *	LOCKING:
1851 *	Inherited from caller.
1852 */
1853static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1854{
1855	struct scsi_cmnd *scmd = qc->scsicmd;
1856
1857	if (scmd) {
1858		switch (scmd->cmnd[0]) {
1859		case READ_6:
1860		case READ_10:
1861		case READ_12:
1862		case WRITE_6:
1863		case WRITE_10:
1864		case WRITE_12:
1865		case GPCMD_READ_CD:
1866		case GPCMD_SEND_DVD_STRUCTURE:
1867		case GPCMD_SEND_CUE_SHEET:
1868			return 0; /* DMA is safe */
1869		}
1870	}
1871	return -EOPNOTSUPP; /* use PIO instead */
1872}
1873
1874/**
1875 *	mv_bmdma_setup - Set up BMDMA transaction
1876 *	@qc: queued command to prepare DMA for.
1877 *
1878 *	LOCKING:
1879 *	Inherited from caller.
1880 */
1881static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1882{
1883	struct ata_port *ap = qc->ap;
1884	void __iomem *port_mmio = mv_ap_base(ap);
1885	struct mv_port_priv *pp = ap->private_data;
1886
1887	mv_fill_sg(qc);
1888
1889	/* clear all DMA cmd bits */
1890	writel(0, port_mmio + BMDMA_CMD);
1891
1892	/* load PRD table addr. */
1893	writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
1894		port_mmio + BMDMA_PRD_HIGH);
1895	writelfl(pp->sg_tbl_dma[qc->hw_tag],
1896		port_mmio + BMDMA_PRD_LOW);
1897
1898	/* issue r/w command */
1899	ap->ops->sff_exec_command(ap, &qc->tf);
1900}
1901
1902/**
1903 *	mv_bmdma_start - Start a BMDMA transaction
1904 *	@qc: queued command to start DMA on.
1905 *
1906 *	LOCKING:
1907 *	Inherited from caller.
1908 */
1909static void mv_bmdma_start(struct ata_queued_cmd *qc)
1910{
1911	struct ata_port *ap = qc->ap;
1912	void __iomem *port_mmio = mv_ap_base(ap);
1913	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1914	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1915
1916	/* start host DMA transaction */
1917	writelfl(cmd, port_mmio + BMDMA_CMD);
1918}
1919
1920/**
1921 *	mv_bmdma_stop - Stop BMDMA transfer
1922 *	@qc: queued command to stop DMA on.
1923 *
1924 *	Clears the ATA_DMA_START flag in the bmdma control register
1925 *
1926 *	LOCKING:
1927 *	Inherited from caller.
1928 */
1929static void mv_bmdma_stop_ap(struct ata_port *ap)
1930{
1931	void __iomem *port_mmio = mv_ap_base(ap);
1932	u32 cmd;
1933
1934	/* clear start/stop bit */
1935	cmd = readl(port_mmio + BMDMA_CMD);
1936	if (cmd & ATA_DMA_START) {
1937		cmd &= ~ATA_DMA_START;
1938		writelfl(cmd, port_mmio + BMDMA_CMD);
1939
1940		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1941		ata_sff_dma_pause(ap);
1942	}
1943}
1944
1945static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1946{
1947	mv_bmdma_stop_ap(qc->ap);
1948}
1949
1950/**
1951 *	mv_bmdma_status - Read BMDMA status
1952 *	@ap: port for which to retrieve DMA status.
1953 *
1954 *	Read and return equivalent of the sff BMDMA status register.
1955 *
1956 *	LOCKING:
1957 *	Inherited from caller.
1958 */
1959static u8 mv_bmdma_status(struct ata_port *ap)
1960{
1961	void __iomem *port_mmio = mv_ap_base(ap);
1962	u32 reg, status;
1963
1964	/*
1965	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1966	 * and the ATA_DMA_INTR bit doesn't exist.
1967	 */
1968	reg = readl(port_mmio + BMDMA_STATUS);
1969	if (reg & ATA_DMA_ACTIVE)
1970		status = ATA_DMA_ACTIVE;
1971	else if (reg & ATA_DMA_ERR)
1972		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1973	else {
1974		/*
1975		 * Just because DMA_ACTIVE is 0 (DMA completed),
1976		 * this does _not_ mean the device is "done".
1977		 * So we should not yet be signalling ATA_DMA_INTR
1978		 * in some cases.  Eg. DSM/TRIM, and perhaps others.
1979		 */
1980		mv_bmdma_stop_ap(ap);
1981		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1982			status = 0;
1983		else
1984			status = ATA_DMA_INTR;
1985	}
1986	return status;
1987}
1988
1989static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1990{
1991	struct ata_taskfile *tf = &qc->tf;
1992	/*
1993	 * Workaround for 88SX60x1 FEr SATA#24.
1994	 *
1995	 * Chip may corrupt WRITEs if multi_count >= 4kB.
1996	 * Note that READs are unaffected.
1997	 *
1998	 * It's not clear if this errata really means "4K bytes",
1999	 * or if it always happens for multi_count > 7
2000	 * regardless of device sector_size.
2001	 *
2002	 * So, for safety, any write with multi_count > 7
2003	 * gets converted here into a regular PIO write instead:
2004	 */
2005	if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
2006		if (qc->dev->multi_count > 7) {
2007			switch (tf->command) {
2008			case ATA_CMD_WRITE_MULTI:
2009				tf->command = ATA_CMD_PIO_WRITE;
2010				break;
2011			case ATA_CMD_WRITE_MULTI_FUA_EXT:
2012				tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
2013				fallthrough;
2014			case ATA_CMD_WRITE_MULTI_EXT:
2015				tf->command = ATA_CMD_PIO_WRITE_EXT;
2016				break;
2017			}
2018		}
2019	}
2020}
2021
2022/**
2023 *      mv_qc_prep - Host specific command preparation.
2024 *      @qc: queued command to prepare
2025 *
2026 *      This routine simply redirects to the general purpose routine
2027 *      if command is not DMA.  Else, it handles prep of the CRQB
2028 *      (command request block), does some sanity checking, and calls
2029 *      the SG load routine.
2030 *
2031 *      LOCKING:
2032 *      Inherited from caller.
2033 */
2034static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
2035{
2036	struct ata_port *ap = qc->ap;
2037	struct mv_port_priv *pp = ap->private_data;
2038	__le16 *cw;
2039	struct ata_taskfile *tf = &qc->tf;
2040	u16 flags = 0;
2041	unsigned in_index;
2042
2043	switch (tf->protocol) {
2044	case ATA_PROT_DMA:
2045		if (tf->command == ATA_CMD_DSM)
2046			return AC_ERR_OK;
2047		fallthrough;
2048	case ATA_PROT_NCQ:
2049		break;	/* continue below */
2050	case ATA_PROT_PIO:
2051		mv_rw_multi_errata_sata24(qc);
2052		return AC_ERR_OK;
2053	default:
2054		return AC_ERR_OK;
2055	}
2056
2057	/* Fill in command request block
2058	 */
2059	if (!(tf->flags & ATA_TFLAG_WRITE))
2060		flags |= CRQB_FLAG_READ;
2061	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
2062	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
2063	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2064
2065	/* get current queue index from software */
2066	in_index = pp->req_idx;
2067
2068	pp->crqb[in_index].sg_addr =
2069		cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
2070	pp->crqb[in_index].sg_addr_hi =
2071		cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
2072	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2073
2074	cw = &pp->crqb[in_index].ata_cmd[0];
2075
2076	/* Sadly, the CRQB cannot accommodate all registers--there are
2077	 * only 11 bytes...so we must pick and choose required
2078	 * registers based on the command.  So, we drop feature and
2079	 * hob_feature for [RW] DMA commands, but they are needed for
2080	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
2081	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
2082	 */
2083	switch (tf->command) {
2084	case ATA_CMD_READ:
2085	case ATA_CMD_READ_EXT:
2086	case ATA_CMD_WRITE:
2087	case ATA_CMD_WRITE_EXT:
2088	case ATA_CMD_WRITE_FUA_EXT:
2089		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2090		break;
2091	case ATA_CMD_FPDMA_READ:
2092	case ATA_CMD_FPDMA_WRITE:
2093		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
2094		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2095		break;
2096	default:
2097		/* The only other commands EDMA supports in non-queued and
2098		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2099		 * of which are defined/used by Linux.  If we get here, this
2100		 * driver needs work.
2101		 */
2102		ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
2103				tf->command);
2104		return AC_ERR_INVALID;
2105	}
2106	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2107	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2108	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2109	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2110	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2111	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2112	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2113	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2114	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
2115
2116	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2117		return AC_ERR_OK;
2118	mv_fill_sg(qc);
2119
2120	return AC_ERR_OK;
2121}
2122
2123/**
2124 *      mv_qc_prep_iie - Host specific command preparation.
2125 *      @qc: queued command to prepare
2126 *
2127 *      This routine simply redirects to the general purpose routine
2128 *      if command is not DMA.  Else, it handles prep of the CRQB
2129 *      (command request block), does some sanity checking, and calls
2130 *      the SG load routine.
2131 *
2132 *      LOCKING:
2133 *      Inherited from caller.
2134 */
2135static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
2136{
2137	struct ata_port *ap = qc->ap;
2138	struct mv_port_priv *pp = ap->private_data;
2139	struct mv_crqb_iie *crqb;
2140	struct ata_taskfile *tf = &qc->tf;
2141	unsigned in_index;
2142	u32 flags = 0;
2143
2144	if ((tf->protocol != ATA_PROT_DMA) &&
2145	    (tf->protocol != ATA_PROT_NCQ))
2146		return AC_ERR_OK;
2147	if (tf->command == ATA_CMD_DSM)
2148		return AC_ERR_OK;  /* use bmdma for this */
2149
2150	/* Fill in Gen IIE command request block */
2151	if (!(tf->flags & ATA_TFLAG_WRITE))
2152		flags |= CRQB_FLAG_READ;
2153
2154	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
2155	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
2156	flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT;
2157	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2158
2159	/* get current queue index from software */
2160	in_index = pp->req_idx;
2161
2162	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2163	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
2164	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
2165	crqb->flags = cpu_to_le32(flags);
2166
2167	crqb->ata_cmd[0] = cpu_to_le32(
2168			(tf->command << 16) |
2169			(tf->feature << 24)
2170		);
2171	crqb->ata_cmd[1] = cpu_to_le32(
2172			(tf->lbal << 0) |
2173			(tf->lbam << 8) |
2174			(tf->lbah << 16) |
2175			(tf->device << 24)
2176		);
2177	crqb->ata_cmd[2] = cpu_to_le32(
2178			(tf->hob_lbal << 0) |
2179			(tf->hob_lbam << 8) |
2180			(tf->hob_lbah << 16) |
2181			(tf->hob_feature << 24)
2182		);
2183	crqb->ata_cmd[3] = cpu_to_le32(
2184			(tf->nsect << 0) |
2185			(tf->hob_nsect << 8)
2186		);
2187
2188	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2189		return AC_ERR_OK;
2190	mv_fill_sg(qc);
2191
2192	return AC_ERR_OK;
2193}
2194
2195/**
2196 *	mv_sff_check_status - fetch device status, if valid
2197 *	@ap: ATA port to fetch status from
2198 *
2199 *	When using command issue via mv_qc_issue_fis(),
2200 *	the initial ATA_BUSY state does not show up in the
2201 *	ATA status (shadow) register.  This can confuse libata!
2202 *
2203 *	So we have a hook here to fake ATA_BUSY for that situation,
2204 *	until the first time a BUSY, DRQ, or ERR bit is seen.
2205 *
2206 *	The rest of the time, it simply returns the ATA status register.
2207 */
2208static u8 mv_sff_check_status(struct ata_port *ap)
2209{
2210	u8 stat = ioread8(ap->ioaddr.status_addr);
2211	struct mv_port_priv *pp = ap->private_data;
2212
2213	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2214		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2215			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2216		else
2217			stat = ATA_BUSY;
2218	}
2219	return stat;
2220}
2221
2222/**
2223 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2224 *	@fis: fis to be sent
2225 *	@nwords: number of 32-bit words in the fis
2226 */
2227static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2228{
2229	void __iomem *port_mmio = mv_ap_base(ap);
2230	u32 ifctl, old_ifctl, ifstat;
2231	int i, timeout = 200, final_word = nwords - 1;
2232
2233	/* Initiate FIS transmission mode */
2234	old_ifctl = readl(port_mmio + SATA_IFCTL);
2235	ifctl = 0x100 | (old_ifctl & 0xf);
2236	writelfl(ifctl, port_mmio + SATA_IFCTL);
2237
2238	/* Send all words of the FIS except for the final word */
2239	for (i = 0; i < final_word; ++i)
2240		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2241
2242	/* Flag end-of-transmission, and then send the final word */
2243	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2244	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2245
2246	/*
2247	 * Wait for FIS transmission to complete.
2248	 * This typically takes just a single iteration.
2249	 */
2250	do {
2251		ifstat = readl(port_mmio + SATA_IFSTAT);
2252	} while (!(ifstat & 0x1000) && --timeout);
2253
2254	/* Restore original port configuration */
2255	writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2256
2257	/* See if it worked */
2258	if ((ifstat & 0x3000) != 0x1000) {
2259		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
2260			      __func__, ifstat);
2261		return AC_ERR_OTHER;
2262	}
2263	return 0;
2264}
2265
2266/**
2267 *	mv_qc_issue_fis - Issue a command directly as a FIS
2268 *	@qc: queued command to start
2269 *
2270 *	Note that the ATA shadow registers are not updated
2271 *	after command issue, so the device will appear "READY"
2272 *	if polled, even while it is BUSY processing the command.
2273 *
2274 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
2275 *
2276 *	Note: we don't get updated shadow regs on *completion*
2277 *	of non-data commands. So avoid sending them via this function,
2278 *	as they will appear to have completed immediately.
2279 *
2280 *	GEN_IIE has special registers that we could get the result tf from,
2281 *	but earlier chipsets do not.  For now, we ignore those registers.
2282 */
2283static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2284{
2285	struct ata_port *ap = qc->ap;
2286	struct mv_port_priv *pp = ap->private_data;
2287	struct ata_link *link = qc->dev->link;
2288	u32 fis[5];
2289	int err = 0;
2290
2291	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2292	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
2293	if (err)
2294		return err;
2295
2296	switch (qc->tf.protocol) {
2297	case ATAPI_PROT_PIO:
2298		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2299		fallthrough;
2300	case ATAPI_PROT_NODATA:
2301		ap->hsm_task_state = HSM_ST_FIRST;
2302		break;
2303	case ATA_PROT_PIO:
2304		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2305		if (qc->tf.flags & ATA_TFLAG_WRITE)
2306			ap->hsm_task_state = HSM_ST_FIRST;
2307		else
2308			ap->hsm_task_state = HSM_ST;
2309		break;
2310	default:
2311		ap->hsm_task_state = HSM_ST_LAST;
2312		break;
2313	}
2314
2315	if (qc->tf.flags & ATA_TFLAG_POLLING)
2316		ata_sff_queue_pio_task(link, 0);
2317	return 0;
2318}
2319
2320/**
2321 *      mv_qc_issue - Initiate a command to the host
2322 *      @qc: queued command to start
2323 *
2324 *      This routine simply redirects to the general purpose routine
2325 *      if command is not DMA.  Else, it sanity checks our local
2326 *      caches of the request producer/consumer indices then enables
2327 *      DMA and bumps the request producer index.
2328 *
2329 *      LOCKING:
2330 *      Inherited from caller.
2331 */
2332static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2333{
2334	static int limit_warnings = 10;
2335	struct ata_port *ap = qc->ap;
2336	void __iomem *port_mmio = mv_ap_base(ap);
2337	struct mv_port_priv *pp = ap->private_data;
2338	u32 in_index;
2339	unsigned int port_irqs;
2340
2341	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2342
2343	switch (qc->tf.protocol) {
2344	case ATA_PROT_DMA:
2345		if (qc->tf.command == ATA_CMD_DSM) {
2346			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
2347				return AC_ERR_OTHER;
2348			break;  /* use bmdma for this */
2349		}
2350		fallthrough;
2351	case ATA_PROT_NCQ:
2352		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2353		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2354		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2355
2356		/* Write the request in pointer to kick the EDMA to life */
2357		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2358					port_mmio + EDMA_REQ_Q_IN_PTR);
2359		return 0;
2360
2361	case ATA_PROT_PIO:
2362		/*
2363		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2364		 *
2365		 * Someday, we might implement special polling workarounds
2366		 * for these, but it all seems rather unnecessary since we
2367		 * normally use only DMA for commands which transfer more
2368		 * than a single block of data.
2369		 *
2370		 * Much of the time, this could just work regardless.
2371		 * So for now, just log the incident, and allow the attempt.
2372		 */
2373		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
2374			--limit_warnings;
2375			ata_link_warn(qc->dev->link, DRV_NAME
2376				      ": attempting PIO w/multiple DRQ: "
2377				      "this may fail due to h/w errata\n");
2378		}
2379		fallthrough;
2380	case ATA_PROT_NODATA:
2381	case ATAPI_PROT_PIO:
2382	case ATAPI_PROT_NODATA:
2383		if (ap->flags & ATA_FLAG_PIO_POLLING)
2384			qc->tf.flags |= ATA_TFLAG_POLLING;
2385		break;
2386	}
2387
2388	if (qc->tf.flags & ATA_TFLAG_POLLING)
2389		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
2390	else
2391		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */
2392
2393	/*
2394	 * We're about to send a non-EDMA capable command to the
2395	 * port.  Turn off EDMA so there won't be problems accessing
2396	 * shadow block, etc registers.
2397	 */
2398	mv_stop_edma(ap);
2399	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2400	mv_pmp_select(ap, qc->dev->link->pmp);
2401
2402	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2403		struct mv_host_priv *hpriv = ap->host->private_data;
2404		/*
2405		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
2406		 *
2407		 * After any NCQ error, the READ_LOG_EXT command
2408		 * from libata-eh *must* use mv_qc_issue_fis().
2409		 * Otherwise it might fail, due to chip errata.
2410		 *
2411		 * Rather than special-case it, we'll just *always*
2412		 * use this method here for READ_LOG_EXT, making for
2413		 * easier testing.
2414		 */
2415		if (IS_GEN_II(hpriv))
2416			return mv_qc_issue_fis(qc);
2417	}
2418	return ata_bmdma_qc_issue(qc);
2419}
2420
2421static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2422{
2423	struct mv_port_priv *pp = ap->private_data;
2424	struct ata_queued_cmd *qc;
2425
2426	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2427		return NULL;
2428	qc = ata_qc_from_tag(ap, ap->link.active_tag);
2429	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2430		return qc;
2431	return NULL;
2432}
2433
2434static void mv_pmp_error_handler(struct ata_port *ap)
2435{
2436	unsigned int pmp, pmp_map;
2437	struct mv_port_priv *pp = ap->private_data;
2438
2439	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2440		/*
2441		 * Perform NCQ error analysis on failed PMPs
2442		 * before we freeze the port entirely.
2443		 *
2444		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2445		 */
2446		pmp_map = pp->delayed_eh_pmp_map;
2447		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2448		for (pmp = 0; pmp_map != 0; pmp++) {
2449			unsigned int this_pmp = (1 << pmp);
2450			if (pmp_map & this_pmp) {
2451				struct ata_link *link = &ap->pmp_link[pmp];
2452				pmp_map &= ~this_pmp;
2453				ata_eh_analyze_ncq_error(link);
2454			}
2455		}
2456		ata_port_freeze(ap);
2457	}
2458	sata_pmp_error_handler(ap);
2459}
2460
2461static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2462{
2463	void __iomem *port_mmio = mv_ap_base(ap);
2464
2465	return readl(port_mmio + SATA_TESTCTL) >> 16;
2466}
2467
2468static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2469{
2470	unsigned int pmp;
2471
2472	/*
2473	 * Initialize EH info for PMPs which saw device errors
2474	 */
2475	for (pmp = 0; pmp_map != 0; pmp++) {
2476		unsigned int this_pmp = (1 << pmp);
2477		if (pmp_map & this_pmp) {
2478			struct ata_link *link = &ap->pmp_link[pmp];
2479			struct ata_eh_info *ehi = &link->eh_info;
2480
2481			pmp_map &= ~this_pmp;
2482			ata_ehi_clear_desc(ehi);
2483			ata_ehi_push_desc(ehi, "dev err");
2484			ehi->err_mask |= AC_ERR_DEV;
2485			ehi->action |= ATA_EH_RESET;
2486			ata_link_abort(link);
2487		}
2488	}
2489}
2490
2491static int mv_req_q_empty(struct ata_port *ap)
2492{
2493	void __iomem *port_mmio = mv_ap_base(ap);
2494	u32 in_ptr, out_ptr;
2495
2496	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2497			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2498	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2499			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2500	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
2501}
2502
2503static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2504{
2505	struct mv_port_priv *pp = ap->private_data;
2506	int failed_links;
2507	unsigned int old_map, new_map;
2508
2509	/*
2510	 * Device error during FBS+NCQ operation:
2511	 *
2512	 * Set a port flag to prevent further I/O being enqueued.
2513	 * Leave the EDMA running to drain outstanding commands from this port.
2514	 * Perform the post-mortem/EH only when all responses are complete.
2515	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2516	 */
2517	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2518		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2519		pp->delayed_eh_pmp_map = 0;
2520	}
2521	old_map = pp->delayed_eh_pmp_map;
2522	new_map = old_map | mv_get_err_pmp_map(ap);
2523
2524	if (old_map != new_map) {
2525		pp->delayed_eh_pmp_map = new_map;
2526		mv_pmp_eh_prep(ap, new_map & ~old_map);
2527	}
2528	failed_links = hweight16(new_map);
2529
2530	ata_port_info(ap,
2531		      "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
2532		      __func__, pp->delayed_eh_pmp_map,
2533		      ap->qc_active, failed_links,
2534		      ap->nr_active_links);
2535
2536	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2537		mv_process_crpb_entries(ap, pp);
2538		mv_stop_edma(ap);
2539		mv_eh_freeze(ap);
2540		ata_port_info(ap, "%s: done\n", __func__);
2541		return 1;	/* handled */
2542	}
2543	ata_port_info(ap, "%s: waiting\n", __func__);
2544	return 1;	/* handled */
2545}
2546
2547static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2548{
2549	/*
2550	 * Possible future enhancement:
2551	 *
2552	 * FBS+non-NCQ operation is not yet implemented.
2553	 * See related notes in mv_edma_cfg().
2554	 *
2555	 * Device error during FBS+non-NCQ operation:
2556	 *
2557	 * We need to snapshot the shadow registers for each failed command.
2558	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2559	 */
2560	return 0;	/* not handled */
2561}
2562
2563static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2564{
2565	struct mv_port_priv *pp = ap->private_data;
2566
2567	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2568		return 0;	/* EDMA was not active: not handled */
2569	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2570		return 0;	/* FBS was not active: not handled */
2571
2572	if (!(edma_err_cause & EDMA_ERR_DEV))
2573		return 0;	/* non DEV error: not handled */
2574	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2575	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2576		return 0;	/* other problems: not handled */
2577
2578	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2579		/*
2580		 * EDMA should NOT have self-disabled for this case.
2581		 * If it did, then something is wrong elsewhere,
2582		 * and we cannot handle it here.
2583		 */
2584		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2585			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2586				      __func__, edma_err_cause, pp->pp_flags);
2587			return 0; /* not handled */
2588		}
2589		return mv_handle_fbs_ncq_dev_err(ap);
2590	} else {
2591		/*
2592		 * EDMA should have self-disabled for this case.
2593		 * If it did not, then something is wrong elsewhere,
2594		 * and we cannot handle it here.
2595		 */
2596		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2597			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2598				      __func__, edma_err_cause, pp->pp_flags);
2599			return 0; /* not handled */
2600		}
2601		return mv_handle_fbs_non_ncq_dev_err(ap);
2602	}
2603	return 0;	/* not handled */
2604}
2605
2606static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2607{
2608	struct ata_eh_info *ehi = &ap->link.eh_info;
2609	char *when = "idle";
2610
2611	ata_ehi_clear_desc(ehi);
2612	if (edma_was_enabled) {
2613		when = "EDMA enabled";
2614	} else {
2615		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2616		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2617			when = "polling";
2618	}
2619	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2620	ehi->err_mask |= AC_ERR_OTHER;
2621	ehi->action   |= ATA_EH_RESET;
2622	ata_port_freeze(ap);
2623}
2624
2625/**
2626 *      mv_err_intr - Handle error interrupts on the port
2627 *      @ap: ATA channel to manipulate
2628 *
2629 *      Most cases require a full reset of the chip's state machine,
2630 *      which also performs a COMRESET.
2631 *      Also, if the port disabled DMA, update our cached copy to match.
2632 *
2633 *      LOCKING:
2634 *      Inherited from caller.
2635 */
2636static void mv_err_intr(struct ata_port *ap)
2637{
2638	void __iomem *port_mmio = mv_ap_base(ap);
2639	u32 edma_err_cause, eh_freeze_mask, serr = 0;
2640	u32 fis_cause = 0;
2641	struct mv_port_priv *pp = ap->private_data;
2642	struct mv_host_priv *hpriv = ap->host->private_data;
2643	unsigned int action = 0, err_mask = 0;
2644	struct ata_eh_info *ehi = &ap->link.eh_info;
2645	struct ata_queued_cmd *qc;
2646	int abort = 0;
2647
2648	/*
2649	 * Read and clear the SError and err_cause bits.
2650	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2651	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2652	 */
2653	sata_scr_read(&ap->link, SCR_ERROR, &serr);
2654	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2655
2656	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2657	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2658		fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2659		writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2660	}
2661	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2662
2663	if (edma_err_cause & EDMA_ERR_DEV) {
2664		/*
2665		 * Device errors during FIS-based switching operation
2666		 * require special handling.
2667		 */
2668		if (mv_handle_dev_err(ap, edma_err_cause))
2669			return;
2670	}
2671
2672	qc = mv_get_active_qc(ap);
2673	ata_ehi_clear_desc(ehi);
2674	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2675			  edma_err_cause, pp->pp_flags);
2676
2677	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2678		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2679		if (fis_cause & FIS_IRQ_CAUSE_AN) {
2680			u32 ec = edma_err_cause &
2681			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2682			sata_async_notification(ap);
2683			if (!ec)
2684				return; /* Just an AN; no need for the nukes */
2685			ata_ehi_push_desc(ehi, "SDB notify");
2686		}
2687	}
2688	/*
2689	 * All generations share these EDMA error cause bits:
2690	 */
2691	if (edma_err_cause & EDMA_ERR_DEV) {
2692		err_mask |= AC_ERR_DEV;
2693		action |= ATA_EH_RESET;
2694		ata_ehi_push_desc(ehi, "dev error");
2695	}
2696	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2697			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2698			EDMA_ERR_INTRL_PAR)) {
2699		err_mask |= AC_ERR_ATA_BUS;
2700		action |= ATA_EH_RESET;
2701		ata_ehi_push_desc(ehi, "parity error");
2702	}
2703	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2704		ata_ehi_hotplugged(ehi);
2705		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2706			"dev disconnect" : "dev connect");
2707		action |= ATA_EH_RESET;
2708	}
2709
2710	/*
2711	 * Gen-I has a different SELF_DIS bit,
2712	 * different FREEZE bits, and no SERR bit:
2713	 */
2714	if (IS_GEN_I(hpriv)) {
2715		eh_freeze_mask = EDMA_EH_FREEZE_5;
2716		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2717			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2718			ata_ehi_push_desc(ehi, "EDMA self-disable");
2719		}
2720	} else {
2721		eh_freeze_mask = EDMA_EH_FREEZE;
2722		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2723			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2724			ata_ehi_push_desc(ehi, "EDMA self-disable");
2725		}
2726		if (edma_err_cause & EDMA_ERR_SERR) {
2727			ata_ehi_push_desc(ehi, "SError=%08x", serr);
2728			err_mask |= AC_ERR_ATA_BUS;
2729			action |= ATA_EH_RESET;
2730		}
2731	}
2732
2733	if (!err_mask) {
2734		err_mask = AC_ERR_OTHER;
2735		action |= ATA_EH_RESET;
2736	}
2737
2738	ehi->serror |= serr;
2739	ehi->action |= action;
2740
2741	if (qc)
2742		qc->err_mask |= err_mask;
2743	else
2744		ehi->err_mask |= err_mask;
2745
2746	if (err_mask == AC_ERR_DEV) {
2747		/*
2748		 * Cannot do ata_port_freeze() here,
2749		 * because it would kill PIO access,
2750		 * which is needed for further diagnosis.
2751		 */
2752		mv_eh_freeze(ap);
2753		abort = 1;
2754	} else if (edma_err_cause & eh_freeze_mask) {
2755		/*
2756		 * Note to self: ata_port_freeze() calls ata_port_abort()
2757		 */
2758		ata_port_freeze(ap);
2759	} else {
2760		abort = 1;
2761	}
2762
2763	if (abort) {
2764		if (qc)
2765			ata_link_abort(qc->dev->link);
2766		else
2767			ata_port_abort(ap);
2768	}
2769}
2770
2771static bool mv_process_crpb_response(struct ata_port *ap,
2772		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2773{
2774	u8 ata_status;
2775	u16 edma_status = le16_to_cpu(response->flags);
2776
2777	/*
2778	 * edma_status from a response queue entry:
2779	 *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2780	 *   MSB is saved ATA status from command completion.
2781	 */
2782	if (!ncq_enabled) {
2783		u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2784		if (err_cause) {
2785			/*
2786			 * Error will be seen/handled by
2787			 * mv_err_intr().  So do nothing at all here.
2788			 */
2789			return false;
2790		}
2791	}
2792	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2793	if (!ac_err_mask(ata_status))
2794		return true;
2795	/* else: leave it for mv_err_intr() */
2796	return false;
2797}
2798
2799static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2800{
2801	void __iomem *port_mmio = mv_ap_base(ap);
2802	struct mv_host_priv *hpriv = ap->host->private_data;
2803	u32 in_index;
2804	bool work_done = false;
2805	u32 done_mask = 0;
2806	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2807
2808	/* Get the hardware queue position index */
2809	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2810			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2811
2812	/* Process new responses from since the last time we looked */
2813	while (in_index != pp->resp_idx) {
2814		unsigned int tag;
2815		struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2816
2817		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2818
2819		if (IS_GEN_I(hpriv)) {
2820			/* 50xx: no NCQ, only one command active at a time */
2821			tag = ap->link.active_tag;
2822		} else {
2823			/* Gen II/IIE: get command tag from CRPB entry */
2824			tag = le16_to_cpu(response->id) & 0x1f;
2825		}
2826		if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2827			done_mask |= 1 << tag;
2828		work_done = true;
2829	}
2830
2831	if (work_done) {
2832		ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
2833
2834		/* Update the software queue position index in hardware */
2835		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2836			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2837			 port_mmio + EDMA_RSP_Q_OUT_PTR);
2838	}
2839}
2840
2841static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2842{
2843	struct mv_port_priv *pp;
2844	int edma_was_enabled;
2845
2846	/*
2847	 * Grab a snapshot of the EDMA_EN flag setting,
2848	 * so that we have a consistent view for this port,
2849	 * even if something we call of our routines changes it.
2850	 */
2851	pp = ap->private_data;
2852	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2853	/*
2854	 * Process completed CRPB response(s) before other events.
2855	 */
2856	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2857		mv_process_crpb_entries(ap, pp);
2858		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2859			mv_handle_fbs_ncq_dev_err(ap);
2860	}
2861	/*
2862	 * Handle chip-reported errors, or continue on to handle PIO.
2863	 */
2864	if (unlikely(port_cause & ERR_IRQ)) {
2865		mv_err_intr(ap);
2866	} else if (!edma_was_enabled) {
2867		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2868		if (qc)
2869			ata_bmdma_port_intr(ap, qc);
2870		else
2871			mv_unexpected_intr(ap, edma_was_enabled);
2872	}
2873}
2874
2875/**
2876 *      mv_host_intr - Handle all interrupts on the given host controller
2877 *      @host: host specific structure
2878 *      @main_irq_cause: Main interrupt cause register for the chip.
2879 *
2880 *      LOCKING:
2881 *      Inherited from caller.
2882 */
2883static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2884{
2885	struct mv_host_priv *hpriv = host->private_data;
2886	void __iomem *mmio = hpriv->base, *hc_mmio;
2887	unsigned int handled = 0, port;
2888
2889	/* If asserted, clear the "all ports" IRQ coalescing bit */
2890	if (main_irq_cause & ALL_PORTS_COAL_DONE)
2891		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2892
2893	for (port = 0; port < hpriv->n_ports; port++) {
2894		struct ata_port *ap = host->ports[port];
2895		unsigned int p, shift, hardport, port_cause;
2896
2897		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2898		/*
2899		 * Each hc within the host has its own hc_irq_cause register,
2900		 * where the interrupting ports bits get ack'd.
2901		 */
2902		if (hardport == 0) {	/* first port on this hc ? */
2903			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2904			u32 port_mask, ack_irqs;
2905			/*
2906			 * Skip this entire hc if nothing pending for any ports
2907			 */
2908			if (!hc_cause) {
2909				port += MV_PORTS_PER_HC - 1;
2910				continue;
2911			}
2912			/*
2913			 * We don't need/want to read the hc_irq_cause register,
2914			 * because doing so hurts performance, and
2915			 * main_irq_cause already gives us everything we need.
2916			 *
2917			 * But we do have to *write* to the hc_irq_cause to ack
2918			 * the ports that we are handling this time through.
2919			 *
2920			 * This requires that we create a bitmap for those
2921			 * ports which interrupted us, and use that bitmap
2922			 * to ack (only) those ports via hc_irq_cause.
2923			 */
2924			ack_irqs = 0;
2925			if (hc_cause & PORTS_0_3_COAL_DONE)
2926				ack_irqs = HC_COAL_IRQ;
2927			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2928				if ((port + p) >= hpriv->n_ports)
2929					break;
2930				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2931				if (hc_cause & port_mask)
2932					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2933			}
2934			hc_mmio = mv_hc_base_from_port(mmio, port);
2935			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2936			handled = 1;
2937		}
2938		/*
2939		 * Handle interrupts signalled for this port:
2940		 */
2941		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2942		if (port_cause)
2943			mv_port_intr(ap, port_cause);
2944	}
2945	return handled;
2946}
2947
2948static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2949{
2950	struct mv_host_priv *hpriv = host->private_data;
2951	struct ata_port *ap;
2952	struct ata_queued_cmd *qc;
2953	struct ata_eh_info *ehi;
2954	unsigned int i, err_mask, printed = 0;
2955	u32 err_cause;
2956
2957	err_cause = readl(mmio + hpriv->irq_cause_offset);
2958
2959	dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
2960
2961	DPRINTK("All regs @ PCI error\n");
2962	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2963
2964	writelfl(0, mmio + hpriv->irq_cause_offset);
2965
2966	for (i = 0; i < host->n_ports; i++) {
2967		ap = host->ports[i];
2968		if (!ata_link_offline(&ap->link)) {
2969			ehi = &ap->link.eh_info;
2970			ata_ehi_clear_desc(ehi);
2971			if (!printed++)
2972				ata_ehi_push_desc(ehi,
2973					"PCI err cause 0x%08x", err_cause);
2974			err_mask = AC_ERR_HOST_BUS;
2975			ehi->action = ATA_EH_RESET;
2976			qc = ata_qc_from_tag(ap, ap->link.active_tag);
2977			if (qc)
2978				qc->err_mask |= err_mask;
2979			else
2980				ehi->err_mask |= err_mask;
2981
2982			ata_port_freeze(ap);
2983		}
2984	}
2985	return 1;	/* handled */
2986}
2987
2988/**
2989 *      mv_interrupt - Main interrupt event handler
2990 *      @irq: unused
2991 *      @dev_instance: private data; in this case the host structure
2992 *
2993 *      Read the read only register to determine if any host
2994 *      controllers have pending interrupts.  If so, call lower level
2995 *      routine to handle.  Also check for PCI errors which are only
2996 *      reported here.
2997 *
2998 *      LOCKING:
2999 *      This routine holds the host lock while processing pending
3000 *      interrupts.
3001 */
3002static irqreturn_t mv_interrupt(int irq, void *dev_instance)
3003{
3004	struct ata_host *host = dev_instance;
3005	struct mv_host_priv *hpriv = host->private_data;
3006	unsigned int handled = 0;
3007	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
3008	u32 main_irq_cause, pending_irqs;
3009
3010	spin_lock(&host->lock);
3011
3012	/* for MSI:  block new interrupts while in here */
3013	if (using_msi)
3014		mv_write_main_irq_mask(0, hpriv);
3015
3016	main_irq_cause = readl(hpriv->main_irq_cause_addr);
3017	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
3018	/*
3019	 * Deal with cases where we either have nothing pending, or have read
3020	 * a bogus register value which can indicate HW removal or PCI fault.
3021	 */
3022	if (pending_irqs && main_irq_cause != 0xffffffffU) {
3023		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
3024			handled = mv_pci_error(host, hpriv->base);
3025		else
3026			handled = mv_host_intr(host, pending_irqs);
3027	}
3028
3029	/* for MSI: unmask; interrupt cause bits will retrigger now */
3030	if (using_msi)
3031		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3032
3033	spin_unlock(&host->lock);
3034
3035	return IRQ_RETVAL(handled);
3036}
3037
3038static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3039{
3040	unsigned int ofs;
3041
3042	switch (sc_reg_in) {
3043	case SCR_STATUS:
3044	case SCR_ERROR:
3045	case SCR_CONTROL:
3046		ofs = sc_reg_in * sizeof(u32);
3047		break;
3048	default:
3049		ofs = 0xffffffffU;
3050		break;
3051	}
3052	return ofs;
3053}
3054
3055static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3056{
3057	struct mv_host_priv *hpriv = link->ap->host->private_data;
3058	void __iomem *mmio = hpriv->base;
3059	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3060	unsigned int ofs = mv5_scr_offset(sc_reg_in);
3061
3062	if (ofs != 0xffffffffU) {
3063		*val = readl(addr + ofs);
3064		return 0;
3065	} else
3066		return -EINVAL;
3067}
3068
3069static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3070{
3071	struct mv_host_priv *hpriv = link->ap->host->private_data;
3072	void __iomem *mmio = hpriv->base;
3073	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3074	unsigned int ofs = mv5_scr_offset(sc_reg_in);
3075
3076	if (ofs != 0xffffffffU) {
3077		writelfl(val, addr + ofs);
3078		return 0;
3079	} else
3080		return -EINVAL;
3081}
3082
3083static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3084{
3085	struct pci_dev *pdev = to_pci_dev(host->dev);
3086	int early_5080;
3087
3088	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3089
3090	if (!early_5080) {
3091		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3092		tmp |= (1 << 0);
3093		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3094	}
3095
3096	mv_reset_pci_bus(host, mmio);
3097}
3098
3099static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3100{
3101	writel(0x0fcfffff, mmio + FLASH_CTL);
3102}
3103
3104static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3105			   void __iomem *mmio)
3106{
3107	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3108	u32 tmp;
3109
3110	tmp = readl(phy_mmio + MV5_PHY_MODE);
3111
3112	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
3113	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
3114}
3115
3116static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3117{
3118	u32 tmp;
3119
3120	writel(0, mmio + GPIO_PORT_CTL);
3121
3122	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3123
3124	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3125	tmp |= ~(1 << 0);
3126	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3127}
3128
3129static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3130			   unsigned int port)
3131{
3132	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3133	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3134	u32 tmp;
3135	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3136
3137	if (fix_apm_sq) {
3138		tmp = readl(phy_mmio + MV5_LTMODE);
3139		tmp |= (1 << 19);
3140		writel(tmp, phy_mmio + MV5_LTMODE);
3141
3142		tmp = readl(phy_mmio + MV5_PHY_CTL);
3143		tmp &= ~0x3;
3144		tmp |= 0x1;
3145		writel(tmp, phy_mmio + MV5_PHY_CTL);
3146	}
3147
3148	tmp = readl(phy_mmio + MV5_PHY_MODE);
3149	tmp &= ~mask;
3150	tmp |= hpriv->signal[port].pre;
3151	tmp |= hpriv->signal[port].amps;
3152	writel(tmp, phy_mmio + MV5_PHY_MODE);
3153}
3154
3155
3156#undef ZERO
3157#define ZERO(reg) writel(0, port_mmio + (reg))
3158static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3159			     unsigned int port)
3160{
3161	void __iomem *port_mmio = mv_port_base(mmio, port);
3162
3163	mv_reset_channel(hpriv, mmio, port);
3164
3165	ZERO(0x028);	/* command */
3166	writel(0x11f, port_mmio + EDMA_CFG);
3167	ZERO(0x004);	/* timer */
3168	ZERO(0x008);	/* irq err cause */
3169	ZERO(0x00c);	/* irq err mask */
3170	ZERO(0x010);	/* rq bah */
3171	ZERO(0x014);	/* rq inp */
3172	ZERO(0x018);	/* rq outp */
3173	ZERO(0x01c);	/* respq bah */
3174	ZERO(0x024);	/* respq outp */
3175	ZERO(0x020);	/* respq inp */
3176	ZERO(0x02c);	/* test control */
3177	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3178}
3179#undef ZERO
3180
3181#define ZERO(reg) writel(0, hc_mmio + (reg))
3182static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3183			unsigned int hc)
3184{
3185	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3186	u32 tmp;
3187
3188	ZERO(0x00c);
3189	ZERO(0x010);
3190	ZERO(0x014);
3191	ZERO(0x018);
3192
3193	tmp = readl(hc_mmio + 0x20);
3194	tmp &= 0x1c1c1c1c;
3195	tmp |= 0x03030303;
3196	writel(tmp, hc_mmio + 0x20);
3197}
3198#undef ZERO
3199
3200static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3201			unsigned int n_hc)
3202{
3203	unsigned int hc, port;
3204
3205	for (hc = 0; hc < n_hc; hc++) {
3206		for (port = 0; port < MV_PORTS_PER_HC; port++)
3207			mv5_reset_hc_port(hpriv, mmio,
3208					  (hc * MV_PORTS_PER_HC) + port);
3209
3210		mv5_reset_one_hc(hpriv, mmio, hc);
3211	}
3212
3213	return 0;
3214}
3215
3216#undef ZERO
3217#define ZERO(reg) writel(0, mmio + (reg))
3218static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3219{
3220	struct mv_host_priv *hpriv = host->private_data;
3221	u32 tmp;
3222
3223	tmp = readl(mmio + MV_PCI_MODE);
3224	tmp &= 0xff00ffff;
3225	writel(tmp, mmio + MV_PCI_MODE);
3226
3227	ZERO(MV_PCI_DISC_TIMER);
3228	ZERO(MV_PCI_MSI_TRIGGER);
3229	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3230	ZERO(MV_PCI_SERR_MASK);
3231	ZERO(hpriv->irq_cause_offset);
3232	ZERO(hpriv->irq_mask_offset);
3233	ZERO(MV_PCI_ERR_LOW_ADDRESS);
3234	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3235	ZERO(MV_PCI_ERR_ATTRIBUTE);
3236	ZERO(MV_PCI_ERR_COMMAND);
3237}
3238#undef ZERO
3239
3240static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3241{
3242	u32 tmp;
3243
3244	mv5_reset_flash(hpriv, mmio);
3245
3246	tmp = readl(mmio + GPIO_PORT_CTL);
3247	tmp &= 0x3;
3248	tmp |= (1 << 5) | (1 << 6);
3249	writel(tmp, mmio + GPIO_PORT_CTL);
3250}
3251
3252/**
3253 *      mv6_reset_hc - Perform the 6xxx global soft reset
3254 *      @mmio: base address of the HBA
3255 *
3256 *      This routine only applies to 6xxx parts.
3257 *
3258 *      LOCKING:
3259 *      Inherited from caller.
3260 */
3261static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3262			unsigned int n_hc)
3263{
3264	void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3265	int i, rc = 0;
3266	u32 t;
3267
3268	/* Following procedure defined in PCI "main command and status
3269	 * register" table.
3270	 */
3271	t = readl(reg);
3272	writel(t | STOP_PCI_MASTER, reg);
3273
3274	for (i = 0; i < 1000; i++) {
3275		udelay(1);
3276		t = readl(reg);
3277		if (PCI_MASTER_EMPTY & t)
3278			break;
3279	}
3280	if (!(PCI_MASTER_EMPTY & t)) {
3281		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3282		rc = 1;
3283		goto done;
3284	}
3285
3286	/* set reset */
3287	i = 5;
3288	do {
3289		writel(t | GLOB_SFT_RST, reg);
3290		t = readl(reg);
3291		udelay(1);
3292	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
3293
3294	if (!(GLOB_SFT_RST & t)) {
3295		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3296		rc = 1;
3297		goto done;
3298	}
3299
3300	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
3301	i = 5;
3302	do {
3303		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3304		t = readl(reg);
3305		udelay(1);
3306	} while ((GLOB_SFT_RST & t) && (i-- > 0));
3307
3308	if (GLOB_SFT_RST & t) {
3309		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3310		rc = 1;
3311	}
3312done:
3313	return rc;
3314}
3315
3316static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3317			   void __iomem *mmio)
3318{
3319	void __iomem *port_mmio;
3320	u32 tmp;
3321
3322	tmp = readl(mmio + RESET_CFG);
3323	if ((tmp & (1 << 0)) == 0) {
3324		hpriv->signal[idx].amps = 0x7 << 8;
3325		hpriv->signal[idx].pre = 0x1 << 5;
3326		return;
3327	}
3328
3329	port_mmio = mv_port_base(mmio, idx);
3330	tmp = readl(port_mmio + PHY_MODE2);
3331
3332	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
3333	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
3334}
3335
3336static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3337{
3338	writel(0x00000060, mmio + GPIO_PORT_CTL);
3339}
3340
3341static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3342			   unsigned int port)
3343{
3344	void __iomem *port_mmio = mv_port_base(mmio, port);
3345
3346	u32 hp_flags = hpriv->hp_flags;
3347	int fix_phy_mode2 =
3348		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3349	int fix_phy_mode4 =
3350		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3351	u32 m2, m3;
3352
3353	if (fix_phy_mode2) {
3354		m2 = readl(port_mmio + PHY_MODE2);
3355		m2 &= ~(1 << 16);
3356		m2 |= (1 << 31);
3357		writel(m2, port_mmio + PHY_MODE2);
3358
3359		udelay(200);
3360
3361		m2 = readl(port_mmio + PHY_MODE2);
3362		m2 &= ~((1 << 16) | (1 << 31));
3363		writel(m2, port_mmio + PHY_MODE2);
3364
3365		udelay(200);
3366	}
3367
3368	/*
3369	 * Gen-II/IIe PHY_MODE3 errata RM#2:
3370	 * Achieves better receiver noise performance than the h/w default:
3371	 */
3372	m3 = readl(port_mmio + PHY_MODE3);
3373	m3 = (m3 & 0x1f) | (0x5555601 << 5);
3374
3375	/* Guideline 88F5182 (GL# SATA-S11) */
3376	if (IS_SOC(hpriv))
3377		m3 &= ~0x1c;
3378
3379	if (fix_phy_mode4) {
3380		u32 m4 = readl(port_mmio + PHY_MODE4);
3381		/*
3382		 * Enforce reserved-bit restrictions on GenIIe devices only.
3383		 * For earlier chipsets, force only the internal config field
3384		 *  (workaround for errata FEr SATA#10 part 1).
3385		 */
3386		if (IS_GEN_IIE(hpriv))
3387			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3388		else
3389			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3390		writel(m4, port_mmio + PHY_MODE4);
3391	}
3392	/*
3393	 * Workaround for 60x1-B2 errata SATA#13:
3394	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3395	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3396	 * Or ensure we use writelfl() when writing PHY_MODE4.
3397	 */
3398	writel(m3, port_mmio + PHY_MODE3);
3399
3400	/* Revert values of pre-emphasis and signal amps to the saved ones */
3401	m2 = readl(port_mmio + PHY_MODE2);
3402
3403	m2 &= ~MV_M2_PREAMP_MASK;
3404	m2 |= hpriv->signal[port].amps;
3405	m2 |= hpriv->signal[port].pre;
3406	m2 &= ~(1 << 16);
3407
3408	/* according to mvSata 3.6.1, some IIE values are fixed */
3409	if (IS_GEN_IIE(hpriv)) {
3410		m2 &= ~0xC30FF01F;
3411		m2 |= 0x0000900F;
3412	}
3413
3414	writel(m2, port_mmio + PHY_MODE2);
3415}
3416
3417/* TODO: use the generic LED interface to configure the SATA Presence */
3418/* & Acitivy LEDs on the board */
3419static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3420				      void __iomem *mmio)
3421{
3422	return;
3423}
3424
3425static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3426			   void __iomem *mmio)
3427{
3428	void __iomem *port_mmio;
3429	u32 tmp;
3430
3431	port_mmio = mv_port_base(mmio, idx);
3432	tmp = readl(port_mmio + PHY_MODE2);
3433
3434	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
3435	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
3436}
3437
3438#undef ZERO
3439#define ZERO(reg) writel(0, port_mmio + (reg))
3440static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3441					void __iomem *mmio, unsigned int port)
3442{
3443	void __iomem *port_mmio = mv_port_base(mmio, port);
3444
3445	mv_reset_channel(hpriv, mmio, port);
3446
3447	ZERO(0x028);		/* command */
3448	writel(0x101f, port_mmio + EDMA_CFG);
3449	ZERO(0x004);		/* timer */
3450	ZERO(0x008);		/* irq err cause */
3451	ZERO(0x00c);		/* irq err mask */
3452	ZERO(0x010);		/* rq bah */
3453	ZERO(0x014);		/* rq inp */
3454	ZERO(0x018);		/* rq outp */
3455	ZERO(0x01c);		/* respq bah */
3456	ZERO(0x024);		/* respq outp */
3457	ZERO(0x020);		/* respq inp */
3458	ZERO(0x02c);		/* test control */
3459	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3460}
3461
3462#undef ZERO
3463
3464#define ZERO(reg) writel(0, hc_mmio + (reg))
3465static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3466				       void __iomem *mmio)
3467{
3468	void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3469
3470	ZERO(0x00c);
3471	ZERO(0x010);
3472	ZERO(0x014);
3473
3474}
3475
3476#undef ZERO
3477
3478static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3479				  void __iomem *mmio, unsigned int n_hc)
3480{
3481	unsigned int port;
3482
3483	for (port = 0; port < hpriv->n_ports; port++)
3484		mv_soc_reset_hc_port(hpriv, mmio, port);
3485
3486	mv_soc_reset_one_hc(hpriv, mmio);
3487
3488	return 0;
3489}
3490
3491static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3492				      void __iomem *mmio)
3493{
3494	return;
3495}
3496
3497static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3498{
3499	return;
3500}
3501
3502static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3503				  void __iomem *mmio, unsigned int port)
3504{
3505	void __iomem *port_mmio = mv_port_base(mmio, port);
3506	u32	reg;
3507
3508	reg = readl(port_mmio + PHY_MODE3);
3509	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
3510	reg |= (0x1 << 27);
3511	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
3512	reg |= (0x1 << 29);
3513	writel(reg, port_mmio + PHY_MODE3);
3514
3515	reg = readl(port_mmio + PHY_MODE4);
3516	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
3517	reg |= (0x1 << 16);
3518	writel(reg, port_mmio + PHY_MODE4);
3519
3520	reg = readl(port_mmio + PHY_MODE9_GEN2);
3521	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
3522	reg |= 0x8;
3523	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
3524	writel(reg, port_mmio + PHY_MODE9_GEN2);
3525
3526	reg = readl(port_mmio + PHY_MODE9_GEN1);
3527	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
3528	reg |= 0x8;
3529	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
3530	writel(reg, port_mmio + PHY_MODE9_GEN1);
3531}
3532
3533/**
3534 *	soc_is_65 - check if the soc is 65 nano device
3535 *
3536 *	Detect the type of the SoC, this is done by reading the PHYCFG_OFS
3537 *	register, this register should contain non-zero value and it exists only
3538 *	in the 65 nano devices, when reading it from older devices we get 0.
3539 */
3540static bool soc_is_65n(struct mv_host_priv *hpriv)
3541{
3542	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3543
3544	if (readl(port0_mmio + PHYCFG_OFS))
3545		return true;
3546	return false;
3547}
3548
3549static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3550{
3551	u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3552
3553	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
3554	if (want_gen2i)
3555		ifcfg |= (1 << 7);		/* enable gen2i speed */
3556	writelfl(ifcfg, port_mmio + SATA_IFCFG);
3557}
3558
3559static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3560			     unsigned int port_no)
3561{
3562	void __iomem *port_mmio = mv_port_base(mmio, port_no);
3563
3564	/*
3565	 * The datasheet warns against setting EDMA_RESET when EDMA is active
3566	 * (but doesn't say what the problem might be).  So we first try
3567	 * to disable the EDMA engine before doing the EDMA_RESET operation.
3568	 */
3569	mv_stop_edma_engine(port_mmio);
3570	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3571
3572	if (!IS_GEN_I(hpriv)) {
3573		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3574		mv_setup_ifcfg(port_mmio, 1);
3575	}
3576	/*
3577	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3578	 * link, and physical layers.  It resets all SATA interface registers
3579	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3580	 */
3581	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3582	udelay(25);	/* allow reset propagation */
3583	writelfl(0, port_mmio + EDMA_CMD);
3584
3585	hpriv->ops->phy_errata(hpriv, mmio, port_no);
3586
3587	if (IS_GEN_I(hpriv))
3588		usleep_range(500, 1000);
3589}
3590
3591static void mv_pmp_select(struct ata_port *ap, int pmp)
3592{
3593	if (sata_pmp_supported(ap)) {
3594		void __iomem *port_mmio = mv_ap_base(ap);
3595		u32 reg = readl(port_mmio + SATA_IFCTL);
3596		int old = reg & 0xf;
3597
3598		if (old != pmp) {
3599			reg = (reg & ~0xf) | pmp;
3600			writelfl(reg, port_mmio + SATA_IFCTL);
3601		}
3602	}
3603}
3604
3605static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3606				unsigned long deadline)
3607{
3608	mv_pmp_select(link->ap, sata_srst_pmp(link));
3609	return sata_std_hardreset(link, class, deadline);
3610}
3611
3612static int mv_softreset(struct ata_link *link, unsigned int *class,
3613				unsigned long deadline)
3614{
3615	mv_pmp_select(link->ap, sata_srst_pmp(link));
3616	return ata_sff_softreset(link, class, deadline);
3617}
3618
3619static int mv_hardreset(struct ata_link *link, unsigned int *class,
3620			unsigned long deadline)
3621{
3622	struct ata_port *ap = link->ap;
3623	struct mv_host_priv *hpriv = ap->host->private_data;
3624	struct mv_port_priv *pp = ap->private_data;
3625	void __iomem *mmio = hpriv->base;
3626	int rc, attempts = 0, extra = 0;
3627	u32 sstatus;
3628	bool online;
3629
3630	mv_reset_channel(hpriv, mmio, ap->port_no);
3631	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3632	pp->pp_flags &=
3633	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3634
3635	/* Workaround for errata FEr SATA#10 (part 2) */
3636	do {
3637		const unsigned long *timing =
3638				sata_ehc_deb_timing(&link->eh_context);
3639
3640		rc = sata_link_hardreset(link, timing, deadline + extra,
3641					 &online, NULL);
3642		rc = online ? -EAGAIN : rc;
3643		if (rc)
3644			return rc;
3645		sata_scr_read(link, SCR_STATUS, &sstatus);
3646		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3647			/* Force 1.5gb/s link speed and try again */
3648			mv_setup_ifcfg(mv_ap_base(ap), 0);
3649			if (time_after(jiffies + HZ, deadline))
3650				extra = HZ; /* only extend it once, max */
3651		}
3652	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
3653	mv_save_cached_regs(ap);
3654	mv_edma_cfg(ap, 0, 0);
3655
3656	return rc;
3657}
3658
3659static void mv_eh_freeze(struct ata_port *ap)
3660{
3661	mv_stop_edma(ap);
3662	mv_enable_port_irqs(ap, 0);
3663}
3664
3665static void mv_eh_thaw(struct ata_port *ap)
3666{
3667	struct mv_host_priv *hpriv = ap->host->private_data;
3668	unsigned int port = ap->port_no;
3669	unsigned int hardport = mv_hardport_from_port(port);
3670	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3671	void __iomem *port_mmio = mv_ap_base(ap);
3672	u32 hc_irq_cause;
3673
3674	/* clear EDMA errors on this port */
3675	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3676
3677	/* clear pending irq events */
3678	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3679	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3680
3681	mv_enable_port_irqs(ap, ERR_IRQ);
3682}
3683
3684/**
3685 *      mv_port_init - Perform some early initialization on a single port.
3686 *      @port: libata data structure storing shadow register addresses
3687 *      @port_mmio: base address of the port
3688 *
3689 *      Initialize shadow register mmio addresses, clear outstanding
3690 *      interrupts on the port, and unmask interrupts for the future
3691 *      start of the port.
3692 *
3693 *      LOCKING:
3694 *      Inherited from caller.
3695 */
3696static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
3697{
3698	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3699
3700	/* PIO related setup
3701	 */
3702	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3703	port->error_addr =
3704		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3705	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3706	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3707	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3708	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3709	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3710	port->status_addr =
3711		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3712	/* special case: control/altstatus doesn't have ATA_REG_ address */
3713	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3714
3715	/* Clear any currently outstanding port interrupt conditions */
3716	serr = port_mmio + mv_scr_offset(SCR_ERROR);
3717	writelfl(readl(serr), serr);
3718	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3719
3720	/* unmask all non-transient EDMA error interrupts */
3721	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3722
3723	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3724		readl(port_mmio + EDMA_CFG),
3725		readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3726		readl(port_mmio + EDMA_ERR_IRQ_MASK));
3727}
3728
3729static unsigned int mv_in_pcix_mode(struct ata_host *host)
3730{
3731	struct mv_host_priv *hpriv = host->private_data;
3732	void __iomem *mmio = hpriv->base;
3733	u32 reg;
3734
3735	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3736		return 0;	/* not PCI-X capable */
3737	reg = readl(mmio + MV_PCI_MODE);
3738	if ((reg & MV_PCI_MODE_MASK) == 0)
3739		return 0;	/* conventional PCI mode */
3740	return 1;	/* chip is in PCI-X mode */
3741}
3742
3743static int mv_pci_cut_through_okay(struct ata_host *host)
3744{
3745	struct mv_host_priv *hpriv = host->private_data;
3746	void __iomem *mmio = hpriv->base;
3747	u32 reg;
3748
3749	if (!mv_in_pcix_mode(host)) {
3750		reg = readl(mmio + MV_PCI_COMMAND);
3751		if (reg & MV_PCI_COMMAND_MRDTRIG)
3752			return 0; /* not okay */
3753	}
3754	return 1; /* okay */
3755}
3756
3757static void mv_60x1b2_errata_pci7(struct ata_host *host)
3758{
3759	struct mv_host_priv *hpriv = host->private_data;
3760	void __iomem *mmio = hpriv->base;
3761
3762	/* workaround for 60x1-B2 errata PCI#7 */
3763	if (mv_in_pcix_mode(host)) {
3764		u32 reg = readl(mmio + MV_PCI_COMMAND);
3765		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3766	}
3767}
3768
3769static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3770{
3771	struct pci_dev *pdev = to_pci_dev(host->dev);
3772	struct mv_host_priv *hpriv = host->private_data;
3773	u32 hp_flags = hpriv->hp_flags;
3774
3775	switch (board_idx) {
3776	case chip_5080:
3777		hpriv->ops = &mv5xxx_ops;
3778		hp_flags |= MV_HP_GEN_I;
3779
3780		switch (pdev->revision) {
3781		case 0x1:
3782			hp_flags |= MV_HP_ERRATA_50XXB0;
3783			break;
3784		case 0x3:
3785			hp_flags |= MV_HP_ERRATA_50XXB2;
3786			break;
3787		default:
3788			dev_warn(&pdev->dev,
3789				 "Applying 50XXB2 workarounds to unknown rev\n");
3790			hp_flags |= MV_HP_ERRATA_50XXB2;
3791			break;
3792		}
3793		break;
3794
3795	case chip_504x:
3796	case chip_508x:
3797		hpriv->ops = &mv5xxx_ops;
3798		hp_flags |= MV_HP_GEN_I;
3799
3800		switch (pdev->revision) {
3801		case 0x0:
3802			hp_flags |= MV_HP_ERRATA_50XXB0;
3803			break;
3804		case 0x3:
3805			hp_flags |= MV_HP_ERRATA_50XXB2;
3806			break;
3807		default:
3808			dev_warn(&pdev->dev,
3809				 "Applying B2 workarounds to unknown rev\n");
3810			hp_flags |= MV_HP_ERRATA_50XXB2;
3811			break;
3812		}
3813		break;
3814
3815	case chip_604x:
3816	case chip_608x:
3817		hpriv->ops = &mv6xxx_ops;
3818		hp_flags |= MV_HP_GEN_II;
3819
3820		switch (pdev->revision) {
3821		case 0x7:
3822			mv_60x1b2_errata_pci7(host);
3823			hp_flags |= MV_HP_ERRATA_60X1B2;
3824			break;
3825		case 0x9:
3826			hp_flags |= MV_HP_ERRATA_60X1C0;
3827			break;
3828		default:
3829			dev_warn(&pdev->dev,
3830				 "Applying B2 workarounds to unknown rev\n");
3831			hp_flags |= MV_HP_ERRATA_60X1B2;
3832			break;
3833		}
3834		break;
3835
3836	case chip_7042:
3837		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3838		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3839		    (pdev->device == 0x2300 || pdev->device == 0x2310))
3840		{
3841			/*
3842			 * Highpoint RocketRAID PCIe 23xx series cards:
3843			 *
3844			 * Unconfigured drives are treated as "Legacy"
3845			 * by the BIOS, and it overwrites sector 8 with
3846			 * a "Lgcy" metadata block prior to Linux boot.
3847			 *
3848			 * Configured drives (RAID or JBOD) leave sector 8
3849			 * alone, but instead overwrite a high numbered
3850			 * sector for the RAID metadata.  This sector can
3851			 * be determined exactly, by truncating the physical
3852			 * drive capacity to a nice even GB value.
3853			 *
3854			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3855			 *
3856			 * Warn the user, lest they think we're just buggy.
3857			 */
3858			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3859				" BIOS CORRUPTS DATA on all attached drives,"
3860				" regardless of if/how they are configured."
3861				" BEWARE!\n");
3862			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3863				" use sectors 8-9 on \"Legacy\" drives,"
3864				" and avoid the final two gigabytes on"
3865				" all RocketRAID BIOS initialized drives.\n");
3866		}
3867		fallthrough;
3868	case chip_6042:
3869		hpriv->ops = &mv6xxx_ops;
3870		hp_flags |= MV_HP_GEN_IIE;
3871		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3872			hp_flags |= MV_HP_CUT_THROUGH;
3873
3874		switch (pdev->revision) {
3875		case 0x2: /* Rev.B0: the first/only public release */
3876			hp_flags |= MV_HP_ERRATA_60X1C0;
3877			break;
3878		default:
3879			dev_warn(&pdev->dev,
3880				 "Applying 60X1C0 workarounds to unknown rev\n");
3881			hp_flags |= MV_HP_ERRATA_60X1C0;
3882			break;
3883		}
3884		break;
3885	case chip_soc:
3886		if (soc_is_65n(hpriv))
3887			hpriv->ops = &mv_soc_65n_ops;
3888		else
3889			hpriv->ops = &mv_soc_ops;
3890		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3891			MV_HP_ERRATA_60X1C0;
3892		break;
3893
3894	default:
3895		dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
3896		return -EINVAL;
3897	}
3898
3899	hpriv->hp_flags = hp_flags;
3900	if (hp_flags & MV_HP_PCIE) {
3901		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
3902		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
3903		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
3904	} else {
3905		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
3906		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
3907		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
3908	}
3909
3910	return 0;
3911}
3912
3913/**
3914 *      mv_init_host - Perform some early initialization of the host.
3915 *	@host: ATA host to initialize
3916 *
3917 *      If possible, do an early global reset of the host.  Then do
3918 *      our port init and clear/unmask all/relevant host interrupts.
3919 *
3920 *      LOCKING:
3921 *      Inherited from caller.
3922 */
3923static int mv_init_host(struct ata_host *host)
3924{
3925	int rc = 0, n_hc, port, hc;
3926	struct mv_host_priv *hpriv = host->private_data;
3927	void __iomem *mmio = hpriv->base;
3928
3929	rc = mv_chip_id(host, hpriv->board_idx);
3930	if (rc)
3931		goto done;
3932
3933	if (IS_SOC(hpriv)) {
3934		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3935		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
3936	} else {
3937		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3938		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
3939	}
3940
3941	/* initialize shadow irq mask with register's value */
3942	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3943
3944	/* global interrupt mask: 0 == mask everything */
3945	mv_set_main_irq_mask(host, ~0, 0);
3946
3947	n_hc = mv_get_hc_count(host->ports[0]->flags);
3948
3949	for (port = 0; port < host->n_ports; port++)
3950		if (hpriv->ops->read_preamp)
3951			hpriv->ops->read_preamp(hpriv, port, mmio);
3952
3953	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3954	if (rc)
3955		goto done;
3956
3957	hpriv->ops->reset_flash(hpriv, mmio);
3958	hpriv->ops->reset_bus(host, mmio);
3959	hpriv->ops->enable_leds(hpriv, mmio);
3960
3961	for (port = 0; port < host->n_ports; port++) {
3962		struct ata_port *ap = host->ports[port];
3963		void __iomem *port_mmio = mv_port_base(mmio, port);
3964
3965		mv_port_init(&ap->ioaddr, port_mmio);
3966	}
3967
3968	for (hc = 0; hc < n_hc; hc++) {
3969		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3970
3971		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3972			"(before clear)=0x%08x\n", hc,
3973			readl(hc_mmio + HC_CFG),
3974			readl(hc_mmio + HC_IRQ_CAUSE));
3975
3976		/* Clear any currently outstanding hc interrupt conditions */
3977		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3978	}
3979
3980	if (!IS_SOC(hpriv)) {
3981		/* Clear any currently outstanding host interrupt conditions */
3982		writelfl(0, mmio + hpriv->irq_cause_offset);
3983
3984		/* and unmask interrupt generation for host regs */
3985		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3986	}
3987
3988	/*
3989	 * enable only global host interrupts for now.
3990	 * The per-port interrupts get done later as ports are set up.
3991	 */
3992	mv_set_main_irq_mask(host, 0, PCI_ERR);
3993	mv_set_irq_coalescing(host, irq_coalescing_io_count,
3994				    irq_coalescing_usecs);
3995done:
3996	return rc;
3997}
3998
3999static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
4000{
4001	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
4002							     MV_CRQB_Q_SZ, 0);
4003	if (!hpriv->crqb_pool)
4004		return -ENOMEM;
4005
4006	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
4007							     MV_CRPB_Q_SZ, 0);
4008	if (!hpriv->crpb_pool)
4009		return -ENOMEM;
4010
4011	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
4012							     MV_SG_TBL_SZ, 0);
4013	if (!hpriv->sg_tbl_pool)
4014		return -ENOMEM;
4015
4016	return 0;
4017}
4018
4019static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
4020				 const struct mbus_dram_target_info *dram)
4021{
4022	int i;
4023
4024	for (i = 0; i < 4; i++) {
4025		writel(0, hpriv->base + WINDOW_CTRL(i));
4026		writel(0, hpriv->base + WINDOW_BASE(i));
4027	}
4028
4029	for (i = 0; i < dram->num_cs; i++) {
4030		const struct mbus_dram_window *cs = dram->cs + i;
4031
4032		writel(((cs->size - 1) & 0xffff0000) |
4033			(cs->mbus_attr << 8) |
4034			(dram->mbus_dram_target_id << 4) | 1,
4035			hpriv->base + WINDOW_CTRL(i));
4036		writel(cs->base, hpriv->base + WINDOW_BASE(i));
4037	}
4038}
4039
4040/**
4041 *      mv_platform_probe - handle a positive probe of an soc Marvell
4042 *      host
4043 *      @pdev: platform device found
4044 *
4045 *      LOCKING:
4046 *      Inherited from caller.
4047 */
4048static int mv_platform_probe(struct platform_device *pdev)
4049{
4050	const struct mv_sata_platform_data *mv_platform_data;
4051	const struct mbus_dram_target_info *dram;
4052	const struct ata_port_info *ppi[] =
4053	    { &mv_port_info[chip_soc], NULL };
4054	struct ata_host *host;
4055	struct mv_host_priv *hpriv;
4056	struct resource *res;
4057	int n_ports = 0, irq = 0;
4058	int rc;
4059	int port;
4060
4061	ata_print_version_once(&pdev->dev, DRV_VERSION);
4062
4063	/*
4064	 * Simple resource validation ..
4065	 */
4066	if (unlikely(pdev->num_resources != 2)) {
4067		dev_err(&pdev->dev, "invalid number of resources\n");
4068		return -EINVAL;
4069	}
4070
4071	/*
4072	 * Get the register base first
4073	 */
4074	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4075	if (res == NULL)
4076		return -EINVAL;
4077
4078	/* allocate host */
4079	if (pdev->dev.of_node) {
4080		rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
4081					   &n_ports);
4082		if (rc) {
4083			dev_err(&pdev->dev,
4084				"error parsing nr-ports property: %d\n", rc);
4085			return rc;
4086		}
4087
4088		if (n_ports <= 0) {
4089			dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
4090				n_ports);
4091			return -EINVAL;
4092		}
4093
4094		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
4095	} else {
4096		mv_platform_data = dev_get_platdata(&pdev->dev);
4097		n_ports = mv_platform_data->n_ports;
4098		irq = platform_get_irq(pdev, 0);
4099	}
4100	if (irq < 0)
4101		return irq;
4102	if (!irq)
4103		return -EINVAL;
4104
4105	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4106	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4107
4108	if (!host || !hpriv)
4109		return -ENOMEM;
4110	hpriv->port_clks = devm_kcalloc(&pdev->dev,
4111					n_ports, sizeof(struct clk *),
4112					GFP_KERNEL);
4113	if (!hpriv->port_clks)
4114		return -ENOMEM;
4115	hpriv->port_phys = devm_kcalloc(&pdev->dev,
4116					n_ports, sizeof(struct phy *),
4117					GFP_KERNEL);
4118	if (!hpriv->port_phys)
4119		return -ENOMEM;
4120	host->private_data = hpriv;
4121	hpriv->board_idx = chip_soc;
4122
4123	host->iomap = NULL;
4124	hpriv->base = devm_ioremap(&pdev->dev, res->start,
4125				   resource_size(res));
4126	if (!hpriv->base)
4127		return -ENOMEM;
4128
4129	hpriv->base -= SATAHC0_REG_BASE;
4130
4131	hpriv->clk = clk_get(&pdev->dev, NULL);
4132	if (IS_ERR(hpriv->clk))
4133		dev_notice(&pdev->dev, "cannot get optional clkdev\n");
4134	else
4135		clk_prepare_enable(hpriv->clk);
4136
4137	for (port = 0; port < n_ports; port++) {
4138		char port_number[16];
4139		sprintf(port_number, "%d", port);
4140		hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
4141		if (!IS_ERR(hpriv->port_clks[port]))
4142			clk_prepare_enable(hpriv->port_clks[port]);
4143
4144		sprintf(port_number, "port%d", port);
4145		hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
4146							       port_number);
4147		if (IS_ERR(hpriv->port_phys[port])) {
4148			rc = PTR_ERR(hpriv->port_phys[port]);
4149			hpriv->port_phys[port] = NULL;
4150			if (rc != -EPROBE_DEFER)
4151				dev_warn(&pdev->dev, "error getting phy %d", rc);
4152
4153			/* Cleanup only the initialized ports */
4154			hpriv->n_ports = port;
4155			goto err;
4156		} else
4157			phy_power_on(hpriv->port_phys[port]);
4158	}
4159
4160	/* All the ports have been initialized */
4161	hpriv->n_ports = n_ports;
4162
4163	/*
4164	 * (Re-)program MBUS remapping windows if we are asked to.
4165	 */
4166	dram = mv_mbus_dram_info();
4167	if (dram)
4168		mv_conf_mbus_windows(hpriv, dram);
4169
4170	rc = mv_create_dma_pools(hpriv, &pdev->dev);
4171	if (rc)
4172		goto err;
4173
4174	/*
4175	 * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
4176	 * updated in the LP_PHY_CTL register.
4177	 */
4178	if (pdev->dev.of_node &&
4179		of_device_is_compatible(pdev->dev.of_node,
4180					"marvell,armada-370-sata"))
4181		hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
4182
4183	/* initialize adapter */
4184	rc = mv_init_host(host);
4185	if (rc)
4186		goto err;
4187
4188	dev_info(&pdev->dev, "slots %u ports %d\n",
4189		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
4190
4191	rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
4192	if (!rc)
4193		return 0;
4194
4195err:
4196	if (!IS_ERR(hpriv->clk)) {
4197		clk_disable_unprepare(hpriv->clk);
4198		clk_put(hpriv->clk);
4199	}
4200	for (port = 0; port < hpriv->n_ports; port++) {
4201		if (!IS_ERR(hpriv->port_clks[port])) {
4202			clk_disable_unprepare(hpriv->port_clks[port]);
4203			clk_put(hpriv->port_clks[port]);
4204		}
4205		phy_power_off(hpriv->port_phys[port]);
4206	}
4207
4208	return rc;
4209}
4210
4211/*
4212 *
4213 *      mv_platform_remove    -       unplug a platform interface
4214 *      @pdev: platform device
4215 *
4216 *      A platform bus SATA device has been unplugged. Perform the needed
4217 *      cleanup. Also called on module unload for any active devices.
4218 */
4219static int mv_platform_remove(struct platform_device *pdev)
4220{
4221	struct ata_host *host = platform_get_drvdata(pdev);
4222	struct mv_host_priv *hpriv = host->private_data;
4223	int port;
4224	ata_host_detach(host);
4225
4226	if (!IS_ERR(hpriv->clk)) {
4227		clk_disable_unprepare(hpriv->clk);
4228		clk_put(hpriv->clk);
4229	}
4230	for (port = 0; port < host->n_ports; port++) {
4231		if (!IS_ERR(hpriv->port_clks[port])) {
4232			clk_disable_unprepare(hpriv->port_clks[port]);
4233			clk_put(hpriv->port_clks[port]);
4234		}
4235		phy_power_off(hpriv->port_phys[port]);
4236	}
4237	return 0;
4238}
4239
4240#ifdef CONFIG_PM_SLEEP
4241static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4242{
4243	struct ata_host *host = platform_get_drvdata(pdev);
4244	if (host)
4245		return ata_host_suspend(host, state);
4246	else
4247		return 0;
4248}
4249
4250static int mv_platform_resume(struct platform_device *pdev)
4251{
4252	struct ata_host *host = platform_get_drvdata(pdev);
4253	const struct mbus_dram_target_info *dram;
4254	int ret;
4255
4256	if (host) {
4257		struct mv_host_priv *hpriv = host->private_data;
4258
4259		/*
4260		 * (Re-)program MBUS remapping windows if we are asked to.
4261		 */
4262		dram = mv_mbus_dram_info();
4263		if (dram)
4264			mv_conf_mbus_windows(hpriv, dram);
4265
4266		/* initialize adapter */
4267		ret = mv_init_host(host);
4268		if (ret) {
4269			printk(KERN_ERR DRV_NAME ": Error during HW init\n");
4270			return ret;
4271		}
4272		ata_host_resume(host);
4273	}
4274
4275	return 0;
4276}
4277#else
4278#define mv_platform_suspend NULL
4279#define mv_platform_resume NULL
4280#endif
4281
4282#ifdef CONFIG_OF
4283static const struct of_device_id mv_sata_dt_ids[] = {
4284	{ .compatible = "marvell,armada-370-sata", },
4285	{ .compatible = "marvell,orion-sata", },
4286	{},
4287};
4288MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
4289#endif
4290
4291static struct platform_driver mv_platform_driver = {
4292	.probe		= mv_platform_probe,
4293	.remove		= mv_platform_remove,
4294	.suspend	= mv_platform_suspend,
4295	.resume		= mv_platform_resume,
4296	.driver		= {
4297		.name = DRV_NAME,
4298		.of_match_table = of_match_ptr(mv_sata_dt_ids),
4299	},
4300};
4301
4302
4303#ifdef CONFIG_PCI
4304static int mv_pci_init_one(struct pci_dev *pdev,
4305			   const struct pci_device_id *ent);
4306#ifdef CONFIG_PM_SLEEP
4307static int mv_pci_device_resume(struct pci_dev *pdev);
4308#endif
4309
4310
4311static struct pci_driver mv_pci_driver = {
4312	.name			= DRV_NAME,
4313	.id_table		= mv_pci_tbl,
4314	.probe			= mv_pci_init_one,
4315	.remove			= ata_pci_remove_one,
4316#ifdef CONFIG_PM_SLEEP
4317	.suspend		= ata_pci_device_suspend,
4318	.resume			= mv_pci_device_resume,
4319#endif
4320
4321};
4322
4323/**
4324 *      mv_print_info - Dump key info to kernel log for perusal.
4325 *      @host: ATA host to print info about
4326 *
4327 *      FIXME: complete this.
4328 *
4329 *      LOCKING:
4330 *      Inherited from caller.
4331 */
4332static void mv_print_info(struct ata_host *host)
4333{
4334	struct pci_dev *pdev = to_pci_dev(host->dev);
4335	struct mv_host_priv *hpriv = host->private_data;
4336	u8 scc;
4337	const char *scc_s, *gen;
4338
4339	/* Use this to determine the HW stepping of the chip so we know
4340	 * what errata to workaround
4341	 */
4342	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4343	if (scc == 0)
4344		scc_s = "SCSI";
4345	else if (scc == 0x01)
4346		scc_s = "RAID";
4347	else
4348		scc_s = "?";
4349
4350	if (IS_GEN_I(hpriv))
4351		gen = "I";
4352	else if (IS_GEN_II(hpriv))
4353		gen = "II";
4354	else if (IS_GEN_IIE(hpriv))
4355		gen = "IIE";
4356	else
4357		gen = "?";
4358
4359	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4360		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
4361		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4362}
4363
4364/**
4365 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
4366 *      @pdev: PCI device found
4367 *      @ent: PCI device ID entry for the matched host
4368 *
4369 *      LOCKING:
4370 *      Inherited from caller.
4371 */
4372static int mv_pci_init_one(struct pci_dev *pdev,
4373			   const struct pci_device_id *ent)
4374{
4375	unsigned int board_idx = (unsigned int)ent->driver_data;
4376	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4377	struct ata_host *host;
4378	struct mv_host_priv *hpriv;
4379	int n_ports, port, rc;
4380
4381	ata_print_version_once(&pdev->dev, DRV_VERSION);
4382
4383	/* allocate host */
4384	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4385
4386	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4387	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4388	if (!host || !hpriv)
4389		return -ENOMEM;
4390	host->private_data = hpriv;
4391	hpriv->n_ports = n_ports;
4392	hpriv->board_idx = board_idx;
4393
4394	/* acquire resources */
4395	rc = pcim_enable_device(pdev);
4396	if (rc)
4397		return rc;
4398
4399	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4400	if (rc == -EBUSY)
4401		pcim_pin_device(pdev);
4402	if (rc)
4403		return rc;
4404	host->iomap = pcim_iomap_table(pdev);
4405	hpriv->base = host->iomap[MV_PRIMARY_BAR];
4406
4407	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4408	if (rc) {
4409		dev_err(&pdev->dev, "DMA enable failed\n");
4410		return rc;
4411	}
4412
4413	rc = mv_create_dma_pools(hpriv, &pdev->dev);
4414	if (rc)
4415		return rc;
4416
4417	for (port = 0; port < host->n_ports; port++) {
4418		struct ata_port *ap = host->ports[port];
4419		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4420		unsigned int offset = port_mmio - hpriv->base;
4421
4422		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4423		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4424	}
4425
4426	/* initialize adapter */
4427	rc = mv_init_host(host);
4428	if (rc)
4429		return rc;
4430
4431	/* Enable message-switched interrupts, if requested */
4432	if (msi && pci_enable_msi(pdev) == 0)
4433		hpriv->hp_flags |= MV_HP_FLAG_MSI;
4434
4435	mv_dump_pci_cfg(pdev, 0x68);
4436	mv_print_info(host);
4437
4438	pci_set_master(pdev);
4439	pci_try_set_mwi(pdev);
4440	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
4441				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
4442}
4443
4444#ifdef CONFIG_PM_SLEEP
4445static int mv_pci_device_resume(struct pci_dev *pdev)
4446{
4447	struct ata_host *host = pci_get_drvdata(pdev);
4448	int rc;
4449
4450	rc = ata_pci_device_do_resume(pdev);
4451	if (rc)
4452		return rc;
4453
4454	/* initialize adapter */
4455	rc = mv_init_host(host);
4456	if (rc)
4457		return rc;
4458
4459	ata_host_resume(host);
4460
4461	return 0;
4462}
4463#endif
4464#endif
4465
4466static int __init mv_init(void)
4467{
4468	int rc = -ENODEV;
4469#ifdef CONFIG_PCI
4470	rc = pci_register_driver(&mv_pci_driver);
4471	if (rc < 0)
4472		return rc;
4473#endif
4474	rc = platform_driver_register(&mv_platform_driver);
4475
4476#ifdef CONFIG_PCI
4477	if (rc < 0)
4478		pci_unregister_driver(&mv_pci_driver);
4479#endif
4480	return rc;
4481}
4482
4483static void __exit mv_exit(void)
4484{
4485#ifdef CONFIG_PCI
4486	pci_unregister_driver(&mv_pci_driver);
4487#endif
4488	platform_driver_unregister(&mv_platform_driver);
4489}
4490
4491MODULE_AUTHOR("Brett Russ");
4492MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4493MODULE_LICENSE("GPL v2");
4494MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4495MODULE_VERSION(DRV_VERSION);
4496MODULE_ALIAS("platform:" DRV_NAME);
4497
4498module_init(mv_init);
4499module_exit(mv_exit);
4500