xref: /kernel/linux/linux-5.10/drivers/ata/libahci.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 *  libahci.c - Common AHCI SATA low-level routines
4 *
5 *  Maintained by:  Tejun Heo <tj@kernel.org>
6 *    		    Please ALWAYS copy linux-ide@vger.kernel.org
7 *		    on emails.
8 *
9 *  Copyright 2004-2005 Red Hat, Inc.
10 *
11 * libata documentation is available via 'make {ps|pdf}docs',
12 * as Documentation/driver-api/libata.rst
13 *
14 * AHCI hardware documentation:
15 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
16 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
17 */
18
19#include <linux/kernel.h>
20#include <linux/gfp.h>
21#include <linux/module.h>
22#include <linux/nospec.h>
23#include <linux/blkdev.h>
24#include <linux/delay.h>
25#include <linux/interrupt.h>
26#include <linux/dma-mapping.h>
27#include <linux/device.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_cmnd.h>
30#include <linux/libata.h>
31#include <linux/pci.h>
32#include "ahci.h"
33#include "libata.h"
34
/* Module knobs: both are read-only after load (perm 0444). */
static int ahci_skip_host_reset;	/* skip the global HBA reset in ahci_reset_controller() */
int ahci_ignore_sss;			/* ignore CAP.SSS (staggered spin-up); shared with LLDs */
EXPORT_SYMBOL_GPL(ahci_ignore_sss);

module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");

module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
44
45static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
46			unsigned hints);
47static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
48static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
49			      size_t size);
50static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
51					ssize_t size);
52
53
54
55static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
56static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
57static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
58static int ahci_port_start(struct ata_port *ap);
59static void ahci_port_stop(struct ata_port *ap);
60static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
61static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
62static void ahci_freeze(struct ata_port *ap);
63static void ahci_thaw(struct ata_port *ap);
64static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep);
65static void ahci_enable_fbs(struct ata_port *ap);
66static void ahci_disable_fbs(struct ata_port *ap);
67static void ahci_pmp_attach(struct ata_port *ap);
68static void ahci_pmp_detach(struct ata_port *ap);
69static int ahci_softreset(struct ata_link *link, unsigned int *class,
70			  unsigned long deadline);
71static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
72			  unsigned long deadline);
73static int ahci_hardreset(struct ata_link *link, unsigned int *class,
74			  unsigned long deadline);
75static void ahci_postreset(struct ata_link *link, unsigned int *class);
76static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
77static void ahci_dev_config(struct ata_device *dev);
78#ifdef CONFIG_PM
79static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
80#endif
81static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
82static ssize_t ahci_activity_store(struct ata_device *dev,
83				   enum sw_activity val);
84static void ahci_init_sw_activity(struct ata_link *link);
85
86static ssize_t ahci_show_host_caps(struct device *dev,
87				   struct device_attribute *attr, char *buf);
88static ssize_t ahci_show_host_cap2(struct device *dev,
89				   struct device_attribute *attr, char *buf);
90static ssize_t ahci_show_host_version(struct device *dev,
91				      struct device_attribute *attr, char *buf);
92static ssize_t ahci_show_port_cmd(struct device *dev,
93				  struct device_attribute *attr, char *buf);
94static ssize_t ahci_read_em_buffer(struct device *dev,
95				   struct device_attribute *attr, char *buf);
96static ssize_t ahci_store_em_buffer(struct device *dev,
97				    struct device_attribute *attr,
98				    const char *buf, size_t size);
99static ssize_t ahci_show_em_supported(struct device *dev,
100				      struct device_attribute *attr, char *buf);
101static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
102
/* sysfs attributes: read-only capability/version/command dumps plus the
 * read-write enclosure-management message buffer.
 */
static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
		   ahci_read_em_buffer, ahci_store_em_buffer);
static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
110
/* Default Scsi_Host attribute list for AHCI drivers; NULL-terminated. */
struct device_attribute *ahci_shost_attrs[] = {
	&dev_attr_link_power_management_policy,
	&dev_attr_em_message_type,
	&dev_attr_em_message,
	&dev_attr_ahci_host_caps,
	&dev_attr_ahci_host_cap2,
	&dev_attr_ahci_host_version,
	&dev_attr_ahci_port_cmd,
	&dev_attr_em_buffer,
	&dev_attr_em_message_supported,
	NULL
};
EXPORT_SYMBOL_GPL(ahci_shost_attrs);
124
/* Default per-device (scsi_device) attribute list; NULL-terminated. */
struct device_attribute *ahci_sdev_attrs[] = {
	&dev_attr_sw_activity,
	&dev_attr_unload_heads,
	&dev_attr_ncq_prio_enable,
	NULL
};
EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
132
/*
 * Standard AHCI port operations.  Inherits the SATA port-multiplier ops
 * and overrides the hooks that AHCI implements through its own MMIO
 * command-list / received-FIS machinery.
 */
struct ata_port_operations ahci_ops = {
	.inherits		= &sata_pmp_port_ops,

	/* command submission path */
	.qc_defer		= ahci_pmp_qc_defer,
	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,
	.qc_fill_rtf		= ahci_qc_fill_rtf,

	/* error handling and reset */
	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,
	.softreset		= ahci_softreset,
	.hardreset		= ahci_hardreset,
	.postreset		= ahci_postreset,
	.pmp_softreset		= ahci_softreset,
	.error_handler		= ahci_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,
	.dev_config		= ahci_dev_config,

	/* SControl/SStatus access and port-multiplier hotplug */
	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,
	.pmp_attach		= ahci_pmp_attach,
	.pmp_detach		= ahci_pmp_detach,

	/* link power management and enclosure LEDs */
	.set_lpm		= ahci_set_lpm,
	.em_show		= ahci_led_show,
	.em_store		= ahci_led_store,
	.sw_activity_show	= ahci_activity_show,
	.sw_activity_store	= ahci_activity_store,
	.transmit_led_message	= ahci_transmit_led_message,
#ifdef CONFIG_PM
	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,
#endif
	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};
EXPORT_SYMBOL_GPL(ahci_ops);
170
/*
 * Variant of ahci_ops for controllers whose port-multiplier softreset
 * needs the retrying flavor; everything else is inherited unchanged.
 */
struct ata_port_operations ahci_pmp_retry_srst_ops = {
	.inherits		= &ahci_ops,
	.softreset		= ahci_pmp_retry_softreset,
};
EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
176
/* Enclosure management message support; on by default, read-only param. */
static bool ahci_em_messages __read_mostly = true;
module_param(ahci_em_messages, bool, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
	"AHCI Enclosure Management Message control (0 = off, 1 = on)");

/* device sleep idle timeout in ms */
static int devslp_idle_timeout __read_mostly = 1000;
module_param(devslp_idle_timeout, int, 0644);
MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout");
187
188static void ahci_enable_ahci(void __iomem *mmio)
189{
190	int i;
191	u32 tmp;
192
193	/* turn on AHCI_EN */
194	tmp = readl(mmio + HOST_CTL);
195	if (tmp & HOST_AHCI_EN)
196		return;
197
198	/* Some controllers need AHCI_EN to be written multiple times.
199	 * Try a few times before giving up.
200	 */
201	for (i = 0; i < 5; i++) {
202		tmp |= HOST_AHCI_EN;
203		writel(tmp, mmio + HOST_CTL);
204		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
205		if (tmp & HOST_AHCI_EN)
206			return;
207		msleep(10);
208	}
209
210	WARN_ON(1);
211}
212
/**
 *	ahci_rpm_get_port - Make sure the port is powered on
 *	@ap: Port to power on
 *
 *	Whenever there is need to access the AHCI host registers outside of
 *	normal execution paths, call this function to make sure the host is
 *	actually powered on.
 *
 *	Must be balanced with a later ahci_rpm_put_port() call.
 */
static int ahci_rpm_get_port(struct ata_port *ap)
{
	/* bumps the runtime-PM usage count and resumes the device if needed */
	return pm_runtime_get_sync(ap->dev);
}
225
/**
 *	ahci_rpm_put_port - Undoes ahci_rpm_get_port()
 *	@ap: Port to power down
 *
 *	Undoes ahci_rpm_get_port() and possibly powers down the AHCI host
 *	if it has no more active users.
 */
static void ahci_rpm_put_port(struct ata_port *ap)
{
	/* drops the usage count; suspend (if any) happens asynchronously */
	pm_runtime_put(ap->dev);
}
237
238static ssize_t ahci_show_host_caps(struct device *dev,
239				   struct device_attribute *attr, char *buf)
240{
241	struct Scsi_Host *shost = class_to_shost(dev);
242	struct ata_port *ap = ata_shost_to_port(shost);
243	struct ahci_host_priv *hpriv = ap->host->private_data;
244
245	return sprintf(buf, "%x\n", hpriv->cap);
246}
247
248static ssize_t ahci_show_host_cap2(struct device *dev,
249				   struct device_attribute *attr, char *buf)
250{
251	struct Scsi_Host *shost = class_to_shost(dev);
252	struct ata_port *ap = ata_shost_to_port(shost);
253	struct ahci_host_priv *hpriv = ap->host->private_data;
254
255	return sprintf(buf, "%x\n", hpriv->cap2);
256}
257
258static ssize_t ahci_show_host_version(struct device *dev,
259				   struct device_attribute *attr, char *buf)
260{
261	struct Scsi_Host *shost = class_to_shost(dev);
262	struct ata_port *ap = ata_shost_to_port(shost);
263	struct ahci_host_priv *hpriv = ap->host->private_data;
264
265	return sprintf(buf, "%x\n", hpriv->version);
266}
267
268static ssize_t ahci_show_port_cmd(struct device *dev,
269				  struct device_attribute *attr, char *buf)
270{
271	struct Scsi_Host *shost = class_to_shost(dev);
272	struct ata_port *ap = ata_shost_to_port(shost);
273	void __iomem *port_mmio = ahci_port_base(ap);
274	ssize_t ret;
275
276	ahci_rpm_get_port(ap);
277	ret = sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
278	ahci_rpm_put_port(ap);
279
280	return ret;
281}
282
/*
 * sysfs read handler for the enclosure-management message buffer.
 * Copies the SGPIO receive buffer into @buf, one 32-bit MMIO read at a
 * time.  Returns the number of bytes copied, -EINVAL if EM/SGPIO is not
 * available or a transmit is pending, -EAGAIN if no message is ready.
 */
static ssize_t ahci_read_em_buffer(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *em_mmio = mmio + hpriv->em_loc;
	u32 em_ctl, msg;
	unsigned long flags;
	size_t count;
	int i;

	/* hold the port lock and keep the host awake for the whole copy */
	ahci_rpm_get_port(ap);
	spin_lock_irqsave(ap->lock, flags);

	em_ctl = readl(mmio + HOST_EM_CTL);
	if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
		spin_unlock_irqrestore(ap->lock, flags);
		ahci_rpm_put_port(ap);
		return -EINVAL;
	}

	/* no message received yet; tell userspace to retry */
	if (!(em_ctl & EM_CTL_MR)) {
		spin_unlock_irqrestore(ap->lock, flags);
		ahci_rpm_put_port(ap);
		return -EAGAIN;
	}

	/* without a single shared buffer, RX follows the TX area */
	if (!(em_ctl & EM_CTL_SMB))
		em_mmio += hpriv->em_buf_sz;

	count = hpriv->em_buf_sz;

	/* the count should not be larger than PAGE_SIZE */
	if (count > PAGE_SIZE) {
		if (printk_ratelimit())
			ata_port_warn(ap,
				      "EM read buffer size too large: "
				      "buffer size %u, page size %lu\n",
				      hpriv->em_buf_sz, PAGE_SIZE);
		count = PAGE_SIZE;
	}

	/* unpack each little-endian 32-bit word into four output bytes */
	for (i = 0; i < count; i += 4) {
		msg = readl(em_mmio + i);
		buf[i] = msg & 0xff;
		buf[i + 1] = (msg >> 8) & 0xff;
		buf[i + 2] = (msg >> 16) & 0xff;
		buf[i + 3] = (msg >> 24) & 0xff;
	}

	spin_unlock_irqrestore(ap->lock, flags);
	ahci_rpm_put_port(ap);

	return i;
}
341
342static ssize_t ahci_store_em_buffer(struct device *dev,
343				    struct device_attribute *attr,
344				    const char *buf, size_t size)
345{
346	struct Scsi_Host *shost = class_to_shost(dev);
347	struct ata_port *ap = ata_shost_to_port(shost);
348	struct ahci_host_priv *hpriv = ap->host->private_data;
349	void __iomem *mmio = hpriv->mmio;
350	void __iomem *em_mmio = mmio + hpriv->em_loc;
351	const unsigned char *msg_buf = buf;
352	u32 em_ctl, msg;
353	unsigned long flags;
354	int i;
355
356	/* check size validity */
357	if (!(ap->flags & ATA_FLAG_EM) ||
358	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
359	    size % 4 || size > hpriv->em_buf_sz)
360		return -EINVAL;
361
362	ahci_rpm_get_port(ap);
363	spin_lock_irqsave(ap->lock, flags);
364
365	em_ctl = readl(mmio + HOST_EM_CTL);
366	if (em_ctl & EM_CTL_TM) {
367		spin_unlock_irqrestore(ap->lock, flags);
368		ahci_rpm_put_port(ap);
369		return -EBUSY;
370	}
371
372	for (i = 0; i < size; i += 4) {
373		msg = msg_buf[i] | msg_buf[i + 1] << 8 |
374		      msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24;
375		writel(msg, em_mmio + i);
376	}
377
378	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
379
380	spin_unlock_irqrestore(ap->lock, flags);
381	ahci_rpm_put_port(ap);
382
383	return size;
384}
385
386static ssize_t ahci_show_em_supported(struct device *dev,
387				      struct device_attribute *attr, char *buf)
388{
389	struct Scsi_Host *shost = class_to_shost(dev);
390	struct ata_port *ap = ata_shost_to_port(shost);
391	struct ahci_host_priv *hpriv = ap->host->private_data;
392	void __iomem *mmio = hpriv->mmio;
393	u32 em_ctl;
394
395	ahci_rpm_get_port(ap);
396	em_ctl = readl(mmio + HOST_EM_CTL);
397	ahci_rpm_put_port(ap);
398
399	return sprintf(buf, "%s%s%s%s\n",
400		       em_ctl & EM_CTL_LED ? "led " : "",
401		       em_ctl & EM_CTL_SAFTE ? "saf-te " : "",
402		       em_ctl & EM_CTL_SES ? "ses-2 " : "",
403		       em_ctl & EM_CTL_SGPIO ? "sgpio " : "");
404}
405
/**
 *	ahci_save_initial_config - Save and fixup initial config values
 *	@dev: target AHCI device
 *	@hpriv: host private area to store config values
 *
 *	Some registers containing configuration info might be setup by
 *	BIOS and might be cleared on reset.  This function saves the
 *	initial values of those registers into @hpriv such that they
 *	can be restored after controller reset.
 *
 *	If inconsistent, config values are fixed up by this function.
 *
 *	If it is not set already this function sets hpriv->start_engine to
 *	ahci_start_engine.
 *
 *	LOCKING:
 *	None.
 */
void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
{
	void __iomem *mmio = hpriv->mmio;
	u32 cap, cap2, vers, port_map;
	int i;

	/* make sure AHCI mode is enabled before accessing CAP */
	ahci_enable_ahci(mmio);

	/* Values prefixed with saved_ are written back to host after
	 * reset.  Values without are used for driver operation.
	 */
	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);

	/* CAP2 register is only defined for AHCI 1.2 and later */
	vers = readl(mmio + HOST_VERSION);
	if ((vers >> 16) > 1 ||
	   ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
		hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
	else
		hpriv->saved_cap2 = cap2 = 0;

	/*
	 * Apply AHCI_HFLAG_* quirks: each flag either masks off a
	 * capability the hardware advertises but cannot deliver, or
	 * forces one on that the hardware fails to advertise.
	 */

	/* some chips have errata preventing 64bit use */
	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
		dev_info(dev, "controller can't do 64bit DMA, forcing 32bit\n");
		cap &= ~HOST_CAP_64;
	}

	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
		dev_info(dev, "controller can't do NCQ, turning off CAP_NCQ\n");
		cap &= ~HOST_CAP_NCQ;
	}

	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
		dev_info(dev, "controller can do NCQ, turning on CAP_NCQ\n");
		cap |= HOST_CAP_NCQ;
	}

	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
		dev_info(dev, "controller can't do PMP, turning off CAP_PMP\n");
		cap &= ~HOST_CAP_PMP;
	}

	if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
		dev_info(dev,
			 "controller can't do SNTF, turning off CAP_SNTF\n");
		cap &= ~HOST_CAP_SNTF;
	}

	if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
		dev_info(dev,
			 "controller can't do DEVSLP, turning off\n");
		cap2 &= ~HOST_CAP2_SDS;
		cap2 &= ~HOST_CAP2_SADM;
	}

	if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
		dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
		cap |= HOST_CAP_FBS;
	}

	if ((cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_NO_FBS)) {
		dev_info(dev, "controller can't do FBS, turning off CAP_FBS\n");
		cap &= ~HOST_CAP_FBS;
	}

	if (!(cap & HOST_CAP_ALPM) && (hpriv->flags & AHCI_HFLAG_YES_ALPM)) {
		dev_info(dev, "controller can do ALPM, turning on CAP_ALPM\n");
		cap |= HOST_CAP_ALPM;
	}

	if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) {
		dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n");
		cap &= ~HOST_CAP_SXS;
	}

	/* a forced port map replaces the hardware's PI entirely */
	if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
			 port_map, hpriv->force_port_map);
		port_map = hpriv->force_port_map;
		hpriv->saved_port_map = port_map;
	}

	/* a mask hides broken ports without touching the saved value */
	if (hpriv->mask_port_map) {
		dev_warn(dev, "masking port_map 0x%x -> 0x%x\n",
			port_map,
			port_map & hpriv->mask_port_map);
		port_map &= hpriv->mask_port_map;
	}

	/* cross check port_map and cap.n_ports */
	if (port_map) {
		int map_ports = 0;

		for (i = 0; i < AHCI_MAX_PORTS; i++)
			if (port_map & (1 << i))
				map_ports++;

		/* If PI has more ports than n_ports, whine, clear
		 * port_map and let it be generated from n_ports.
		 */
		if (map_ports > ahci_nr_ports(cap)) {
			dev_warn(dev,
				 "implemented port map (0x%x) contains more ports than nr_ports (%u), using nr_ports\n",
				 port_map, ahci_nr_ports(cap));
			port_map = 0;
		}
	}

	/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
	if (!port_map && vers < 0x10300) {
		port_map = (1 << ahci_nr_ports(cap)) - 1;
		dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);

		/* write the fixed up value to the PI register */
		hpriv->saved_port_map = port_map;
	}

	/* record values to use during operation */
	hpriv->cap = cap;
	hpriv->cap2 = cap2;
	hpriv->version = readl(mmio + HOST_VERSION);
	hpriv->port_map = port_map;

	/* install default hooks unless the platform driver set its own */
	if (!hpriv->start_engine)
		hpriv->start_engine = ahci_start_engine;

	if (!hpriv->stop_engine)
		hpriv->stop_engine = ahci_stop_engine;

	if (!hpriv->irq_handler)
		hpriv->irq_handler = ahci_single_level_irq_intr;
}
EXPORT_SYMBOL_GPL(ahci_save_initial_config);
559
/**
 *	ahci_restore_initial_config - Restore initial config
 *	@host: target ATA host
 *
 *	Restore initial config stored by ahci_save_initial_config().
 *	A global controller reset may have cleared these registers.
 *
 *	LOCKING:
 *	None.
 */
static void ahci_restore_initial_config(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;

	writel(hpriv->saved_cap, mmio + HOST_CAP);
	/* CAP2 only exists on AHCI 1.2+; a zero saved value means "absent" */
	if (hpriv->saved_cap2)
		writel(hpriv->saved_cap2, mmio + HOST_CAP2);
	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
}
580
581static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
582{
583	static const int offset[] = {
584		[SCR_STATUS]		= PORT_SCR_STAT,
585		[SCR_CONTROL]		= PORT_SCR_CTL,
586		[SCR_ERROR]		= PORT_SCR_ERR,
587		[SCR_ACTIVE]		= PORT_SCR_ACT,
588		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
589	};
590	struct ahci_host_priv *hpriv = ap->host->private_data;
591
592	if (sc_reg < ARRAY_SIZE(offset) &&
593	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
594		return offset[sc_reg];
595	return 0;
596}
597
598static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
599{
600	void __iomem *port_mmio = ahci_port_base(link->ap);
601	int offset = ahci_scr_offset(link->ap, sc_reg);
602
603	if (offset) {
604		*val = readl(port_mmio + offset);
605		return 0;
606	}
607	return -EINVAL;
608}
609
610static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
611{
612	void __iomem *port_mmio = ahci_port_base(link->ap);
613	int offset = ahci_scr_offset(link->ap, sc_reg);
614
615	if (offset) {
616		writel(val, port_mmio + offset);
617		return 0;
618	}
619	return -EINVAL;
620}
621
622void ahci_start_engine(struct ata_port *ap)
623{
624	void __iomem *port_mmio = ahci_port_base(ap);
625	u32 tmp;
626
627	/* start DMA */
628	tmp = readl(port_mmio + PORT_CMD);
629	tmp |= PORT_CMD_START;
630	writel(tmp, port_mmio + PORT_CMD);
631	readl(port_mmio + PORT_CMD); /* flush */
632}
633EXPORT_SYMBOL_GPL(ahci_start_engine);
634
/*
 * Stop the port's command-list DMA engine (clear PxCMD.ST and wait for
 * PxCMD.CR to drop).  Returns 0 on success, -EIO if the engine refuses
 * to stop or the port could not be woken first, -ENODEV if the
 * controller has vanished.
 */
int ahci_stop_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u32 tmp;

	/*
	 * On some controllers, stopping a port's DMA engine while the port
	 * is in ALPM state (partial or slumber) results in failures on
	 * subsequent DMA engine starts.  For those controllers, put the
	 * port back in active state before stopping its DMA engine.
	 */
	if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
	    (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
	    ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
		dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
		return -EIO;
	}

	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/*
	 * Don't try to issue commands but return with ENODEV if the
	 * AHCI controller not available anymore (e.g. due to PCIe hot
	 * unplugging). Otherwise a 500ms delay for each port is added.
	 */
	if (tmp == 0xffffffff) {
		dev_err(ap->host->dev, "AHCI controller unavailable!\n");
		return -ENODEV;
	}

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for engine to stop. This could be as long as 500 msec */
	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_stop_engine);
683
/*
 * Program the command-list and received-FIS DMA addresses and enable
 * FIS reception (PxCMD.FRE).  The high dwords are written first and
 * only on 64-bit capable HBAs.
 */
void ahci_start_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	u32 tmp;

	/* set FIS registers */
	if (hpriv->cap & HOST_CAP_64)
		writel((pp->cmd_slot_dma >> 16) >> 16,
		       port_mmio + PORT_LST_ADDR_HI);
	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (hpriv->cap & HOST_CAP_64)
		writel((pp->rx_fis_dma >> 16) >> 16,
		       port_mmio + PORT_FIS_ADDR_HI);
	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}
EXPORT_SYMBOL_GPL(ahci_start_fis_rx);
711
712static int ahci_stop_fis_rx(struct ata_port *ap)
713{
714	void __iomem *port_mmio = ahci_port_base(ap);
715	u32 tmp;
716
717	/* disable FIS reception */
718	tmp = readl(port_mmio + PORT_CMD);
719	tmp &= ~PORT_CMD_FIS_RX;
720	writel(tmp, port_mmio + PORT_CMD);
721
722	/* wait for completion, spec says 500ms, give it 1000 */
723	tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
724				PORT_CMD_FIS_ON, 10, 1000);
725	if (tmp & PORT_CMD_FIS_ON)
726		return -EBUSY;
727
728	return 0;
729}
730
/*
 * Power the port up: spin up the device (if the HBA supports staggered
 * spin-up) and then force the interface into the active state.
 */
static void ahci_power_up(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;

	/* start from PxCMD with the interface-control bits cleared */
	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (hpriv->cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}
748
/*
 * Apply a link power-management policy to @link.
 *
 * For anything below max-power the PHYRDY interrupt is masked first so
 * LPM transitions don't look like hotplug events.  HBA-initiated LPM
 * (ALPM) and aggressive device sleep are then configured according to
 * @policy; ATA_LPM_WAKE_ONLY in @hints requests only a wake to active
 * state without reconfiguring ALPM.  Always returns 0.
 */
static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			unsigned int hints)
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);

	if (policy != ATA_LPM_MAX_POWER) {
		/* wakeup flag only applies to the max power policy */
		hints &= ~ATA_LPM_WAKE_ONLY;

		/*
		 * Disable interrupts on Phy Ready. This keeps us from
		 * getting woken up due to spurious phy ready
		 * interrupts.
		 */
		pp->intr_mask &= ~PORT_IRQ_PHYRDY;
		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

		sata_link_scr_lpm(link, policy, false);
	}

	if (hpriv->cap & HOST_CAP_ALPM) {
		u32 cmd = readl(port_mmio + PORT_CMD);

		if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
			/* for wake-only, leave the ALPM bits untouched */
			if (!(hints & ATA_LPM_WAKE_ONLY))
				cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
			cmd |= PORT_CMD_ICC_ACTIVE;

			writel(cmd, port_mmio + PORT_CMD);
			readl(port_mmio + PORT_CMD);

			/* wait 10ms to be sure we've come out of LPM state */
			ata_msleep(ap, 10);

			if (hints & ATA_LPM_WAKE_ONLY)
				return 0;
		} else {
			/* enable HBA-initiated LPM; ASP selects slumber */
			cmd |= PORT_CMD_ALPE;
			if (policy == ATA_LPM_MIN_POWER)
				cmd |= PORT_CMD_ASP;
			else if (policy == ATA_LPM_MIN_POWER_WITH_PARTIAL)
				cmd &= ~PORT_CMD_ASP;

			/* write out new cmd value */
			writel(cmd, port_mmio + PORT_CMD);
		}
	}

	/* set aggressive device sleep */
	if ((hpriv->cap2 & HOST_CAP2_SDS) &&
	    (hpriv->cap2 & HOST_CAP2_SADM) &&
	    (link->device->flags & ATA_DFLAG_DEVSLP)) {
		if (policy == ATA_LPM_MIN_POWER ||
		    policy == ATA_LPM_MIN_POWER_WITH_PARTIAL)
			ahci_set_aggressive_devslp(ap, true);
		else
			ahci_set_aggressive_devslp(ap, false);
	}

	if (policy == ATA_LPM_MAX_POWER) {
		sata_link_scr_lpm(link, policy, false);

		/* turn PHYRDY IRQ back on */
		pp->intr_mask |= PORT_IRQ_PHYRDY;
		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
	}

	return 0;
}
821
822#ifdef CONFIG_PM
/*
 * Put the port into listen mode for suspend.  Only meaningful when the
 * HBA supports staggered spin-up (otherwise SUD is read-only and the
 * function bails out).
 */
static void ahci_power_down(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd, scontrol;

	if (!(hpriv->cap & HOST_CAP_SSS))
		return;

	/* put device into listen mode, first set PxSCTL.DET to 0 */
	scontrol = readl(port_mmio + PORT_SCR_CTL);
	scontrol &= ~0xf;
	writel(scontrol, port_mmio + PORT_SCR_CTL);

	/* then set PxCMD.SUD to 0 */
	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
	cmd &= ~PORT_CMD_SPIN_UP;
	writel(cmd, port_mmio + PORT_CMD);
}
842#endif
843
/*
 * Bring a port into operating state: enable FIS reception, start the
 * DMA engine (unless the platform delays that), restore enclosure LED
 * state and set up software activity blinking.
 */
static void ahci_start_port(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_link *link;
	struct ahci_em_priv *emp;
	ssize_t rc;
	int i;

	/* enable FIS reception */
	ahci_start_fis_rx(ap);

	/* enable DMA */
	if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE))
		hpriv->start_engine(ap);

	/* turn on LEDs */
	if (ap->flags & ATA_FLAG_EM) {
		ata_for_each_link(link, ap, EDGE) {
			emp = &pp->em_priv[link->pmp];

			/* EM Transmit bit maybe busy during init */
			for (i = 0; i < EM_MAX_RETRY; i++) {
				rc = ap->ops->transmit_led_message(ap,
							       emp->led_state,
							       4);
				/*
				 * If busy, give a breather but do not
				 * release EH ownership by using msleep()
				 * instead of ata_msleep().  EM Transmit
				 * bit is busy for the whole host and
				 * releasing ownership will cause other
				 * ports to fail the same way.
				 */
				if (rc == -EBUSY)
					msleep(1);
				else
					break;
			}
		}
	}

	/* arm the software-activity blink timers where enabled */
	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
		ata_for_each_link(link, ap, EDGE)
			ahci_init_sw_activity(link);

}
891
892static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
893{
894	int rc;
895	struct ahci_host_priv *hpriv = ap->host->private_data;
896
897	/* disable DMA */
898	rc = hpriv->stop_engine(ap);
899	if (rc) {
900		*emsg = "failed to stop engine";
901		return rc;
902	}
903
904	/* disable FIS reception */
905	rc = ahci_stop_fis_rx(ap);
906	if (rc) {
907		*emsg = "failed stop FIS RX";
908		return rc;
909	}
910
911	return 0;
912}
913
/*
 * Perform a global HBA reset (GHC.HR) and restore the saved config
 * registers afterwards.  Honors the skip_host_reset module parameter.
 * Returns 0 on success, -EIO if the reset bit never self-clears.
 */
int ahci_reset_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 tmp;

	/* we must be in AHCI mode, before using anything
	 * AHCI-specific, such as HOST_RESET.
	 */
	ahci_enable_ahci(mmio);

	/* global controller reset */
	if (!ahci_skip_host_reset) {
		tmp = readl(mmio + HOST_CTL);
		if ((tmp & HOST_RESET) == 0) {
			writel(tmp | HOST_RESET, mmio + HOST_CTL);
			readl(mmio + HOST_CTL); /* flush */
		}

		/*
		 * to perform host reset, OS should set HOST_RESET
		 * and poll until this bit is read to be "0".
		 * reset must complete within 1 second, or
		 * the hardware should be considered fried.
		 */
		tmp = ata_wait_register(NULL, mmio + HOST_CTL, HOST_RESET,
					HOST_RESET, 10, 1000);

		if (tmp & HOST_RESET) {
			dev_err(host->dev, "controller reset failed (0x%x)\n",
				tmp);
			return -EIO;
		}

		/* turn on AHCI mode */
		ahci_enable_ahci(mmio);

		/* Some registers might be cleared on reset.  Restore
		 * initial values.
		 */
		if (!(hpriv->flags & AHCI_HFLAG_NO_WRITE_TO_RO))
			ahci_restore_initial_config(host);
	} else
		dev_info(host->dev, "skipping global host reset\n");

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_reset_controller);
962
963static void ahci_sw_activity(struct ata_link *link)
964{
965	struct ata_port *ap = link->ap;
966	struct ahci_port_priv *pp = ap->private_data;
967	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
968
969	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
970		return;
971
972	emp->activity++;
973	if (!timer_pending(&emp->timer))
974		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
975}
976
/*
 * Timer callback for software activity LEDs.  Toggles the activity LED
 * while I/O is flowing and rearms itself; once activity stops it sets
 * the LED to the configured idle state and lets the timer expire.
 */
static void ahci_sw_activity_blink(struct timer_list *t)
{
	struct ahci_em_priv *emp = from_timer(emp, t, timer);
	struct ata_link *link = emp->link;
	struct ata_port *ap = link->ap;

	unsigned long led_message = emp->led_state;
	u32 activity_led_state;
	unsigned long flags;

	/* address the message to this port/PMP slot */
	led_message &= EM_MSG_LED_VALUE;
	led_message |= ap->port_no | (link->pmp << 8);

	/* check to see if we've had activity.  If so,
	 * toggle state of LED and reset timer.  If not,
	 * turn LED to desired idle state.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (emp->saved_activity != emp->activity) {
		emp->saved_activity = emp->activity;
		/* get the current LED state */
		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;

		/* invert it to make the LED blink */
		if (activity_led_state)
			activity_led_state = 0;
		else
			activity_led_state = 1;

		/* clear old state */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;

		/* toggle state */
		led_message |= (activity_led_state << 16);
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
	} else {
		/* switch to idle */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
		if (emp->blink_policy == BLINK_OFF)
			led_message |= (1 << 16);
	}
	spin_unlock_irqrestore(ap->lock, flags);
	/* transmit outside the lock; the op takes its own locking */
	ap->ops->transmit_led_message(ap, led_message, 4);
}
1020
1021static void ahci_init_sw_activity(struct ata_link *link)
1022{
1023	struct ata_port *ap = link->ap;
1024	struct ahci_port_priv *pp = ap->private_data;
1025	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1026
1027	/* init activity stats, setup timer */
1028	emp->saved_activity = emp->activity = 0;
1029	emp->link = link;
1030	timer_setup(&emp->timer, ahci_sw_activity_blink, 0);
1031
1032	/* check our blink policy and set flag for link if it's enabled */
1033	if (emp->blink_policy)
1034		link->flags |= ATA_LFLAG_SW_ACTIVITY;
1035}
1036
/*
 * ahci_reset_em - reset the HBA's enclosure management controller
 * @host: ATA host with EM support
 *
 * Requests an EM reset via HOST_EM_CTL.  Returns -EINVAL if a message
 * transmit or a previous reset is still in flight, 0 otherwise.
 */
int ahci_reset_em(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 em_ctl;

	em_ctl = readl(mmio + HOST_EM_CTL);
	/* busy transmitting or already resetting - leave it alone */
	if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
		return -EINVAL;

	writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
	return 0;
}
EXPORT_SYMBOL_GPL(ahci_reset_em);
1051
1052static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1053					ssize_t size)
1054{
1055	struct ahci_host_priv *hpriv = ap->host->private_data;
1056	struct ahci_port_priv *pp = ap->private_data;
1057	void __iomem *mmio = hpriv->mmio;
1058	u32 em_ctl;
1059	u32 message[] = {0, 0};
1060	unsigned long flags;
1061	int pmp;
1062	struct ahci_em_priv *emp;
1063
1064	/* get the slot number from the message */
1065	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1066	if (pmp < EM_MAX_SLOTS)
1067		emp = &pp->em_priv[pmp];
1068	else
1069		return -EINVAL;
1070
1071	ahci_rpm_get_port(ap);
1072	spin_lock_irqsave(ap->lock, flags);
1073
1074	/*
1075	 * if we are still busy transmitting a previous message,
1076	 * do not allow
1077	 */
1078	em_ctl = readl(mmio + HOST_EM_CTL);
1079	if (em_ctl & EM_CTL_TM) {
1080		spin_unlock_irqrestore(ap->lock, flags);
1081		ahci_rpm_put_port(ap);
1082		return -EBUSY;
1083	}
1084
1085	if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
1086		/*
1087		 * create message header - this is all zero except for
1088		 * the message size, which is 4 bytes.
1089		 */
1090		message[0] |= (4 << 8);
1091
1092		/* ignore 0:4 of byte zero, fill in port info yourself */
1093		message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1094
1095		/* write message to EM_LOC */
1096		writel(message[0], mmio + hpriv->em_loc);
1097		writel(message[1], mmio + hpriv->em_loc+4);
1098
1099		/*
1100		 * tell hardware to transmit the message
1101		 */
1102		writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1103	}
1104
1105	/* save off new led state for port/slot */
1106	emp->led_state = state;
1107
1108	spin_unlock_irqrestore(ap->lock, flags);
1109	ahci_rpm_put_port(ap);
1110
1111	return size;
1112}
1113
1114static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1115{
1116	struct ahci_port_priv *pp = ap->private_data;
1117	struct ata_link *link;
1118	struct ahci_em_priv *emp;
1119	int rc = 0;
1120
1121	ata_for_each_link(link, ap, EDGE) {
1122		emp = &pp->em_priv[link->pmp];
1123		rc += sprintf(buf, "%lx\n", emp->led_state);
1124	}
1125	return rc;
1126}
1127
1128static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1129				size_t size)
1130{
1131	unsigned int state;
1132	int pmp;
1133	struct ahci_port_priv *pp = ap->private_data;
1134	struct ahci_em_priv *emp;
1135
1136	if (kstrtouint(buf, 0, &state) < 0)
1137		return -EINVAL;
1138
1139	/* get the slot number from the message */
1140	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1141	if (pmp < EM_MAX_SLOTS) {
1142		pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
1143		emp = &pp->em_priv[pmp];
1144	} else {
1145		return -EINVAL;
1146	}
1147
1148	/* mask off the activity bits if we are in sw_activity
1149	 * mode, user should turn off sw_activity before setting
1150	 * activity led through em_message
1151	 */
1152	if (emp->blink_policy)
1153		state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1154
1155	return ap->ops->transmit_led_message(ap, state, size);
1156}
1157
/*
 * ahci_activity_store - set the software activity LED blink policy
 * @dev: device whose link policy is being changed
 * @val: OFF, BLINK_ON or BLINK_OFF
 *
 * Updates the link's SW_ACTIVITY flag and, where needed, transmits a
 * new idle LED state.  Always returns 0.
 */
static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	u32 port_led_state = emp->led_state;

	/* save the desired Activity LED behavior */
	if (val == OFF) {
		/* clear LFLAG */
		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);

		/* set the LED to OFF */
		port_led_state &= EM_MSG_LED_VALUE_OFF;
		port_led_state |= (ap->port_no | (link->pmp << 8));
		ap->ops->transmit_led_message(ap, port_led_state, 4);
	} else {
		link->flags |= ATA_LFLAG_SW_ACTIVITY;
		if (val == BLINK_OFF) {
			/* set LED to ON for idle */
			port_led_state &= EM_MSG_LED_VALUE_OFF;
			port_led_state |= (ap->port_no | (link->pmp << 8));
			port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
			ap->ops->transmit_led_message(ap, port_led_state, 4);
		}
	}
	emp->blink_policy = val;
	return 0;
}
1188
1189static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1190{
1191	struct ata_link *link = dev->link;
1192	struct ata_port *ap = link->ap;
1193	struct ahci_port_priv *pp = ap->private_data;
1194	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1195
1196	/* display the saved value of activity behavior for this
1197	 * disk.
1198	 */
1199	return sprintf(buf, "%d\n", emp->blink_policy);
1200}
1201
/*
 * Clear any latched error/interrupt state on @ap: SError, the port's
 * PxIS register (write-to-clear) and its bit in HOST_IRQ_STAT.  The
 * order matters: port-level status is cleared before the host-level
 * summary bit.
 */
static void ahci_port_clear_pending_irq(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* clear SError */
	tmp = readl(port_mmio + PORT_SCR_ERR);
	dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
	writel(tmp, port_mmio + PORT_SCR_ERR);

	/* clear port IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
	if (tmp)
		writel(tmp, port_mmio + PORT_IRQ_STAT);

	writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
}
1221
/*
 * Basic per-port initialization used at probe/resume: make sure the
 * port engine is stopped, clear stale IRQ/error state and flag
 * external (eSATA) ports when the HBA supports them.
 */
static void ahci_port_init(struct device *dev, struct ata_port *ap,
			   int port_no, void __iomem *mmio,
			   void __iomem *port_mmio)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	const char *emsg = NULL;
	int rc;
	u32 tmp;

	/* make sure port is not active */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		dev_warn(dev, "%s (%d)\n", emsg, rc);

	ahci_port_clear_pending_irq(ap);

	/* mark esata ports */
	tmp = readl(port_mmio + PORT_CMD);
	if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
		ap->pflags |= ATA_PFLAG_EXTERNAL;
}
1243
/*
 * ahci_init_controller - initialize all enabled ports and turn on
 * host-level interrupt delivery.
 * @host: target ATA host
 *
 * Dummy ports are skipped.  The final readl() after setting
 * HOST_IRQ_EN flushes the posted write.
 */
void ahci_init_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	int i;
	void __iomem *port_mmio;
	u32 tmp;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		port_mmio = ahci_port_base(ap);
		if (ata_port_is_dummy(ap))
			continue;

		ahci_port_init(host->dev, ap, i, mmio, port_mmio);
	}

	tmp = readl(mmio + HOST_CTL);
	dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	tmp = readl(mmio + HOST_CTL);
	dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
}
EXPORT_SYMBOL_GPL(ahci_init_controller);
1269
1270static void ahci_dev_config(struct ata_device *dev)
1271{
1272	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1273
1274	if (hpriv->flags & AHCI_HFLAG_SECT255) {
1275		dev->max_sectors = 255;
1276		ata_dev_info(dev,
1277			     "SB600 AHCI: limiting to 255 sectors per cmd\n");
1278	}
1279}
1280
1281unsigned int ahci_dev_classify(struct ata_port *ap)
1282{
1283	void __iomem *port_mmio = ahci_port_base(ap);
1284	struct ata_taskfile tf;
1285	u32 tmp;
1286
1287	tmp = readl(port_mmio + PORT_SIG);
1288	tf.lbah		= (tmp >> 24)	& 0xff;
1289	tf.lbam		= (tmp >> 16)	& 0xff;
1290	tf.lbal		= (tmp >> 8)	& 0xff;
1291	tf.nsect	= (tmp)		& 0xff;
1292
1293	return ata_dev_classify(&tf);
1294}
1295EXPORT_SYMBOL_GPL(ahci_dev_classify);
1296
1297void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1298			u32 opts)
1299{
1300	dma_addr_t cmd_tbl_dma;
1301
1302	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1303
1304	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1305	pp->cmd_slot[tag].status = 0;
1306	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1307	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1308}
1309EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot);
1310
/*
 * ahci_kick_engine - recover the port's command engine
 * @ap: port to kick
 *
 * Stops the engine and, when the device looks busy or a PMP is
 * attached, performs a Command List Override (CLO) before restarting.
 * Returns 0 on success, -EOPNOTSUPP if CLO is needed but the HBA
 * lacks it, -EIO if CLO did not self-clear within 500ms.
 */
int ahci_kick_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 tmp;
	int busy, rc;

	/* stop engine */
	rc = hpriv->stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO?
	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
	 */
	busy = status & (ATA_BUSY | ATA_DRQ);
	if (!busy && !sata_pmp_attached(ap)) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	rc = 0;
	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	hpriv->start_engine(ap);
	return rc;
}
EXPORT_SYMBOL_GPL(ahci_kick_engine);
1355
/*
 * Build and issue a command in slot 0, polling for completion.
 * @timeout_msec of 0 means fire-and-forget (the trailing readl just
 * flushes the posted write).  Returns 0 on success, -EBUSY if the
 * command did not complete in time (the engine is kicked first).
 */
static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned long timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* set port value for softreset of Port Multiplier */
	if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
		tmp = readl(port_mmio + PORT_FBS);
		tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
		tmp |= pmp << PORT_FBS_DEV_OFFSET;
		writel(tmp, port_mmio + PORT_FBS);
		pp->fbs_last_dev = pmp;
	}

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		tmp = ata_wait_register(ap, port_mmio + PORT_CMD_ISSUE,
					0x1, 0x1, 1, timeout_msec);
		if (tmp & 0x1) {
			ahci_kick_engine(ap);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}
1394
/*
 * ahci_do_softreset - perform an ATA software reset on @link
 * @link: link to reset
 * @class: out - resulting device class
 * @pmp: PMP port number to address (or the host-link value)
 * @deadline: jiffies deadline for the whole operation
 * @check_ready: readiness predicate polled after the reset sequence
 *
 * Issues the two-FIS SRST sequence (set then clear SRST) per
 * AHCI-1.1 10.4.1 and waits for @check_ready.  FBS is temporarily
 * disabled for PMP-attached links as required by AHCI-1.2 9.3.9.
 * Returns 0 on success, negative errno on failure.
 */
int ahci_do_softreset(struct ata_link *link, unsigned int *class,
		      int pmp, unsigned long deadline,
		      int (*check_ready)(struct ata_link *link))
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	const char *reason = NULL;
	unsigned long now, msecs;
	struct ata_taskfile tf;
	bool fbs_disabled = false;
	int rc;

	DPRINTK("ENTER\n");

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_kick_engine(ap);
	if (rc && rc != -EOPNOTSUPP)
		ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);

	/*
	 * According to AHCI-1.2 9.3.9: if FBS is enable, software shall
	 * clear PxFBS.EN to '0' prior to issuing software reset to devices
	 * that is attached to port multiplier.
	 */
	if (!ata_is_host_link(link) && pp->fbs_enabled) {
		ahci_disable_fbs(ap);
		fbs_disabled = true;
	}

	ata_tf_init(link->device, &tf);

	/* issue the first H2D Register FIS */
	msecs = 0;
	now = jiffies;
	if (time_after(deadline, now))
		msecs = jiffies_to_msecs(deadline - now);

	tf.ctl |= ATA_SRST;
	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	ata_msleep(ap, 1);

	/* issue the second H2D Register FIS */
	tf.ctl &= ~ATA_SRST;
	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);

	/* wait for link to become ready */
	rc = ata_wait_after_reset(link, deadline, check_ready);
	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
		/*
		 * Workaround for cases where link online status can't
		 * be trusted.  Treat device readiness timeout as link
		 * offline.
		 */
		ata_link_info(link, "device not ready, treating as offline\n");
		*class = ATA_DEV_NONE;
	} else if (rc) {
		/* link occupied, -ENODEV too is an error */
		reason = "device not ready";
		goto fail;
	} else
		*class = ahci_dev_classify(ap);

	/* re-enable FBS if disabled before */
	if (fbs_disabled)
		ahci_enable_fbs(ap);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail:
	ata_link_err(link, "softreset failed (%s)\n", reason);
	return rc;
}
1476
1477int ahci_check_ready(struct ata_link *link)
1478{
1479	void __iomem *port_mmio = ahci_port_base(link->ap);
1480	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1481
1482	return ata_check_ready(status);
1483}
1484EXPORT_SYMBOL_GPL(ahci_check_ready);
1485
1486static int ahci_softreset(struct ata_link *link, unsigned int *class,
1487			  unsigned long deadline)
1488{
1489	int pmp = sata_srst_pmp(link);
1490
1491	DPRINTK("ENTER\n");
1492
1493	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1494}
1495EXPORT_SYMBOL_GPL(ahci_do_softreset);
1496
/*
 * Readiness check used while softresetting through a possibly bad
 * PMP: fail fast when the IPMS (bad PMP) interrupt bit is latched
 * instead of waiting out the TFDATA poll.
 */
static int ahci_bad_pmp_check_ready(struct ata_link *link)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);

	/*
	 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
	 * which can save timeout delay.
	 */
	if (irq_status & PORT_IRQ_BAD_PMP)
		return -EIO;

	return ata_check_ready(status);
}
1512
/*
 * Softreset variant that works around HBAs which set IPMS when a
 * plain SATA device (no PMP) answers a PMP-addressed SRST: on that
 * failure, retry the reset addressed to PMP port 0.
 */
static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
				    unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	int pmp = sata_srst_pmp(link);
	int rc;
	u32 irq_sts;

	DPRINTK("ENTER\n");

	rc = ahci_do_softreset(link, class, pmp, deadline,
			       ahci_bad_pmp_check_ready);

	/*
	 * Soft reset fails with IPMS set when PMP is enabled but
	 * SATA HDD/ODD is connected to SATA port, do soft reset
	 * again to port 0.
	 */
	if (rc == -EIO) {
		irq_sts = readl(port_mmio + PORT_IRQ_STAT);
		if (irq_sts & PORT_IRQ_BAD_PMP) {
			ata_link_warn(link,
					"applying PMP SRST workaround "
					"and retrying\n");
			rc = ahci_do_softreset(link, class, 0, deadline,
					       ahci_check_ready);
		}
	}

	return rc;
}
1545
/*
 * ahci_do_hardreset - COMRESET @link
 * @link: link to reset
 * @class: out - resulting device class (only valid when *online)
 * @deadline: jiffies deadline
 * @online: out - whether the link came back online
 *
 * Stops the engine, pre-marks the D2H RX area busy so readiness can
 * be detected from the received FIS, performs the PHY reset and
 * restarts the engine.
 */
int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
		      unsigned long deadline, bool *online)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	hpriv->stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = ATA_BUSY;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	ahci_port_clear_pending_irq(ap);

	rc = sata_link_hardreset(link, timing, deadline, online,
				 ahci_check_ready);

	hpriv->start_engine(ap);

	if (*online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}
EXPORT_SYMBOL_GPL(ahci_do_hardreset);
1580
/* Standard hardreset; the online status is only needed internally. */
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	bool online;

	return ahci_do_hardreset(link, class, deadline, &online);
}
1588
1589static void ahci_postreset(struct ata_link *link, unsigned int *class)
1590{
1591	struct ata_port *ap = link->ap;
1592	void __iomem *port_mmio = ahci_port_base(ap);
1593	u32 new_tmp, tmp;
1594
1595	ata_std_postreset(link, class);
1596
1597	/* Make sure port's ATAPI bit is set appropriately */
1598	new_tmp = tmp = readl(port_mmio + PORT_CMD);
1599	if (*class == ATA_DEV_ATAPI)
1600		new_tmp |= PORT_CMD_ATAPI;
1601	else
1602		new_tmp &= ~PORT_CMD_ATAPI;
1603	if (new_tmp != tmp) {
1604		writel(new_tmp, port_mmio + PORT_CMD);
1605		readl(port_mmio + PORT_CMD); /* flush */
1606	}
1607}
1608
1609static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1610{
1611	struct scatterlist *sg;
1612	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1613	unsigned int si;
1614
1615	VPRINTK("ENTER\n");
1616
1617	/*
1618	 * Next, the S/G list.
1619	 */
1620	for_each_sg(qc->sg, sg, qc->n_elem, si) {
1621		dma_addr_t addr = sg_dma_address(sg);
1622		u32 sg_len = sg_dma_len(sg);
1623
1624		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1625		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1626		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1627	}
1628
1629	return si;
1630}
1631
1632static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
1633{
1634	struct ata_port *ap = qc->ap;
1635	struct ahci_port_priv *pp = ap->private_data;
1636
1637	if (!sata_pmp_attached(ap) || pp->fbs_enabled)
1638		return ata_std_qc_defer(qc);
1639	else
1640		return sata_pmp_qc_defer_cmd_switch(qc);
1641}
1642
/*
 * ahci_qc_prep - prepare the command table and slot for @qc
 *
 * Builds the H2D command FIS (plus CDB for ATAPI), fills the PRD
 * table for DMA-mapped commands and stores the command header
 * options in the slot matching qc->hw_tag.  Never fails.
 */
static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);

	return AC_ERR_OK;
}
1682
/*
 * Clear an FBS single-device error by setting PxFBS.DEC and waiting
 * briefly for hardware to self-clear it.  Must only be called with
 * FBS enabled (enforced by the BUG_ON).
 */
static void ahci_fbs_dec_intr(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs = readl(port_mmio + PORT_FBS);
	int retries = 3;

	DPRINTK("ENTER\n");
	BUG_ON(!pp->fbs_enabled);

	/* time to wait for DEC is not specified by AHCI spec,
	 * add a retry loop for safety.
	 */
	writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	while ((fbs & PORT_FBS_DEC) && retries--) {
		udelay(1);
		fbs = readl(port_mmio + PORT_FBS);
	}

	if (fbs & PORT_FBS_DEC)
		dev_err(ap->host->dev, "failed to clear device error\n");
}
1706
/*
 * ahci_error_intr - decode an error interrupt and route it to EH
 * @ap: port that raised the error
 * @irq_stat: snapshot of PxIS
 *
 * Identifies the affected link (via PxFBS when FBS is enabled),
 * records the IRQ status and SError in the EH info, translates the
 * individual error bits into err_mask/action, then freezes or aborts
 * as appropriate.  Called with ap->lock held.
 */
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	bool fbs_need_dec = false;
	u32 serror;

	/* determine active link with error */
	if (pp->fbs_enabled) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 fbs = readl(port_mmio + PORT_FBS);
		int pmp = fbs >> PORT_FBS_DWE_OFFSET;

		/* single-device error: hardware reports the failing PMP */
		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) {
			link = &ap->pmp_link[pmp];
			fbs_need_dec = true;
		}

	} else
		ata_for_each_link(link, ap, EDGE)
			if (ata_link_active(link))
				break;

	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
	ahci_scr_write(&ap->link, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link.  There's no active qc on NCQ errors.  It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = pp->rx_fis + RX_FIS_UNK;

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x" ,
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		if (fbs_need_dec)
			active_ehi->err_mask |= AC_ERR_DEV;
		else {
			host_ehi->err_mask |= AC_ERR_ATA_BUS;
			host_ehi->action |= ATA_EH_RESET;
		}

		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
			irq_stat & PORT_IRQ_CONNECT ?
			"connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else if (fbs_need_dec) {
		ata_link_abort(link);
		ahci_fbs_dec_intr(ap);
	} else
		ata_port_abort(ap);
}
1817
/*
 * ahci_handle_port_interrupt - service one port's interrupt events
 * @ap: port being serviced
 * @port_mmio: the port's register base
 * @status: snapshot of PxIS (already acked by the caller)
 *
 * Routes error events to ahci_error_intr(), handles SDB-FIS-based
 * asynchronous notification and completes finished commands based on
 * PxSACT/PxCI.  Called with ap->lock held.
 */
static void ahci_handle_port_interrupt(struct ata_port *ap,
				       void __iomem *port_mmio, u32 status)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 qc_active = 0;
	int rc;

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	if (sata_lpm_ignore_phy_events(&ap->link)) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some constrollers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 *
			 * Lack of SNotification should not appear in
			 * ahci 1.2, so the workaround is unnecessary
			 * when FBS is enabled.
			 */
			if (pp->fbs_enabled)
				WARN_ON_ONCE(1);
			else {
				const __le32 *f = pp->rx_fis + RX_FIS_SDB;
				u32 f0 = le32_to_cpu(f[0]);
				if (f0 & (1 << 15))
					sata_async_notification(ap);
			}
		}
	}

	/* pp->active_link is not reliable once FBS is enabled, both
	 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
	 * NCQ and non-NCQ commands may be in flight at the same time.
	 */
	if (pp->fbs_enabled) {
		if (ap->qc_active) {
			qc_active = readl(port_mmio + PORT_SCR_ACT);
			qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
		}
	} else {
		/* pp->active_link is valid iff any command is in flight */
		if (ap->qc_active && pp->active_link->sactive)
			qc_active = readl(port_mmio + PORT_SCR_ACT);
		else
			qc_active = readl(port_mmio + PORT_CMD_ISSUE);
	}


	rc = ata_qc_complete_multiple(ap, qc_active);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}
1901
/* Snapshot and ack PxIS (write-to-clear), then handle the events. */
static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 status;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	ahci_handle_port_interrupt(ap, port_mmio, status);
}
1912
/*
 * Hard IRQ handler used when each port has its own vector (multi-MSI):
 * ack PxIS, then service the events under the port lock.
 */
static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
{
	struct ata_port *ap = dev_instance;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 status;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	spin_lock(ap->lock);
	ahci_handle_port_interrupt(ap, port_mmio, status);
	spin_unlock(ap->lock);

	return IRQ_HANDLED;
}
1928
1929u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
1930{
1931	unsigned int i, handled = 0;
1932
1933	for (i = 0; i < host->n_ports; i++) {
1934		struct ata_port *ap;
1935
1936		if (!(irq_masked & (1 << i)))
1937			continue;
1938
1939		ap = host->ports[i];
1940		if (ap) {
1941			ahci_port_intr(ap);
1942		} else {
1943			if (ata_ratelimit())
1944				dev_warn(host->dev,
1945					 "interrupt on disabled port %u\n", i);
1946		}
1947
1948		handled = 1;
1949	}
1950
1951	return handled;
1952}
1953EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
1954
/*
 * Shared level-triggered interrupt handler for single-vector mode:
 * read HOST_IRQ_STAT, dispatch to the flagged ports, then clear the
 * host-level status last (see the comment below on ordering).
 */
static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int rc = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	hpriv = host->private_data;
	mmio = hpriv->mmio;

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	rc = ahci_handle_port_intr(host, irq_masked);

	/* HOST_IRQ_STAT behaves as level triggered latch meaning that
	 * it should be cleared after all the port events are cleared;
	 * otherwise, it will raise a spurious interrupt after each
	 * valid one.  Please read section 10.6.2 of ahci 1.1 for more
	 * information.
	 *
	 * Also, use the unmasked value to clear interrupt as spurious
	 * pending event on a dummy port might cause screaming IRQ.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(rc);
}
1992
/*
 * ahci_qc_issue - issue a prepared command to the hardware
 * @qc: command to issue
 *
 * For NCQ commands PxSACT must be set before PxCI.  When FBS is
 * enabled the device-to-issue field in PxFBS is updated first if the
 * target PMP changed.  Always returns 0.
 */
unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
	 */
	pp->active_link = qc->dev->link;

	if (ata_is_ncq(qc->tf.protocol))
		writel(1 << qc->hw_tag, port_mmio + PORT_SCR_ACT);

	if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
		u32 fbs = readl(port_mmio + PORT_FBS);
		fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
		fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
		writel(fbs, port_mmio + PORT_FBS);
		pp->fbs_last_dev = qc->dev->link->pmp;
	}

	writel(1 << qc->hw_tag, port_mmio + PORT_CMD_ISSUE);

	ahci_sw_activity(qc->dev->link);

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_qc_issue);
2023
/*
 * Fill qc->result_tf from the FIS receive area.  With FBS enabled the
 * RX area is per-PMP, so index by the link's PMP number first.
 */
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ahci_port_priv *pp = qc->ap->private_data;
	u8 *rx_fis = pp->rx_fis;

	if (pp->fbs_enabled)
		rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;

	/*
	 * After a successful execution of an ATA PIO data-in command,
	 * the device doesn't send D2H Reg FIS to update the TF and
	 * the host should take TF and E_Status from the preceding PIO
	 * Setup FIS.
	 */
	if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
	    !(qc->flags & ATA_QCFLAG_FAILED)) {
		ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
		qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
	} else
		ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);

	return true;
}
2047
/* EH freeze: mask all port interrupts by zeroing PxIE. */
static void ahci_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);

	/* turn IRQ off */
	writel(0, port_mmio + PORT_IRQ_MASK);
}
2055
/*
 * EH thaw: clear any IRQ status that accumulated while frozen (port
 * PxIS first, then the host-level summary bit) and restore PxIE to
 * the saved interrupt mask.
 */
static void ahci_thaw(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;
	struct ahci_port_priv *pp = ap->private_data;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
2072
/*
 * ahci_error_handler - AHCI error handling entry point
 * @ap: port to recover
 *
 * Restarts the engine (unless the port is frozen), runs the PMP-aware
 * EH core and stops the engine again if no device survived recovery.
 */
void ahci_error_handler(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;

	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		hpriv->stop_engine(ap);
		hpriv->start_engine(ap);
	}

	sata_pmp_error_handler(ap);

	if (!ata_dev_enabled(ap->link.device))
		hpriv->stop_engine(ap);
}
EXPORT_SYMBOL_GPL(ahci_error_handler);
2089
/*
 * ahci_post_internal_cmd - cleanup after an internal command
 * @qc: queued command that has just completed
 *
 * If the internal command failed, kick the port engine so the HBA
 * forgets about it and can accept new commands.
 */
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		ahci_kick_engine(ap);
}
2098
/*
 * ahci_set_aggressive_devslp - enable or disable aggressive device sleep
 * @ap: target port
 * @sleep: true to enable DevSlp, false to disable it
 *
 * Programs the PORT_DEVSLP register (DITO/MDAT/DETO timing fields plus
 * the ADSE aggressive-enable bit) and toggles the SATA_DEVSLP feature
 * on the attached drive.  Returns early if the port does not advertise
 * DevSlp support (PORT_DEVSLP_DSP clear).
 */
static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_device *dev = ap->link.device;
	u32 devslp, dm, dito, mdat, deto, dito_conf;
	int rc;
	unsigned int err_mask;

	devslp = readl(port_mmio + PORT_DEVSLP);
	if (!(devslp & PORT_DEVSLP_DSP)) {
		dev_info(ap->host->dev, "port does not support device sleep\n");
		return;
	}

	/* disable device sleep */
	if (!sleep) {
		if (devslp & PORT_DEVSLP_ADSE) {
			/* clear ADSE in hardware, then tell the drive */
			writel(devslp & ~PORT_DEVSLP_ADSE,
			       port_mmio + PORT_DEVSLP);
			err_mask = ata_dev_set_feature(dev,
						       SETFEATURES_SATA_DISABLE,
						       SATA_DEVSLP);
			/* AC_ERR_DEV means the drive rejected it; not fatal */
			if (err_mask && err_mask != AC_ERR_DEV)
				ata_dev_warn(dev, "failed to disable DEVSLP\n");
		}
		return;
	}

	/* scale the idle timeout by the DM multiplier; DITO is 10 bits */
	dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
	dito = devslp_idle_timeout / (dm + 1);
	if (dito > 0x3ff)
		dito = 0x3ff;

	/* currently-programmed DITO value */
	dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;

	/* device sleep was already enabled and same dito */
	if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
		return;

	/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
	rc = hpriv->stop_engine(ap);
	if (rc)
		return;

	/* Use the nominal value 10 ms if the read MDAT is zero,
	 * the nominal value of DETO is 20 ms.
	 */
	if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] &
	    ATA_LOG_DEVSLP_VALID_MASK) {
		mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &
		       ATA_LOG_DEVSLP_MDAT_MASK;
		if (!mdat)
			mdat = 10;
		deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];
		if (!deto)
			deto = 20;
	} else {
		mdat = 10;
		deto = 20;
	}

	/* Make dito, mdat, deto bits to 0s */
	devslp &= ~GENMASK_ULL(24, 2);
	devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
		   (mdat << PORT_DEVSLP_MDAT_OFFSET) |
		   (deto << PORT_DEVSLP_DETO_OFFSET) |
		   PORT_DEVSLP_ADSE);
	writel(devslp, port_mmio + PORT_DEVSLP);

	hpriv->start_engine(ap);

	/* enable device sleep feature for the drive */
	err_mask = ata_dev_set_feature(dev,
				       SETFEATURES_SATA_ENABLE,
				       SATA_DEVSLP);
	if (err_mask && err_mask != AC_ERR_DEV)
		ata_dev_warn(dev, "failed to enable DEVSLP\n");
}
2178
/*
 * ahci_enable_fbs - turn on FIS-based switching for a port
 * @ap: target port
 *
 * No-op unless the port supports FBS.  If PORT_FBS_EN is already set
 * just refresh the cached software state.  Otherwise stop the port
 * engine, set PORT_FBS_EN, read the register back to confirm the HBA
 * accepted the write, and restart the engine.
 */
static void ahci_enable_fbs(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;
	int rc;

	if (!pp->fbs_supported)
		return;

	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN) {
		pp->fbs_enabled = true;
		pp->fbs_last_dev = -1; /* initialization */
		return;
	}

	/* engine is stopped before PORT_FBS is modified */
	rc = hpriv->stop_engine(ap);
	if (rc)
		return;

	writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN) {
		dev_info(ap->host->dev, "FBS is enabled\n");
		pp->fbs_enabled = true;
		pp->fbs_last_dev = -1; /* initialization */
	} else
		dev_err(ap->host->dev, "Failed to enable FBS\n");

	hpriv->start_engine(ap);
}
2212
/*
 * ahci_disable_fbs - turn off FIS-based switching for a port
 * @ap: target port
 *
 * Counterpart of ahci_enable_fbs().  If PORT_FBS_EN is already clear
 * just update the cached state; otherwise stop the port engine, clear
 * the bit, verify via read-back, and restart the engine.
 */
static void ahci_disable_fbs(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;
	int rc;

	if (!pp->fbs_supported)
		return;

	fbs = readl(port_mmio + PORT_FBS);
	if ((fbs & PORT_FBS_EN) == 0) {
		pp->fbs_enabled = false;
		return;
	}

	/* engine is stopped before PORT_FBS is modified */
	rc = hpriv->stop_engine(ap);
	if (rc)
		return;

	writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN)
		dev_err(ap->host->dev, "Failed to disable FBS\n");
	else {
		dev_info(ap->host->dev, "FBS is disabled\n");
		pp->fbs_enabled = false;
	}

	hpriv->start_engine(ap);
}
2245
/*
 * ahci_pmp_attach - configure the port for an attached port multiplier
 * @ap: target port
 *
 * Sets PORT_CMD_PMP, enables FIS-based switching where supported, and
 * adds the Bad-PMP interrupt to the cached mask.  The mask is written
 * to hardware only when the port is not frozen (see below).
 */
static void ahci_pmp_attach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd |= PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	ahci_enable_fbs(ap);

	pp->intr_mask |= PORT_IRQ_BAD_PMP;

	/*
	 * We must not change the port interrupt mask register if the
	 * port is marked frozen, the value in pp->intr_mask will be
	 * restored later when the port is thawed.
	 *
	 * Note that during initialization, the port is marked as
	 * frozen since the irq handler is not yet registered.
	 */
	if (!(ap->pflags & ATA_PFLAG_FROZEN))
		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
2271
/*
 * ahci_pmp_detach - undo port multiplier configuration
 * @ap: target port
 *
 * Mirror of ahci_pmp_attach(): disables FIS-based switching, clears
 * PORT_CMD_PMP and removes the Bad-PMP interrupt from the cached
 * mask, writing it to hardware only if the port is not frozen.
 */
static void ahci_pmp_detach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	ahci_disable_fbs(ap);

	cmd = readl(port_mmio + PORT_CMD);
	cmd &= ~PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;

	/* see comment above in ahci_pmp_attach() */
	if (!(ap->pflags & ATA_PFLAG_FROZEN))
		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
2290
/*
 * ahci_port_resume - power up and (re)start a port
 * @ap: port to resume
 *
 * Takes a runtime-PM reference (dropped again in the suspend/stop
 * paths), powers the port up and starts it, then reapplies the PMP
 * configuration to match whether a port multiplier is attached.
 * Always returns 0.
 */
int ahci_port_resume(struct ata_port *ap)
{
	ahci_rpm_get_port(ap);

	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_port_resume);
2306
#ifdef CONFIG_PM
/*
 * ahci_port_suspend - put a port to sleep
 * @ap: port to suspend
 * @mesg: PM message (unused here)
 *
 * De-initializes the port and powers it down on success; on failure
 * logs the error and freezes the port so EH can recover it.  The
 * runtime-PM reference taken in ahci_port_resume() is dropped on
 * both paths.  Returns the ahci_deinit_port() result.
 */
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc == 0)
		ahci_power_down(ap);
	else {
		ata_port_err(ap, "%s (%d)\n", emsg, rc);
		ata_port_freeze(ap);
	}

	ahci_rpm_put_port(ap);
	return rc;
}
#endif
2325
/*
 * ahci_port_start - libata ->port_start hook
 * @ap: port to initialize
 *
 * Allocates the per-port private data and a single DMA-coherent
 * region laid out as: 32-entry command-slot list, received-FIS
 * area(s), then the command table.  Probes FBS capability (which
 * enlarges the RX FIS area to one slot per PMP device), optionally
 * switches to per-port locking for multi-MSI, and finally brings the
 * port up via ahci_port_resume().  All allocations are device-managed,
 * so error paths need no explicit teardown.
 */
static int ahci_port_start(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;
	size_t dma_sz, rx_fis_sz;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* per-port IRQ description: "<driver><port#>", max 8 bytes */
	if (ap->host->n_ports > 1) {
		pp->irq_desc = devm_kzalloc(dev, 8, GFP_KERNEL);
		if (!pp->irq_desc) {
			devm_kfree(dev, pp);
			return -ENOMEM;
		}
		snprintf(pp->irq_desc, 8,
			 "%s%d", dev_driver_string(dev), ap->port_no);
	}

	/* check FBS capability */
	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 cmd = readl(port_mmio + PORT_CMD);
		if (cmd & PORT_CMD_FBSCP)
			pp->fbs_supported = true;
		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
			/* quirk: trust the flag even if FBSCP is not set */
			dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
				 ap->port_no);
			pp->fbs_supported = true;
		} else
			dev_warn(dev, "port %d is not capable of FBS\n",
				 ap->port_no);
	}

	/* FBS needs one RX FIS area per possible PMP device (16) */
	if (pp->fbs_supported) {
		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ * 16;
	} else {
		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ;
	}

	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += rx_fis_sz;
	mem_dma += rx_fis_sz;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	/*
	 * Switch to per-port locking in case each port has its own MSI vector.
	 */
	if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
		spin_lock_init(&pp->lock);
		ap->lock = &pp->lock;
	}

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}
2421
/*
 * ahci_port_stop - libata ->port_stop hook
 * @ap: port being torn down
 *
 * De-initializes the port and clears its bit in the global interrupt
 * status register so a stale level interrupt cannot fire after the
 * host switches from MSI back to INTx.  DMA memory and the private
 * data are device-managed and freed automatically.
 */
static void ahci_port_stop(struct ata_port *ap)
{
	const char *emsg = NULL;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *host_mmio = hpriv->mmio;
	int rc;

	/* de-initialize port */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		ata_port_warn(ap, "%s (%d)\n", emsg, rc);

	/*
	 * Clear GHC.IS to prevent stuck INTx after disabling MSI and
	 * re-enabling INTx.
	 */
	writel(1 << ap->port_no, host_mmio + HOST_IRQ_STAT);

	ahci_rpm_put_port(ap);
}
2442
2443void ahci_print_info(struct ata_host *host, const char *scc_s)
2444{
2445	struct ahci_host_priv *hpriv = host->private_data;
2446	u32 vers, cap, cap2, impl, speed;
2447	const char *speed_s;
2448
2449	vers = hpriv->version;
2450	cap = hpriv->cap;
2451	cap2 = hpriv->cap2;
2452	impl = hpriv->port_map;
2453
2454	speed = (cap >> 20) & 0xf;
2455	if (speed == 1)
2456		speed_s = "1.5";
2457	else if (speed == 2)
2458		speed_s = "3";
2459	else if (speed == 3)
2460		speed_s = "6";
2461	else
2462		speed_s = "?";
2463
2464	dev_info(host->dev,
2465		"AHCI %02x%02x.%02x%02x "
2466		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2467		,
2468
2469		(vers >> 24) & 0xff,
2470		(vers >> 16) & 0xff,
2471		(vers >> 8) & 0xff,
2472		vers & 0xff,
2473
2474		((cap >> 8) & 0x1f) + 1,
2475		(cap & 0x1f) + 1,
2476		speed_s,
2477		impl,
2478		scc_s);
2479
2480	dev_info(host->dev,
2481		"flags: "
2482		"%s%s%s%s%s%s%s"
2483		"%s%s%s%s%s%s%s"
2484		"%s%s%s%s%s%s%s"
2485		"%s%s\n"
2486		,
2487
2488		cap & HOST_CAP_64 ? "64bit " : "",
2489		cap & HOST_CAP_NCQ ? "ncq " : "",
2490		cap & HOST_CAP_SNTF ? "sntf " : "",
2491		cap & HOST_CAP_MPS ? "ilck " : "",
2492		cap & HOST_CAP_SSS ? "stag " : "",
2493		cap & HOST_CAP_ALPM ? "pm " : "",
2494		cap & HOST_CAP_LED ? "led " : "",
2495		cap & HOST_CAP_CLO ? "clo " : "",
2496		cap & HOST_CAP_ONLY ? "only " : "",
2497		cap & HOST_CAP_PMP ? "pmp " : "",
2498		cap & HOST_CAP_FBS ? "fbs " : "",
2499		cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2500		cap & HOST_CAP_SSC ? "slum " : "",
2501		cap & HOST_CAP_PART ? "part " : "",
2502		cap & HOST_CAP_CCC ? "ccc " : "",
2503		cap & HOST_CAP_EMS ? "ems " : "",
2504		cap & HOST_CAP_SXS ? "sxs " : "",
2505		cap2 & HOST_CAP2_DESO ? "deso " : "",
2506		cap2 & HOST_CAP2_SADM ? "sadm " : "",
2507		cap2 & HOST_CAP2_SDS ? "sds " : "",
2508		cap2 & HOST_CAP2_APST ? "apst " : "",
2509		cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2510		cap2 & HOST_CAP2_BOH ? "boh " : ""
2511		);
2512}
2513EXPORT_SYMBOL_GPL(ahci_print_info);
2514
2515void ahci_set_em_messages(struct ahci_host_priv *hpriv,
2516			  struct ata_port_info *pi)
2517{
2518	u8 messages;
2519	void __iomem *mmio = hpriv->mmio;
2520	u32 em_loc = readl(mmio + HOST_EM_LOC);
2521	u32 em_ctl = readl(mmio + HOST_EM_CTL);
2522
2523	if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
2524		return;
2525
2526	messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2527
2528	if (messages) {
2529		/* store em_loc */
2530		hpriv->em_loc = ((em_loc >> 16) * 4);
2531		hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
2532		hpriv->em_msg_type = messages;
2533		pi->flags |= ATA_FLAG_EM;
2534		if (!(em_ctl & EM_CTL_ALHD))
2535			pi->flags |= ATA_FLAG_SW_ACTIVITY;
2536	}
2537}
2538EXPORT_SYMBOL_GPL(ahci_set_em_messages);
2539
/*
 * ahci_host_activate_multi_irqs - register per-port MSI handlers
 * @host: target ATA host
 * @sht: scsi_host_template used when registering the host
 *
 * Used when one MSI vector per port was allocated (AHCI 1.1 style):
 * starts the host, requests one hard IRQ handler per implemented
 * port, disables the vectors belonging to dummy ports, and finally
 * registers the host with SCSI.  Returns 0 or -errno.
 */
static int ahci_host_activate_multi_irqs(struct ata_host *host,
					 struct scsi_host_template *sht)
{
	struct ahci_host_priv *hpriv = host->private_data;
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;
	/*
	 * Requests IRQs according to AHCI-1.1 when multiple MSIs were
	 * allocated. That is one MSI per port, starting from @irq.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ahci_port_priv *pp = host->ports[i]->private_data;
		int irq = hpriv->get_irq_vector(host, i);

		/* Do not receive interrupts sent by dummy ports */
		if (!pp) {
			disable_irq(irq);
			continue;
		}

		rc = devm_request_irq(host->dev, irq, ahci_multi_irqs_intr_hard,
				0, pp->irq_desc, host->ports[i]);

		if (rc)
			return rc;
		ata_port_desc(host->ports[i], "irq %d", irq);
	}

	return ata_host_register(host, sht);
}
2573
2574/**
2575 *	ahci_host_activate - start AHCI host, request IRQs and register it
2576 *	@host: target ATA host
2577 *	@sht: scsi_host_template to use when registering the host
2578 *
2579 *	LOCKING:
2580 *	Inherited from calling layer (may sleep).
2581 *
2582 *	RETURNS:
2583 *	0 on success, -errno otherwise.
2584 */
int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
{
	struct ahci_host_priv *hpriv = host->private_data;
	int irq = hpriv->irq;
	int rc;

	if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
		/* a custom handler is never invoked with per-port vectors */
		if (hpriv->irq_handler &&
		    hpriv->irq_handler != ahci_single_level_irq_intr)
			dev_warn(host->dev,
			         "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
		/* per-port vectors require a lookup callback */
		if (!hpriv->get_irq_vector) {
			dev_err(host->dev,
				"AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
			return -EIO;
		}

		rc = ahci_host_activate_multi_irqs(host, sht);
	} else {
		/* single (possibly shared) interrupt line for all ports */
		rc = ata_host_activate(host, irq, hpriv->irq_handler,
				       IRQF_SHARED, sht);
	}


	return rc;
}
EXPORT_SYMBOL_GPL(ahci_host_activate);
2612
2613MODULE_AUTHOR("Jeff Garzik");
2614MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
2615MODULE_LICENSE("GPL");
2616