cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

libahci.c (69554B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 *  libahci.c - Common AHCI SATA low-level routines
      4 *
      5 *  Maintained by:  Tejun Heo <tj@kernel.org>
      6 *    		    Please ALWAYS copy linux-ide@vger.kernel.org
      7 *		    on emails.
      8 *
      9 *  Copyright 2004-2005 Red Hat, Inc.
     10 *
     11 * libata documentation is available via 'make {ps|pdf}docs',
     12 * as Documentation/driver-api/libata.rst
     13 *
     14 * AHCI hardware documentation:
     15 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
     16 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
     17 */
     18
     19#include <linux/kernel.h>
     20#include <linux/gfp.h>
     21#include <linux/module.h>
     22#include <linux/nospec.h>
     23#include <linux/blkdev.h>
     24#include <linux/delay.h>
     25#include <linux/interrupt.h>
     26#include <linux/dma-mapping.h>
     27#include <linux/device.h>
     28#include <scsi/scsi_host.h>
     29#include <scsi/scsi_cmnd.h>
     30#include <linux/libata.h>
     31#include <linux/pci.h>
     32#include "ahci.h"
     33#include "libata.h"
     34
     35static int ahci_skip_host_reset;
     36int ahci_ignore_sss;
     37EXPORT_SYMBOL_GPL(ahci_ignore_sss);
     38
     39module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
     40MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
     41
     42module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
     43MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
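/*
 * Both parameters above are read-only at runtime (mode 0444), so they are
 * normally set at boot or module load time.  A usage sketch, assuming this
 * file is built as the libahci module (the exact parameter prefix depends on
 * the kernel configuration):
 *
 *   libahci.skip_host_reset=1 libahci.ignore_sss=1    # kernel command line
 *   options libahci skip_host_reset=1 ignore_sss=1    # modprobe.d entry
 */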
     44
     45static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
     46			unsigned hints);
     47static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
     48static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
     49			      size_t size);
     50static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
     51					ssize_t size);
     52
     53
     54
     55static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
     56static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
     57static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
     58static int ahci_port_start(struct ata_port *ap);
     59static void ahci_port_stop(struct ata_port *ap);
     60static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
     61static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
     62static void ahci_freeze(struct ata_port *ap);
     63static void ahci_thaw(struct ata_port *ap);
     64static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep);
     65static void ahci_enable_fbs(struct ata_port *ap);
     66static void ahci_disable_fbs(struct ata_port *ap);
     67static void ahci_pmp_attach(struct ata_port *ap);
     68static void ahci_pmp_detach(struct ata_port *ap);
     69static int ahci_softreset(struct ata_link *link, unsigned int *class,
     70			  unsigned long deadline);
     71static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
     72			  unsigned long deadline);
     73static int ahci_hardreset(struct ata_link *link, unsigned int *class,
     74			  unsigned long deadline);
     75static void ahci_postreset(struct ata_link *link, unsigned int *class);
     76static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
     77static void ahci_dev_config(struct ata_device *dev);
     78#ifdef CONFIG_PM
     79static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
     80#endif
     81static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
     82static ssize_t ahci_activity_store(struct ata_device *dev,
     83				   enum sw_activity val);
     84static void ahci_init_sw_activity(struct ata_link *link);
     85
     86static ssize_t ahci_show_host_caps(struct device *dev,
     87				   struct device_attribute *attr, char *buf);
     88static ssize_t ahci_show_host_cap2(struct device *dev,
     89				   struct device_attribute *attr, char *buf);
     90static ssize_t ahci_show_host_version(struct device *dev,
     91				      struct device_attribute *attr, char *buf);
     92static ssize_t ahci_show_port_cmd(struct device *dev,
     93				  struct device_attribute *attr, char *buf);
     94static ssize_t ahci_read_em_buffer(struct device *dev,
     95				   struct device_attribute *attr, char *buf);
     96static ssize_t ahci_store_em_buffer(struct device *dev,
     97				    struct device_attribute *attr,
     98				    const char *buf, size_t size);
     99static ssize_t ahci_show_em_supported(struct device *dev,
    100				      struct device_attribute *attr, char *buf);
    101static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
    102
    103static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
    104static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
    105static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
    106static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
    107static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
    108		   ahci_read_em_buffer, ahci_store_em_buffer);
    109static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
    110
    111static struct attribute *ahci_shost_attrs[] = {
    112	&dev_attr_link_power_management_policy.attr,
    113	&dev_attr_em_message_type.attr,
    114	&dev_attr_em_message.attr,
    115	&dev_attr_ahci_host_caps.attr,
    116	&dev_attr_ahci_host_cap2.attr,
    117	&dev_attr_ahci_host_version.attr,
    118	&dev_attr_ahci_port_cmd.attr,
    119	&dev_attr_em_buffer.attr,
    120	&dev_attr_em_message_supported.attr,
    121	NULL
    122};
    123
    124static const struct attribute_group ahci_shost_attr_group = {
    125	.attrs = ahci_shost_attrs
    126};
    127
    128const struct attribute_group *ahci_shost_groups[] = {
    129	&ahci_shost_attr_group,
    130	NULL
    131};
    132EXPORT_SYMBOL_GPL(ahci_shost_groups);
    133
    134static struct attribute *ahci_sdev_attrs[] = {
    135	&dev_attr_sw_activity.attr,
    136	&dev_attr_unload_heads.attr,
    137	&dev_attr_ncq_prio_supported.attr,
    138	&dev_attr_ncq_prio_enable.attr,
    139	NULL
    140};
    141
    142static const struct attribute_group ahci_sdev_attr_group = {
    143	.attrs = ahci_sdev_attrs
    144};
    145
    146const struct attribute_group *ahci_sdev_groups[] = {
    147	&ahci_sdev_attr_group,
    148	NULL
    149};
    150EXPORT_SYMBOL_GPL(ahci_sdev_groups);
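/*
 * Note on where the attribute groups above surface: ahci_shost_groups is
 * attached to the SCSI host, so its attributes (ahci_host_caps, em_buffer,
 * ...) typically appear under /sys/class/scsi_host/hostN/, while
 * ahci_sdev_groups hangs off each SCSI device's sysfs node.  The exact paths
 * depend on the SCSI sysfs layout of the running kernel.
 */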
    151
    152struct ata_port_operations ahci_ops = {
    153	.inherits		= &sata_pmp_port_ops,
    154
    155	.qc_defer		= ahci_pmp_qc_defer,
    156	.qc_prep		= ahci_qc_prep,
    157	.qc_issue		= ahci_qc_issue,
    158	.qc_fill_rtf		= ahci_qc_fill_rtf,
    159
    160	.freeze			= ahci_freeze,
    161	.thaw			= ahci_thaw,
    162	.softreset		= ahci_softreset,
    163	.hardreset		= ahci_hardreset,
    164	.postreset		= ahci_postreset,
    165	.pmp_softreset		= ahci_softreset,
    166	.error_handler		= ahci_error_handler,
    167	.post_internal_cmd	= ahci_post_internal_cmd,
    168	.dev_config		= ahci_dev_config,
    169
    170	.scr_read		= ahci_scr_read,
    171	.scr_write		= ahci_scr_write,
    172	.pmp_attach		= ahci_pmp_attach,
    173	.pmp_detach		= ahci_pmp_detach,
    174
    175	.set_lpm		= ahci_set_lpm,
    176	.em_show		= ahci_led_show,
    177	.em_store		= ahci_led_store,
    178	.sw_activity_show	= ahci_activity_show,
    179	.sw_activity_store	= ahci_activity_store,
    180	.transmit_led_message	= ahci_transmit_led_message,
    181#ifdef CONFIG_PM
    182	.port_suspend		= ahci_port_suspend,
    183	.port_resume		= ahci_port_resume,
    184#endif
    185	.port_start		= ahci_port_start,
    186	.port_stop		= ahci_port_stop,
    187};
    188EXPORT_SYMBOL_GPL(ahci_ops);
    189
    190struct ata_port_operations ahci_pmp_retry_srst_ops = {
    191	.inherits		= &ahci_ops,
    192	.softreset		= ahci_pmp_retry_softreset,
    193};
    194EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
    195
    196static bool ahci_em_messages __read_mostly = true;
    197module_param(ahci_em_messages, bool, 0444);
    198/* add other LED protocol types when they become supported */
    199MODULE_PARM_DESC(ahci_em_messages,
    200	"AHCI Enclosure Management Message control (0 = off, 1 = on)");
    201
    202/* device sleep idle timeout in ms */
    203static int devslp_idle_timeout __read_mostly = 1000;
    204module_param(devslp_idle_timeout, int, 0644);
    205MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout");
    206
    207static void ahci_enable_ahci(void __iomem *mmio)
    208{
    209	int i;
    210	u32 tmp;
    211
    212	/* turn on AHCI_EN */
    213	tmp = readl(mmio + HOST_CTL);
    214	if (tmp & HOST_AHCI_EN)
    215		return;
    216
    217	/* Some controllers need AHCI_EN to be written multiple times.
    218	 * Try a few times before giving up.
    219	 */
    220	for (i = 0; i < 5; i++) {
    221		tmp |= HOST_AHCI_EN;
    222		writel(tmp, mmio + HOST_CTL);
    223		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
    224		if (tmp & HOST_AHCI_EN)
    225			return;
    226		msleep(10);
    227	}
    228
    229	WARN_ON(1);
    230}
    231
    232/**
    233 *	ahci_rpm_get_port - Make sure the port is powered on
    234 *	@ap: Port to power on
    235 *
    236 *	Whenever there is need to access the AHCI host registers outside of
    237 *	normal execution paths, call this function to make sure the host is
    238 *	actually powered on.
    239 */
    240static int ahci_rpm_get_port(struct ata_port *ap)
    241{
    242	return pm_runtime_get_sync(ap->dev);
    243}
    244
    245/**
    246 *	ahci_rpm_put_port - Undoes ahci_rpm_get_port()
    247 *	@ap: Port to power down
    248 *
    249 *	Undoes ahci_rpm_get_port() and possibly powers down the AHCI host
    250 *	if it has no more active users.
    251 */
    252static void ahci_rpm_put_port(struct ata_port *ap)
    253{
    254	pm_runtime_put(ap->dev);
    255}
    256
    257static ssize_t ahci_show_host_caps(struct device *dev,
    258				   struct device_attribute *attr, char *buf)
    259{
    260	struct Scsi_Host *shost = class_to_shost(dev);
    261	struct ata_port *ap = ata_shost_to_port(shost);
    262	struct ahci_host_priv *hpriv = ap->host->private_data;
    263
    264	return sprintf(buf, "%x\n", hpriv->cap);
    265}
    266
    267static ssize_t ahci_show_host_cap2(struct device *dev,
    268				   struct device_attribute *attr, char *buf)
    269{
    270	struct Scsi_Host *shost = class_to_shost(dev);
    271	struct ata_port *ap = ata_shost_to_port(shost);
    272	struct ahci_host_priv *hpriv = ap->host->private_data;
    273
    274	return sprintf(buf, "%x\n", hpriv->cap2);
    275}
    276
    277static ssize_t ahci_show_host_version(struct device *dev,
    278				   struct device_attribute *attr, char *buf)
    279{
    280	struct Scsi_Host *shost = class_to_shost(dev);
    281	struct ata_port *ap = ata_shost_to_port(shost);
    282	struct ahci_host_priv *hpriv = ap->host->private_data;
    283
    284	return sprintf(buf, "%x\n", hpriv->version);
    285}
    286
    287static ssize_t ahci_show_port_cmd(struct device *dev,
    288				  struct device_attribute *attr, char *buf)
    289{
    290	struct Scsi_Host *shost = class_to_shost(dev);
    291	struct ata_port *ap = ata_shost_to_port(shost);
    292	void __iomem *port_mmio = ahci_port_base(ap);
    293	ssize_t ret;
    294
    295	ahci_rpm_get_port(ap);
    296	ret = sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
    297	ahci_rpm_put_port(ap);
    298
    299	return ret;
    300}
    301
    302static ssize_t ahci_read_em_buffer(struct device *dev,
    303				   struct device_attribute *attr, char *buf)
    304{
    305	struct Scsi_Host *shost = class_to_shost(dev);
    306	struct ata_port *ap = ata_shost_to_port(shost);
    307	struct ahci_host_priv *hpriv = ap->host->private_data;
    308	void __iomem *mmio = hpriv->mmio;
    309	void __iomem *em_mmio = mmio + hpriv->em_loc;
    310	u32 em_ctl, msg;
    311	unsigned long flags;
    312	size_t count;
    313	int i;
    314
    315	ahci_rpm_get_port(ap);
    316	spin_lock_irqsave(ap->lock, flags);
    317
    318	em_ctl = readl(mmio + HOST_EM_CTL);
    319	if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
    320	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
    321		spin_unlock_irqrestore(ap->lock, flags);
    322		ahci_rpm_put_port(ap);
    323		return -EINVAL;
    324	}
    325
    326	if (!(em_ctl & EM_CTL_MR)) {
    327		spin_unlock_irqrestore(ap->lock, flags);
    328		ahci_rpm_put_port(ap);
    329		return -EAGAIN;
    330	}
    331
    332	if (!(em_ctl & EM_CTL_SMB))
    333		em_mmio += hpriv->em_buf_sz;
    334
    335	count = hpriv->em_buf_sz;
    336
    337	/* the count should not be larger than PAGE_SIZE */
    338	if (count > PAGE_SIZE) {
    339		if (printk_ratelimit())
    340			ata_port_warn(ap,
    341				      "EM read buffer size too large: "
    342				      "buffer size %u, page size %lu\n",
    343				      hpriv->em_buf_sz, PAGE_SIZE);
    344		count = PAGE_SIZE;
    345	}
    346
    347	for (i = 0; i < count; i += 4) {
    348		msg = readl(em_mmio + i);
    349		buf[i] = msg & 0xff;
    350		buf[i + 1] = (msg >> 8) & 0xff;
    351		buf[i + 2] = (msg >> 16) & 0xff;
    352		buf[i + 3] = (msg >> 24) & 0xff;
    353	}
    354
    355	spin_unlock_irqrestore(ap->lock, flags);
    356	ahci_rpm_put_port(ap);
    357
    358	return i;
    359}
    360
    361static ssize_t ahci_store_em_buffer(struct device *dev,
    362				    struct device_attribute *attr,
    363				    const char *buf, size_t size)
    364{
    365	struct Scsi_Host *shost = class_to_shost(dev);
    366	struct ata_port *ap = ata_shost_to_port(shost);
    367	struct ahci_host_priv *hpriv = ap->host->private_data;
    368	void __iomem *mmio = hpriv->mmio;
    369	void __iomem *em_mmio = mmio + hpriv->em_loc;
    370	const unsigned char *msg_buf = buf;
    371	u32 em_ctl, msg;
    372	unsigned long flags;
    373	int i;
    374
    375	/* check size validity */
    376	if (!(ap->flags & ATA_FLAG_EM) ||
    377	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
    378	    size % 4 || size > hpriv->em_buf_sz)
    379		return -EINVAL;
    380
    381	ahci_rpm_get_port(ap);
    382	spin_lock_irqsave(ap->lock, flags);
    383
    384	em_ctl = readl(mmio + HOST_EM_CTL);
    385	if (em_ctl & EM_CTL_TM) {
    386		spin_unlock_irqrestore(ap->lock, flags);
    387		ahci_rpm_put_port(ap);
    388		return -EBUSY;
    389	}
    390
    391	for (i = 0; i < size; i += 4) {
    392		msg = msg_buf[i] | msg_buf[i + 1] << 8 |
    393		      msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24;
    394		writel(msg, em_mmio + i);
    395	}
    396
    397	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
    398
    399	spin_unlock_irqrestore(ap->lock, flags);
    400	ahci_rpm_put_port(ap);
    401
    402	return size;
    403}
    404
    405static ssize_t ahci_show_em_supported(struct device *dev,
    406				      struct device_attribute *attr, char *buf)
    407{
    408	struct Scsi_Host *shost = class_to_shost(dev);
    409	struct ata_port *ap = ata_shost_to_port(shost);
    410	struct ahci_host_priv *hpriv = ap->host->private_data;
    411	void __iomem *mmio = hpriv->mmio;
    412	u32 em_ctl;
    413
    414	ahci_rpm_get_port(ap);
    415	em_ctl = readl(mmio + HOST_EM_CTL);
    416	ahci_rpm_put_port(ap);
    417
    418	return sprintf(buf, "%s%s%s%s\n",
    419		       em_ctl & EM_CTL_LED ? "led " : "",
    420		       em_ctl & EM_CTL_SAFTE ? "saf-te " : "",
    421		       em_ctl & EM_CTL_SES ? "ses-2 " : "",
    422		       em_ctl & EM_CTL_SGPIO ? "sgpio " : "");
    423}
    424
    425/**
    426 *	ahci_save_initial_config - Save and fixup initial config values
    427 *	@dev: target AHCI device
    428 *	@hpriv: host private area to store config values
    429 *
    430 *	Some registers containing configuration info might be setup by
    431 *	BIOS and might be cleared on reset.  This function saves the
    432 *	initial values of those registers into @hpriv such that they
    433 *	can be restored after controller reset.
    434 *
    435 *	If inconsistent, config values are fixed up by this function.
    436 *
     437 *	If not already set, this function also installs the default
     438 *	start_engine, stop_engine and irq_handler callbacks in @hpriv.
    439 *
    440 *	LOCKING:
    441 *	None.
    442 */
    443void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
    444{
    445	void __iomem *mmio = hpriv->mmio;
    446	u32 cap, cap2, vers, port_map;
    447	int i;
    448
    449	/* make sure AHCI mode is enabled before accessing CAP */
    450	ahci_enable_ahci(mmio);
    451
    452	/* Values prefixed with saved_ are written back to host after
    453	 * reset.  Values without are used for driver operation.
    454	 */
    455	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
    456	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
    457
    458	/* CAP2 register is only defined for AHCI 1.2 and later */
    459	vers = readl(mmio + HOST_VERSION);
    460	if ((vers >> 16) > 1 ||
    461	   ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
    462		hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
    463	else
    464		hpriv->saved_cap2 = cap2 = 0;
    465
    466	/* some chips have errata preventing 64bit use */
    467	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
    468		dev_info(dev, "controller can't do 64bit DMA, forcing 32bit\n");
    469		cap &= ~HOST_CAP_64;
    470	}
    471
    472	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
    473		dev_info(dev, "controller can't do NCQ, turning off CAP_NCQ\n");
    474		cap &= ~HOST_CAP_NCQ;
    475	}
    476
    477	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
    478		dev_info(dev, "controller can do NCQ, turning on CAP_NCQ\n");
    479		cap |= HOST_CAP_NCQ;
    480	}
    481
    482	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
    483		dev_info(dev, "controller can't do PMP, turning off CAP_PMP\n");
    484		cap &= ~HOST_CAP_PMP;
    485	}
    486
    487	if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
    488		dev_info(dev,
    489			 "controller can't do SNTF, turning off CAP_SNTF\n");
    490		cap &= ~HOST_CAP_SNTF;
    491	}
    492
    493	if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
    494		dev_info(dev,
    495			 "controller can't do DEVSLP, turning off\n");
    496		cap2 &= ~HOST_CAP2_SDS;
    497		cap2 &= ~HOST_CAP2_SADM;
    498	}
    499
    500	if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
    501		dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
    502		cap |= HOST_CAP_FBS;
    503	}
    504
    505	if ((cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_NO_FBS)) {
    506		dev_info(dev, "controller can't do FBS, turning off CAP_FBS\n");
    507		cap &= ~HOST_CAP_FBS;
    508	}
    509
    510	if (!(cap & HOST_CAP_ALPM) && (hpriv->flags & AHCI_HFLAG_YES_ALPM)) {
    511		dev_info(dev, "controller can do ALPM, turning on CAP_ALPM\n");
    512		cap |= HOST_CAP_ALPM;
    513	}
    514
    515	if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) {
    516		dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n");
    517		cap &= ~HOST_CAP_SXS;
    518	}
    519
    520	if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
    521		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
    522			 port_map, hpriv->force_port_map);
    523		port_map = hpriv->force_port_map;
    524		hpriv->saved_port_map = port_map;
    525	}
    526
    527	if (hpriv->mask_port_map) {
    528		dev_warn(dev, "masking port_map 0x%x -> 0x%x\n",
    529			port_map,
    530			port_map & hpriv->mask_port_map);
    531		port_map &= hpriv->mask_port_map;
    532	}
    533
    534	/* cross check port_map and cap.n_ports */
    535	if (port_map) {
    536		int map_ports = 0;
    537
    538		for (i = 0; i < AHCI_MAX_PORTS; i++)
    539			if (port_map & (1 << i))
    540				map_ports++;
    541
    542		/* If PI has more ports than n_ports, whine, clear
    543		 * port_map and let it be generated from n_ports.
    544		 */
    545		if (map_ports > ahci_nr_ports(cap)) {
    546			dev_warn(dev,
    547				 "implemented port map (0x%x) contains more ports than nr_ports (%u), using nr_ports\n",
    548				 port_map, ahci_nr_ports(cap));
    549			port_map = 0;
    550		}
    551	}
    552
    553	/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
    554	if (!port_map && vers < 0x10300) {
    555		port_map = (1 << ahci_nr_ports(cap)) - 1;
    556		dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
    557
    558		/* write the fixed up value to the PI register */
    559		hpriv->saved_port_map = port_map;
    560	}
    561
    562	/* record values to use during operation */
    563	hpriv->cap = cap;
    564	hpriv->cap2 = cap2;
    565	hpriv->version = readl(mmio + HOST_VERSION);
    566	hpriv->port_map = port_map;
    567
    568	if (!hpriv->start_engine)
    569		hpriv->start_engine = ahci_start_engine;
    570
    571	if (!hpriv->stop_engine)
    572		hpriv->stop_engine = ahci_stop_engine;
    573
    574	if (!hpriv->irq_handler)
    575		hpriv->irq_handler = ahci_single_level_irq_intr;
    576}
    577EXPORT_SYMBOL_GPL(ahci_save_initial_config);
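/*
 * A minimal, hypothetical sketch of how a glue driver consumes the helper
 * above: platform quirks are expressed as AHCI_HFLAG_* bits in hpriv->flags
 * before the call, and the fixed-up values are then read back from
 * hpriv->cap, hpriv->cap2 and hpriv->port_map.  The function below is
 * illustrative only and not part of this driver.
 */
static inline void example_apply_quirks_sketch(struct device *dev,
					       struct ahci_host_priv *hpriv)
{
	/* e.g. a controller with broken NCQ and 64-bit DMA errata */
	hpriv->flags |= AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_32BIT_ONLY;

	ahci_save_initial_config(dev, hpriv);

	/* hpriv->cap now has HOST_CAP_NCQ and HOST_CAP_64 cleared */
}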
    578
    579/**
    580 *	ahci_restore_initial_config - Restore initial config
    581 *	@host: target ATA host
    582 *
    583 *	Restore initial config stored by ahci_save_initial_config().
    584 *
    585 *	LOCKING:
    586 *	None.
    587 */
    588static void ahci_restore_initial_config(struct ata_host *host)
    589{
    590	struct ahci_host_priv *hpriv = host->private_data;
    591	void __iomem *mmio = hpriv->mmio;
    592
    593	writel(hpriv->saved_cap, mmio + HOST_CAP);
    594	if (hpriv->saved_cap2)
    595		writel(hpriv->saved_cap2, mmio + HOST_CAP2);
    596	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
    597	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
    598}
    599
    600static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
    601{
    602	static const int offset[] = {
    603		[SCR_STATUS]		= PORT_SCR_STAT,
    604		[SCR_CONTROL]		= PORT_SCR_CTL,
    605		[SCR_ERROR]		= PORT_SCR_ERR,
    606		[SCR_ACTIVE]		= PORT_SCR_ACT,
    607		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
    608	};
    609	struct ahci_host_priv *hpriv = ap->host->private_data;
    610
    611	if (sc_reg < ARRAY_SIZE(offset) &&
    612	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
    613		return offset[sc_reg];
    614	return 0;
    615}
    616
    617static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
    618{
    619	void __iomem *port_mmio = ahci_port_base(link->ap);
    620	int offset = ahci_scr_offset(link->ap, sc_reg);
    621
    622	if (offset) {
    623		*val = readl(port_mmio + offset);
    624		return 0;
    625	}
    626	return -EINVAL;
    627}
    628
    629static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
    630{
    631	void __iomem *port_mmio = ahci_port_base(link->ap);
    632	int offset = ahci_scr_offset(link->ap, sc_reg);
    633
    634	if (offset) {
    635		writel(val, port_mmio + offset);
    636		return 0;
    637	}
    638	return -EINVAL;
    639}
    640
    641void ahci_start_engine(struct ata_port *ap)
    642{
    643	void __iomem *port_mmio = ahci_port_base(ap);
    644	u32 tmp;
    645
    646	/* start DMA */
    647	tmp = readl(port_mmio + PORT_CMD);
    648	tmp |= PORT_CMD_START;
    649	writel(tmp, port_mmio + PORT_CMD);
    650	readl(port_mmio + PORT_CMD); /* flush */
    651}
    652EXPORT_SYMBOL_GPL(ahci_start_engine);
    653
    654int ahci_stop_engine(struct ata_port *ap)
    655{
    656	void __iomem *port_mmio = ahci_port_base(ap);
    657	struct ahci_host_priv *hpriv = ap->host->private_data;
    658	u32 tmp;
    659
    660	/*
    661	 * On some controllers, stopping a port's DMA engine while the port
    662	 * is in ALPM state (partial or slumber) results in failures on
    663	 * subsequent DMA engine starts.  For those controllers, put the
    664	 * port back in active state before stopping its DMA engine.
    665	 */
    666	if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
    667	    (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
    668	    ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
    669		dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
    670		return -EIO;
    671	}
    672
    673	tmp = readl(port_mmio + PORT_CMD);
    674
    675	/* check if the HBA is idle */
    676	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
    677		return 0;
    678
    679	/*
    680	 * Don't try to issue commands but return with ENODEV if the
    681	 * AHCI controller not available anymore (e.g. due to PCIe hot
     682	 * AHCI controller is not available anymore (e.g. due to PCIe hot
    683	 */
    684	if (tmp == 0xffffffff) {
    685		dev_err(ap->host->dev, "AHCI controller unavailable!\n");
    686		return -ENODEV;
    687	}
    688
    689	/* setting HBA to idle */
    690	tmp &= ~PORT_CMD_START;
    691	writel(tmp, port_mmio + PORT_CMD);
    692
    693	/* wait for engine to stop. This could be as long as 500 msec */
    694	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
    695				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
    696	if (tmp & PORT_CMD_LIST_ON)
    697		return -EIO;
    698
    699	return 0;
    700}
    701EXPORT_SYMBOL_GPL(ahci_stop_engine);
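/*
 * ahci_start_engine() and ahci_stop_engine() are normally used through the
 * hpriv->start_engine/stop_engine hooks as a bracket around work that needs
 * the port DMA engine idle (see ahci_kick_engine() and ahci_do_hardreset()
 * below).  A hypothetical sketch of the pattern, not part of this driver:
 */
static inline void example_engine_bracket_sketch(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;

	if (hpriv->stop_engine(ap))
		return;		/* engine did not stop; leave the port alone */

	/* ... reconfigure port registers here ... */

	hpriv->start_engine(ap);
}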
    702
    703void ahci_start_fis_rx(struct ata_port *ap)
    704{
    705	void __iomem *port_mmio = ahci_port_base(ap);
    706	struct ahci_host_priv *hpriv = ap->host->private_data;
    707	struct ahci_port_priv *pp = ap->private_data;
    708	u32 tmp;
    709
    710	/* set FIS registers */
    711	if (hpriv->cap & HOST_CAP_64)
    712		writel((pp->cmd_slot_dma >> 16) >> 16,
    713		       port_mmio + PORT_LST_ADDR_HI);
    714	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
    715
    716	if (hpriv->cap & HOST_CAP_64)
    717		writel((pp->rx_fis_dma >> 16) >> 16,
    718		       port_mmio + PORT_FIS_ADDR_HI);
    719	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
    720
    721	/* enable FIS reception */
    722	tmp = readl(port_mmio + PORT_CMD);
    723	tmp |= PORT_CMD_FIS_RX;
    724	writel(tmp, port_mmio + PORT_CMD);
    725
    726	/* flush */
    727	readl(port_mmio + PORT_CMD);
    728}
    729EXPORT_SYMBOL_GPL(ahci_start_fis_rx);
    730
    731static int ahci_stop_fis_rx(struct ata_port *ap)
    732{
    733	void __iomem *port_mmio = ahci_port_base(ap);
    734	u32 tmp;
    735
    736	/* disable FIS reception */
    737	tmp = readl(port_mmio + PORT_CMD);
    738	tmp &= ~PORT_CMD_FIS_RX;
    739	writel(tmp, port_mmio + PORT_CMD);
    740
    741	/* wait for completion, spec says 500ms, give it 1000 */
    742	tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
    743				PORT_CMD_FIS_ON, 10, 1000);
    744	if (tmp & PORT_CMD_FIS_ON)
    745		return -EBUSY;
    746
    747	return 0;
    748}
    749
    750static void ahci_power_up(struct ata_port *ap)
    751{
    752	struct ahci_host_priv *hpriv = ap->host->private_data;
    753	void __iomem *port_mmio = ahci_port_base(ap);
    754	u32 cmd;
    755
    756	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
    757
    758	/* spin up device */
    759	if (hpriv->cap & HOST_CAP_SSS) {
    760		cmd |= PORT_CMD_SPIN_UP;
    761		writel(cmd, port_mmio + PORT_CMD);
    762	}
    763
    764	/* wake up link */
    765	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
    766}
    767
    768static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
    769			unsigned int hints)
    770{
    771	struct ata_port *ap = link->ap;
    772	struct ahci_host_priv *hpriv = ap->host->private_data;
    773	struct ahci_port_priv *pp = ap->private_data;
    774	void __iomem *port_mmio = ahci_port_base(ap);
    775
    776	if (policy != ATA_LPM_MAX_POWER) {
    777		/* wakeup flag only applies to the max power policy */
    778		hints &= ~ATA_LPM_WAKE_ONLY;
    779
    780		/*
    781		 * Disable interrupts on Phy Ready. This keeps us from
    782		 * getting woken up due to spurious phy ready
    783		 * interrupts.
    784		 */
    785		pp->intr_mask &= ~PORT_IRQ_PHYRDY;
    786		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
    787
    788		sata_link_scr_lpm(link, policy, false);
    789	}
    790
    791	if (hpriv->cap & HOST_CAP_ALPM) {
    792		u32 cmd = readl(port_mmio + PORT_CMD);
    793
    794		if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
    795			if (!(hints & ATA_LPM_WAKE_ONLY))
    796				cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
    797			cmd |= PORT_CMD_ICC_ACTIVE;
    798
    799			writel(cmd, port_mmio + PORT_CMD);
    800			readl(port_mmio + PORT_CMD);
    801
    802			/* wait 10ms to be sure we've come out of LPM state */
    803			ata_msleep(ap, 10);
    804
    805			if (hints & ATA_LPM_WAKE_ONLY)
    806				return 0;
    807		} else {
    808			cmd |= PORT_CMD_ALPE;
    809			if (policy == ATA_LPM_MIN_POWER)
    810				cmd |= PORT_CMD_ASP;
    811			else if (policy == ATA_LPM_MIN_POWER_WITH_PARTIAL)
    812				cmd &= ~PORT_CMD_ASP;
    813
    814			/* write out new cmd value */
    815			writel(cmd, port_mmio + PORT_CMD);
    816		}
    817	}
    818
    819	/* set aggressive device sleep */
    820	if ((hpriv->cap2 & HOST_CAP2_SDS) &&
    821	    (hpriv->cap2 & HOST_CAP2_SADM) &&
    822	    (link->device->flags & ATA_DFLAG_DEVSLP)) {
    823		if (policy == ATA_LPM_MIN_POWER ||
    824		    policy == ATA_LPM_MIN_POWER_WITH_PARTIAL)
    825			ahci_set_aggressive_devslp(ap, true);
    826		else
    827			ahci_set_aggressive_devslp(ap, false);
    828	}
    829
    830	if (policy == ATA_LPM_MAX_POWER) {
    831		sata_link_scr_lpm(link, policy, false);
    832
    833		/* turn PHYRDY IRQ back on */
    834		pp->intr_mask |= PORT_IRQ_PHYRDY;
    835		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
    836	}
    837
    838	return 0;
    839}
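/*
 * Summary of the policy handling above: for any policy other than
 * ATA_LPM_MAX_POWER the PHY-ready interrupt is masked and the SATA link LPM
 * bits are programmed first; if the HBA supports ALPM, PxCMD.ALPE/ASP select
 * between slumber (MIN_POWER) and partial (MIN_POWER_WITH_PARTIAL).  DEVSLP
 * is additionally enabled for the MIN_POWER policies when both the HBA
 * (CAP2.SDS/SADM) and the device advertise it.  Returning to
 * ATA_LPM_MAX_POWER reverses the sequence and unmasks the PHY-ready
 * interrupt.
 */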
    840
    841#ifdef CONFIG_PM
    842static void ahci_power_down(struct ata_port *ap)
    843{
    844	struct ahci_host_priv *hpriv = ap->host->private_data;
    845	void __iomem *port_mmio = ahci_port_base(ap);
    846	u32 cmd, scontrol;
    847
    848	if (!(hpriv->cap & HOST_CAP_SSS))
    849		return;
    850
    851	/* put device into listen mode, first set PxSCTL.DET to 0 */
    852	scontrol = readl(port_mmio + PORT_SCR_CTL);
    853	scontrol &= ~0xf;
    854	writel(scontrol, port_mmio + PORT_SCR_CTL);
    855
    856	/* then set PxCMD.SUD to 0 */
    857	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
    858	cmd &= ~PORT_CMD_SPIN_UP;
    859	writel(cmd, port_mmio + PORT_CMD);
    860}
    861#endif
    862
    863static void ahci_start_port(struct ata_port *ap)
    864{
    865	struct ahci_host_priv *hpriv = ap->host->private_data;
    866	struct ahci_port_priv *pp = ap->private_data;
    867	struct ata_link *link;
    868	struct ahci_em_priv *emp;
    869	ssize_t rc;
    870	int i;
    871
    872	/* enable FIS reception */
    873	ahci_start_fis_rx(ap);
    874
    875	/* enable DMA */
    876	if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE))
    877		hpriv->start_engine(ap);
    878
    879	/* turn on LEDs */
    880	if (ap->flags & ATA_FLAG_EM) {
    881		ata_for_each_link(link, ap, EDGE) {
    882			emp = &pp->em_priv[link->pmp];
    883
     884				/* EM Transmit bit may be busy during init */
    885			for (i = 0; i < EM_MAX_RETRY; i++) {
    886				rc = ap->ops->transmit_led_message(ap,
    887							       emp->led_state,
    888							       4);
    889				/*
    890				 * If busy, give a breather but do not
    891				 * release EH ownership by using msleep()
    892				 * instead of ata_msleep().  EM Transmit
    893				 * bit is busy for the whole host and
    894				 * releasing ownership will cause other
    895				 * ports to fail the same way.
    896				 */
    897				if (rc == -EBUSY)
    898					msleep(1);
    899				else
    900					break;
    901			}
    902		}
    903	}
    904
    905	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
    906		ata_for_each_link(link, ap, EDGE)
    907			ahci_init_sw_activity(link);
    908
    909}
    910
    911static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
    912{
    913	int rc;
    914	struct ahci_host_priv *hpriv = ap->host->private_data;
    915
    916	/* disable DMA */
    917	rc = hpriv->stop_engine(ap);
    918	if (rc) {
    919		*emsg = "failed to stop engine";
    920		return rc;
    921	}
    922
    923	/* disable FIS reception */
    924	rc = ahci_stop_fis_rx(ap);
    925	if (rc) {
    926		*emsg = "failed stop FIS RX";
    927		return rc;
    928	}
    929
    930	return 0;
    931}
    932
    933int ahci_reset_controller(struct ata_host *host)
    934{
    935	struct ahci_host_priv *hpriv = host->private_data;
    936	void __iomem *mmio = hpriv->mmio;
    937	u32 tmp;
    938
    939	/* we must be in AHCI mode, before using anything
    940	 * AHCI-specific, such as HOST_RESET.
    941	 */
    942	ahci_enable_ahci(mmio);
    943
    944	/* global controller reset */
    945	if (!ahci_skip_host_reset) {
    946		tmp = readl(mmio + HOST_CTL);
    947		if ((tmp & HOST_RESET) == 0) {
    948			writel(tmp | HOST_RESET, mmio + HOST_CTL);
    949			readl(mmio + HOST_CTL); /* flush */
    950		}
    951
    952		/*
    953		 * to perform host reset, OS should set HOST_RESET
    954		 * and poll until this bit is read to be "0".
    955		 * reset must complete within 1 second, or
    956		 * the hardware should be considered fried.
    957		 */
    958		tmp = ata_wait_register(NULL, mmio + HOST_CTL, HOST_RESET,
    959					HOST_RESET, 10, 1000);
    960
    961		if (tmp & HOST_RESET) {
    962			dev_err(host->dev, "controller reset failed (0x%x)\n",
    963				tmp);
    964			return -EIO;
    965		}
    966
    967		/* turn on AHCI mode */
    968		ahci_enable_ahci(mmio);
    969
    970		/* Some registers might be cleared on reset.  Restore
    971		 * initial values.
    972		 */
    973		if (!(hpriv->flags & AHCI_HFLAG_NO_WRITE_TO_RO))
    974			ahci_restore_initial_config(host);
    975	} else
    976		dev_info(host->dev, "skipping global host reset\n");
    977
    978	return 0;
    979}
    980EXPORT_SYMBOL_GPL(ahci_reset_controller);
    981
    982static void ahci_sw_activity(struct ata_link *link)
    983{
    984	struct ata_port *ap = link->ap;
    985	struct ahci_port_priv *pp = ap->private_data;
    986	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
    987
    988	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
    989		return;
    990
    991	emp->activity++;
    992	if (!timer_pending(&emp->timer))
    993		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
    994}
    995
    996static void ahci_sw_activity_blink(struct timer_list *t)
    997{
    998	struct ahci_em_priv *emp = from_timer(emp, t, timer);
    999	struct ata_link *link = emp->link;
   1000	struct ata_port *ap = link->ap;
   1001
   1002	unsigned long led_message = emp->led_state;
   1003	u32 activity_led_state;
   1004	unsigned long flags;
   1005
   1006	led_message &= EM_MSG_LED_VALUE;
   1007	led_message |= ap->port_no | (link->pmp << 8);
   1008
   1009	/* check to see if we've had activity.  If so,
   1010	 * toggle state of LED and reset timer.  If not,
   1011	 * turn LED to desired idle state.
   1012	 */
   1013	spin_lock_irqsave(ap->lock, flags);
   1014	if (emp->saved_activity != emp->activity) {
   1015		emp->saved_activity = emp->activity;
   1016		/* get the current LED state */
   1017		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
   1018
   1019		if (activity_led_state)
   1020			activity_led_state = 0;
   1021		else
   1022			activity_led_state = 1;
   1023
   1024		/* clear old state */
   1025		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
   1026
   1027		/* toggle state */
   1028		led_message |= (activity_led_state << 16);
   1029		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
   1030	} else {
   1031		/* switch to idle */
   1032		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
   1033		if (emp->blink_policy == BLINK_OFF)
   1034			led_message |= (1 << 16);
   1035	}
   1036	spin_unlock_irqrestore(ap->lock, flags);
   1037	ap->ops->transmit_led_message(ap, led_message, 4);
   1038}
   1039
   1040static void ahci_init_sw_activity(struct ata_link *link)
   1041{
   1042	struct ata_port *ap = link->ap;
   1043	struct ahci_port_priv *pp = ap->private_data;
   1044	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
   1045
   1046	/* init activity stats, setup timer */
   1047	emp->saved_activity = emp->activity = 0;
   1048	emp->link = link;
   1049	timer_setup(&emp->timer, ahci_sw_activity_blink, 0);
   1050
   1051	/* check our blink policy and set flag for link if it's enabled */
   1052	if (emp->blink_policy)
   1053		link->flags |= ATA_LFLAG_SW_ACTIVITY;
   1054}
   1055
   1056int ahci_reset_em(struct ata_host *host)
   1057{
   1058	struct ahci_host_priv *hpriv = host->private_data;
   1059	void __iomem *mmio = hpriv->mmio;
   1060	u32 em_ctl;
   1061
   1062	em_ctl = readl(mmio + HOST_EM_CTL);
   1063	if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
   1064		return -EINVAL;
   1065
   1066	writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
   1067	return 0;
   1068}
   1069EXPORT_SYMBOL_GPL(ahci_reset_em);
   1070
   1071static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
   1072					ssize_t size)
   1073{
   1074	struct ahci_host_priv *hpriv = ap->host->private_data;
   1075	struct ahci_port_priv *pp = ap->private_data;
   1076	void __iomem *mmio = hpriv->mmio;
   1077	u32 em_ctl;
   1078	u32 message[] = {0, 0};
   1079	unsigned long flags;
   1080	int pmp;
   1081	struct ahci_em_priv *emp;
   1082
   1083	/* get the slot number from the message */
   1084	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
   1085	if (pmp < EM_MAX_SLOTS)
   1086		emp = &pp->em_priv[pmp];
   1087	else
   1088		return -EINVAL;
   1089
   1090	ahci_rpm_get_port(ap);
   1091	spin_lock_irqsave(ap->lock, flags);
   1092
   1093	/*
    1094	 * If we are still busy transmitting a previous message,
    1095	 * do not allow another transmission to start.
   1096	 */
   1097	em_ctl = readl(mmio + HOST_EM_CTL);
   1098	if (em_ctl & EM_CTL_TM) {
   1099		spin_unlock_irqrestore(ap->lock, flags);
   1100		ahci_rpm_put_port(ap);
   1101		return -EBUSY;
   1102	}
   1103
   1104	if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
   1105		/*
   1106		 * create message header - this is all zero except for
   1107		 * the message size, which is 4 bytes.
   1108		 */
   1109		message[0] |= (4 << 8);
   1110
   1111		/* ignore 0:4 of byte zero, fill in port info yourself */
   1112		message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
   1113
   1114		/* write message to EM_LOC */
   1115		writel(message[0], mmio + hpriv->em_loc);
   1116		writel(message[1], mmio + hpriv->em_loc+4);
   1117
   1118		/*
   1119		 * tell hardware to transmit the message
   1120		 */
   1121		writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
   1122	}
   1123
   1124	/* save off new led state for port/slot */
   1125	emp->led_state = state;
   1126
   1127	spin_unlock_irqrestore(ap->lock, flags);
   1128	ahci_rpm_put_port(ap);
   1129
   1130	return size;
   1131}
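/*
 * Layout of the 32-bit LED message state used above and parsed again by
 * ahci_led_store() below (the exact EM_MSG_LED_* masks live in ahci.h): the
 * low bits carry the HBA port number, the byte at bits 15:8 carries the PMP
 * slot, and the LED value bits start at bit 16 (bit 16 is the activity/on
 * bit toggled by ahci_sw_activity_blink()).
 */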
   1132
   1133static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
   1134{
   1135	struct ahci_port_priv *pp = ap->private_data;
   1136	struct ata_link *link;
   1137	struct ahci_em_priv *emp;
   1138	int rc = 0;
   1139
   1140	ata_for_each_link(link, ap, EDGE) {
   1141		emp = &pp->em_priv[link->pmp];
   1142		rc += sprintf(buf, "%lx\n", emp->led_state);
   1143	}
   1144	return rc;
   1145}
   1146
   1147static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
   1148				size_t size)
   1149{
   1150	unsigned int state;
   1151	int pmp;
   1152	struct ahci_port_priv *pp = ap->private_data;
   1153	struct ahci_em_priv *emp;
   1154
   1155	if (kstrtouint(buf, 0, &state) < 0)
   1156		return -EINVAL;
   1157
   1158	/* get the slot number from the message */
   1159	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
   1160	if (pmp < EM_MAX_SLOTS) {
   1161		pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
   1162		emp = &pp->em_priv[pmp];
   1163	} else {
   1164		return -EINVAL;
   1165	}
   1166
   1167	/* mask off the activity bits if we are in sw_activity
    1168	 * mode; the user should turn off sw_activity before setting the
    1169	 * activity LED through em_message.
   1170	 */
   1171	if (emp->blink_policy)
   1172		state &= ~EM_MSG_LED_VALUE_ACTIVITY;
   1173
   1174	return ap->ops->transmit_led_message(ap, state, size);
   1175}
   1176
   1177static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
   1178{
   1179	struct ata_link *link = dev->link;
   1180	struct ata_port *ap = link->ap;
   1181	struct ahci_port_priv *pp = ap->private_data;
   1182	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
   1183	u32 port_led_state = emp->led_state;
   1184
   1185	/* save the desired Activity LED behavior */
   1186	if (val == OFF) {
   1187		/* clear LFLAG */
   1188		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
   1189
   1190		/* set the LED to OFF */
   1191		port_led_state &= EM_MSG_LED_VALUE_OFF;
   1192		port_led_state |= (ap->port_no | (link->pmp << 8));
   1193		ap->ops->transmit_led_message(ap, port_led_state, 4);
   1194	} else {
   1195		link->flags |= ATA_LFLAG_SW_ACTIVITY;
   1196		if (val == BLINK_OFF) {
   1197			/* set LED to ON for idle */
   1198			port_led_state &= EM_MSG_LED_VALUE_OFF;
   1199			port_led_state |= (ap->port_no | (link->pmp << 8));
   1200			port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
   1201			ap->ops->transmit_led_message(ap, port_led_state, 4);
   1202		}
   1203	}
   1204	emp->blink_policy = val;
   1205	return 0;
   1206}
   1207
   1208static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
   1209{
   1210	struct ata_link *link = dev->link;
   1211	struct ata_port *ap = link->ap;
   1212	struct ahci_port_priv *pp = ap->private_data;
   1213	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
   1214
   1215	/* display the saved value of activity behavior for this
   1216	 * disk.
   1217	 */
   1218	return sprintf(buf, "%d\n", emp->blink_policy);
   1219}
   1220
   1221static void ahci_port_init(struct device *dev, struct ata_port *ap,
   1222			   int port_no, void __iomem *mmio,
   1223			   void __iomem *port_mmio)
   1224{
   1225	struct ahci_host_priv *hpriv = ap->host->private_data;
   1226	const char *emsg = NULL;
   1227	int rc;
   1228	u32 tmp;
   1229
   1230	/* make sure port is not active */
   1231	rc = ahci_deinit_port(ap, &emsg);
   1232	if (rc)
   1233		dev_warn(dev, "%s (%d)\n", emsg, rc);
   1234
   1235	/* clear SError */
   1236	tmp = readl(port_mmio + PORT_SCR_ERR);
   1237	dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp);
   1238	writel(tmp, port_mmio + PORT_SCR_ERR);
   1239
   1240	/* clear port IRQ */
   1241	tmp = readl(port_mmio + PORT_IRQ_STAT);
   1242	dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp);
   1243	if (tmp)
   1244		writel(tmp, port_mmio + PORT_IRQ_STAT);
   1245
   1246	writel(1 << port_no, mmio + HOST_IRQ_STAT);
   1247
   1248	/* mark esata ports */
   1249	tmp = readl(port_mmio + PORT_CMD);
   1250	if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
   1251		ap->pflags |= ATA_PFLAG_EXTERNAL;
   1252}
   1253
   1254void ahci_init_controller(struct ata_host *host)
   1255{
   1256	struct ahci_host_priv *hpriv = host->private_data;
   1257	void __iomem *mmio = hpriv->mmio;
   1258	int i;
   1259	void __iomem *port_mmio;
   1260	u32 tmp;
   1261
   1262	for (i = 0; i < host->n_ports; i++) {
   1263		struct ata_port *ap = host->ports[i];
   1264
   1265		port_mmio = ahci_port_base(ap);
   1266		if (ata_port_is_dummy(ap))
   1267			continue;
   1268
   1269		ahci_port_init(host->dev, ap, i, mmio, port_mmio);
   1270	}
   1271
   1272	tmp = readl(mmio + HOST_CTL);
   1273	dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
   1274	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
   1275	tmp = readl(mmio + HOST_CTL);
   1276	dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
   1277}
   1278EXPORT_SYMBOL_GPL(ahci_init_controller);
   1279
   1280static void ahci_dev_config(struct ata_device *dev)
   1281{
   1282	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
   1283
   1284	if (hpriv->flags & AHCI_HFLAG_SECT255) {
   1285		dev->max_sectors = 255;
   1286		ata_dev_info(dev,
   1287			     "SB600 AHCI: limiting to 255 sectors per cmd\n");
   1288	}
   1289}
   1290
   1291unsigned int ahci_dev_classify(struct ata_port *ap)
   1292{
   1293	void __iomem *port_mmio = ahci_port_base(ap);
   1294	struct ata_taskfile tf;
   1295	u32 tmp;
   1296
   1297	tmp = readl(port_mmio + PORT_SIG);
   1298	tf.lbah		= (tmp >> 24)	& 0xff;
   1299	tf.lbam		= (tmp >> 16)	& 0xff;
   1300	tf.lbal		= (tmp >> 8)	& 0xff;
   1301	tf.nsect	= (tmp)		& 0xff;
   1302
   1303	return ata_port_classify(ap, &tf);
   1304}
   1305EXPORT_SYMBOL_GPL(ahci_dev_classify);
   1306
   1307void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
   1308			u32 opts)
   1309{
   1310	dma_addr_t cmd_tbl_dma;
   1311
   1312	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
   1313
   1314	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
   1315	pp->cmd_slot[tag].status = 0;
   1316	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
   1317	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
   1318}
   1319EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot);
   1320
   1321int ahci_kick_engine(struct ata_port *ap)
   1322{
   1323	void __iomem *port_mmio = ahci_port_base(ap);
   1324	struct ahci_host_priv *hpriv = ap->host->private_data;
   1325	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
   1326	u32 tmp;
   1327	int busy, rc;
   1328
   1329	/* stop engine */
   1330	rc = hpriv->stop_engine(ap);
   1331	if (rc)
   1332		goto out_restart;
   1333
   1334	/* need to do CLO?
   1335	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
   1336	 */
   1337	busy = status & (ATA_BUSY | ATA_DRQ);
   1338	if (!busy && !sata_pmp_attached(ap)) {
   1339		rc = 0;
   1340		goto out_restart;
   1341	}
   1342
   1343	if (!(hpriv->cap & HOST_CAP_CLO)) {
   1344		rc = -EOPNOTSUPP;
   1345		goto out_restart;
   1346	}
   1347
   1348	/* perform CLO */
   1349	tmp = readl(port_mmio + PORT_CMD);
   1350	tmp |= PORT_CMD_CLO;
   1351	writel(tmp, port_mmio + PORT_CMD);
   1352
   1353	rc = 0;
   1354	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
   1355				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
   1356	if (tmp & PORT_CMD_CLO)
   1357		rc = -EIO;
   1358
   1359	/* restart engine */
   1360 out_restart:
   1361	hpriv->start_engine(ap);
   1362	return rc;
   1363}
   1364EXPORT_SYMBOL_GPL(ahci_kick_engine);
   1365
   1366static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
   1367				struct ata_taskfile *tf, int is_cmd, u16 flags,
   1368				unsigned long timeout_msec)
   1369{
   1370	const u32 cmd_fis_len = 5; /* five dwords */
   1371	struct ahci_port_priv *pp = ap->private_data;
   1372	void __iomem *port_mmio = ahci_port_base(ap);
   1373	u8 *fis = pp->cmd_tbl;
   1374	u32 tmp;
   1375
   1376	/* prep the command */
   1377	ata_tf_to_fis(tf, pmp, is_cmd, fis);
   1378	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
   1379
   1380	/* set port value for softreset of Port Multiplier */
   1381	if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
   1382		tmp = readl(port_mmio + PORT_FBS);
   1383		tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
   1384		tmp |= pmp << PORT_FBS_DEV_OFFSET;
   1385		writel(tmp, port_mmio + PORT_FBS);
   1386		pp->fbs_last_dev = pmp;
   1387	}
   1388
   1389	/* issue & wait */
   1390	writel(1, port_mmio + PORT_CMD_ISSUE);
   1391
   1392	if (timeout_msec) {
   1393		tmp = ata_wait_register(ap, port_mmio + PORT_CMD_ISSUE,
   1394					0x1, 0x1, 1, timeout_msec);
   1395		if (tmp & 0x1) {
   1396			ahci_kick_engine(ap);
   1397			return -EBUSY;
   1398		}
   1399	} else
   1400		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
   1401
   1402	return 0;
   1403}
   1404
   1405int ahci_do_softreset(struct ata_link *link, unsigned int *class,
   1406		      int pmp, unsigned long deadline,
   1407		      int (*check_ready)(struct ata_link *link))
   1408{
   1409	struct ata_port *ap = link->ap;
   1410	struct ahci_host_priv *hpriv = ap->host->private_data;
   1411	struct ahci_port_priv *pp = ap->private_data;
   1412	const char *reason = NULL;
   1413	unsigned long now, msecs;
   1414	struct ata_taskfile tf;
   1415	bool fbs_disabled = false;
   1416	int rc;
   1417
   1418	/* prepare for SRST (AHCI-1.1 10.4.1) */
   1419	rc = ahci_kick_engine(ap);
   1420	if (rc && rc != -EOPNOTSUPP)
   1421		ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);
   1422
   1423	/*
    1424	 * According to AHCI-1.2 9.3.9: if FBS is enabled, software shall
    1425	 * clear PxFBS.EN to '0' prior to issuing a software reset to
    1426	 * devices attached to a port multiplier.
   1427	 */
   1428	if (!ata_is_host_link(link) && pp->fbs_enabled) {
   1429		ahci_disable_fbs(ap);
   1430		fbs_disabled = true;
   1431	}
   1432
   1433	ata_tf_init(link->device, &tf);
   1434
   1435	/* issue the first H2D Register FIS */
   1436	msecs = 0;
   1437	now = jiffies;
   1438	if (time_after(deadline, now))
   1439		msecs = jiffies_to_msecs(deadline - now);
   1440
   1441	tf.ctl |= ATA_SRST;
   1442	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
   1443				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
   1444		rc = -EIO;
   1445		reason = "1st FIS failed";
   1446		goto fail;
   1447	}
   1448
   1449	/* spec says at least 5us, but be generous and sleep for 1ms */
   1450	ata_msleep(ap, 1);
   1451
   1452	/* issue the second H2D Register FIS */
   1453	tf.ctl &= ~ATA_SRST;
   1454	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
   1455
   1456	/* wait for link to become ready */
   1457	rc = ata_wait_after_reset(link, deadline, check_ready);
   1458	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
   1459		/*
   1460		 * Workaround for cases where link online status can't
   1461		 * be trusted.  Treat device readiness timeout as link
   1462		 * offline.
   1463		 */
   1464		ata_link_info(link, "device not ready, treating as offline\n");
   1465		*class = ATA_DEV_NONE;
   1466	} else if (rc) {
   1467		/* link occupied, -ENODEV too is an error */
   1468		reason = "device not ready";
   1469		goto fail;
   1470	} else
   1471		*class = ahci_dev_classify(ap);
   1472
   1473	/* re-enable FBS if disabled before */
   1474	if (fbs_disabled)
   1475		ahci_enable_fbs(ap);
   1476
   1477	return 0;
   1478
   1479 fail:
   1480	ata_link_err(link, "softreset failed (%s)\n", reason);
   1481	return rc;
   1482}
   1483
   1484int ahci_check_ready(struct ata_link *link)
   1485{
   1486	void __iomem *port_mmio = ahci_port_base(link->ap);
   1487	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
   1488
   1489	return ata_check_ready(status);
   1490}
   1491EXPORT_SYMBOL_GPL(ahci_check_ready);
   1492
   1493static int ahci_softreset(struct ata_link *link, unsigned int *class,
   1494			  unsigned long deadline)
   1495{
   1496	int pmp = sata_srst_pmp(link);
   1497
   1498	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
   1499}
   1500EXPORT_SYMBOL_GPL(ahci_do_softreset);
   1501
   1502static int ahci_bad_pmp_check_ready(struct ata_link *link)
   1503{
   1504	void __iomem *port_mmio = ahci_port_base(link->ap);
   1505	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
   1506	u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
   1507
   1508	/*
    1509	 * There is no need to check TFDATA if BAD PMP is found (due to a HW
    1510	 * bug); bailing out early saves the timeout delay.
   1511	 */
   1512	if (irq_status & PORT_IRQ_BAD_PMP)
   1513		return -EIO;
   1514
   1515	return ata_check_ready(status);
   1516}
   1517
   1518static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
   1519				    unsigned long deadline)
   1520{
   1521	struct ata_port *ap = link->ap;
   1522	void __iomem *port_mmio = ahci_port_base(ap);
   1523	int pmp = sata_srst_pmp(link);
   1524	int rc;
   1525	u32 irq_sts;
   1526
   1527	rc = ahci_do_softreset(link, class, pmp, deadline,
   1528			       ahci_bad_pmp_check_ready);
   1529
   1530	/*
   1531	 * Soft reset fails with IPMS set when PMP is enabled but
    1532	 * SATA HDD/ODD is connected to the SATA port.  In that case,
    1533	 * retry the soft reset against PMP port 0.
   1534	 */
   1535	if (rc == -EIO) {
   1536		irq_sts = readl(port_mmio + PORT_IRQ_STAT);
   1537		if (irq_sts & PORT_IRQ_BAD_PMP) {
   1538			ata_link_warn(link,
   1539					"applying PMP SRST workaround "
   1540					"and retrying\n");
   1541			rc = ahci_do_softreset(link, class, 0, deadline,
   1542					       ahci_check_ready);
   1543		}
   1544	}
   1545
   1546	return rc;
   1547}
   1548
   1549int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
   1550		      unsigned long deadline, bool *online)
   1551{
   1552	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
   1553	struct ata_port *ap = link->ap;
   1554	struct ahci_port_priv *pp = ap->private_data;
   1555	struct ahci_host_priv *hpriv = ap->host->private_data;
   1556	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
   1557	struct ata_taskfile tf;
   1558	int rc;
   1559
   1560	hpriv->stop_engine(ap);
   1561
   1562	/* clear D2H reception area to properly wait for D2H FIS */
   1563	ata_tf_init(link->device, &tf);
   1564	tf.status = ATA_BUSY;
   1565	ata_tf_to_fis(&tf, 0, 0, d2h_fis);
   1566
   1567	rc = sata_link_hardreset(link, timing, deadline, online,
   1568				 ahci_check_ready);
   1569
   1570	hpriv->start_engine(ap);
   1571
   1572	if (*online)
   1573		*class = ahci_dev_classify(ap);
   1574
   1575	return rc;
   1576}
   1577EXPORT_SYMBOL_GPL(ahci_do_hardreset);
   1578
   1579static int ahci_hardreset(struct ata_link *link, unsigned int *class,
   1580			  unsigned long deadline)
   1581{
   1582	bool online;
   1583
   1584	return ahci_do_hardreset(link, class, deadline, &online);
   1585}
   1586
   1587static void ahci_postreset(struct ata_link *link, unsigned int *class)
   1588{
   1589	struct ata_port *ap = link->ap;
   1590	void __iomem *port_mmio = ahci_port_base(ap);
   1591	u32 new_tmp, tmp;
   1592
   1593	ata_std_postreset(link, class);
   1594
   1595	/* Make sure port's ATAPI bit is set appropriately */
   1596	new_tmp = tmp = readl(port_mmio + PORT_CMD);
   1597	if (*class == ATA_DEV_ATAPI)
   1598		new_tmp |= PORT_CMD_ATAPI;
   1599	else
   1600		new_tmp &= ~PORT_CMD_ATAPI;
   1601	if (new_tmp != tmp) {
   1602		writel(new_tmp, port_mmio + PORT_CMD);
   1603		readl(port_mmio + PORT_CMD); /* flush */
   1604	}
   1605}
   1606
   1607static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
   1608{
   1609	struct scatterlist *sg;
   1610	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
   1611	unsigned int si;
   1612
   1613	/*
   1614	 * Next, the S/G list.
   1615	 */
   1616	for_each_sg(qc->sg, sg, qc->n_elem, si) {
   1617		dma_addr_t addr = sg_dma_address(sg);
   1618		u32 sg_len = sg_dma_len(sg);
   1619
   1620		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
   1621		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
   1622		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
   1623	}
   1624
   1625	return si;
   1626}
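/*
 * Each ahci_sg entry built above is one AHCI PRD: the DMA address is split
 * into low and high 32-bit words (the "(addr >> 16) >> 16" form avoids an
 * undefined 32-bit shift when dma_addr_t is 32 bits wide), and flags_size
 * holds the byte count minus one, matching the spec's zero-based length
 * encoding.
 */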
   1627
   1628static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
   1629{
   1630	struct ata_port *ap = qc->ap;
   1631	struct ahci_port_priv *pp = ap->private_data;
   1632
   1633	if (!sata_pmp_attached(ap) || pp->fbs_enabled)
   1634		return ata_std_qc_defer(qc);
   1635	else
   1636		return sata_pmp_qc_defer_cmd_switch(qc);
   1637}
   1638
   1639static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc)
   1640{
   1641	struct ata_port *ap = qc->ap;
   1642	struct ahci_port_priv *pp = ap->private_data;
   1643	int is_atapi = ata_is_atapi(qc->tf.protocol);
   1644	void *cmd_tbl;
   1645	u32 opts;
   1646	const u32 cmd_fis_len = 5; /* five dwords */
   1647	unsigned int n_elem;
   1648
   1649	/*
   1650	 * Fill in command table information.  First, the header,
   1651	 * a SATA Register - Host to Device command FIS.
   1652	 */
   1653	cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;
   1654
   1655	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
   1656	if (is_atapi) {
   1657		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
   1658		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
   1659	}
   1660
   1661	n_elem = 0;
   1662	if (qc->flags & ATA_QCFLAG_DMAMAP)
   1663		n_elem = ahci_fill_sg(qc, cmd_tbl);
   1664
   1665	/*
   1666	 * Fill in command slot information.
   1667	 */
   1668	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
   1669	if (qc->tf.flags & ATA_TFLAG_WRITE)
   1670		opts |= AHCI_CMD_WRITE;
   1671	if (is_atapi)
   1672		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
   1673
   1674	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
   1675
   1676	return AC_ERR_OK;
   1677}
   1678
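/*
 * Clear a single device error on an FBS-enabled port by setting
 * PORT_FBS_DEC and polling until the HBA clears the bit again.
 */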
   1679static void ahci_fbs_dec_intr(struct ata_port *ap)
   1680{
   1681	struct ahci_port_priv *pp = ap->private_data;
   1682	void __iomem *port_mmio = ahci_port_base(ap);
   1683	u32 fbs = readl(port_mmio + PORT_FBS);
   1684	int retries = 3;
   1685
   1686	BUG_ON(!pp->fbs_enabled);
   1687
    1688	/* The time to wait for DEC is not specified by the AHCI spec,
    1689	 * so add a retry loop for safety.
    1690	 */
   1691	writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
   1692	fbs = readl(port_mmio + PORT_FBS);
   1693	while ((fbs & PORT_FBS_DEC) && retries--) {
   1694		udelay(1);
   1695		fbs = readl(port_mmio + PORT_FBS);
   1696	}
   1697
   1698	if (fbs & PORT_FBS_DEC)
   1699		dev_err(ap->host->dev, "failed to clear device error\n");
   1700}
   1701
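/*
 * Handle error interrupts: find the link that raised the error (via the
 * FBS device-with-error field when FBS is active), record the interrupt
 * status and SError in the EH context, translate the individual error
 * bits into err_mask/action flags, and finally freeze the port or abort
 * the affected commands so that EH takes over.
 */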
   1702static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
   1703{
   1704	struct ahci_host_priv *hpriv = ap->host->private_data;
   1705	struct ahci_port_priv *pp = ap->private_data;
   1706	struct ata_eh_info *host_ehi = &ap->link.eh_info;
   1707	struct ata_link *link = NULL;
   1708	struct ata_queued_cmd *active_qc;
   1709	struct ata_eh_info *active_ehi;
   1710	bool fbs_need_dec = false;
   1711	u32 serror;
   1712
   1713	/* determine active link with error */
   1714	if (pp->fbs_enabled) {
   1715		void __iomem *port_mmio = ahci_port_base(ap);
   1716		u32 fbs = readl(port_mmio + PORT_FBS);
   1717		int pmp = fbs >> PORT_FBS_DWE_OFFSET;
   1718
   1719		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) {
   1720			link = &ap->pmp_link[pmp];
   1721			fbs_need_dec = true;
   1722		}
   1723
   1724	} else
   1725		ata_for_each_link(link, ap, EDGE)
   1726			if (ata_link_active(link))
   1727				break;
   1728
   1729	if (!link)
   1730		link = &ap->link;
   1731
   1732	active_qc = ata_qc_from_tag(ap, link->active_tag);
   1733	active_ehi = &link->eh_info;
   1734
   1735	/* record irq stat */
   1736	ata_ehi_clear_desc(host_ehi);
   1737	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
   1738
   1739	/* AHCI needs SError cleared; otherwise, it might lock up */
   1740	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
   1741	ahci_scr_write(&ap->link, SCR_ERROR, serror);
   1742	host_ehi->serror |= serror;
   1743
   1744	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
   1745	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
   1746		irq_stat &= ~PORT_IRQ_IF_ERR;
   1747
   1748	if (irq_stat & PORT_IRQ_TF_ERR) {
   1749		/* If qc is active, charge it; otherwise, the active
   1750		 * link.  There's no active qc on NCQ errors.  It will
   1751		 * be determined by EH by reading log page 10h.
   1752		 */
   1753		if (active_qc)
   1754			active_qc->err_mask |= AC_ERR_DEV;
   1755		else
   1756			active_ehi->err_mask |= AC_ERR_DEV;
   1757
   1758		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
   1759			host_ehi->serror &= ~SERR_INTERNAL;
   1760	}
   1761
   1762	if (irq_stat & PORT_IRQ_UNK_FIS) {
   1763		u32 *unk = pp->rx_fis + RX_FIS_UNK;
   1764
   1765		active_ehi->err_mask |= AC_ERR_HSM;
   1766		active_ehi->action |= ATA_EH_RESET;
   1767		ata_ehi_push_desc(active_ehi,
   1768				  "unknown FIS %08x %08x %08x %08x" ,
   1769				  unk[0], unk[1], unk[2], unk[3]);
   1770	}
   1771
   1772	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
   1773		active_ehi->err_mask |= AC_ERR_HSM;
   1774		active_ehi->action |= ATA_EH_RESET;
   1775		ata_ehi_push_desc(active_ehi, "incorrect PMP");
   1776	}
   1777
   1778	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
   1779		host_ehi->err_mask |= AC_ERR_HOST_BUS;
   1780		host_ehi->action |= ATA_EH_RESET;
   1781		ata_ehi_push_desc(host_ehi, "host bus error");
   1782	}
   1783
   1784	if (irq_stat & PORT_IRQ_IF_ERR) {
   1785		if (fbs_need_dec)
   1786			active_ehi->err_mask |= AC_ERR_DEV;
   1787		else {
   1788			host_ehi->err_mask |= AC_ERR_ATA_BUS;
   1789			host_ehi->action |= ATA_EH_RESET;
   1790		}
   1791
   1792		ata_ehi_push_desc(host_ehi, "interface fatal error");
   1793	}
   1794
   1795	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
   1796		ata_ehi_hotplugged(host_ehi);
   1797		ata_ehi_push_desc(host_ehi, "%s",
   1798			irq_stat & PORT_IRQ_CONNECT ?
   1799			"connection status changed" : "PHY RDY changed");
   1800	}
   1801
   1802	/* okay, let's hand over to EH */
   1803
   1804	if (irq_stat & PORT_IRQ_FREEZE)
   1805		ata_port_freeze(ap);
   1806	else if (fbs_need_dec) {
   1807		ata_link_abort(link);
   1808		ahci_fbs_dec_intr(ap);
   1809	} else
   1810		ata_port_abort(ap);
   1811}
   1812
   1813static void ahci_handle_port_interrupt(struct ata_port *ap,
   1814				       void __iomem *port_mmio, u32 status)
   1815{
   1816	struct ata_eh_info *ehi = &ap->link.eh_info;
   1817	struct ahci_port_priv *pp = ap->private_data;
   1818	struct ahci_host_priv *hpriv = ap->host->private_data;
   1819	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
   1820	u32 qc_active = 0;
   1821	int rc;
   1822
   1823	/* ignore BAD_PMP while resetting */
   1824	if (unlikely(resetting))
   1825		status &= ~PORT_IRQ_BAD_PMP;
   1826
   1827	if (sata_lpm_ignore_phy_events(&ap->link)) {
   1828		status &= ~PORT_IRQ_PHYRDY;
   1829		ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
   1830	}
   1831
   1832	if (unlikely(status & PORT_IRQ_ERROR)) {
   1833		ahci_error_intr(ap, status);
   1834		return;
   1835	}
   1836
   1837	if (status & PORT_IRQ_SDB_FIS) {
   1838		/* If SNotification is available, leave notification
   1839		 * handling to sata_async_notification().  If not,
   1840		 * emulate it by snooping SDB FIS RX area.
   1841		 *
   1842		 * Snooping FIS RX area is probably cheaper than
    1843	 * poking SNotification, but some controllers which
    1844	 * implement SNotification, ICH9 for example, don't
    1845	 * store the AN SDB FIS into the receive area.
   1846		 */
   1847		if (hpriv->cap & HOST_CAP_SNTF)
   1848			sata_async_notification(ap);
   1849		else {
   1850			/* If the 'N' bit in word 0 of the FIS is set,
    1851	 * we just received an asynchronous notification.
   1852			 * Tell libata about it.
   1853			 *
   1854			 * Lack of SNotification should not appear in
   1855			 * ahci 1.2, so the workaround is unnecessary
   1856			 * when FBS is enabled.
   1857			 */
   1858			if (pp->fbs_enabled)
   1859				WARN_ON_ONCE(1);
   1860			else {
   1861				const __le32 *f = pp->rx_fis + RX_FIS_SDB;
   1862				u32 f0 = le32_to_cpu(f[0]);
   1863				if (f0 & (1 << 15))
   1864					sata_async_notification(ap);
   1865			}
   1866		}
   1867	}
   1868
    1869	/* pp->active_link is not reliable once FBS is enabled; both
    1870	 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
    1871	 * NCQ and non-NCQ commands may be in flight at the same time.
    1872	 */
   1873	if (pp->fbs_enabled) {
   1874		if (ap->qc_active) {
   1875			qc_active = readl(port_mmio + PORT_SCR_ACT);
   1876			qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
   1877		}
   1878	} else {
   1879		/* pp->active_link is valid iff any command is in flight */
   1880		if (ap->qc_active && pp->active_link->sactive)
   1881			qc_active = readl(port_mmio + PORT_SCR_ACT);
   1882		else
   1883			qc_active = readl(port_mmio + PORT_CMD_ISSUE);
   1884	}
   1885
   1886
   1887	rc = ata_qc_complete_multiple(ap, qc_active);
   1888
   1889	/* while resetting, invalid completions are expected */
   1890	if (unlikely(rc < 0 && !resetting)) {
   1891		ehi->err_mask |= AC_ERR_HSM;
   1892		ehi->action |= ATA_EH_RESET;
   1893		ata_port_freeze(ap);
   1894	}
   1895}
   1896
   1897static void ahci_port_intr(struct ata_port *ap)
   1898{
   1899	void __iomem *port_mmio = ahci_port_base(ap);
   1900	u32 status;
   1901
   1902	status = readl(port_mmio + PORT_IRQ_STAT);
   1903	writel(status, port_mmio + PORT_IRQ_STAT);
   1904
   1905	ahci_handle_port_interrupt(ap, port_mmio, status);
   1906}
   1907
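/*
 * Per-port interrupt handler used when each port has its own interrupt
 * vector: read and clear PORT_IRQ_STAT, then process the port events
 * under the port lock.
 */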
   1908static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
   1909{
   1910	struct ata_port *ap = dev_instance;
   1911	void __iomem *port_mmio = ahci_port_base(ap);
   1912	u32 status;
   1913
   1914	status = readl(port_mmio + PORT_IRQ_STAT);
   1915	writel(status, port_mmio + PORT_IRQ_STAT);
   1916
   1917	spin_lock(ap->lock);
   1918	ahci_handle_port_interrupt(ap, port_mmio, status);
   1919	spin_unlock(ap->lock);
   1920
   1921	return IRQ_HANDLED;
   1922}
   1923
   1924u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
   1925{
   1926	unsigned int i, handled = 0;
   1927
   1928	for (i = 0; i < host->n_ports; i++) {
   1929		struct ata_port *ap;
   1930
   1931		if (!(irq_masked & (1 << i)))
   1932			continue;
   1933
   1934		ap = host->ports[i];
   1935		if (ap) {
   1936			ahci_port_intr(ap);
   1937		} else {
   1938			if (ata_ratelimit())
   1939				dev_warn(host->dev,
   1940					 "interrupt on disabled port %u\n", i);
   1941		}
   1942
   1943		handled = 1;
   1944	}
   1945
   1946	return handled;
   1947}
   1948EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
   1949
   1950static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
   1951{
   1952	struct ata_host *host = dev_instance;
   1953	struct ahci_host_priv *hpriv;
   1954	unsigned int rc = 0;
   1955	void __iomem *mmio;
   1956	u32 irq_stat, irq_masked;
   1957
   1958	hpriv = host->private_data;
   1959	mmio = hpriv->mmio;
   1960
   1961	/* sigh.  0xffffffff is a valid return from h/w */
   1962	irq_stat = readl(mmio + HOST_IRQ_STAT);
   1963	if (!irq_stat)
   1964		return IRQ_NONE;
   1965
   1966	irq_masked = irq_stat & hpriv->port_map;
   1967
   1968	spin_lock(&host->lock);
   1969
   1970	rc = ahci_handle_port_intr(host, irq_masked);
   1971
    1972	/* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
    1973	 * it should be cleared only after all the port events have been
    1974	 * cleared; otherwise, it will raise a spurious interrupt after
    1975	 * each valid one.  Please read section 10.6.2 of AHCI 1.1 for
    1976	 * more information.
    1977	 *
    1978	 * Also, use the unmasked value to clear the interrupt, as a
    1979	 * spurious pending event on a dummy port might cause a screaming IRQ.
    1980	 */
   1981	writel(irq_stat, mmio + HOST_IRQ_STAT);
   1982
   1983	spin_unlock(&host->lock);
   1984
   1985	return IRQ_RETVAL(rc);
   1986}
   1987
   1988unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
   1989{
   1990	struct ata_port *ap = qc->ap;
   1991	void __iomem *port_mmio = ahci_port_base(ap);
   1992	struct ahci_port_priv *pp = ap->private_data;
   1993
    1994	/* Keep track of the currently active link.  It will be used
    1995	 * in the completion path to determine whether the NCQ phase
    1996	 * is in progress.
    1997	 */
   1998	pp->active_link = qc->dev->link;
   1999
   2000	if (ata_is_ncq(qc->tf.protocol))
   2001		writel(1 << qc->hw_tag, port_mmio + PORT_SCR_ACT);
   2002
   2003	if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
   2004		u32 fbs = readl(port_mmio + PORT_FBS);
   2005		fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
   2006		fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
   2007		writel(fbs, port_mmio + PORT_FBS);
   2008		pp->fbs_last_dev = qc->dev->link->pmp;
   2009	}
   2010
   2011	writel(1 << qc->hw_tag, port_mmio + PORT_CMD_ISSUE);
   2012
   2013	ahci_sw_activity(qc->dev->link);
   2014
   2015	return 0;
   2016}
   2017EXPORT_SYMBOL_GPL(ahci_qc_issue);
   2018
   2019static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
   2020{
   2021	struct ahci_port_priv *pp = qc->ap->private_data;
   2022	u8 *rx_fis = pp->rx_fis;
   2023
   2024	if (pp->fbs_enabled)
   2025		rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
   2026
   2027	/*
   2028	 * After a successful execution of an ATA PIO data-in command,
   2029	 * the device doesn't send D2H Reg FIS to update the TF and
   2030	 * the host should take TF and E_Status from the preceding PIO
   2031	 * Setup FIS.
   2032	 */
   2033	if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
   2034	    !(qc->flags & ATA_QCFLAG_FAILED)) {
   2035		ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
   2036		qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
   2037	} else
   2038		ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
   2039
   2040	return true;
   2041}
   2042
   2043static void ahci_freeze(struct ata_port *ap)
   2044{
   2045	void __iomem *port_mmio = ahci_port_base(ap);
   2046
   2047	/* turn IRQ off */
   2048	writel(0, port_mmio + PORT_IRQ_MASK);
   2049}
   2050
   2051static void ahci_thaw(struct ata_port *ap)
   2052{
   2053	struct ahci_host_priv *hpriv = ap->host->private_data;
   2054	void __iomem *mmio = hpriv->mmio;
   2055	void __iomem *port_mmio = ahci_port_base(ap);
   2056	u32 tmp;
   2057	struct ahci_port_priv *pp = ap->private_data;
   2058
   2059	/* clear IRQ */
   2060	tmp = readl(port_mmio + PORT_IRQ_STAT);
   2061	writel(tmp, port_mmio + PORT_IRQ_STAT);
   2062	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
   2063
   2064	/* turn IRQ back on */
   2065	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
   2066}
   2067
   2068void ahci_error_handler(struct ata_port *ap)
   2069{
   2070	struct ahci_host_priv *hpriv = ap->host->private_data;
   2071
   2072	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
   2073		/* restart engine */
   2074		hpriv->stop_engine(ap);
   2075		hpriv->start_engine(ap);
   2076	}
   2077
   2078	sata_pmp_error_handler(ap);
   2079
   2080	if (!ata_dev_enabled(ap->link.device))
   2081		hpriv->stop_engine(ap);
   2082}
   2083EXPORT_SYMBOL_GPL(ahci_error_handler);
   2084
   2085static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
   2086{
   2087	struct ata_port *ap = qc->ap;
   2088
   2089	/* make DMA engine forget about the failed command */
   2090	if (qc->flags & ATA_QCFLAG_FAILED)
   2091		ahci_kick_engine(ap);
   2092}
   2093
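/*
 * Enable or disable aggressive device sleep on the port.  When enabling,
 * DITO is derived from devslp_idle_timeout, MDAT/DETO are taken from the
 * device's DevSleep timing log (with nominal fallbacks of 10 ms and
 * 20 ms), the PORT_DEVSLP register is reprogrammed with the DMA engine
 * stopped, and the SATA DEVSLP feature is then enabled on the drive.
 */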
   2094static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
   2095{
   2096	struct ahci_host_priv *hpriv = ap->host->private_data;
   2097	void __iomem *port_mmio = ahci_port_base(ap);
   2098	struct ata_device *dev = ap->link.device;
   2099	u32 devslp, dm, dito, mdat, deto, dito_conf;
   2100	int rc;
   2101	unsigned int err_mask;
   2102
   2103	devslp = readl(port_mmio + PORT_DEVSLP);
   2104	if (!(devslp & PORT_DEVSLP_DSP)) {
   2105		dev_info(ap->host->dev, "port does not support device sleep\n");
   2106		return;
   2107	}
   2108
   2109	/* disable device sleep */
   2110	if (!sleep) {
   2111		if (devslp & PORT_DEVSLP_ADSE) {
   2112			writel(devslp & ~PORT_DEVSLP_ADSE,
   2113			       port_mmio + PORT_DEVSLP);
   2114			err_mask = ata_dev_set_feature(dev,
   2115						       SETFEATURES_SATA_DISABLE,
   2116						       SATA_DEVSLP);
   2117			if (err_mask && err_mask != AC_ERR_DEV)
   2118				ata_dev_warn(dev, "failed to disable DEVSLP\n");
   2119		}
   2120		return;
   2121	}
   2122
   2123	dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
   2124	dito = devslp_idle_timeout / (dm + 1);
   2125	if (dito > 0x3ff)
   2126		dito = 0x3ff;
   2127
   2128	dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;
   2129
    2130	/* device sleep was already enabled with the same dito */
   2131	if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
   2132		return;
   2133
   2134	/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
   2135	rc = hpriv->stop_engine(ap);
   2136	if (rc)
   2137		return;
   2138
    2139	/* Use the nominal value of 10 ms if the MDAT read from the
    2140	 * device is zero; similarly, the nominal value of DETO is 20 ms.
    2141	 */
   2142	if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] &
   2143	    ATA_LOG_DEVSLP_VALID_MASK) {
   2144		mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &
   2145		       ATA_LOG_DEVSLP_MDAT_MASK;
   2146		if (!mdat)
   2147			mdat = 10;
   2148		deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];
   2149		if (!deto)
   2150			deto = 20;
   2151	} else {
   2152		mdat = 10;
   2153		deto = 20;
   2154	}
   2155
    2156	/* Clear the dito, mdat and deto fields */
   2157	devslp &= ~GENMASK_ULL(24, 2);
   2158	devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
   2159		   (mdat << PORT_DEVSLP_MDAT_OFFSET) |
   2160		   (deto << PORT_DEVSLP_DETO_OFFSET) |
   2161		   PORT_DEVSLP_ADSE);
   2162	writel(devslp, port_mmio + PORT_DEVSLP);
   2163
   2164	hpriv->start_engine(ap);
   2165
   2166	/* enable device sleep feature for the drive */
   2167	err_mask = ata_dev_set_feature(dev,
   2168				       SETFEATURES_SATA_ENABLE,
   2169				       SATA_DEVSLP);
   2170	if (err_mask && err_mask != AC_ERR_DEV)
   2171		ata_dev_warn(dev, "failed to enable DEVSLP\n");
   2172}
   2173
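/*
 * Enable FIS-based switching on the port if it is supported.
 * PORT_FBS_EN is toggled with the port DMA engine stopped; if the bit
 * is already set, only the software state is updated.
 */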
   2174static void ahci_enable_fbs(struct ata_port *ap)
   2175{
   2176	struct ahci_host_priv *hpriv = ap->host->private_data;
   2177	struct ahci_port_priv *pp = ap->private_data;
   2178	void __iomem *port_mmio = ahci_port_base(ap);
   2179	u32 fbs;
   2180	int rc;
   2181
   2182	if (!pp->fbs_supported)
   2183		return;
   2184
   2185	fbs = readl(port_mmio + PORT_FBS);
   2186	if (fbs & PORT_FBS_EN) {
   2187		pp->fbs_enabled = true;
   2188		pp->fbs_last_dev = -1; /* initialization */
   2189		return;
   2190	}
   2191
   2192	rc = hpriv->stop_engine(ap);
   2193	if (rc)
   2194		return;
   2195
   2196	writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
   2197	fbs = readl(port_mmio + PORT_FBS);
   2198	if (fbs & PORT_FBS_EN) {
   2199		dev_info(ap->host->dev, "FBS is enabled\n");
   2200		pp->fbs_enabled = true;
   2201		pp->fbs_last_dev = -1; /* initialization */
   2202	} else
   2203		dev_err(ap->host->dev, "Failed to enable FBS\n");
   2204
   2205	hpriv->start_engine(ap);
   2206}
   2207
   2208static void ahci_disable_fbs(struct ata_port *ap)
   2209{
   2210	struct ahci_host_priv *hpriv = ap->host->private_data;
   2211	struct ahci_port_priv *pp = ap->private_data;
   2212	void __iomem *port_mmio = ahci_port_base(ap);
   2213	u32 fbs;
   2214	int rc;
   2215
   2216	if (!pp->fbs_supported)
   2217		return;
   2218
   2219	fbs = readl(port_mmio + PORT_FBS);
   2220	if ((fbs & PORT_FBS_EN) == 0) {
   2221		pp->fbs_enabled = false;
   2222		return;
   2223	}
   2224
   2225	rc = hpriv->stop_engine(ap);
   2226	if (rc)
   2227		return;
   2228
   2229	writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
   2230	fbs = readl(port_mmio + PORT_FBS);
   2231	if (fbs & PORT_FBS_EN)
   2232		dev_err(ap->host->dev, "Failed to disable FBS\n");
   2233	else {
   2234		dev_info(ap->host->dev, "FBS is disabled\n");
   2235		pp->fbs_enabled = false;
   2236	}
   2237
   2238	hpriv->start_engine(ap);
   2239}
   2240
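/*
 * Called when a port multiplier is attached: set PORT_CMD_PMP, enable
 * FBS if supported and start watching for BAD_PMP interrupts.
 */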
   2241static void ahci_pmp_attach(struct ata_port *ap)
   2242{
   2243	void __iomem *port_mmio = ahci_port_base(ap);
   2244	struct ahci_port_priv *pp = ap->private_data;
   2245	u32 cmd;
   2246
   2247	cmd = readl(port_mmio + PORT_CMD);
   2248	cmd |= PORT_CMD_PMP;
   2249	writel(cmd, port_mmio + PORT_CMD);
   2250
   2251	ahci_enable_fbs(ap);
   2252
   2253	pp->intr_mask |= PORT_IRQ_BAD_PMP;
   2254
   2255	/*
    2256	 * We must not change the port interrupt mask register if the
    2257	 * port is marked frozen; the value in pp->intr_mask will be
    2258	 * restored later when the port is thawed.
   2259	 *
   2260	 * Note that during initialization, the port is marked as
   2261	 * frozen since the irq handler is not yet registered.
   2262	 */
   2263	if (!(ap->pflags & ATA_PFLAG_FROZEN))
   2264		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
   2265}
   2266
   2267static void ahci_pmp_detach(struct ata_port *ap)
   2268{
   2269	void __iomem *port_mmio = ahci_port_base(ap);
   2270	struct ahci_port_priv *pp = ap->private_data;
   2271	u32 cmd;
   2272
   2273	ahci_disable_fbs(ap);
   2274
   2275	cmd = readl(port_mmio + PORT_CMD);
   2276	cmd &= ~PORT_CMD_PMP;
   2277	writel(cmd, port_mmio + PORT_CMD);
   2278
   2279	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
   2280
   2281	/* see comment above in ahci_pmp_attach() */
   2282	if (!(ap->pflags & ATA_PFLAG_FROZEN))
   2283		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
   2284}
   2285
   2286int ahci_port_resume(struct ata_port *ap)
   2287{
   2288	ahci_rpm_get_port(ap);
   2289
   2290	ahci_power_up(ap);
   2291	ahci_start_port(ap);
   2292
   2293	if (sata_pmp_attached(ap))
   2294		ahci_pmp_attach(ap);
   2295	else
   2296		ahci_pmp_detach(ap);
   2297
   2298	return 0;
   2299}
   2300EXPORT_SYMBOL_GPL(ahci_port_resume);
   2301
   2302#ifdef CONFIG_PM
   2303static void ahci_handle_s2idle(struct ata_port *ap)
   2304{
   2305	void __iomem *port_mmio = ahci_port_base(ap);
   2306	u32 devslp;
   2307
   2308	if (pm_suspend_via_firmware())
   2309		return;
   2310	devslp = readl(port_mmio + PORT_DEVSLP);
   2311	if ((devslp & PORT_DEVSLP_ADSE))
   2312		ata_msleep(ap, devslp_idle_timeout);
   2313}
   2314
   2315static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
   2316{
   2317	const char *emsg = NULL;
   2318	int rc;
   2319
   2320	rc = ahci_deinit_port(ap, &emsg);
   2321	if (rc == 0)
   2322		ahci_power_down(ap);
   2323	else {
   2324		ata_port_err(ap, "%s (%d)\n", emsg, rc);
   2325		ata_port_freeze(ap);
   2326	}
   2327
   2328	if (acpi_storage_d3(ap->host->dev))
   2329		ahci_handle_s2idle(ap);
   2330
   2331	ahci_rpm_put_port(ap);
   2332	return rc;
   2333}
   2334#endif
   2335
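/*
 * Per-port initialization: allocate the port private data and the
 * coherent DMA area that holds the command list, the received-FIS area
 * and the per-slot command tables, set up the default interrupt mask
 * and bring the port up via ahci_port_resume().
 */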
   2336static int ahci_port_start(struct ata_port *ap)
   2337{
   2338	struct ahci_host_priv *hpriv = ap->host->private_data;
   2339	struct device *dev = ap->host->dev;
   2340	struct ahci_port_priv *pp;
   2341	void *mem;
   2342	dma_addr_t mem_dma;
   2343	size_t dma_sz, rx_fis_sz;
   2344
   2345	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
   2346	if (!pp)
   2347		return -ENOMEM;
   2348
   2349	if (ap->host->n_ports > 1) {
   2350		pp->irq_desc = devm_kzalloc(dev, 8, GFP_KERNEL);
   2351		if (!pp->irq_desc) {
   2352			devm_kfree(dev, pp);
   2353			return -ENOMEM;
   2354		}
   2355		snprintf(pp->irq_desc, 8,
   2356			 "%s%d", dev_driver_string(dev), ap->port_no);
   2357	}
   2358
   2359	/* check FBS capability */
   2360	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
   2361		void __iomem *port_mmio = ahci_port_base(ap);
   2362		u32 cmd = readl(port_mmio + PORT_CMD);
   2363		if (cmd & PORT_CMD_FBSCP)
   2364			pp->fbs_supported = true;
   2365		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
   2366			dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
   2367				 ap->port_no);
   2368			pp->fbs_supported = true;
   2369		} else
   2370			dev_warn(dev, "port %d is not capable of FBS\n",
   2371				 ap->port_no);
   2372	}
   2373
   2374	if (pp->fbs_supported) {
   2375		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
   2376		rx_fis_sz = AHCI_RX_FIS_SZ * 16;
   2377	} else {
   2378		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
   2379		rx_fis_sz = AHCI_RX_FIS_SZ;
   2380	}
   2381
   2382	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
   2383	if (!mem)
   2384		return -ENOMEM;
   2385
   2386	/*
    2387	 * First item in chunk of DMA memory: the 32-slot command list,
    2388	 * one 32-byte command header per slot
   2389	 */
   2390	pp->cmd_slot = mem;
   2391	pp->cmd_slot_dma = mem_dma;
   2392
   2393	mem += AHCI_CMD_SLOT_SZ;
   2394	mem_dma += AHCI_CMD_SLOT_SZ;
   2395
   2396	/*
   2397	 * Second item: Received-FIS area
   2398	 */
   2399	pp->rx_fis = mem;
   2400	pp->rx_fis_dma = mem_dma;
   2401
   2402	mem += rx_fis_sz;
   2403	mem_dma += rx_fis_sz;
   2404
   2405	/*
    2406	 * Third item: command table area, one table per command slot,
    2407	 * each holding the command FIS, ATAPI CDB and scatter-gather table
   2408	 */
   2409	pp->cmd_tbl = mem;
   2410	pp->cmd_tbl_dma = mem_dma;
   2411
   2412	/*
    2413	 * Save off the initial list of interrupts to be enabled.
    2414	 * This may be changed later.
   2415	 */
   2416	pp->intr_mask = DEF_PORT_IRQ;
   2417
   2418	/*
   2419	 * Switch to per-port locking in case each port has its own MSI vector.
   2420	 */
   2421	if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
   2422		spin_lock_init(&pp->lock);
   2423		ap->lock = &pp->lock;
   2424	}
   2425
   2426	ap->private_data = pp;
   2427
   2428	/* engage engines, captain */
   2429	return ahci_port_resume(ap);
   2430}
   2431
   2432static void ahci_port_stop(struct ata_port *ap)
   2433{
   2434	const char *emsg = NULL;
   2435	struct ahci_host_priv *hpriv = ap->host->private_data;
   2436	void __iomem *host_mmio = hpriv->mmio;
   2437	int rc;
   2438
   2439	/* de-initialize port */
   2440	rc = ahci_deinit_port(ap, &emsg);
   2441	if (rc)
   2442		ata_port_warn(ap, "%s (%d)\n", emsg, rc);
   2443
   2444	/*
   2445	 * Clear GHC.IS to prevent stuck INTx after disabling MSI and
   2446	 * re-enabling INTx.
   2447	 */
   2448	writel(1 << ap->port_no, host_mmio + HOST_IRQ_STAT);
   2449
   2450	ahci_rpm_put_port(ap);
   2451}
   2452
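/*
 * Log the AHCI version, slot and port counts, interface speed,
 * implemented-port map and capability flags for the host.
 */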
   2453void ahci_print_info(struct ata_host *host, const char *scc_s)
   2454{
   2455	struct ahci_host_priv *hpriv = host->private_data;
   2456	u32 vers, cap, cap2, impl, speed;
   2457	const char *speed_s;
   2458
   2459	vers = hpriv->version;
   2460	cap = hpriv->cap;
   2461	cap2 = hpriv->cap2;
   2462	impl = hpriv->port_map;
   2463
   2464	speed = (cap >> 20) & 0xf;
   2465	if (speed == 1)
   2466		speed_s = "1.5";
   2467	else if (speed == 2)
   2468		speed_s = "3";
   2469	else if (speed == 3)
   2470		speed_s = "6";
   2471	else
   2472		speed_s = "?";
   2473
   2474	dev_info(host->dev,
   2475		"AHCI %02x%02x.%02x%02x "
   2476		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
   2477		,
   2478
   2479		(vers >> 24) & 0xff,
   2480		(vers >> 16) & 0xff,
   2481		(vers >> 8) & 0xff,
   2482		vers & 0xff,
   2483
   2484		((cap >> 8) & 0x1f) + 1,
   2485		(cap & 0x1f) + 1,
   2486		speed_s,
   2487		impl,
   2488		scc_s);
   2489
   2490	dev_info(host->dev,
   2491		"flags: "
   2492		"%s%s%s%s%s%s%s"
   2493		"%s%s%s%s%s%s%s"
   2494		"%s%s%s%s%s%s%s"
   2495		"%s%s\n"
   2496		,
   2497
   2498		cap & HOST_CAP_64 ? "64bit " : "",
   2499		cap & HOST_CAP_NCQ ? "ncq " : "",
   2500		cap & HOST_CAP_SNTF ? "sntf " : "",
   2501		cap & HOST_CAP_MPS ? "ilck " : "",
   2502		cap & HOST_CAP_SSS ? "stag " : "",
   2503		cap & HOST_CAP_ALPM ? "pm " : "",
   2504		cap & HOST_CAP_LED ? "led " : "",
   2505		cap & HOST_CAP_CLO ? "clo " : "",
   2506		cap & HOST_CAP_ONLY ? "only " : "",
   2507		cap & HOST_CAP_PMP ? "pmp " : "",
   2508		cap & HOST_CAP_FBS ? "fbs " : "",
   2509		cap & HOST_CAP_PIO_MULTI ? "pio " : "",
   2510		cap & HOST_CAP_SSC ? "slum " : "",
   2511		cap & HOST_CAP_PART ? "part " : "",
   2512		cap & HOST_CAP_CCC ? "ccc " : "",
   2513		cap & HOST_CAP_EMS ? "ems " : "",
   2514		cap & HOST_CAP_SXS ? "sxs " : "",
   2515		cap2 & HOST_CAP2_DESO ? "deso " : "",
   2516		cap2 & HOST_CAP2_SADM ? "sadm " : "",
   2517		cap2 & HOST_CAP2_SDS ? "sds " : "",
   2518		cap2 & HOST_CAP2_APST ? "apst " : "",
   2519		cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
   2520		cap2 & HOST_CAP2_BOH ? "boh " : ""
   2521		);
   2522}
   2523EXPORT_SYMBOL_GPL(ahci_print_info);
   2524
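/*
 * Probe the enclosure management registers and, if EM messages are
 * supported, record their location, buffer size and type, advertise
 * ATA_FLAG_EM, and enable software activity LEDs when the host does
 * not drive the activity LED in hardware.
 */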
   2525void ahci_set_em_messages(struct ahci_host_priv *hpriv,
   2526			  struct ata_port_info *pi)
   2527{
   2528	u8 messages;
   2529	void __iomem *mmio = hpriv->mmio;
   2530	u32 em_loc = readl(mmio + HOST_EM_LOC);
   2531	u32 em_ctl = readl(mmio + HOST_EM_CTL);
   2532
   2533	if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
   2534		return;
   2535
   2536	messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
   2537
   2538	if (messages) {
   2539		/* store em_loc */
   2540		hpriv->em_loc = ((em_loc >> 16) * 4);
   2541		hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
   2542		hpriv->em_msg_type = messages;
   2543		pi->flags |= ATA_FLAG_EM;
   2544		if (!(em_ctl & EM_CTL_ALHD))
   2545			pi->flags |= ATA_FLAG_SW_ACTIVITY;
   2546	}
   2547}
   2548EXPORT_SYMBOL_GPL(ahci_set_em_messages);
   2549
   2550static int ahci_host_activate_multi_irqs(struct ata_host *host,
   2551					 struct scsi_host_template *sht)
   2552{
   2553	struct ahci_host_priv *hpriv = host->private_data;
   2554	int i, rc;
   2555
   2556	rc = ata_host_start(host);
   2557	if (rc)
   2558		return rc;
   2559	/*
    2560	 * Request IRQs according to AHCI-1.1 when multiple MSIs have been
    2561	 * allocated: one MSI vector per port, via hpriv->get_irq_vector().
   2562	 */
   2563	for (i = 0; i < host->n_ports; i++) {
   2564		struct ahci_port_priv *pp = host->ports[i]->private_data;
   2565		int irq = hpriv->get_irq_vector(host, i);
   2566
   2567		/* Do not receive interrupts sent by dummy ports */
   2568		if (!pp) {
   2569			disable_irq(irq);
   2570			continue;
   2571		}
   2572
   2573		rc = devm_request_irq(host->dev, irq, ahci_multi_irqs_intr_hard,
   2574				0, pp->irq_desc, host->ports[i]);
   2575
   2576		if (rc)
   2577			return rc;
   2578		ata_port_desc(host->ports[i], "irq %d", irq);
   2579	}
   2580
   2581	return ata_host_register(host, sht);
   2582}
   2583
   2584/**
   2585 *	ahci_host_activate - start AHCI host, request IRQs and register it
   2586 *	@host: target ATA host
   2587 *	@sht: scsi_host_template to use when registering the host
   2588 *
   2589 *	LOCKING:
   2590 *	Inherited from calling layer (may sleep).
   2591 *
   2592 *	RETURNS:
   2593 *	0 on success, -errno otherwise.
   2594 */
   2595int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
   2596{
   2597	struct ahci_host_priv *hpriv = host->private_data;
   2598	int irq = hpriv->irq;
   2599	int rc;
   2600
   2601	if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
   2602		if (hpriv->irq_handler &&
   2603		    hpriv->irq_handler != ahci_single_level_irq_intr)
   2604			dev_warn(host->dev,
   2605			         "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
   2606		if (!hpriv->get_irq_vector) {
   2607			dev_err(host->dev,
   2608				"AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
   2609			return -EIO;
   2610		}
   2611
   2612		rc = ahci_host_activate_multi_irqs(host, sht);
   2613	} else {
   2614		rc = ata_host_activate(host, irq, hpriv->irq_handler,
   2615				       IRQF_SHARED, sht);
   2616	}
   2617
   2618
   2619	return rc;
   2620}
   2621EXPORT_SYMBOL_GPL(ahci_host_activate);
   2622
   2623MODULE_AUTHOR("Jeff Garzik");
   2624MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
   2625MODULE_LICENSE("GPL");