cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

libata-core.c (169616B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 *  libata-core.c - helper library for ATA
      4 *
      5 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
      6 *  Copyright 2003-2004 Jeff Garzik
      7 *
      8 *  libata documentation is available via 'make {ps|pdf}docs',
      9 *  as Documentation/driver-api/libata.rst
     10 *
     11 *  Hardware documentation available from http://www.t13.org/ and
     12 *  http://www.sata-io.org/
     13 *
     14 *  Standards documents from:
     15 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
     16 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
     17 *	http://www.sata-io.org (SATA)
     18 *	http://www.compactflash.org (CF)
     19 *	http://www.qic.org (QIC157 - Tape and DSC)
     20 *	http://www.ce-ata.org (CE-ATA: not supported)
     21 *
     22 * libata is essentially a library of internal helper functions for
     23 * low-level ATA host controller drivers.  As such, the API/ABI is
     24 * likely to change as new drivers are added and updated.
     25 * Do not depend on ABI/API stability.
     26 */
     27
     28#include <linux/kernel.h>
     29#include <linux/module.h>
     30#include <linux/pci.h>
     31#include <linux/init.h>
     32#include <linux/list.h>
     33#include <linux/mm.h>
     34#include <linux/spinlock.h>
     35#include <linux/blkdev.h>
     36#include <linux/delay.h>
     37#include <linux/timer.h>
     38#include <linux/time.h>
     39#include <linux/interrupt.h>
     40#include <linux/completion.h>
     41#include <linux/suspend.h>
     42#include <linux/workqueue.h>
     43#include <linux/scatterlist.h>
     44#include <linux/io.h>
     45#include <linux/log2.h>
     46#include <linux/slab.h>
     47#include <linux/glob.h>
     48#include <scsi/scsi.h>
     49#include <scsi/scsi_cmnd.h>
     50#include <scsi/scsi_host.h>
     51#include <linux/libata.h>
     52#include <asm/byteorder.h>
     53#include <asm/unaligned.h>
     54#include <linux/cdrom.h>
     55#include <linux/ratelimit.h>
     56#include <linux/leds.h>
     57#include <linux/pm_runtime.h>
     58#include <linux/platform_device.h>
     59#include <asm/setup.h>
     60
     61#define CREATE_TRACE_POINTS
     62#include <trace/events/libata.h>
     63
     64#include "libata.h"
     65#include "libata-transport.h"
     66
     67const struct ata_port_operations ata_base_port_ops = {
     68	.prereset		= ata_std_prereset,
     69	.postreset		= ata_std_postreset,
     70	.error_handler		= ata_std_error_handler,
     71	.sched_eh		= ata_std_sched_eh,
     72	.end_eh			= ata_std_end_eh,
     73};
     74
     75const struct ata_port_operations sata_port_ops = {
     76	.inherits		= &ata_base_port_ops,
     77
     78	.qc_defer		= ata_std_qc_defer,
     79	.hardreset		= sata_std_hardreset,
     80};
     81EXPORT_SYMBOL_GPL(sata_port_ops);
     82
     83static unsigned int ata_dev_init_params(struct ata_device *dev,
     84					u16 heads, u16 sectors);
     85static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
     86static void ata_dev_xfermask(struct ata_device *dev);
     87static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
     88
     89atomic_t ata_print_id = ATOMIC_INIT(0);
     90
     91#ifdef CONFIG_ATA_FORCE
     92struct ata_force_param {
     93	const char	*name;
     94	u8		cbl;
     95	u8		spd_limit;
     96	unsigned long	xfer_mask;
     97	unsigned int	horkage_on;
     98	unsigned int	horkage_off;
     99	u16		lflags_on;
    100	u16		lflags_off;
    101};
    102
    103struct ata_force_ent {
    104	int			port;
    105	int			device;
    106	struct ata_force_param	param;
    107};
    108
    109static struct ata_force_ent *ata_force_tbl;
    110static int ata_force_tbl_size;
    111
    112static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
    113/* param_buf is thrown away after initialization, disallow read */
    114module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
    115MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
    116#endif
    117
    118static int atapi_enabled = 1;
    119module_param(atapi_enabled, int, 0444);
    120MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
    121
    122static int atapi_dmadir = 0;
    123module_param(atapi_dmadir, int, 0444);
    124MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
    125
    126int atapi_passthru16 = 1;
    127module_param(atapi_passthru16, int, 0444);
    128MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
    129
    130int libata_fua = 0;
    131module_param_named(fua, libata_fua, int, 0444);
    132MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
    133
    134static int ata_ignore_hpa;
    135module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
    136MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
    137
    138static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
    139module_param_named(dma, libata_dma_mask, int, 0444);
    140MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
    141
    142static int ata_probe_timeout;
    143module_param(ata_probe_timeout, int, 0444);
    144MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
    145
    146int libata_noacpi = 0;
    147module_param_named(noacpi, libata_noacpi, int, 0444);
    148MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
    149
    150int libata_allow_tpm = 0;
    151module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
    152MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
    153
    154static int atapi_an;
    155module_param(atapi_an, int, 0444);
     156MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
    157
    158MODULE_AUTHOR("Jeff Garzik");
    159MODULE_DESCRIPTION("Library module for ATA devices");
    160MODULE_LICENSE("GPL");
    161MODULE_VERSION(DRV_VERSION);
    162
    163static inline bool ata_dev_print_info(struct ata_device *dev)
    164{
    165	struct ata_eh_context *ehc = &dev->link->eh_context;
    166
    167	return ehc->i.flags & ATA_EHI_PRINTINFO;
    168}
    169
    170static bool ata_sstatus_online(u32 sstatus)
    171{
    172	return (sstatus & 0xf) == 0x3;
    173}
    174
    175/**
    176 *	ata_link_next - link iteration helper
    177 *	@link: the previous link, NULL to start
    178 *	@ap: ATA port containing links to iterate
    179 *	@mode: iteration mode, one of ATA_LITER_*
    180 *
    181 *	LOCKING:
    182 *	Host lock or EH context.
    183 *
    184 *	RETURNS:
    185 *	Pointer to the next link.
    186 */
    187struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
    188			       enum ata_link_iter_mode mode)
    189{
    190	BUG_ON(mode != ATA_LITER_EDGE &&
    191	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
    192
    193	/* NULL link indicates start of iteration */
    194	if (!link)
    195		switch (mode) {
    196		case ATA_LITER_EDGE:
    197		case ATA_LITER_PMP_FIRST:
    198			if (sata_pmp_attached(ap))
    199				return ap->pmp_link;
    200			fallthrough;
    201		case ATA_LITER_HOST_FIRST:
    202			return &ap->link;
    203		}
    204
    205	/* we just iterated over the host link, what's next? */
    206	if (link == &ap->link)
    207		switch (mode) {
    208		case ATA_LITER_HOST_FIRST:
    209			if (sata_pmp_attached(ap))
    210				return ap->pmp_link;
    211			fallthrough;
    212		case ATA_LITER_PMP_FIRST:
    213			if (unlikely(ap->slave_link))
    214				return ap->slave_link;
    215			fallthrough;
    216		case ATA_LITER_EDGE:
    217			return NULL;
    218		}
    219
    220	/* slave_link excludes PMP */
    221	if (unlikely(link == ap->slave_link))
    222		return NULL;
    223
    224	/* we were over a PMP link */
    225	if (++link < ap->pmp_link + ap->nr_pmp_links)
    226		return link;
    227
    228	if (mode == ATA_LITER_PMP_FIRST)
    229		return &ap->link;
    230
    231	return NULL;
    232}
    233EXPORT_SYMBOL_GPL(ata_link_next);
    234
    235/**
    236 *	ata_dev_next - device iteration helper
    237 *	@dev: the previous device, NULL to start
    238 *	@link: ATA link containing devices to iterate
    239 *	@mode: iteration mode, one of ATA_DITER_*
    240 *
    241 *	LOCKING:
    242 *	Host lock or EH context.
    243 *
    244 *	RETURNS:
    245 *	Pointer to the next device.
    246 */
    247struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
    248				enum ata_dev_iter_mode mode)
    249{
    250	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
    251	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
    252
    253	/* NULL dev indicates start of iteration */
    254	if (!dev)
    255		switch (mode) {
    256		case ATA_DITER_ENABLED:
    257		case ATA_DITER_ALL:
    258			dev = link->device;
    259			goto check;
    260		case ATA_DITER_ENABLED_REVERSE:
    261		case ATA_DITER_ALL_REVERSE:
    262			dev = link->device + ata_link_max_devices(link) - 1;
    263			goto check;
    264		}
    265
    266 next:
    267	/* move to the next one */
    268	switch (mode) {
    269	case ATA_DITER_ENABLED:
    270	case ATA_DITER_ALL:
    271		if (++dev < link->device + ata_link_max_devices(link))
    272			goto check;
    273		return NULL;
    274	case ATA_DITER_ENABLED_REVERSE:
    275	case ATA_DITER_ALL_REVERSE:
    276		if (--dev >= link->device)
    277			goto check;
    278		return NULL;
    279	}
    280
    281 check:
    282	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
    283	    !ata_dev_enabled(dev))
    284		goto next;
    285	return dev;
    286}
    287EXPORT_SYMBOL_GPL(ata_dev_next);
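/*
 * Illustrative sketch (added for this listing, not part of the original
 * source): the two iterators above are normally driven together.  libata
 * wraps this pattern in the ata_for_each_link()/ata_for_each_dev()
 * macros in <linux/libata.h>; the open-coded equivalent for
 * ATA_LITER_EDGE/ATA_DITER_ENABLED looks like this:
 */
#if 0
static void example_iterate_port(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	for (link = ata_link_next(NULL, ap, ATA_LITER_EDGE); link;
	     link = ata_link_next(link, ap, ATA_LITER_EDGE))
		for (dev = ata_dev_next(NULL, link, ATA_DITER_ENABLED); dev;
		     dev = ata_dev_next(dev, link, ATA_DITER_ENABLED))
			ata_dev_dbg(dev, "enabled device on pmp link %d\n",
				    link->pmp);
}
#endif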
    288
    289/**
    290 *	ata_dev_phys_link - find physical link for a device
    291 *	@dev: ATA device to look up physical link for
    292 *
    293 *	Look up physical link which @dev is attached to.  Note that
    294 *	this is different from @dev->link only when @dev is on slave
    295 *	link.  For all other cases, it's the same as @dev->link.
    296 *
    297 *	LOCKING:
    298 *	Don't care.
    299 *
    300 *	RETURNS:
    301 *	Pointer to the found physical link.
    302 */
    303struct ata_link *ata_dev_phys_link(struct ata_device *dev)
    304{
    305	struct ata_port *ap = dev->link->ap;
    306
    307	if (!ap->slave_link)
    308		return dev->link;
    309	if (!dev->devno)
    310		return &ap->link;
    311	return ap->slave_link;
    312}
    313
    314#ifdef CONFIG_ATA_FORCE
    315/**
    316 *	ata_force_cbl - force cable type according to libata.force
    317 *	@ap: ATA port of interest
    318 *
    319 *	Force cable type according to libata.force and whine about it.
    320 *	The last entry which has matching port number is used, so it
    321 *	can be specified as part of device force parameters.  For
    322 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
    323 *	same effect.
    324 *
    325 *	LOCKING:
    326 *	EH context.
    327 */
    328void ata_force_cbl(struct ata_port *ap)
    329{
    330	int i;
    331
    332	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
    333		const struct ata_force_ent *fe = &ata_force_tbl[i];
    334
    335		if (fe->port != -1 && fe->port != ap->print_id)
    336			continue;
    337
    338		if (fe->param.cbl == ATA_CBL_NONE)
    339			continue;
    340
    341		ap->cbl = fe->param.cbl;
    342		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
    343		return;
    344	}
    345}
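/*
 * Example (added for this listing, not part of the original source):
 * booting with libata.force=1:40c,1.00:udma4 makes the "1:40c" entry
 * force a 40-conductor cable type on port 1, while "1.00:udma4" caps
 * device 1.00 at UDMA4.  Since the table is scanned from the last
 * entry backwards and the first match is taken, the last matching
 * entry wins.
 */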
    346
    347/**
    348 *	ata_force_link_limits - force link limits according to libata.force
    349 *	@link: ATA link of interest
    350 *
    351 *	Force link flags and SATA spd limit according to libata.force
    352 *	and whine about it.  When only the port part is specified
    353 *	(e.g. 1:), the limit applies to all links connected to both
    354 *	the host link and all fan-out ports connected via PMP.  If the
    355 *	device part is specified as 0 (e.g. 1.00:), it specifies the
     356 *	first fan-out link, not the host link.  Device number 15 always
     357 *	points to the host link whether PMP is attached or not.  If the
     358 *	controller has a slave link, device number 16 points to it.
    359 *
    360 *	LOCKING:
    361 *	EH context.
    362 */
    363static void ata_force_link_limits(struct ata_link *link)
    364{
    365	bool did_spd = false;
    366	int linkno = link->pmp;
    367	int i;
    368
    369	if (ata_is_host_link(link))
    370		linkno += 15;
    371
    372	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
    373		const struct ata_force_ent *fe = &ata_force_tbl[i];
    374
    375		if (fe->port != -1 && fe->port != link->ap->print_id)
    376			continue;
    377
    378		if (fe->device != -1 && fe->device != linkno)
    379			continue;
    380
    381		/* only honor the first spd limit */
    382		if (!did_spd && fe->param.spd_limit) {
    383			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
    384			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
    385					fe->param.name);
    386			did_spd = true;
    387		}
    388
    389		/* let lflags stack */
    390		if (fe->param.lflags_on) {
    391			link->flags |= fe->param.lflags_on;
    392			ata_link_notice(link,
    393					"FORCE: link flag 0x%x forced -> 0x%x\n",
    394					fe->param.lflags_on, link->flags);
    395		}
    396		if (fe->param.lflags_off) {
    397			link->flags &= ~fe->param.lflags_off;
    398			ata_link_notice(link,
    399				"FORCE: link flag 0x%x cleared -> 0x%x\n",
    400				fe->param.lflags_off, link->flags);
    401		}
    402	}
    403}
    404
    405/**
    406 *	ata_force_xfermask - force xfermask according to libata.force
    407 *	@dev: ATA device of interest
    408 *
    409 *	Force xfer_mask according to libata.force and whine about it.
    410 *	For consistency with link selection, device number 15 selects
    411 *	the first device connected to the host link.
    412 *
    413 *	LOCKING:
    414 *	EH context.
    415 */
    416static void ata_force_xfermask(struct ata_device *dev)
    417{
    418	int devno = dev->link->pmp + dev->devno;
    419	int alt_devno = devno;
    420	int i;
    421
    422	/* allow n.15/16 for devices attached to host port */
    423	if (ata_is_host_link(dev->link))
    424		alt_devno += 15;
    425
    426	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
    427		const struct ata_force_ent *fe = &ata_force_tbl[i];
    428		unsigned long pio_mask, mwdma_mask, udma_mask;
    429
    430		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
    431			continue;
    432
    433		if (fe->device != -1 && fe->device != devno &&
    434		    fe->device != alt_devno)
    435			continue;
    436
    437		if (!fe->param.xfer_mask)
    438			continue;
    439
    440		ata_unpack_xfermask(fe->param.xfer_mask,
    441				    &pio_mask, &mwdma_mask, &udma_mask);
    442		if (udma_mask)
    443			dev->udma_mask = udma_mask;
    444		else if (mwdma_mask) {
    445			dev->udma_mask = 0;
    446			dev->mwdma_mask = mwdma_mask;
    447		} else {
    448			dev->udma_mask = 0;
    449			dev->mwdma_mask = 0;
    450			dev->pio_mask = pio_mask;
    451		}
    452
    453		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
    454			       fe->param.name);
    455		return;
    456	}
    457}
    458
    459/**
    460 *	ata_force_horkage - force horkage according to libata.force
    461 *	@dev: ATA device of interest
    462 *
    463 *	Force horkage according to libata.force and whine about it.
    464 *	For consistency with link selection, device number 15 selects
    465 *	the first device connected to the host link.
    466 *
    467 *	LOCKING:
    468 *	EH context.
    469 */
    470static void ata_force_horkage(struct ata_device *dev)
    471{
    472	int devno = dev->link->pmp + dev->devno;
    473	int alt_devno = devno;
    474	int i;
    475
    476	/* allow n.15/16 for devices attached to host port */
    477	if (ata_is_host_link(dev->link))
    478		alt_devno += 15;
    479
    480	for (i = 0; i < ata_force_tbl_size; i++) {
    481		const struct ata_force_ent *fe = &ata_force_tbl[i];
    482
    483		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
    484			continue;
    485
    486		if (fe->device != -1 && fe->device != devno &&
    487		    fe->device != alt_devno)
    488			continue;
    489
    490		if (!(~dev->horkage & fe->param.horkage_on) &&
    491		    !(dev->horkage & fe->param.horkage_off))
    492			continue;
    493
    494		dev->horkage |= fe->param.horkage_on;
    495		dev->horkage &= ~fe->param.horkage_off;
    496
    497		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
    498			       fe->param.name);
    499	}
    500}
    501#else
    502static inline void ata_force_link_limits(struct ata_link *link) { }
    503static inline void ata_force_xfermask(struct ata_device *dev) { }
    504static inline void ata_force_horkage(struct ata_device *dev) { }
    505#endif
    506
    507/**
    508 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
    509 *	@opcode: SCSI opcode
    510 *
    511 *	Determine ATAPI command type from @opcode.
    512 *
    513 *	LOCKING:
    514 *	None.
    515 *
    516 *	RETURNS:
    517 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
    518 */
    519int atapi_cmd_type(u8 opcode)
    520{
    521	switch (opcode) {
    522	case GPCMD_READ_10:
    523	case GPCMD_READ_12:
    524		return ATAPI_READ;
    525
    526	case GPCMD_WRITE_10:
    527	case GPCMD_WRITE_12:
    528	case GPCMD_WRITE_AND_VERIFY_10:
    529		return ATAPI_WRITE;
    530
    531	case GPCMD_READ_CD:
    532	case GPCMD_READ_CD_MSF:
    533		return ATAPI_READ_CD;
    534
    535	case ATA_16:
    536	case ATA_12:
    537		if (atapi_passthru16)
    538			return ATAPI_PASS_THRU;
    539		fallthrough;
    540	default:
    541		return ATAPI_MISC;
    542	}
    543}
    544EXPORT_SYMBOL_GPL(atapi_cmd_type);
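/*
 * Illustrative sketch (added for this listing, not part of the original
 * source): callers can use the classification above to special-case
 * media reads, for example:
 */
#if 0
static bool example_is_media_read(u8 scsi_opcode)
{
	int type = atapi_cmd_type(scsi_opcode);

	/* GPCMD_READ_10 -> ATAPI_READ, GPCMD_READ_CD -> ATAPI_READ_CD */
	return type == ATAPI_READ || type == ATAPI_READ_CD;
}
#endif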
    545
    546static const u8 ata_rw_cmds[] = {
    547	/* pio multi */
    548	ATA_CMD_READ_MULTI,
    549	ATA_CMD_WRITE_MULTI,
    550	ATA_CMD_READ_MULTI_EXT,
    551	ATA_CMD_WRITE_MULTI_EXT,
    552	0,
    553	0,
    554	0,
    555	ATA_CMD_WRITE_MULTI_FUA_EXT,
    556	/* pio */
    557	ATA_CMD_PIO_READ,
    558	ATA_CMD_PIO_WRITE,
    559	ATA_CMD_PIO_READ_EXT,
    560	ATA_CMD_PIO_WRITE_EXT,
    561	0,
    562	0,
    563	0,
    564	0,
    565	/* dma */
    566	ATA_CMD_READ,
    567	ATA_CMD_WRITE,
    568	ATA_CMD_READ_EXT,
    569	ATA_CMD_WRITE_EXT,
    570	0,
    571	0,
    572	0,
    573	ATA_CMD_WRITE_FUA_EXT
    574};
    575
    576/**
    577 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
    578 *	@tf: command to examine and configure
    579 *	@dev: device tf belongs to
    580 *
    581 *	Examine the device configuration and tf->flags to calculate
    582 *	the proper read/write commands and protocol to use.
    583 *
    584 *	LOCKING:
    585 *	caller.
    586 */
    587static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
    588{
    589	u8 cmd;
    590
    591	int index, fua, lba48, write;
    592
    593	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
    594	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
    595	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
    596
    597	if (dev->flags & ATA_DFLAG_PIO) {
    598		tf->protocol = ATA_PROT_PIO;
    599		index = dev->multi_count ? 0 : 8;
    600	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
    601		/* Unable to use DMA due to host limitation */
    602		tf->protocol = ATA_PROT_PIO;
    603		index = dev->multi_count ? 0 : 8;
    604	} else {
    605		tf->protocol = ATA_PROT_DMA;
    606		index = 16;
    607	}
    608
    609	cmd = ata_rw_cmds[index + fua + lba48 + write];
    610	if (cmd) {
    611		tf->command = cmd;
    612		return 0;
    613	}
    614	return -1;
    615}
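/*
 * Worked example (added for this listing): for an LBA48 FUA write on a
 * DMA-capable device, fua = 4, lba48 = 2, write = 1 and index = 16, so
 * cmd = ata_rw_cmds[16 + 4 + 2 + 1] = ata_rw_cmds[23], which is
 * ATA_CMD_WRITE_FUA_EXT.  The zero entries in the table mark invalid
 * combinations (e.g. FUA without LBA48), for which this function
 * returns -1.
 */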
    616
    617/**
    618 *	ata_tf_read_block - Read block address from ATA taskfile
    619 *	@tf: ATA taskfile of interest
    620 *	@dev: ATA device @tf belongs to
    621 *
    622 *	LOCKING:
    623 *	None.
    624 *
    625 *	Read block address from @tf.  This function can handle all
    626 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
    627 *	flags select the address format to use.
    628 *
    629 *	RETURNS:
    630 *	Block address read from @tf.
    631 */
    632u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
    633{
    634	u64 block = 0;
    635
    636	if (tf->flags & ATA_TFLAG_LBA) {
    637		if (tf->flags & ATA_TFLAG_LBA48) {
    638			block |= (u64)tf->hob_lbah << 40;
    639			block |= (u64)tf->hob_lbam << 32;
    640			block |= (u64)tf->hob_lbal << 24;
    641		} else
    642			block |= (tf->device & 0xf) << 24;
    643
    644		block |= tf->lbah << 16;
    645		block |= tf->lbam << 8;
    646		block |= tf->lbal;
    647	} else {
    648		u32 cyl, head, sect;
    649
    650		cyl = tf->lbam | (tf->lbah << 8);
    651		head = tf->device & 0xf;
    652		sect = tf->lbal;
    653
    654		if (!sect) {
    655			ata_dev_warn(dev,
    656				     "device reported invalid CHS sector 0\n");
    657			return U64_MAX;
    658		}
    659
    660		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
    661	}
    662
    663	return block;
    664}
    665
    666/**
    667 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
    668 *	@tf: Target ATA taskfile
    669 *	@dev: ATA device @tf belongs to
    670 *	@block: Block address
    671 *	@n_block: Number of blocks
    672 *	@tf_flags: RW/FUA etc...
    673 *	@tag: tag
    674 *	@class: IO priority class
    675 *
    676 *	LOCKING:
    677 *	None.
    678 *
    679 *	Build ATA taskfile @tf for read/write request described by
    680 *	@block, @n_block, @tf_flags and @tag on @dev.
    681 *
    682 *	RETURNS:
    683 *
    684 *	0 on success, -ERANGE if the request is too large for @dev,
    685 *	-EINVAL if the request is invalid.
    686 */
    687int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
    688		    u64 block, u32 n_block, unsigned int tf_flags,
    689		    unsigned int tag, int class)
    690{
    691	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
    692	tf->flags |= tf_flags;
    693
    694	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
    695		/* yay, NCQ */
    696		if (!lba_48_ok(block, n_block))
    697			return -ERANGE;
    698
    699		tf->protocol = ATA_PROT_NCQ;
    700		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
    701
    702		if (tf->flags & ATA_TFLAG_WRITE)
    703			tf->command = ATA_CMD_FPDMA_WRITE;
    704		else
    705			tf->command = ATA_CMD_FPDMA_READ;
    706
    707		tf->nsect = tag << 3;
    708		tf->hob_feature = (n_block >> 8) & 0xff;
    709		tf->feature = n_block & 0xff;
    710
    711		tf->hob_lbah = (block >> 40) & 0xff;
    712		tf->hob_lbam = (block >> 32) & 0xff;
    713		tf->hob_lbal = (block >> 24) & 0xff;
    714		tf->lbah = (block >> 16) & 0xff;
    715		tf->lbam = (block >> 8) & 0xff;
    716		tf->lbal = block & 0xff;
    717
    718		tf->device = ATA_LBA;
    719		if (tf->flags & ATA_TFLAG_FUA)
    720			tf->device |= 1 << 7;
    721
    722		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE &&
    723		    class == IOPRIO_CLASS_RT)
    724			tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
    725	} else if (dev->flags & ATA_DFLAG_LBA) {
    726		tf->flags |= ATA_TFLAG_LBA;
    727
    728		if (lba_28_ok(block, n_block)) {
    729			/* use LBA28 */
    730			tf->device |= (block >> 24) & 0xf;
    731		} else if (lba_48_ok(block, n_block)) {
    732			if (!(dev->flags & ATA_DFLAG_LBA48))
    733				return -ERANGE;
    734
    735			/* use LBA48 */
    736			tf->flags |= ATA_TFLAG_LBA48;
    737
    738			tf->hob_nsect = (n_block >> 8) & 0xff;
    739
    740			tf->hob_lbah = (block >> 40) & 0xff;
    741			tf->hob_lbam = (block >> 32) & 0xff;
    742			tf->hob_lbal = (block >> 24) & 0xff;
    743		} else
    744			/* request too large even for LBA48 */
    745			return -ERANGE;
    746
    747		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
    748			return -EINVAL;
    749
    750		tf->nsect = n_block & 0xff;
    751
    752		tf->lbah = (block >> 16) & 0xff;
    753		tf->lbam = (block >> 8) & 0xff;
    754		tf->lbal = block & 0xff;
    755
    756		tf->device |= ATA_LBA;
    757	} else {
    758		/* CHS */
    759		u32 sect, head, cyl, track;
    760
    761		/* The request -may- be too large for CHS addressing. */
    762		if (!lba_28_ok(block, n_block))
    763			return -ERANGE;
    764
    765		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
    766			return -EINVAL;
    767
    768		/* Convert LBA to CHS */
    769		track = (u32)block / dev->sectors;
    770		cyl   = track / dev->heads;
    771		head  = track % dev->heads;
    772		sect  = (u32)block % dev->sectors + 1;
    773
    774		/* Check whether the converted CHS can fit.
    775		   Cylinder: 0-65535
    776		   Head: 0-15
     777		   Sector: 1-255 */
    778		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
    779			return -ERANGE;
    780
    781		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
    782		tf->lbal = sect;
    783		tf->lbam = cyl;
    784		tf->lbah = cyl >> 8;
    785		tf->device |= head;
    786	}
    787
    788	return 0;
    789}
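/*
 * Worked CHS example (added for this listing): with dev->sectors = 63
 * and dev->heads = 16, block 1000 converts to track = 1000 / 63 = 15,
 * cyl = 15 / 16 = 0, head = 15 % 16 = 15 and sect = 1000 % 63 + 1 = 56,
 * which passes the range check above (cyl <= 65535, head <= 15,
 * 1 <= sect <= 255).
 */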
    790
    791/**
    792 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
    793 *	@pio_mask: pio_mask
    794 *	@mwdma_mask: mwdma_mask
    795 *	@udma_mask: udma_mask
    796 *
    797 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
    798 *	unsigned int xfer_mask.
    799 *
    800 *	LOCKING:
    801 *	None.
    802 *
    803 *	RETURNS:
    804 *	Packed xfer_mask.
    805 */
    806unsigned long ata_pack_xfermask(unsigned long pio_mask,
    807				unsigned long mwdma_mask,
    808				unsigned long udma_mask)
    809{
    810	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
    811		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
    812		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
    813}
    814EXPORT_SYMBOL_GPL(ata_pack_xfermask);
    815
    816/**
    817 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
    818 *	@xfer_mask: xfer_mask to unpack
    819 *	@pio_mask: resulting pio_mask
    820 *	@mwdma_mask: resulting mwdma_mask
    821 *	@udma_mask: resulting udma_mask
    822 *
    823 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
    824 *	Any NULL destination masks will be ignored.
    825 */
    826void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
    827			 unsigned long *mwdma_mask, unsigned long *udma_mask)
    828{
    829	if (pio_mask)
    830		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
    831	if (mwdma_mask)
    832		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
    833	if (udma_mask)
    834		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
    835}
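/*
 * Illustrative sketch (added for this listing, not part of the original
 * source): ata_pack_xfermask() and ata_unpack_xfermask() are inverses,
 * so a round trip preserves each component mask:
 */
#if 0
static void example_xfermask_round_trip(void)
{
	unsigned long pio, mwdma, udma;
	unsigned long xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);

	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* now pio == 0x1f (PIO0-4), mwdma == 0x07 (MWDMA0-2) and
	 * udma == 0x3f (UDMA0-5) again */
}
#endif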
    836
    837static const struct ata_xfer_ent {
    838	int shift, bits;
    839	u8 base;
    840} ata_xfer_tbl[] = {
    841	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
    842	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
    843	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
    844	{ -1, },
    845};
    846
    847/**
    848 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
    849 *	@xfer_mask: xfer_mask of interest
    850 *
    851 *	Return matching XFER_* value for @xfer_mask.  Only the highest
    852 *	bit of @xfer_mask is considered.
    853 *
    854 *	LOCKING:
    855 *	None.
    856 *
    857 *	RETURNS:
    858 *	Matching XFER_* value, 0xff if no match found.
    859 */
    860u8 ata_xfer_mask2mode(unsigned long xfer_mask)
    861{
    862	int highbit = fls(xfer_mask) - 1;
    863	const struct ata_xfer_ent *ent;
    864
    865	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
    866		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
    867			return ent->base + highbit - ent->shift;
    868	return 0xff;
    869}
    870EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
    871
    872/**
    873 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
    874 *	@xfer_mode: XFER_* of interest
    875 *
    876 *	Return matching xfer_mask for @xfer_mode.
    877 *
    878 *	LOCKING:
    879 *	None.
    880 *
    881 *	RETURNS:
    882 *	Matching xfer_mask, 0 if no match found.
    883 */
    884unsigned long ata_xfer_mode2mask(u8 xfer_mode)
    885{
    886	const struct ata_xfer_ent *ent;
    887
    888	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
    889		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
    890			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
    891				& ~((1 << ent->shift) - 1);
    892	return 0;
    893}
    894EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
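/*
 * Worked example (added for this listing): for xfer_mode == XFER_UDMA_2
 * the UDMA table entry matches, and the result is
 * ((2 << (ATA_SHIFT_UDMA + 2)) - 1) & ~((1 << ATA_SHIFT_UDMA) - 1),
 * i.e. the UDMA0, UDMA1 and UDMA2 bits: a mode plus everything below
 * it within the same transfer class.
 */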
    895
    896/**
    897 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
    898 *	@xfer_mode: XFER_* of interest
    899 *
    900 *	Return matching xfer_shift for @xfer_mode.
    901 *
    902 *	LOCKING:
    903 *	None.
    904 *
    905 *	RETURNS:
    906 *	Matching xfer_shift, -1 if no match found.
    907 */
    908int ata_xfer_mode2shift(u8 xfer_mode)
    909{
    910	const struct ata_xfer_ent *ent;
    911
    912	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
    913		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
    914			return ent->shift;
    915	return -1;
    916}
    917EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
    918
    919/**
    920 *	ata_mode_string - convert xfer_mask to string
    921 *	@xfer_mask: mask of bits supported; only highest bit counts.
    922 *
     923 *	Determine the string which represents the highest speed
     924 *	(highest bit in @xfer_mask).
    925 *
    926 *	LOCKING:
    927 *	None.
    928 *
    929 *	RETURNS:
    930 *	Constant C string representing highest speed listed in
     931 *	@xfer_mask, or the constant C string "<n/a>".
    932 */
    933const char *ata_mode_string(unsigned long xfer_mask)
    934{
    935	static const char * const xfer_mode_str[] = {
    936		"PIO0",
    937		"PIO1",
    938		"PIO2",
    939		"PIO3",
    940		"PIO4",
    941		"PIO5",
    942		"PIO6",
    943		"MWDMA0",
    944		"MWDMA1",
    945		"MWDMA2",
    946		"MWDMA3",
    947		"MWDMA4",
    948		"UDMA/16",
    949		"UDMA/25",
    950		"UDMA/33",
    951		"UDMA/44",
    952		"UDMA/66",
    953		"UDMA/100",
    954		"UDMA/133",
    955		"UDMA7",
    956	};
    957	int highbit;
    958
    959	highbit = fls(xfer_mask) - 1;
    960	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
    961		return xfer_mode_str[highbit];
    962	return "<n/a>";
    963}
    964EXPORT_SYMBOL_GPL(ata_mode_string);
    965
    966const char *sata_spd_string(unsigned int spd)
    967{
    968	static const char * const spd_str[] = {
    969		"1.5 Gbps",
    970		"3.0 Gbps",
    971		"6.0 Gbps",
    972	};
    973
    974	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
    975		return "<unknown>";
    976	return spd_str[spd - 1];
    977}
    978
    979/**
    980 *	ata_dev_classify - determine device type based on ATA-spec signature
    981 *	@tf: ATA taskfile register set for device to be identified
    982 *
    983 *	Determine from taskfile register contents whether a device is
    984 *	ATA or ATAPI, as per "Signature and persistence" section
    985 *	of ATA/PI spec (volume 1, sect 5.14).
    986 *
    987 *	LOCKING:
    988 *	None.
    989 *
    990 *	RETURNS:
    991 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
     992 *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
    993 */
    994unsigned int ata_dev_classify(const struct ata_taskfile *tf)
    995{
    996	/* Apple's open source Darwin code hints that some devices only
    997	 * put a proper signature into the LBA mid/high registers,
     998	 * so we only check those.  It's sufficient for uniqueness.
    999	 *
   1000	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
   1001	 * signatures for ATA and ATAPI devices attached on SerialATA,
   1002	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
    1003	 * spec has never mentioned using different signatures
    1004	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
    1005	 * Multiplier specification began to use 0x69/0x96 to identify
    1006	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
    1007	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
    1008	 * 0x69/0x96 and described them as reserved for
    1009	 * Serial ATA.
   1010	 *
   1011	 * We follow the current spec and consider that 0x69/0x96
   1012	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
   1013	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
   1014	 * SEMB signature.  This is worked around in
   1015	 * ata_dev_read_id().
   1016	 */
   1017	if (tf->lbam == 0 && tf->lbah == 0)
   1018		return ATA_DEV_ATA;
   1019
   1020	if (tf->lbam == 0x14 && tf->lbah == 0xeb)
   1021		return ATA_DEV_ATAPI;
   1022
   1023	if (tf->lbam == 0x69 && tf->lbah == 0x96)
   1024		return ATA_DEV_PMP;
   1025
   1026	if (tf->lbam == 0x3c && tf->lbah == 0xc3)
   1027		return ATA_DEV_SEMB;
   1028
   1029	if (tf->lbam == 0xcd && tf->lbah == 0xab)
   1030		return ATA_DEV_ZAC;
   1031
   1032	return ATA_DEV_UNKNOWN;
   1033}
   1034EXPORT_SYMBOL_GPL(ata_dev_classify);
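/*
 * Illustrative sketch (added for this listing, not part of the original
 * source): after reset, the signature read back into a taskfile maps
 * to a device class, e.g.:
 */
#if 0
static void example_classify(void)
{
	struct ata_taskfile tf = { .lbam = 0x14, .lbah = 0xeb };

	/* 0x14/0xeb is the ATAPI signature */
	WARN_ON(ata_dev_classify(&tf) != ATA_DEV_ATAPI);
}
#endif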
   1035
   1036/**
   1037 *	ata_id_string - Convert IDENTIFY DEVICE page into string
   1038 *	@id: IDENTIFY DEVICE results we will examine
   1039 *	@s: string into which data is output
   1040 *	@ofs: offset into identify device page
   1041 *	@len: length of string to return. must be an even number.
   1042 *
   1043 *	The strings in the IDENTIFY DEVICE page are broken up into
   1044 *	16-bit chunks.  Run through the string, and output each
   1045 *	8-bit chunk linearly, regardless of platform.
   1046 *
   1047 *	LOCKING:
   1048 *	caller.
   1049 */
   1050
   1051void ata_id_string(const u16 *id, unsigned char *s,
   1052		   unsigned int ofs, unsigned int len)
   1053{
   1054	unsigned int c;
   1055
   1056	BUG_ON(len & 1);
   1057
   1058	while (len > 0) {
   1059		c = id[ofs] >> 8;
   1060		*s = c;
   1061		s++;
   1062
   1063		c = id[ofs] & 0xff;
   1064		*s = c;
   1065		s++;
   1066
   1067		ofs++;
   1068		len -= 2;
   1069	}
   1070}
   1071EXPORT_SYMBOL_GPL(ata_id_string);
   1072
   1073/**
   1074 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
   1075 *	@id: IDENTIFY DEVICE results we will examine
   1076 *	@s: string into which data is output
   1077 *	@ofs: offset into identify device page
   1078 *	@len: length of string to return. must be an odd number.
   1079 *
   1080 *	This function is identical to ata_id_string except that it
   1081 *	trims trailing spaces and terminates the resulting string with
   1082 *	null.  @len must be actual maximum length (even number) + 1.
   1083 *
   1084 *	LOCKING:
   1085 *	caller.
   1086 */
   1087void ata_id_c_string(const u16 *id, unsigned char *s,
   1088		     unsigned int ofs, unsigned int len)
   1089{
   1090	unsigned char *p;
   1091
   1092	ata_id_string(id, s, ofs, len - 1);
   1093
   1094	p = s + strnlen(s, len - 1);
   1095	while (p > s && p[-1] == ' ')
   1096		p--;
   1097	*p = '\0';
   1098}
   1099EXPORT_SYMBOL_GPL(ata_id_c_string);
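/*
 * Worked example (added for this listing): IDENTIFY strings store two
 * characters per 16-bit word, high byte first.  For id[ofs] == 0x4142
 * ('A' == 0x41, 'B' == 0x42) ata_id_string() emits "AB", and
 * ata_id_c_string() additionally trims trailing blanks, so a model
 * field padded as "FOO     " comes back as the C string "FOO".
 */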
   1100
   1101static u64 ata_id_n_sectors(const u16 *id)
   1102{
   1103	if (ata_id_has_lba(id)) {
   1104		if (ata_id_has_lba48(id))
   1105			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
   1106		else
   1107			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
   1108	} else {
   1109		if (ata_id_current_chs_valid(id))
   1110			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
   1111			       id[ATA_ID_CUR_SECTORS];
   1112		else
   1113			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
   1114			       id[ATA_ID_SECTORS];
   1115	}
   1116}
   1117
   1118u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
   1119{
   1120	u64 sectors = 0;
   1121
   1122	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
   1123	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
   1124	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
   1125	sectors |= (tf->lbah & 0xff) << 16;
   1126	sectors |= (tf->lbam & 0xff) << 8;
   1127	sectors |= (tf->lbal & 0xff);
   1128
   1129	return sectors;
   1130}
   1131
   1132u64 ata_tf_to_lba(const struct ata_taskfile *tf)
   1133{
   1134	u64 sectors = 0;
   1135
   1136	sectors |= (tf->device & 0x0f) << 24;
   1137	sectors |= (tf->lbah & 0xff) << 16;
   1138	sectors |= (tf->lbam & 0xff) << 8;
   1139	sectors |= (tf->lbal & 0xff);
   1140
   1141	return sectors;
   1142}
   1143
   1144/**
   1145 *	ata_read_native_max_address - Read native max address
   1146 *	@dev: target device
   1147 *	@max_sectors: out parameter for the result native max address
   1148 *
   1149 *	Perform an LBA48 or LBA28 native size query upon the device in
   1150 *	question.
   1151 *
   1152 *	RETURNS:
   1153 *	0 on success, -EACCES if command is aborted by the drive.
   1154 *	-EIO on other errors.
   1155 */
   1156static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
   1157{
   1158	unsigned int err_mask;
   1159	struct ata_taskfile tf;
   1160	int lba48 = ata_id_has_lba48(dev->id);
   1161
   1162	ata_tf_init(dev, &tf);
   1163
   1164	/* always clear all address registers */
   1165	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
   1166
   1167	if (lba48) {
   1168		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
   1169		tf.flags |= ATA_TFLAG_LBA48;
   1170	} else
   1171		tf.command = ATA_CMD_READ_NATIVE_MAX;
   1172
   1173	tf.protocol = ATA_PROT_NODATA;
   1174	tf.device |= ATA_LBA;
   1175
   1176	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
   1177	if (err_mask) {
   1178		ata_dev_warn(dev,
   1179			     "failed to read native max address (err_mask=0x%x)\n",
   1180			     err_mask);
   1181		if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
   1182			return -EACCES;
   1183		return -EIO;
   1184	}
   1185
   1186	if (lba48)
   1187		*max_sectors = ata_tf_to_lba48(&tf) + 1;
   1188	else
   1189		*max_sectors = ata_tf_to_lba(&tf) + 1;
   1190	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
   1191		(*max_sectors)--;
   1192	return 0;
   1193}
   1194
   1195/**
   1196 *	ata_set_max_sectors - Set max sectors
   1197 *	@dev: target device
   1198 *	@new_sectors: new max sectors value to set for the device
   1199 *
   1200 *	Set max sectors of @dev to @new_sectors.
   1201 *
   1202 *	RETURNS:
   1203 *	0 on success, -EACCES if command is aborted or denied (due to
   1204 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
   1205 *	errors.
   1206 */
   1207static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
   1208{
   1209	unsigned int err_mask;
   1210	struct ata_taskfile tf;
   1211	int lba48 = ata_id_has_lba48(dev->id);
   1212
   1213	new_sectors--;
   1214
   1215	ata_tf_init(dev, &tf);
   1216
   1217	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
   1218
   1219	if (lba48) {
   1220		tf.command = ATA_CMD_SET_MAX_EXT;
   1221		tf.flags |= ATA_TFLAG_LBA48;
   1222
   1223		tf.hob_lbal = (new_sectors >> 24) & 0xff;
   1224		tf.hob_lbam = (new_sectors >> 32) & 0xff;
   1225		tf.hob_lbah = (new_sectors >> 40) & 0xff;
   1226	} else {
   1227		tf.command = ATA_CMD_SET_MAX;
   1228
   1229		tf.device |= (new_sectors >> 24) & 0xf;
   1230	}
   1231
   1232	tf.protocol = ATA_PROT_NODATA;
   1233	tf.device |= ATA_LBA;
   1234
   1235	tf.lbal = (new_sectors >> 0) & 0xff;
   1236	tf.lbam = (new_sectors >> 8) & 0xff;
   1237	tf.lbah = (new_sectors >> 16) & 0xff;
   1238
   1239	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
   1240	if (err_mask) {
   1241		ata_dev_warn(dev,
   1242			     "failed to set max address (err_mask=0x%x)\n",
   1243			     err_mask);
   1244		if (err_mask == AC_ERR_DEV &&
   1245		    (tf.error & (ATA_ABORTED | ATA_IDNF)))
   1246			return -EACCES;
   1247		return -EIO;
   1248	}
   1249
   1250	return 0;
   1251}
   1252
   1253/**
   1254 *	ata_hpa_resize		-	Resize a device with an HPA set
   1255 *	@dev: Device to resize
   1256 *
   1257 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
   1258 *	it if required to the full size of the media. The caller must check
   1259 *	the drive has the HPA feature set enabled.
   1260 *
   1261 *	RETURNS:
   1262 *	0 on success, -errno on failure.
   1263 */
   1264static int ata_hpa_resize(struct ata_device *dev)
   1265{
   1266	bool print_info = ata_dev_print_info(dev);
   1267	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
   1268	u64 sectors = ata_id_n_sectors(dev->id);
   1269	u64 native_sectors;
   1270	int rc;
   1271
   1272	/* do we need to do it? */
   1273	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
   1274	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
   1275	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
   1276		return 0;
   1277
   1278	/* read native max address */
   1279	rc = ata_read_native_max_address(dev, &native_sectors);
   1280	if (rc) {
   1281		/* If device aborted the command or HPA isn't going to
   1282		 * be unlocked, skip HPA resizing.
   1283		 */
   1284		if (rc == -EACCES || !unlock_hpa) {
   1285			ata_dev_warn(dev,
   1286				     "HPA support seems broken, skipping HPA handling\n");
   1287			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
   1288
   1289			/* we can continue if device aborted the command */
   1290			if (rc == -EACCES)
   1291				rc = 0;
   1292		}
   1293
   1294		return rc;
   1295	}
   1296	dev->n_native_sectors = native_sectors;
   1297
   1298	/* nothing to do? */
   1299	if (native_sectors <= sectors || !unlock_hpa) {
   1300		if (!print_info || native_sectors == sectors)
   1301			return 0;
   1302
   1303		if (native_sectors > sectors)
   1304			ata_dev_info(dev,
   1305				"HPA detected: current %llu, native %llu\n",
   1306				(unsigned long long)sectors,
   1307				(unsigned long long)native_sectors);
   1308		else if (native_sectors < sectors)
   1309			ata_dev_warn(dev,
   1310				"native sectors (%llu) is smaller than sectors (%llu)\n",
   1311				(unsigned long long)native_sectors,
   1312				(unsigned long long)sectors);
   1313		return 0;
   1314	}
   1315
   1316	/* let's unlock HPA */
   1317	rc = ata_set_max_sectors(dev, native_sectors);
   1318	if (rc == -EACCES) {
   1319		/* if device aborted the command, skip HPA resizing */
   1320		ata_dev_warn(dev,
   1321			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
   1322			     (unsigned long long)sectors,
   1323			     (unsigned long long)native_sectors);
   1324		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
   1325		return 0;
   1326	} else if (rc)
   1327		return rc;
   1328
   1329	/* re-read IDENTIFY data */
   1330	rc = ata_dev_reread_id(dev, 0);
   1331	if (rc) {
   1332		ata_dev_err(dev,
   1333			    "failed to re-read IDENTIFY data after HPA resizing\n");
   1334		return rc;
   1335	}
   1336
   1337	if (print_info) {
   1338		u64 new_sectors = ata_id_n_sectors(dev->id);
   1339		ata_dev_info(dev,
   1340			"HPA unlocked: %llu -> %llu, native %llu\n",
   1341			(unsigned long long)sectors,
   1342			(unsigned long long)new_sectors,
   1343			(unsigned long long)native_sectors);
   1344	}
   1345
   1346	return 0;
   1347}
   1348
   1349/**
   1350 *	ata_dump_id - IDENTIFY DEVICE info debugging output
   1351 *	@dev: device from which the information is fetched
   1352 *	@id: IDENTIFY DEVICE page to dump
   1353 *
   1354 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
   1355 *	page.
   1356 *
   1357 *	LOCKING:
   1358 *	caller.
   1359 */
   1360
   1361static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
   1362{
   1363	ata_dev_dbg(dev,
   1364		"49==0x%04x  53==0x%04x  63==0x%04x  64==0x%04x  75==0x%04x\n"
   1365		"80==0x%04x  81==0x%04x  82==0x%04x  83==0x%04x  84==0x%04x\n"
   1366		"88==0x%04x  93==0x%04x\n",
   1367		id[49], id[53], id[63], id[64], id[75], id[80],
   1368		id[81], id[82], id[83], id[84], id[88], id[93]);
   1369}
   1370
   1371/**
   1372 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
   1373 *	@id: IDENTIFY data to compute xfer mask from
   1374 *
   1375 *	Compute the xfermask for this device. This is not as trivial
   1376 *	as it seems if we must consider early devices correctly.
   1377 *
   1378 *	FIXME: pre IDE drive timing (do we care ?).
   1379 *
   1380 *	LOCKING:
   1381 *	None.
   1382 *
   1383 *	RETURNS:
   1384 *	Computed xfermask
   1385 */
   1386unsigned long ata_id_xfermask(const u16 *id)
   1387{
   1388	unsigned long pio_mask, mwdma_mask, udma_mask;
   1389
   1390	/* Usual case. Word 53 indicates word 64 is valid */
   1391	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
   1392		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
   1393		pio_mask <<= 3;
   1394		pio_mask |= 0x7;
   1395	} else {
   1396		/* If word 64 isn't valid then Word 51 high byte holds
   1397		 * the PIO timing number for the maximum. Turn it into
   1398		 * a mask.
   1399		 */
   1400		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
   1401		if (mode < 5)	/* Valid PIO range */
   1402			pio_mask = (2 << mode) - 1;
   1403		else
   1404			pio_mask = 1;
   1405
    1406		/* But wait... there's more. Design your standards by
    1407		 * committee and you too can get a free iordy field to
    1408		 * process. However, it is the speeds, not the modes, that
    1409		 * are supported... Note that drivers using the timing API
    1410		 * will get this right anyway.
   1411		 */
   1412	}
   1413
   1414	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
   1415
   1416	if (ata_id_is_cfa(id)) {
   1417		/*
   1418		 *	Process compact flash extended modes
   1419		 */
   1420		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
   1421		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
   1422
   1423		if (pio)
   1424			pio_mask |= (1 << 5);
   1425		if (pio > 1)
   1426			pio_mask |= (1 << 6);
   1427		if (dma)
   1428			mwdma_mask |= (1 << 3);
   1429		if (dma > 1)
   1430			mwdma_mask |= (1 << 4);
   1431	}
   1432
   1433	udma_mask = 0;
   1434	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
   1435		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
   1436
   1437	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
   1438}
   1439EXPORT_SYMBOL_GPL(ata_id_xfermask);
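/*
 * Worked example (added for this listing): with word 53 bit 1 set and
 * id[ATA_ID_PIO_MODES] == 0x0003 (PIO3 and PIO4 supported), pio_mask
 * becomes (0x03 << 3) | 0x7 == 0x1f, i.e. PIO0-PIO4; modes 0-2 are
 * always assumed to be supported.
 */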
   1440
   1441static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
   1442{
   1443	struct completion *waiting = qc->private_data;
   1444
   1445	complete(waiting);
   1446}
   1447
   1448/**
   1449 *	ata_exec_internal_sg - execute libata internal command
   1450 *	@dev: Device to which the command is sent
   1451 *	@tf: Taskfile registers for the command and the result
   1452 *	@cdb: CDB for packet command
   1453 *	@dma_dir: Data transfer direction of the command
   1454 *	@sgl: sg list for the data buffer of the command
   1455 *	@n_elem: Number of sg entries
   1456 *	@timeout: Timeout in msecs (0 for default)
   1457 *
   1458 *	Executes libata internal command with timeout.  @tf contains
   1459 *	command on entry and result on return.  Timeout and error
   1460 *	conditions are reported via return value.  No recovery action
    1461 *	is taken after a command times out.  It's the caller's duty to
   1462 *	clean up after timeout.
   1463 *
   1464 *	LOCKING:
   1465 *	None.  Should be called with kernel context, might sleep.
   1466 *
   1467 *	RETURNS:
   1468 *	Zero on success, AC_ERR_* mask on failure
   1469 */
   1470unsigned ata_exec_internal_sg(struct ata_device *dev,
   1471			      struct ata_taskfile *tf, const u8 *cdb,
   1472			      int dma_dir, struct scatterlist *sgl,
   1473			      unsigned int n_elem, unsigned long timeout)
   1474{
   1475	struct ata_link *link = dev->link;
   1476	struct ata_port *ap = link->ap;
   1477	u8 command = tf->command;
   1478	int auto_timeout = 0;
   1479	struct ata_queued_cmd *qc;
   1480	unsigned int preempted_tag;
   1481	u32 preempted_sactive;
   1482	u64 preempted_qc_active;
   1483	int preempted_nr_active_links;
   1484	DECLARE_COMPLETION_ONSTACK(wait);
   1485	unsigned long flags;
   1486	unsigned int err_mask;
   1487	int rc;
   1488
   1489	spin_lock_irqsave(ap->lock, flags);
   1490
   1491	/* no internal command while frozen */
   1492	if (ap->pflags & ATA_PFLAG_FROZEN) {
   1493		spin_unlock_irqrestore(ap->lock, flags);
   1494		return AC_ERR_SYSTEM;
   1495	}
   1496
   1497	/* initialize internal qc */
   1498	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
   1499
   1500	qc->tag = ATA_TAG_INTERNAL;
   1501	qc->hw_tag = 0;
   1502	qc->scsicmd = NULL;
   1503	qc->ap = ap;
   1504	qc->dev = dev;
   1505	ata_qc_reinit(qc);
   1506
   1507	preempted_tag = link->active_tag;
   1508	preempted_sactive = link->sactive;
   1509	preempted_qc_active = ap->qc_active;
   1510	preempted_nr_active_links = ap->nr_active_links;
   1511	link->active_tag = ATA_TAG_POISON;
   1512	link->sactive = 0;
   1513	ap->qc_active = 0;
   1514	ap->nr_active_links = 0;
   1515
   1516	/* prepare & issue qc */
   1517	qc->tf = *tf;
   1518	if (cdb)
   1519		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
   1520
   1521	/* some SATA bridges need us to indicate data xfer direction */
   1522	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
   1523	    dma_dir == DMA_FROM_DEVICE)
   1524		qc->tf.feature |= ATAPI_DMADIR;
   1525
   1526	qc->flags |= ATA_QCFLAG_RESULT_TF;
   1527	qc->dma_dir = dma_dir;
   1528	if (dma_dir != DMA_NONE) {
   1529		unsigned int i, buflen = 0;
   1530		struct scatterlist *sg;
   1531
   1532		for_each_sg(sgl, sg, n_elem, i)
   1533			buflen += sg->length;
   1534
   1535		ata_sg_init(qc, sgl, n_elem);
   1536		qc->nbytes = buflen;
   1537	}
   1538
   1539	qc->private_data = &wait;
   1540	qc->complete_fn = ata_qc_complete_internal;
   1541
   1542	ata_qc_issue(qc);
   1543
   1544	spin_unlock_irqrestore(ap->lock, flags);
   1545
   1546	if (!timeout) {
   1547		if (ata_probe_timeout)
   1548			timeout = ata_probe_timeout * 1000;
   1549		else {
   1550			timeout = ata_internal_cmd_timeout(dev, command);
   1551			auto_timeout = 1;
   1552		}
   1553	}
   1554
   1555	if (ap->ops->error_handler)
   1556		ata_eh_release(ap);
   1557
   1558	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
   1559
   1560	if (ap->ops->error_handler)
   1561		ata_eh_acquire(ap);
   1562
   1563	ata_sff_flush_pio_task(ap);
   1564
   1565	if (!rc) {
   1566		spin_lock_irqsave(ap->lock, flags);
   1567
   1568		/* We're racing with irq here.  If we lose, the
   1569		 * following test prevents us from completing the qc
   1570		 * twice.  If we win, the port is frozen and will be
   1571		 * cleaned up by ->post_internal_cmd().
   1572		 */
   1573		if (qc->flags & ATA_QCFLAG_ACTIVE) {
   1574			qc->err_mask |= AC_ERR_TIMEOUT;
   1575
   1576			if (ap->ops->error_handler)
   1577				ata_port_freeze(ap);
   1578			else
   1579				ata_qc_complete(qc);
   1580
   1581			ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
   1582				     command);
   1583		}
   1584
   1585		spin_unlock_irqrestore(ap->lock, flags);
   1586	}
   1587
   1588	/* do post_internal_cmd */
   1589	if (ap->ops->post_internal_cmd)
   1590		ap->ops->post_internal_cmd(qc);
   1591
   1592	/* perform minimal error analysis */
   1593	if (qc->flags & ATA_QCFLAG_FAILED) {
   1594		if (qc->result_tf.status & (ATA_ERR | ATA_DF))
   1595			qc->err_mask |= AC_ERR_DEV;
   1596
   1597		if (!qc->err_mask)
   1598			qc->err_mask |= AC_ERR_OTHER;
   1599
   1600		if (qc->err_mask & ~AC_ERR_OTHER)
   1601			qc->err_mask &= ~AC_ERR_OTHER;
   1602	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
   1603		qc->result_tf.status |= ATA_SENSE;
   1604	}
   1605
   1606	/* finish up */
   1607	spin_lock_irqsave(ap->lock, flags);
   1608
   1609	*tf = qc->result_tf;
   1610	err_mask = qc->err_mask;
   1611
   1612	ata_qc_free(qc);
   1613	link->active_tag = preempted_tag;
   1614	link->sactive = preempted_sactive;
   1615	ap->qc_active = preempted_qc_active;
   1616	ap->nr_active_links = preempted_nr_active_links;
   1617
   1618	spin_unlock_irqrestore(ap->lock, flags);
   1619
   1620	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
   1621		ata_internal_cmd_timed_out(dev, command);
   1622
   1623	return err_mask;
   1624}
   1625
   1626/**
   1627 *	ata_exec_internal - execute libata internal command
   1628 *	@dev: Device to which the command is sent
   1629 *	@tf: Taskfile registers for the command and the result
   1630 *	@cdb: CDB for packet command
   1631 *	@dma_dir: Data transfer direction of the command
   1632 *	@buf: Data buffer of the command
   1633 *	@buflen: Length of data buffer
   1634 *	@timeout: Timeout in msecs (0 for default)
   1635 *
   1636 *	Wrapper around ata_exec_internal_sg() which takes simple
   1637 *	buffer instead of sg list.
   1638 *
   1639 *	LOCKING:
   1640 *	None.  Should be called with kernel context, might sleep.
   1641 *
   1642 *	RETURNS:
   1643 *	Zero on success, AC_ERR_* mask on failure
   1644 */
   1645unsigned ata_exec_internal(struct ata_device *dev,
   1646			   struct ata_taskfile *tf, const u8 *cdb,
   1647			   int dma_dir, void *buf, unsigned int buflen,
   1648			   unsigned long timeout)
   1649{
   1650	struct scatterlist *psg = NULL, sg;
   1651	unsigned int n_elem = 0;
   1652
   1653	if (dma_dir != DMA_NONE) {
   1654		WARN_ON(!buf);
   1655		sg_init_one(&sg, buf, buflen);
   1656		psg = &sg;
   1657		n_elem++;
   1658	}
   1659
   1660	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
   1661				    timeout);
   1662}
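/*
 * Illustrative sketch (added for this listing, not part of the original
 * source): a minimal no-data internal command, mirroring the way
 * ata_read_native_max_address() above uses this helper.
 */
#if 0
static int example_check_power_mode(struct ata_device *dev, u8 *power_mode)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_CHK_POWER;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol = ATA_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask)
		return -EIO;

	/* CHECK POWER MODE returns the power state in the sector count */
	*power_mode = tf.nsect;
	return 0;
}
#endif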
   1663
   1664/**
   1665 *	ata_pio_need_iordy	-	check if iordy needed
   1666 *	@adev: ATA device
   1667 *
   1668 *	Check if the current speed of the device requires IORDY. Used
   1669 *	by various controllers for chip configuration.
   1670 */
   1671unsigned int ata_pio_need_iordy(const struct ata_device *adev)
   1672{
   1673	/* Don't set IORDY if we're preparing for reset.  IORDY may
   1674	 * lead to controller lock up on certain controllers if the
   1675	 * port is not occupied.  See bko#11703 for details.
   1676	 */
   1677	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
   1678		return 0;
   1679	/* Controller doesn't support IORDY.  Probably a pointless
   1680	 * check as the caller should know this.
   1681	 */
   1682	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
   1683		return 0;
   1684	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
   1685	if (ata_id_is_cfa(adev->id)
   1686	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
   1687		return 0;
   1688	/* PIO3 and higher it is mandatory */
   1689	if (adev->pio_mode > XFER_PIO_2)
   1690		return 1;
   1691	/* We turn it on when possible */
   1692	if (ata_id_has_iordy(adev->id))
   1693		return 1;
   1694	return 0;
   1695}
   1696EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
   1697
   1698/**
   1699 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
   1700 *	@adev: ATA device
   1701 *
   1702 *	Compute the highest mode possible if we are not using iordy. Return
   1703 *	-1 if no iordy mode is available.
   1704 */
   1705static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
   1706{
   1707	/* If we have no drive specific rule, then PIO 2 is non IORDY */
   1708	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
   1709		u16 pio = adev->id[ATA_ID_EIDE_PIO];
   1710		/* Is the speed faster than the drive allows non IORDY ? */
   1711		if (pio) {
   1712			/* This is cycle times not frequency - watch the logic! */
   1713			if (pio > 240)	/* PIO2 is 240nS per cycle */
   1714				return 3 << ATA_SHIFT_PIO;
   1715			return 7 << ATA_SHIFT_PIO;
   1716		}
   1717	}
   1718	return 3 << ATA_SHIFT_PIO;
   1719}
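/*
 * Worked example (added for this listing): if id[ATA_ID_EIDE_PIO]
 * reports a minimum non-IORDY cycle time of 383 ns, that is slower
 * than the 240 ns PIO2 cycle, so only 3 << ATA_SHIFT_PIO (PIO0-1) is
 * safe without IORDY; a drive reporting 240 ns or less gets
 * 7 << ATA_SHIFT_PIO (PIO0-2).
 */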
   1720
   1721/**
   1722 *	ata_do_dev_read_id		-	default ID read method
   1723 *	@dev: device
   1724 *	@tf: proposed taskfile
   1725 *	@id: data buffer
   1726 *
   1727 *	Issue the identify taskfile and hand back the buffer containing
    1728 *	identify data. For some RAID controllers and for pre-ATA devices
    1729 *	this function is wrapped or replaced by the driver.
   1730 */
   1731unsigned int ata_do_dev_read_id(struct ata_device *dev,
   1732				struct ata_taskfile *tf, __le16 *id)
   1733{
   1734	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
   1735				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
   1736}
   1737EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
   1738
   1739/**
   1740 *	ata_dev_read_id - Read ID data from the specified device
   1741 *	@dev: target device
   1742 *	@p_class: pointer to class of the target device (may be changed)
   1743 *	@flags: ATA_READID_* flags
   1744 *	@id: buffer to read IDENTIFY data into
   1745 *
   1746 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
   1747 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
   1748 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
   1749 *	for pre-ATA4 drives.
   1750 *
   1751 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
   1752 *	now we abort if we hit that case.
   1753 *
   1754 *	LOCKING:
   1755 *	Kernel thread context (may sleep)
   1756 *
   1757 *	RETURNS:
   1758 *	0 on success, -errno otherwise.
   1759 */
   1760int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
   1761		    unsigned int flags, u16 *id)
   1762{
   1763	struct ata_port *ap = dev->link->ap;
   1764	unsigned int class = *p_class;
   1765	struct ata_taskfile tf;
   1766	unsigned int err_mask = 0;
   1767	const char *reason;
   1768	bool is_semb = class == ATA_DEV_SEMB;
   1769	int may_fallback = 1, tried_spinup = 0;
   1770	int rc;
   1771
   1772retry:
   1773	ata_tf_init(dev, &tf);
   1774
   1775	switch (class) {
   1776	case ATA_DEV_SEMB:
   1777		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
   1778		fallthrough;
   1779	case ATA_DEV_ATA:
   1780	case ATA_DEV_ZAC:
   1781		tf.command = ATA_CMD_ID_ATA;
   1782		break;
   1783	case ATA_DEV_ATAPI:
   1784		tf.command = ATA_CMD_ID_ATAPI;
   1785		break;
   1786	default:
   1787		rc = -ENODEV;
   1788		reason = "unsupported class";
   1789		goto err_out;
   1790	}
   1791
   1792	tf.protocol = ATA_PROT_PIO;
   1793
   1794	/* Some devices choke if TF registers contain garbage.  Make
   1795	 * sure those are properly initialized.
   1796	 */
   1797	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
   1798
   1799	/* Device presence detection is unreliable on some
   1800	 * controllers.  Always poll IDENTIFY if available.
   1801	 */
   1802	tf.flags |= ATA_TFLAG_POLLING;
   1803
   1804	if (ap->ops->read_id)
   1805		err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
   1806	else
   1807		err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);
   1808
   1809	if (err_mask) {
   1810		if (err_mask & AC_ERR_NODEV_HINT) {
   1811			ata_dev_dbg(dev, "NODEV after polling detection\n");
   1812			return -ENOENT;
   1813		}
   1814
   1815		if (is_semb) {
   1816			ata_dev_info(dev,
   1817		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
   1818			/* SEMB is not supported yet */
   1819			*p_class = ATA_DEV_SEMB_UNSUP;
   1820			return 0;
   1821		}
   1822
   1823		if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
   1824			/* Device or controller might have reported
   1825			 * the wrong device class.  Give a shot at the
   1826			 * other IDENTIFY if the current one is
   1827			 * aborted by the device.
   1828			 */
   1829			if (may_fallback) {
   1830				may_fallback = 0;
   1831
   1832				if (class == ATA_DEV_ATA)
   1833					class = ATA_DEV_ATAPI;
   1834				else
   1835					class = ATA_DEV_ATA;
   1836				goto retry;
   1837			}
   1838
   1839			/* Control reaches here iff the device aborted
   1840			 * both flavors of IDENTIFYs which happens
   1841			 * sometimes with phantom devices.
   1842			 */
   1843			ata_dev_dbg(dev,
   1844				    "both IDENTIFYs aborted, assuming NODEV\n");
   1845			return -ENOENT;
   1846		}
   1847
   1848		rc = -EIO;
   1849		reason = "I/O error";
   1850		goto err_out;
   1851	}
   1852
   1853	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
   1854		ata_dev_info(dev, "dumping IDENTIFY data, "
   1855			    "class=%d may_fallback=%d tried_spinup=%d\n",
   1856			    class, may_fallback, tried_spinup);
   1857		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
   1858			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
   1859	}
   1860
   1861	/* Falling back doesn't make sense if ID data was read
   1862	 * successfully at least once.
   1863	 */
   1864	may_fallback = 0;
   1865
   1866	swap_buf_le16(id, ATA_ID_WORDS);
   1867
   1868	/* sanity check */
   1869	rc = -EINVAL;
   1870	reason = "device reports invalid type";
   1871
   1872	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
   1873		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
   1874			goto err_out;
   1875		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
   1876							ata_id_is_ata(id)) {
   1877			ata_dev_dbg(dev,
   1878				"host indicates ignore ATA devices, ignored\n");
   1879			return -ENOENT;
   1880		}
   1881	} else {
   1882		if (ata_id_is_ata(id))
   1883			goto err_out;
   1884	}
   1885
   1886	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
   1887		tried_spinup = 1;
   1888		/*
   1889		 * Drive powered-up in standby mode, and requires a specific
   1890		 * SET_FEATURES spin-up subcommand before it will accept
   1891		 * anything other than the original IDENTIFY command.
   1892		 */
   1893		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
   1894		if (err_mask && id[2] != 0x738c) {
   1895			rc = -EIO;
   1896			reason = "SPINUP failed";
   1897			goto err_out;
   1898		}
   1899		/*
   1900		 * If the drive initially returned incomplete IDENTIFY info,
   1901		 * we now must reissue the IDENTIFY command.
   1902		 */
   1903		if (id[2] == 0x37c8)
   1904			goto retry;
   1905	}
   1906
   1907	if ((flags & ATA_READID_POSTRESET) &&
   1908	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
   1909		/*
   1910		 * The exact sequence expected by certain pre-ATA4 drives is:
   1911		 * SRST RESET
   1912		 * IDENTIFY (optional in early ATA)
   1913		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
   1914		 * anything else..
   1915		 * Some drives were very specific about that exact sequence.
   1916		 *
   1917		 * Note that ATA4 says lba is mandatory so the second check
   1918		 * should never trigger.
   1919		 */
   1920		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
   1921			err_mask = ata_dev_init_params(dev, id[3], id[6]);
   1922			if (err_mask) {
   1923				rc = -EIO;
   1924				reason = "INIT_DEV_PARAMS failed";
   1925				goto err_out;
   1926			}
   1927
   1928			/* current CHS translation info (id[53-58]) might be
   1929			 * changed. reread the identify device info.
   1930			 */
   1931			flags &= ~ATA_READID_POSTRESET;
   1932			goto retry;
   1933		}
   1934	}
   1935
   1936	*p_class = class;
   1937
   1938	return 0;
   1939
   1940 err_out:
   1941	ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
   1942		     reason, err_mask);
   1943	return rc;
   1944}
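
/*
 * The retry flow above in short: issue IDENTIFY; if the device aborts
 * it, try the other IDENTIFY flavor once; if the data says the drive
 * powered up in standby, send the SET FEATURES spin-up subcommand and
 * reread; after a reset on pre-ATA4 drives, issue INITIALIZE DEVICE
 * PARAMETERS and reread so the CHS translation info is current.
 */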
   1945
   1946/**
   1947 *	ata_read_log_page - read a specific log page
   1948 *	@dev: target device
   1949 *	@log: log to read
   1950 *	@page: page to read
   1951 *	@buf: buffer to store read page
   1952 *	@sectors: number of sectors to read
   1953 *
   1954 *	Read log page using READ_LOG_EXT command.
   1955 *
   1956 *	LOCKING:
   1957 *	Kernel thread context (may sleep).
   1958 *
   1959 *	RETURNS:
   1960 *	0 on success, AC_ERR_* mask otherwise.
   1961 */
   1962unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
   1963			       u8 page, void *buf, unsigned int sectors)
   1964{
   1965	unsigned long ap_flags = dev->link->ap->flags;
   1966	struct ata_taskfile tf;
   1967	unsigned int err_mask;
   1968	bool dma = false;
   1969
   1970	ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);
   1971
   1972	/*
   1973	 * Return error without actually issuing the command on controllers
   1974	 * which e.g. lockup on a read log page.
   1975	 */
   1976	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
   1977		return AC_ERR_DEV;
   1978
   1979retry:
   1980	ata_tf_init(dev, &tf);
   1981	if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
   1982	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
   1983		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
   1984		tf.protocol = ATA_PROT_DMA;
   1985		dma = true;
   1986	} else {
   1987		tf.command = ATA_CMD_READ_LOG_EXT;
   1988		tf.protocol = ATA_PROT_PIO;
   1989		dma = false;
   1990	}
   1991	tf.lbal = log;
   1992	tf.lbam = page;
   1993	tf.nsect = sectors;
   1994	tf.hob_nsect = sectors >> 8;
   1995	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
   1996
   1997	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
   1998				     buf, sectors * ATA_SECT_SIZE, 0);
   1999
   2000	if (err_mask) {
   2001		if (dma) {
   2002			dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
   2003			goto retry;
   2004		}
   2005		ata_dev_err(dev,
   2006			    "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
   2007			    (unsigned int)log, (unsigned int)page, err_mask);
   2008	}
   2009
   2010	return err_mask;
   2011}
   2012
   2013static int ata_log_supported(struct ata_device *dev, u8 log)
   2014{
   2015	struct ata_port *ap = dev->link->ap;
   2016
   2017	if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
   2018		return 0;
   2019
   2020	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
   2021		return 0;
   2022	return get_unaligned_le16(&ap->sector_buf[log * 2]);
   2023}
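
/*
 * The general purpose log directory (log 0x00) holds one le16 word per
 * log address giving that log's length in 512-byte pages, so the
 * non-zero return above doubles as the page count -- see the buffer
 * sizing in ata_dev_config_cpr() below.
 */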
   2024
   2025static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
   2026{
   2027	struct ata_port *ap = dev->link->ap;
   2028	unsigned int err, i;
   2029
   2030	if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
   2031		return false;
   2032
   2033	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
   2034		/*
   2035		 * IDENTIFY DEVICE data log is defined as mandatory starting
   2036		 * with ACS-3 (ATA version 10). Warn about the missing log
   2037		 * for drives which implement this ATA level or above.
   2038		 */
   2039		if (ata_id_major_version(dev->id) >= 10)
   2040			ata_dev_warn(dev,
   2041				"ATA Identify Device Log not supported\n");
   2042		dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
   2043		return false;
   2044	}
   2045
   2046	/*
   2047	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
   2048	 * supported.
   2049	 */
   2050	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
   2051				1);
   2052	if (err)
   2053		return false;
   2054
   2055	for (i = 0; i < ap->sector_buf[8]; i++) {
   2056		if (ap->sector_buf[9 + i] == page)
   2057			return true;
   2058	}
   2059
   2060	return false;
   2061}
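
/*
 * Page 0 of the IDENTIFY DEVICE data log is the list of supported
 * pages: byte 8 carries the number of entries and the page numbers
 * follow from byte 9 on, which is exactly what the loop above scans.
 */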
   2062
   2063static int ata_do_link_spd_horkage(struct ata_device *dev)
   2064{
   2065	struct ata_link *plink = ata_dev_phys_link(dev);
   2066	u32 target, target_limit;
   2067
   2068	if (!sata_scr_valid(plink))
   2069		return 0;
   2070
   2071	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
   2072		target = 1;
   2073	else
   2074		return 0;
   2075
   2076	target_limit = (1 << target) - 1;
   2077
   2078	/* if already on stricter limit, no need to push further */
   2079	if (plink->sata_spd_limit <= target_limit)
   2080		return 0;
   2081
   2082	plink->sata_spd_limit = target_limit;
   2083
   2084	/* Request another EH round by returning -EAGAIN if link is
   2085	 * going faster than the target speed.  Forward progress is
   2086	 * guaranteed by setting sata_spd_limit to target_limit above.
   2087	 */
   2088	if (plink->sata_spd > target) {
   2089		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
   2090			     sata_spd_string(target));
   2091		return -EAGAIN;
   2092	}
   2093	return 0;
   2094}
   2095
   2096static inline u8 ata_dev_knobble(struct ata_device *dev)
   2097{
   2098	struct ata_port *ap = dev->link->ap;
   2099
   2100	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
   2101		return 0;
   2102
   2103	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
   2104}
   2105
   2106static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
   2107{
   2108	struct ata_port *ap = dev->link->ap;
   2109	unsigned int err_mask;
   2110
   2111	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
   2112		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
   2113		return;
   2114	}
   2115	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
   2116				     0, ap->sector_buf, 1);
   2117	if (!err_mask) {
   2118		u8 *cmds = dev->ncq_send_recv_cmds;
   2119
   2120		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
   2121		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
   2122
   2123		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
   2124			ata_dev_dbg(dev, "disabling queued TRIM support\n");
   2125			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
   2126				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
   2127		}
   2128	}
   2129}
   2130
   2131static void ata_dev_config_ncq_non_data(struct ata_device *dev)
   2132{
   2133	struct ata_port *ap = dev->link->ap;
   2134	unsigned int err_mask;
   2135
   2136	if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
   2137		ata_dev_warn(dev,
    2138			     "NCQ Non-Data Log not supported\n");
   2139		return;
   2140	}
   2141	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
   2142				     0, ap->sector_buf, 1);
   2143	if (!err_mask) {
   2144		u8 *cmds = dev->ncq_non_data_cmds;
   2145
   2146		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
   2147	}
   2148}
   2149
   2150static void ata_dev_config_ncq_prio(struct ata_device *dev)
   2151{
   2152	struct ata_port *ap = dev->link->ap;
   2153	unsigned int err_mask;
   2154
   2155	if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
   2156		return;
   2157
   2158	err_mask = ata_read_log_page(dev,
   2159				     ATA_LOG_IDENTIFY_DEVICE,
   2160				     ATA_LOG_SATA_SETTINGS,
   2161				     ap->sector_buf,
   2162				     1);
   2163	if (err_mask)
   2164		goto not_supported;
   2165
   2166	if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
   2167		goto not_supported;
   2168
   2169	dev->flags |= ATA_DFLAG_NCQ_PRIO;
   2170
   2171	return;
   2172
   2173not_supported:
   2174	dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
   2175	dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
   2176}
   2177
   2178static bool ata_dev_check_adapter(struct ata_device *dev,
   2179				  unsigned short vendor_id)
   2180{
   2181	struct pci_dev *pcidev = NULL;
   2182	struct device *parent_dev = NULL;
   2183
   2184	for (parent_dev = dev->tdev.parent; parent_dev != NULL;
   2185	     parent_dev = parent_dev->parent) {
   2186		if (dev_is_pci(parent_dev)) {
   2187			pcidev = to_pci_dev(parent_dev);
   2188			if (pcidev->vendor == vendor_id)
   2189				return true;
   2190			break;
   2191		}
   2192	}
   2193
   2194	return false;
   2195}
   2196
   2197static int ata_dev_config_ncq(struct ata_device *dev,
   2198			       char *desc, size_t desc_sz)
   2199{
   2200	struct ata_port *ap = dev->link->ap;
   2201	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
   2202	unsigned int err_mask;
   2203	char *aa_desc = "";
   2204
   2205	if (!ata_id_has_ncq(dev->id)) {
   2206		desc[0] = '\0';
   2207		return 0;
   2208	}
   2209	if (!IS_ENABLED(CONFIG_SATA_HOST))
   2210		return 0;
   2211	if (dev->horkage & ATA_HORKAGE_NONCQ) {
   2212		snprintf(desc, desc_sz, "NCQ (not used)");
   2213		return 0;
   2214	}
   2215
   2216	if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
   2217	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
   2218		snprintf(desc, desc_sz, "NCQ (not used)");
   2219		return 0;
   2220	}
   2221
   2222	if (ap->flags & ATA_FLAG_NCQ) {
   2223		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
   2224		dev->flags |= ATA_DFLAG_NCQ;
   2225	}
   2226
   2227	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
   2228		(ap->flags & ATA_FLAG_FPDMA_AA) &&
   2229		ata_id_has_fpdma_aa(dev->id)) {
   2230		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
   2231			SATA_FPDMA_AA);
   2232		if (err_mask) {
   2233			ata_dev_err(dev,
   2234				    "failed to enable AA (error_mask=0x%x)\n",
   2235				    err_mask);
   2236			if (err_mask != AC_ERR_DEV) {
   2237				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
   2238				return -EIO;
   2239			}
   2240		} else
   2241			aa_desc = ", AA";
   2242	}
   2243
   2244	if (hdepth >= ddepth)
   2245		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
   2246	else
   2247		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
   2248			ddepth, aa_desc);
   2249
   2250	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
   2251		if (ata_id_has_ncq_send_and_recv(dev->id))
   2252			ata_dev_config_ncq_send_recv(dev);
   2253		if (ata_id_has_ncq_non_data(dev->id))
   2254			ata_dev_config_ncq_non_data(dev);
   2255		if (ata_id_has_ncq_prio(dev->id))
   2256			ata_dev_config_ncq_prio(dev);
   2257	}
   2258
   2259	return 0;
   2260}
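
/*
 * Worked example of the depth report above: a drive advertising queue
 * depth 32 behind a host that can only queue 31 commands logs
 * "NCQ (depth 31/32)"; matching depths collapse to the short
 * "NCQ (depth 32)" form.
 */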
   2261
   2262static void ata_dev_config_sense_reporting(struct ata_device *dev)
   2263{
   2264	unsigned int err_mask;
   2265
   2266	if (!ata_id_has_sense_reporting(dev->id))
   2267		return;
   2268
   2269	if (ata_id_sense_reporting_enabled(dev->id))
   2270		return;
   2271
   2272	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
   2273	if (err_mask) {
   2274		ata_dev_dbg(dev,
   2275			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
   2276			    err_mask);
   2277	}
   2278}
   2279
   2280static void ata_dev_config_zac(struct ata_device *dev)
   2281{
   2282	struct ata_port *ap = dev->link->ap;
   2283	unsigned int err_mask;
   2284	u8 *identify_buf = ap->sector_buf;
   2285
   2286	dev->zac_zones_optimal_open = U32_MAX;
   2287	dev->zac_zones_optimal_nonseq = U32_MAX;
   2288	dev->zac_zones_max_open = U32_MAX;
   2289
   2290	/*
   2291	 * Always set the 'ZAC' flag for Host-managed devices.
   2292	 */
   2293	if (dev->class == ATA_DEV_ZAC)
   2294		dev->flags |= ATA_DFLAG_ZAC;
   2295	else if (ata_id_zoned_cap(dev->id) == 0x01)
   2296		/*
   2297		 * Check for host-aware devices.
   2298		 */
   2299		dev->flags |= ATA_DFLAG_ZAC;
   2300
   2301	if (!(dev->flags & ATA_DFLAG_ZAC))
   2302		return;
   2303
   2304	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
   2305		ata_dev_warn(dev,
   2306			     "ATA Zoned Information Log not supported\n");
   2307		return;
   2308	}
   2309
   2310	/*
   2311	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
   2312	 */
   2313	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
   2314				     ATA_LOG_ZONED_INFORMATION,
   2315				     identify_buf, 1);
   2316	if (!err_mask) {
   2317		u64 zoned_cap, opt_open, opt_nonseq, max_open;
   2318
   2319		zoned_cap = get_unaligned_le64(&identify_buf[8]);
   2320		if ((zoned_cap >> 63))
   2321			dev->zac_zoned_cap = (zoned_cap & 1);
   2322		opt_open = get_unaligned_le64(&identify_buf[24]);
   2323		if ((opt_open >> 63))
   2324			dev->zac_zones_optimal_open = (u32)opt_open;
   2325		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
   2326		if ((opt_nonseq >> 63))
   2327			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
   2328		max_open = get_unaligned_le64(&identify_buf[40]);
   2329		if ((max_open >> 63))
   2330			dev->zac_zones_max_open = (u32)max_open;
   2331	}
   2332}
   2333
   2334static void ata_dev_config_trusted(struct ata_device *dev)
   2335{
   2336	struct ata_port *ap = dev->link->ap;
   2337	u64 trusted_cap;
   2338	unsigned int err;
   2339
   2340	if (!ata_id_has_trusted(dev->id))
   2341		return;
   2342
   2343	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
   2344		ata_dev_warn(dev,
   2345			     "Security Log not supported\n");
   2346		return;
   2347	}
   2348
   2349	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
   2350			ap->sector_buf, 1);
   2351	if (err)
   2352		return;
   2353
   2354	trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
   2355	if (!(trusted_cap & (1ULL << 63))) {
   2356		ata_dev_dbg(dev,
   2357			    "Trusted Computing capability qword not valid!\n");
   2358		return;
   2359	}
   2360
   2361	if (trusted_cap & (1 << 0))
   2362		dev->flags |= ATA_DFLAG_TRUSTED;
   2363}
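
/*
 * ata_dev_config_zac() and ata_dev_config_trusted() lean on the same
 * ACS convention: qwords in the IDENTIFY DEVICE data log set bit 63
 * when the rest of the qword is valid, hence the (x >> 63) checks
 * before any field is consumed.
 */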
   2364
   2365static int ata_dev_config_lba(struct ata_device *dev)
   2366{
   2367	const u16 *id = dev->id;
   2368	const char *lba_desc;
   2369	char ncq_desc[24];
   2370	int ret;
   2371
   2372	dev->flags |= ATA_DFLAG_LBA;
   2373
   2374	if (ata_id_has_lba48(id)) {
   2375		lba_desc = "LBA48";
   2376		dev->flags |= ATA_DFLAG_LBA48;
   2377		if (dev->n_sectors >= (1UL << 28) &&
   2378		    ata_id_has_flush_ext(id))
   2379			dev->flags |= ATA_DFLAG_FLUSH_EXT;
   2380	} else {
   2381		lba_desc = "LBA";
   2382	}
   2383
   2384	/* config NCQ */
   2385	ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
   2386
   2387	/* print device info to dmesg */
   2388	if (ata_dev_print_info(dev))
   2389		ata_dev_info(dev,
   2390			     "%llu sectors, multi %u: %s %s\n",
   2391			     (unsigned long long)dev->n_sectors,
   2392			     dev->multi_count, lba_desc, ncq_desc);
   2393
   2394	return ret;
   2395}
   2396
   2397static void ata_dev_config_chs(struct ata_device *dev)
   2398{
   2399	const u16 *id = dev->id;
   2400
   2401	if (ata_id_current_chs_valid(id)) {
   2402		/* Current CHS translation is valid. */
   2403		dev->cylinders = id[54];
   2404		dev->heads     = id[55];
   2405		dev->sectors   = id[56];
   2406	} else {
   2407		/* Default translation */
   2408		dev->cylinders	= id[1];
   2409		dev->heads	= id[3];
   2410		dev->sectors	= id[6];
   2411	}
   2412
   2413	/* print device info to dmesg */
   2414	if (ata_dev_print_info(dev))
   2415		ata_dev_info(dev,
   2416			     "%llu sectors, multi %u, CHS %u/%u/%u\n",
   2417			     (unsigned long long)dev->n_sectors,
   2418			     dev->multi_count, dev->cylinders,
   2419			     dev->heads, dev->sectors);
   2420}
   2421
   2422static void ata_dev_config_devslp(struct ata_device *dev)
   2423{
   2424	u8 *sata_setting = dev->link->ap->sector_buf;
   2425	unsigned int err_mask;
   2426	int i, j;
   2427
   2428	/*
   2429	 * Check device sleep capability. Get DevSlp timing variables
   2430	 * from SATA Settings page of Identify Device Data Log.
   2431	 */
   2432	if (!ata_id_has_devslp(dev->id) ||
   2433	    !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
   2434		return;
   2435
   2436	err_mask = ata_read_log_page(dev,
   2437				     ATA_LOG_IDENTIFY_DEVICE,
   2438				     ATA_LOG_SATA_SETTINGS,
   2439				     sata_setting, 1);
   2440	if (err_mask)
   2441		return;
   2442
   2443	dev->flags |= ATA_DFLAG_DEVSLP;
   2444	for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
   2445		j = ATA_LOG_DEVSLP_OFFSET + i;
   2446		dev->devslp_timing[i] = sata_setting[j];
   2447	}
   2448}
   2449
   2450static void ata_dev_config_cpr(struct ata_device *dev)
   2451{
   2452	unsigned int err_mask;
   2453	size_t buf_len;
   2454	int i, nr_cpr = 0;
   2455	struct ata_cpr_log *cpr_log = NULL;
   2456	u8 *desc, *buf = NULL;
   2457
   2458	if (ata_id_major_version(dev->id) < 11)
   2459		goto out;
   2460
   2461	buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
   2462	if (buf_len == 0)
   2463		goto out;
   2464
   2465	/*
   2466	 * Read the concurrent positioning ranges log (0x47). We can have at
   2467	 * most 255 32B range descriptors plus a 64B header. This log varies in
   2468	 * size, so use the size reported in the GPL directory. Reading beyond
   2469	 * the supported length will result in an error.
   2470	 */
   2471	buf_len <<= 9;
   2472	buf = kzalloc(buf_len, GFP_KERNEL);
   2473	if (!buf)
   2474		goto out;
   2475
   2476	err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
   2477				     0, buf, buf_len >> 9);
   2478	if (err_mask)
   2479		goto out;
   2480
   2481	nr_cpr = buf[0];
   2482	if (!nr_cpr)
   2483		goto out;
   2484
   2485	cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
   2486	if (!cpr_log)
   2487		goto out;
   2488
   2489	cpr_log->nr_cpr = nr_cpr;
   2490	desc = &buf[64];
   2491	for (i = 0; i < nr_cpr; i++, desc += 32) {
   2492		cpr_log->cpr[i].num = desc[0];
   2493		cpr_log->cpr[i].num_storage_elements = desc[1];
   2494		cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
   2495		cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
   2496	}
   2497
   2498out:
   2499	swap(dev->cpr_log, cpr_log);
   2500	kfree(cpr_log);
   2501	kfree(buf);
   2502}
   2503
   2504static void ata_dev_print_features(struct ata_device *dev)
   2505{
   2506	if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
   2507		return;
   2508
   2509	ata_dev_info(dev,
   2510		     "Features:%s%s%s%s%s%s\n",
   2511		     dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
   2512		     dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
   2513		     dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
   2514		     dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
   2515		     dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
   2516		     dev->cpr_log ? " CPR" : "");
   2517}
   2518
   2519/**
   2520 *	ata_dev_configure - Configure the specified ATA/ATAPI device
   2521 *	@dev: Target device to configure
   2522 *
   2523 *	Configure @dev according to @dev->id.  Generic and low-level
   2524 *	driver specific fixups are also applied.
   2525 *
   2526 *	LOCKING:
   2527 *	Kernel thread context (may sleep)
   2528 *
   2529 *	RETURNS:
   2530 *	0 on success, -errno otherwise
   2531 */
   2532int ata_dev_configure(struct ata_device *dev)
   2533{
   2534	struct ata_port *ap = dev->link->ap;
   2535	bool print_info = ata_dev_print_info(dev);
   2536	const u16 *id = dev->id;
   2537	unsigned long xfer_mask;
   2538	unsigned int err_mask;
   2539	char revbuf[7];		/* XYZ-99\0 */
   2540	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
   2541	char modelbuf[ATA_ID_PROD_LEN+1];
   2542	int rc;
   2543
   2544	if (!ata_dev_enabled(dev)) {
   2545		ata_dev_dbg(dev, "no device\n");
   2546		return 0;
   2547	}
   2548
   2549	/* set horkage */
   2550	dev->horkage |= ata_dev_blacklisted(dev);
   2551	ata_force_horkage(dev);
   2552
   2553	if (dev->horkage & ATA_HORKAGE_DISABLE) {
   2554		ata_dev_info(dev, "unsupported device, disabling\n");
   2555		ata_dev_disable(dev);
   2556		return 0;
   2557	}
   2558
   2559	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
   2560	    dev->class == ATA_DEV_ATAPI) {
   2561		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
   2562			     atapi_enabled ? "not supported with this driver"
   2563			     : "disabled");
   2564		ata_dev_disable(dev);
   2565		return 0;
   2566	}
   2567
   2568	rc = ata_do_link_spd_horkage(dev);
   2569	if (rc)
   2570		return rc;
   2571
   2572	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
   2573	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
   2574	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
   2575		dev->horkage |= ATA_HORKAGE_NOLPM;
   2576
   2577	if (ap->flags & ATA_FLAG_NO_LPM)
   2578		dev->horkage |= ATA_HORKAGE_NOLPM;
   2579
   2580	if (dev->horkage & ATA_HORKAGE_NOLPM) {
   2581		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
   2582		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
   2583	}
   2584
   2585	/* let ACPI work its magic */
   2586	rc = ata_acpi_on_devcfg(dev);
   2587	if (rc)
   2588		return rc;
   2589
   2590	/* massage HPA, do it early as it might change IDENTIFY data */
   2591	rc = ata_hpa_resize(dev);
   2592	if (rc)
   2593		return rc;
   2594
   2595	/* print device capabilities */
   2596	ata_dev_dbg(dev,
   2597		    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
   2598		    "85:%04x 86:%04x 87:%04x 88:%04x\n",
   2599		    __func__,
   2600		    id[49], id[82], id[83], id[84],
   2601		    id[85], id[86], id[87], id[88]);
   2602
   2603	/* initialize to-be-configured parameters */
   2604	dev->flags &= ~ATA_DFLAG_CFG_MASK;
   2605	dev->max_sectors = 0;
   2606	dev->cdb_len = 0;
   2607	dev->n_sectors = 0;
   2608	dev->cylinders = 0;
   2609	dev->heads = 0;
   2610	dev->sectors = 0;
   2611	dev->multi_count = 0;
   2612
   2613	/*
   2614	 * common ATA, ATAPI feature tests
   2615	 */
   2616
   2617	/* find max transfer mode; for printk only */
   2618	xfer_mask = ata_id_xfermask(id);
   2619
   2620	ata_dump_id(dev, id);
   2621
   2622	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
   2623	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
   2624			sizeof(fwrevbuf));
   2625
   2626	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
   2627			sizeof(modelbuf));
   2628
   2629	/* ATA-specific feature tests */
   2630	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
   2631		if (ata_id_is_cfa(id)) {
   2632			/* CPRM may make this media unusable */
   2633			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
   2634				ata_dev_warn(dev,
   2635	"supports DRM functions and may not be fully accessible\n");
   2636			snprintf(revbuf, 7, "CFA");
   2637		} else {
   2638			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
   2639			/* Warn the user if the device has TPM extensions */
   2640			if (ata_id_has_tpm(id))
   2641				ata_dev_warn(dev,
   2642	"supports DRM functions and may not be fully accessible\n");
   2643		}
   2644
   2645		dev->n_sectors = ata_id_n_sectors(id);
   2646
   2647		/* get current R/W Multiple count setting */
   2648		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
   2649			unsigned int max = dev->id[47] & 0xff;
   2650			unsigned int cnt = dev->id[59] & 0xff;
   2651			/* only recognize/allow powers of two here */
   2652			if (is_power_of_2(max) && is_power_of_2(cnt))
   2653				if (cnt <= max)
   2654					dev->multi_count = cnt;
   2655		}
   2656
   2657		/* print device info to dmesg */
   2658		if (print_info)
   2659			ata_dev_info(dev, "%s: %s, %s, max %s\n",
   2660				     revbuf, modelbuf, fwrevbuf,
   2661				     ata_mode_string(xfer_mask));
   2662
   2663		if (ata_id_has_lba(id)) {
   2664			rc = ata_dev_config_lba(dev);
   2665			if (rc)
   2666				return rc;
   2667		} else {
   2668			ata_dev_config_chs(dev);
   2669		}
   2670
   2671		ata_dev_config_devslp(dev);
   2672		ata_dev_config_sense_reporting(dev);
   2673		ata_dev_config_zac(dev);
   2674		ata_dev_config_trusted(dev);
   2675		ata_dev_config_cpr(dev);
   2676		dev->cdb_len = 32;
   2677
   2678		if (print_info)
   2679			ata_dev_print_features(dev);
   2680	}
   2681
   2682	/* ATAPI-specific feature tests */
   2683	else if (dev->class == ATA_DEV_ATAPI) {
   2684		const char *cdb_intr_string = "";
   2685		const char *atapi_an_string = "";
   2686		const char *dma_dir_string = "";
   2687		u32 sntf;
   2688
   2689		rc = atapi_cdb_len(id);
   2690		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
   2691			ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
   2692			rc = -EINVAL;
   2693			goto err_out_nosup;
   2694		}
   2695		dev->cdb_len = (unsigned int) rc;
   2696
    2697		/* Enable ATAPI AN if both the host and the device support
    2698		 * it.  If a PMP is attached, SNTF is required
   2699		 * to enable ATAPI AN to discern between PHY status
   2700		 * changed notifications and ATAPI ANs.
   2701		 */
   2702		if (atapi_an &&
   2703		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
   2704		    (!sata_pmp_attached(ap) ||
   2705		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
   2706			/* issue SET feature command to turn this on */
   2707			err_mask = ata_dev_set_feature(dev,
   2708					SETFEATURES_SATA_ENABLE, SATA_AN);
   2709			if (err_mask)
   2710				ata_dev_err(dev,
   2711					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
   2712					    err_mask);
   2713			else {
   2714				dev->flags |= ATA_DFLAG_AN;
   2715				atapi_an_string = ", ATAPI AN";
   2716			}
   2717		}
   2718
   2719		if (ata_id_cdb_intr(dev->id)) {
   2720			dev->flags |= ATA_DFLAG_CDB_INTR;
   2721			cdb_intr_string = ", CDB intr";
   2722		}
   2723
   2724		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
   2725			dev->flags |= ATA_DFLAG_DMADIR;
   2726			dma_dir_string = ", DMADIR";
   2727		}
   2728
   2729		if (ata_id_has_da(dev->id)) {
   2730			dev->flags |= ATA_DFLAG_DA;
   2731			zpodd_init(dev);
   2732		}
   2733
   2734		/* print device info to dmesg */
   2735		if (print_info)
   2736			ata_dev_info(dev,
   2737				     "ATAPI: %s, %s, max %s%s%s%s\n",
   2738				     modelbuf, fwrevbuf,
   2739				     ata_mode_string(xfer_mask),
   2740				     cdb_intr_string, atapi_an_string,
   2741				     dma_dir_string);
   2742	}
   2743
   2744	/* determine max_sectors */
   2745	dev->max_sectors = ATA_MAX_SECTORS;
   2746	if (dev->flags & ATA_DFLAG_LBA48)
   2747		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
   2748
    2749	/* Limit PATA drive on SATA cable bridge transfers to udma5 and
    2750	   ATA_MAX_SECTORS (256) sectors */
   2751	if (ata_dev_knobble(dev)) {
   2752		if (print_info)
   2753			ata_dev_info(dev, "applying bridge limits\n");
   2754		dev->udma_mask &= ATA_UDMA5;
   2755		dev->max_sectors = ATA_MAX_SECTORS;
   2756	}
   2757
   2758	if ((dev->class == ATA_DEV_ATAPI) &&
   2759	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
   2760		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
   2761		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
   2762	}
   2763
   2764	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
   2765		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
   2766					 dev->max_sectors);
   2767
   2768	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
   2769		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
   2770					 dev->max_sectors);
   2771
   2772	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
   2773		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
   2774
   2775	if (ap->ops->dev_config)
   2776		ap->ops->dev_config(dev);
   2777
   2778	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
   2779		/* Let the user know. We don't want to disallow opens for
   2780		   rescue purposes, or in case the vendor is just a blithering
   2781		   idiot. Do this after the dev_config call as some controllers
   2782		   with buggy firmware may want to avoid reporting false device
   2783		   bugs */
   2784
   2785		if (print_info) {
   2786			ata_dev_warn(dev,
   2787"Drive reports diagnostics failure. This may indicate a drive\n");
   2788			ata_dev_warn(dev,
   2789"fault or invalid emulation. Contact drive vendor for information.\n");
   2790		}
   2791	}
   2792
   2793	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
   2794		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
   2795		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
   2796	}
   2797
   2798	return 0;
   2799
   2800err_out_nosup:
   2801	return rc;
   2802}
   2803
   2804/**
   2805 *	ata_cable_40wire	-	return 40 wire cable type
   2806 *	@ap: port
   2807 *
   2808 *	Helper method for drivers which want to hardwire 40 wire cable
   2809 *	detection.
   2810 */
   2811
   2812int ata_cable_40wire(struct ata_port *ap)
   2813{
   2814	return ATA_CBL_PATA40;
   2815}
   2816EXPORT_SYMBOL_GPL(ata_cable_40wire);
   2817
   2818/**
   2819 *	ata_cable_80wire	-	return 80 wire cable type
   2820 *	@ap: port
   2821 *
   2822 *	Helper method for drivers which want to hardwire 80 wire cable
   2823 *	detection.
   2824 */
   2825
   2826int ata_cable_80wire(struct ata_port *ap)
   2827{
   2828	return ATA_CBL_PATA80;
   2829}
   2830EXPORT_SYMBOL_GPL(ata_cable_80wire);
   2831
   2832/**
   2833 *	ata_cable_unknown	-	return unknown PATA cable.
   2834 *	@ap: port
   2835 *
   2836 *	Helper method for drivers which have no PATA cable detection.
   2837 */
   2838
   2839int ata_cable_unknown(struct ata_port *ap)
   2840{
   2841	return ATA_CBL_PATA_UNK;
   2842}
   2843EXPORT_SYMBOL_GPL(ata_cable_unknown);
   2844
   2845/**
   2846 *	ata_cable_ignore	-	return ignored PATA cable.
   2847 *	@ap: port
   2848 *
   2849 *	Helper method for drivers which don't use cable type to limit
   2850 *	transfer mode.
   2851 */
   2852int ata_cable_ignore(struct ata_port *ap)
   2853{
   2854	return ATA_CBL_PATA_IGN;
   2855}
   2856EXPORT_SYMBOL_GPL(ata_cable_ignore);
   2857
   2858/**
   2859 *	ata_cable_sata	-	return SATA cable type
   2860 *	@ap: port
   2861 *
   2862 *	Helper method for drivers which have SATA cables
   2863 */
   2864
   2865int ata_cable_sata(struct ata_port *ap)
   2866{
   2867	return ATA_CBL_SATA;
   2868}
   2869EXPORT_SYMBOL_GPL(ata_cable_sata);
   2870
   2871/**
   2872 *	ata_bus_probe - Reset and probe ATA bus
   2873 *	@ap: Bus to probe
   2874 *
   2875 *	Master ATA bus probing function.  Initiates a hardware-dependent
   2876 *	bus reset, then attempts to identify any devices found on
   2877 *	the bus.
   2878 *
   2879 *	LOCKING:
   2880 *	PCI/etc. bus probe sem.
   2881 *
   2882 *	RETURNS:
   2883 *	Zero on success, negative errno otherwise.
   2884 */
   2885
   2886int ata_bus_probe(struct ata_port *ap)
   2887{
   2888	unsigned int classes[ATA_MAX_DEVICES];
   2889	int tries[ATA_MAX_DEVICES];
   2890	int rc;
   2891	struct ata_device *dev;
   2892
   2893	ata_for_each_dev(dev, &ap->link, ALL)
   2894		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
   2895
   2896 retry:
   2897	ata_for_each_dev(dev, &ap->link, ALL) {
   2898		/* If we issue an SRST then an ATA drive (not ATAPI)
   2899		 * may change configuration and be in PIO0 timing. If
   2900		 * we do a hard reset (or are coming from power on)
   2901		 * this is true for ATA or ATAPI. Until we've set a
   2902		 * suitable controller mode we should not touch the
   2903		 * bus as we may be talking too fast.
   2904		 */
   2905		dev->pio_mode = XFER_PIO_0;
   2906		dev->dma_mode = 0xff;
   2907
   2908		/* If the controller has a pio mode setup function
   2909		 * then use it to set the chipset to rights. Don't
   2910		 * touch the DMA setup as that will be dealt with when
   2911		 * configuring devices.
   2912		 */
   2913		if (ap->ops->set_piomode)
   2914			ap->ops->set_piomode(ap, dev);
   2915	}
   2916
   2917	/* reset and determine device classes */
   2918	ap->ops->phy_reset(ap);
   2919
   2920	ata_for_each_dev(dev, &ap->link, ALL) {
   2921		if (dev->class != ATA_DEV_UNKNOWN)
   2922			classes[dev->devno] = dev->class;
   2923		else
   2924			classes[dev->devno] = ATA_DEV_NONE;
   2925
   2926		dev->class = ATA_DEV_UNKNOWN;
   2927	}
   2928
   2929	/* read IDENTIFY page and configure devices. We have to do the identify
   2930	   specific sequence bass-ackwards so that PDIAG- is released by
   2931	   the slave device */
   2932
   2933	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
   2934		if (tries[dev->devno])
   2935			dev->class = classes[dev->devno];
   2936
   2937		if (!ata_dev_enabled(dev))
   2938			continue;
   2939
   2940		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
   2941				     dev->id);
   2942		if (rc)
   2943			goto fail;
   2944	}
   2945
   2946	/* Now ask for the cable type as PDIAG- should have been released */
   2947	if (ap->ops->cable_detect)
   2948		ap->cbl = ap->ops->cable_detect(ap);
   2949
   2950	/* We may have SATA bridge glue hiding here irrespective of
   2951	 * the reported cable types and sensed types.  When SATA
   2952	 * drives indicate we have a bridge, we don't know which end
    2953 *	of the link the bridge is on, which is a problem.
   2954	 */
   2955	ata_for_each_dev(dev, &ap->link, ENABLED)
   2956		if (ata_id_is_sata(dev->id))
   2957			ap->cbl = ATA_CBL_SATA;
   2958
   2959	/* After the identify sequence we can now set up the devices. We do
   2960	   this in the normal order so that the user doesn't get confused */
   2961
   2962	ata_for_each_dev(dev, &ap->link, ENABLED) {
   2963		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
   2964		rc = ata_dev_configure(dev);
   2965		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
   2966		if (rc)
   2967			goto fail;
   2968	}
   2969
   2970	/* configure transfer mode */
   2971	rc = ata_set_mode(&ap->link, &dev);
   2972	if (rc)
   2973		goto fail;
   2974
   2975	ata_for_each_dev(dev, &ap->link, ENABLED)
   2976		return 0;
   2977
   2978	return -ENODEV;
   2979
   2980 fail:
   2981	tries[dev->devno]--;
   2982
   2983	switch (rc) {
   2984	case -EINVAL:
   2985		/* eeek, something went very wrong, give up */
   2986		tries[dev->devno] = 0;
   2987		break;
   2988
   2989	case -ENODEV:
   2990		/* give it just one more chance */
   2991		tries[dev->devno] = min(tries[dev->devno], 1);
   2992		fallthrough;
   2993	case -EIO:
   2994		if (tries[dev->devno] == 1) {
   2995			/* This is the last chance, better to slow
   2996			 * down than lose it.
   2997			 */
   2998			sata_down_spd_limit(&ap->link, 0);
   2999			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
   3000		}
   3001	}
   3002
   3003	if (!tries[dev->devno])
   3004		ata_dev_disable(dev);
   3005
   3006	goto retry;
   3007}
   3008
   3009/**
   3010 *	sata_print_link_status - Print SATA link status
   3011 *	@link: SATA link to printk link status about
   3012 *
   3013 *	This function prints link speed and status of a SATA link.
   3014 *
   3015 *	LOCKING:
   3016 *	None.
   3017 */
   3018static void sata_print_link_status(struct ata_link *link)
   3019{
   3020	u32 sstatus, scontrol, tmp;
   3021
   3022	if (sata_scr_read(link, SCR_STATUS, &sstatus))
   3023		return;
   3024	sata_scr_read(link, SCR_CONTROL, &scontrol);
   3025
   3026	if (ata_phys_link_online(link)) {
   3027		tmp = (sstatus >> 4) & 0xf;
   3028		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
   3029			      sata_spd_string(tmp), sstatus, scontrol);
   3030	} else {
   3031		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
   3032			      sstatus, scontrol);
   3033	}
   3034}
   3035
   3036/**
   3037 *	ata_dev_pair		-	return other device on cable
   3038 *	@adev: device
   3039 *
    3040 *	Obtain the other device on the same cable, or NULL if none
    3041 *	is present.
   3042 */
   3043
   3044struct ata_device *ata_dev_pair(struct ata_device *adev)
   3045{
   3046	struct ata_link *link = adev->link;
   3047	struct ata_device *pair = &link->device[1 - adev->devno];
   3048	if (!ata_dev_enabled(pair))
   3049		return NULL;
   3050	return pair;
   3051}
   3052EXPORT_SYMBOL_GPL(ata_dev_pair);
   3053
   3054/**
   3055 *	sata_down_spd_limit - adjust SATA spd limit downward
   3056 *	@link: Link to adjust SATA spd limit for
   3057 *	@spd_limit: Additional limit
   3058 *
   3059 *	Adjust SATA spd limit of @link downward.  Note that this
   3060 *	function only adjusts the limit.  The change must be applied
   3061 *	using sata_set_spd().
   3062 *
    3063 *	If @spd_limit is non-zero, the speed is limited to a speed
    3064 *	equal to or lower than @spd_limit if such a speed is supported.  If
   3065 *	@spd_limit is slower than any supported speed, only the lowest
   3066 *	supported speed is allowed.
   3067 *
   3068 *	LOCKING:
   3069 *	Inherited from caller.
   3070 *
   3071 *	RETURNS:
   3072 *	0 on success, negative errno on failure
   3073 */
   3074int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
   3075{
   3076	u32 sstatus, spd, mask;
   3077	int rc, bit;
   3078
   3079	if (!sata_scr_valid(link))
   3080		return -EOPNOTSUPP;
   3081
   3082	/* If SCR can be read, use it to determine the current SPD.
   3083	 * If not, use cached value in link->sata_spd.
   3084	 */
   3085	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
   3086	if (rc == 0 && ata_sstatus_online(sstatus))
   3087		spd = (sstatus >> 4) & 0xf;
   3088	else
   3089		spd = link->sata_spd;
   3090
   3091	mask = link->sata_spd_limit;
   3092	if (mask <= 1)
   3093		return -EINVAL;
   3094
   3095	/* unconditionally mask off the highest bit */
   3096	bit = fls(mask) - 1;
   3097	mask &= ~(1 << bit);
   3098
   3099	/*
   3100	 * Mask off all speeds higher than or equal to the current one.  At
   3101	 * this point, if current SPD is not available and we previously
   3102	 * recorded the link speed from SStatus, the driver has already
   3103	 * masked off the highest bit so mask should already be 1 or 0.
   3104	 * Otherwise, we should not force 1.5Gbps on a link where we have
   3105	 * not previously recorded speed from SStatus.  Just return in this
   3106	 * case.
   3107	 */
   3108	if (spd > 1)
   3109		mask &= (1 << (spd - 1)) - 1;
   3110	else
   3111		return -EINVAL;
   3112
   3113	/* were we already at the bottom? */
   3114	if (!mask)
   3115		return -EINVAL;
   3116
   3117	if (spd_limit) {
   3118		if (mask & ((1 << spd_limit) - 1))
   3119			mask &= (1 << spd_limit) - 1;
   3120		else {
   3121			bit = ffs(mask) - 1;
   3122			mask = 1 << bit;
   3123		}
   3124	}
   3125
   3126	link->sata_spd_limit = mask;
   3127
   3128	ata_link_warn(link, "limiting SATA link speed to %s\n",
   3129		      sata_spd_string(fls(mask)));
   3130
   3131	return 0;
   3132}
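
/*
 * Worked example: a link trained at 3.0 Gbps (spd == 2) with
 * sata_spd_limit == 0x7 (1.5/3.0/6.0 Gbps all allowed).  Clearing the
 * top bit leaves 0x3; masking off speeds at or above the current one
 * leaves 0x1, so the link drops to 1.5 Gbps once sata_set_spd()
 * applies the new limit.
 */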
   3133
   3134#ifdef CONFIG_ATA_ACPI
   3135/**
   3136 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
   3137 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
   3138 *	@cycle: cycle duration in ns
   3139 *
   3140 *	Return matching xfer mode for @cycle.  The returned mode is of
   3141 *	the transfer type specified by @xfer_shift.  If @cycle is too
   3142 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
    3143 *	than the fastest known mode, the fastest mode is returned.
   3144 *
   3145 *	LOCKING:
   3146 *	None.
   3147 *
   3148 *	RETURNS:
   3149 *	Matching xfer_mode, 0xff if no match found.
   3150 */
   3151u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
   3152{
   3153	u8 base_mode = 0xff, last_mode = 0xff;
   3154	const struct ata_xfer_ent *ent;
   3155	const struct ata_timing *t;
   3156
   3157	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
   3158		if (ent->shift == xfer_shift)
   3159			base_mode = ent->base;
   3160
   3161	for (t = ata_timing_find_mode(base_mode);
   3162	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
   3163		unsigned short this_cycle;
   3164
   3165		switch (xfer_shift) {
   3166		case ATA_SHIFT_PIO:
   3167		case ATA_SHIFT_MWDMA:
   3168			this_cycle = t->cycle;
   3169			break;
   3170		case ATA_SHIFT_UDMA:
   3171			this_cycle = t->udma;
   3172			break;
   3173		default:
   3174			return 0xff;
   3175		}
   3176
   3177		if (cycle > this_cycle)
   3178			break;
   3179
   3180		last_mode = t->mode;
   3181	}
   3182
   3183	return last_mode;
   3184}
   3185#endif
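
/*
 * Worked example, assuming the conventional UDMA cycle times of
 * 120/80/60/45/30/20/15 ns for UDMA0-6: ata_timing_cycle2mode
 * (ATA_SHIFT_UDMA, 60) accepts UDMA0 through UDMA2 (60 ns is not
 * slower than requested), stops at UDMA3 and returns XFER_UDMA_2.
 */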
   3186
   3187/**
   3188 *	ata_down_xfermask_limit - adjust dev xfer masks downward
   3189 *	@dev: Device to adjust xfer masks
   3190 *	@sel: ATA_DNXFER_* selector
   3191 *
   3192 *	Adjust xfer masks of @dev downward.  Note that this function
   3193 *	does not apply the change.  Invoking ata_set_mode() afterwards
   3194 *	will apply the limit.
   3195 *
   3196 *	LOCKING:
   3197 *	Inherited from caller.
   3198 *
   3199 *	RETURNS:
   3200 *	0 on success, negative errno on failure
   3201 */
   3202int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
   3203{
   3204	char buf[32];
   3205	unsigned long orig_mask, xfer_mask;
   3206	unsigned long pio_mask, mwdma_mask, udma_mask;
   3207	int quiet, highbit;
   3208
   3209	quiet = !!(sel & ATA_DNXFER_QUIET);
   3210	sel &= ~ATA_DNXFER_QUIET;
   3211
   3212	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
   3213						  dev->mwdma_mask,
   3214						  dev->udma_mask);
   3215	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
   3216
   3217	switch (sel) {
   3218	case ATA_DNXFER_PIO:
   3219		highbit = fls(pio_mask) - 1;
   3220		pio_mask &= ~(1 << highbit);
   3221		break;
   3222
   3223	case ATA_DNXFER_DMA:
   3224		if (udma_mask) {
   3225			highbit = fls(udma_mask) - 1;
   3226			udma_mask &= ~(1 << highbit);
   3227			if (!udma_mask)
   3228				return -ENOENT;
   3229		} else if (mwdma_mask) {
   3230			highbit = fls(mwdma_mask) - 1;
   3231			mwdma_mask &= ~(1 << highbit);
   3232			if (!mwdma_mask)
   3233				return -ENOENT;
   3234		}
   3235		break;
   3236
   3237	case ATA_DNXFER_40C:
   3238		udma_mask &= ATA_UDMA_MASK_40C;
   3239		break;
   3240
   3241	case ATA_DNXFER_FORCE_PIO0:
   3242		pio_mask &= 1;
   3243		fallthrough;
   3244	case ATA_DNXFER_FORCE_PIO:
   3245		mwdma_mask = 0;
   3246		udma_mask = 0;
   3247		break;
   3248
   3249	default:
   3250		BUG();
   3251	}
   3252
   3253	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
   3254
   3255	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
   3256		return -ENOENT;
   3257
   3258	if (!quiet) {
   3259		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
   3260			snprintf(buf, sizeof(buf), "%s:%s",
   3261				 ata_mode_string(xfer_mask),
   3262				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
   3263		else
   3264			snprintf(buf, sizeof(buf), "%s",
   3265				 ata_mode_string(xfer_mask));
   3266
   3267		ata_dev_warn(dev, "limiting speed to %s\n", buf);
   3268	}
   3269
   3270	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
   3271			    &dev->udma_mask);
   3272
   3273	return 0;
   3274}
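
/*
 * Worked example: ATA_DNXFER_PIO on a device with pio_mask 0x1f
 * (PIO0-4) clears the top bit, leaving PIO0-3.  If the mask did not
 * change, or no PIO mode survives, -ENOENT tells the caller there is
 * nothing left to give up.
 */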
   3275
   3276static int ata_dev_set_mode(struct ata_device *dev)
   3277{
   3278	struct ata_port *ap = dev->link->ap;
   3279	struct ata_eh_context *ehc = &dev->link->eh_context;
   3280	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
   3281	const char *dev_err_whine = "";
   3282	int ign_dev_err = 0;
   3283	unsigned int err_mask = 0;
   3284	int rc;
   3285
   3286	dev->flags &= ~ATA_DFLAG_PIO;
   3287	if (dev->xfer_shift == ATA_SHIFT_PIO)
   3288		dev->flags |= ATA_DFLAG_PIO;
   3289
   3290	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
   3291		dev_err_whine = " (SET_XFERMODE skipped)";
   3292	else {
   3293		if (nosetxfer)
   3294			ata_dev_warn(dev,
   3295				     "NOSETXFER but PATA detected - can't "
   3296				     "skip SETXFER, might malfunction\n");
   3297		err_mask = ata_dev_set_xfermode(dev);
   3298	}
   3299
   3300	if (err_mask & ~AC_ERR_DEV)
   3301		goto fail;
   3302
   3303	/* revalidate */
   3304	ehc->i.flags |= ATA_EHI_POST_SETMODE;
   3305	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
   3306	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
   3307	if (rc)
   3308		return rc;
   3309
   3310	if (dev->xfer_shift == ATA_SHIFT_PIO) {
   3311		/* Old CFA may refuse this command, which is just fine */
   3312		if (ata_id_is_cfa(dev->id))
   3313			ign_dev_err = 1;
   3314		/* Catch several broken garbage emulations plus some pre
   3315		   ATA devices */
   3316		if (ata_id_major_version(dev->id) == 0 &&
   3317					dev->pio_mode <= XFER_PIO_2)
   3318			ign_dev_err = 1;
   3319		/* Some very old devices and some bad newer ones fail
   3320		   any kind of SET_XFERMODE request but support PIO0-2
   3321		   timings and no IORDY */
   3322		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
   3323			ign_dev_err = 1;
   3324	}
   3325	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
   3326	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
   3327	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
   3328	    dev->dma_mode == XFER_MW_DMA_0 &&
   3329	    (dev->id[63] >> 8) & 1)
   3330		ign_dev_err = 1;
   3331
   3332	/* if the device is actually configured correctly, ignore dev err */
   3333	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
   3334		ign_dev_err = 1;
   3335
   3336	if (err_mask & AC_ERR_DEV) {
   3337		if (!ign_dev_err)
   3338			goto fail;
   3339		else
   3340			dev_err_whine = " (device error ignored)";
   3341	}
   3342
   3343	ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
   3344		    dev->xfer_shift, (int)dev->xfer_mode);
   3345
   3346	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
   3347	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
   3348		ata_dev_info(dev, "configured for %s%s\n",
   3349			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
   3350			     dev_err_whine);
   3351
   3352	return 0;
   3353
   3354 fail:
   3355	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
   3356	return -EIO;
   3357}
   3358
   3359/**
   3360 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
   3361 *	@link: link on which timings will be programmed
   3362 *	@r_failed_dev: out parameter for failed device
   3363 *
   3364 *	Standard implementation of the function used to tune and set
   3365 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
   3366 *	ata_dev_set_mode() fails, pointer to the failing device is
   3367 *	returned in @r_failed_dev.
   3368 *
   3369 *	LOCKING:
   3370 *	PCI/etc. bus probe sem.
   3371 *
   3372 *	RETURNS:
   3373 *	0 on success, negative errno otherwise
   3374 */
   3375
   3376int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
   3377{
   3378	struct ata_port *ap = link->ap;
   3379	struct ata_device *dev;
   3380	int rc = 0, used_dma = 0, found = 0;
   3381
   3382	/* step 1: calculate xfer_mask */
   3383	ata_for_each_dev(dev, link, ENABLED) {
   3384		unsigned long pio_mask, dma_mask;
   3385		unsigned int mode_mask;
   3386
   3387		mode_mask = ATA_DMA_MASK_ATA;
   3388		if (dev->class == ATA_DEV_ATAPI)
   3389			mode_mask = ATA_DMA_MASK_ATAPI;
   3390		else if (ata_id_is_cfa(dev->id))
   3391			mode_mask = ATA_DMA_MASK_CFA;
   3392
   3393		ata_dev_xfermask(dev);
   3394		ata_force_xfermask(dev);
   3395
   3396		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
   3397
   3398		if (libata_dma_mask & mode_mask)
   3399			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
   3400						     dev->udma_mask);
   3401		else
   3402			dma_mask = 0;
   3403
   3404		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
   3405		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
   3406
   3407		found = 1;
   3408		if (ata_dma_enabled(dev))
   3409			used_dma = 1;
   3410	}
   3411	if (!found)
   3412		goto out;
   3413
   3414	/* step 2: always set host PIO timings */
   3415	ata_for_each_dev(dev, link, ENABLED) {
   3416		if (dev->pio_mode == 0xff) {
   3417			ata_dev_warn(dev, "no PIO support\n");
   3418			rc = -EINVAL;
   3419			goto out;
   3420		}
   3421
   3422		dev->xfer_mode = dev->pio_mode;
   3423		dev->xfer_shift = ATA_SHIFT_PIO;
   3424		if (ap->ops->set_piomode)
   3425			ap->ops->set_piomode(ap, dev);
   3426	}
   3427
   3428	/* step 3: set host DMA timings */
   3429	ata_for_each_dev(dev, link, ENABLED) {
   3430		if (!ata_dma_enabled(dev))
   3431			continue;
   3432
   3433		dev->xfer_mode = dev->dma_mode;
   3434		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
   3435		if (ap->ops->set_dmamode)
   3436			ap->ops->set_dmamode(ap, dev);
   3437	}
   3438
   3439	/* step 4: update devices' xfer mode */
   3440	ata_for_each_dev(dev, link, ENABLED) {
   3441		rc = ata_dev_set_mode(dev);
   3442		if (rc)
   3443			goto out;
   3444	}
   3445
   3446	/* Record simplex status. If we selected DMA then the other
   3447	 * host channels are not permitted to do so.
   3448	 */
   3449	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
   3450		ap->host->simplex_claimed = ap;
   3451
   3452 out:
   3453	if (rc)
   3454		*r_failed_dev = dev;
   3455	return rc;
   3456}
   3457EXPORT_SYMBOL_GPL(ata_do_set_mode);
   3458
   3459/**
   3460 *	ata_wait_ready - wait for link to become ready
   3461 *	@link: link to be waited on
   3462 *	@deadline: deadline jiffies for the operation
   3463 *	@check_ready: callback to check link readiness
   3464 *
    3465 *	Wait for @link to become ready.  @check_ready should return a
    3466 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
   3467 *	link doesn't seem to be occupied, other errno for other error
   3468 *	conditions.
   3469 *
   3470 *	Transient -ENODEV conditions are allowed for
   3471 *	ATA_TMOUT_FF_WAIT.
   3472 *
   3473 *	LOCKING:
   3474 *	EH context.
   3475 *
   3476 *	RETURNS:
   3477 *	0 if @link is ready before @deadline; otherwise, -errno.
   3478 */
   3479int ata_wait_ready(struct ata_link *link, unsigned long deadline,
   3480		   int (*check_ready)(struct ata_link *link))
   3481{
   3482	unsigned long start = jiffies;
   3483	unsigned long nodev_deadline;
   3484	int warned = 0;
   3485
   3486	/* choose which 0xff timeout to use, read comment in libata.h */
   3487	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
   3488		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
   3489	else
   3490		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
   3491
    3492	/* Slave readiness can't be tested separately from the master.  On
    3493	 * M/S emulation configurations, this function should be called
    3494	 * only on the master; it will handle both master and slave.
   3495	 */
   3496	WARN_ON(link == link->ap->slave_link);
   3497
   3498	if (time_after(nodev_deadline, deadline))
   3499		nodev_deadline = deadline;
   3500
   3501	while (1) {
   3502		unsigned long now = jiffies;
   3503		int ready, tmp;
   3504
   3505		ready = tmp = check_ready(link);
   3506		if (ready > 0)
   3507			return 0;
   3508
   3509		/*
   3510		 * -ENODEV could be transient.  Ignore -ENODEV if link
   3511		 * is online.  Also, some SATA devices take a long
   3512		 * time to clear 0xff after reset.  Wait for
   3513		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
   3514		 * offline.
   3515		 *
   3516		 * Note that some PATA controllers (pata_ali) explode
   3517		 * if status register is read more than once when
   3518		 * there's no device attached.
   3519		 */
   3520		if (ready == -ENODEV) {
   3521			if (ata_link_online(link))
   3522				ready = 0;
   3523			else if ((link->ap->flags & ATA_FLAG_SATA) &&
   3524				 !ata_link_offline(link) &&
   3525				 time_before(now, nodev_deadline))
   3526				ready = 0;
   3527		}
   3528
   3529		if (ready)
   3530			return ready;
   3531		if (time_after(now, deadline))
   3532			return -EBUSY;
   3533
   3534		if (!warned && time_after(now, start + 5 * HZ) &&
   3535		    (deadline - now > 3 * HZ)) {
   3536			ata_link_warn(link,
   3537				"link is slow to respond, please be patient "
   3538				"(ready=%d)\n", tmp);
   3539			warned = 1;
   3540		}
   3541
   3542		ata_msleep(link->ap, 50);
   3543	}
   3544}
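
/*
 * Usage sketch (illustrative only, not part of libata): a low-level
 * driver can plug its own readiness test into ata_wait_ready().  The
 * "example_" names are hypothetical; real drivers use helpers such as
 * ata_sff_wait_ready() that follow the same pattern.
 */
#if 0
static int example_check_ready(struct ata_link *link)
{
	u8 status = example_read_status(link->ap);	/* hypothetical MMIO read */

	if (status == 0xff)			/* looks unoccupied */
		return -ENODEV;
	return !(status & ATA_BUSY);		/* >0 once BSY clears */
}

static int example_wait_link(struct ata_link *link)
{
	/* allow up to 30s, in the style of the EH callers */
	return ata_wait_ready(link, ata_deadline(jiffies, 30000),
			      example_check_ready);
}
#endif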
   3545
   3546/**
   3547 *	ata_wait_after_reset - wait for link to become ready after reset
   3548 *	@link: link to be waited on
   3549 *	@deadline: deadline jiffies for the operation
   3550 *	@check_ready: callback to check link readiness
   3551 *
   3552 *	Wait for @link to become ready after reset.
   3553 *
   3554 *	LOCKING:
   3555 *	EH context.
   3556 *
   3557 *	RETURNS:
   3558 *	0 if @link is ready before @deadline; otherwise, -errno.
   3559 */
   3560int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
   3561				int (*check_ready)(struct ata_link *link))
   3562{
   3563	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
   3564
   3565	return ata_wait_ready(link, deadline, check_ready);
   3566}
   3567EXPORT_SYMBOL_GPL(ata_wait_after_reset);
   3568
   3569/**
   3570 *	ata_std_prereset - prepare for reset
   3571 *	@link: ATA link to be reset
   3572 *	@deadline: deadline jiffies for the operation
   3573 *
   3574 *	@link is about to be reset.  Initialize it.  Failure from
    3575 *	prereset makes libata abort the whole reset sequence and give
    3576 *	up on that port, so prereset should be best-effort.  It does
    3577 *	its best to prepare for the reset sequence but if things go
    3578 *	wrong, it should just whine, not fail.
   3579 *
   3580 *	LOCKING:
   3581 *	Kernel thread context (may sleep)
   3582 *
   3583 *	RETURNS:
   3584 *	Always 0.
   3585 */
   3586int ata_std_prereset(struct ata_link *link, unsigned long deadline)
   3587{
   3588	struct ata_port *ap = link->ap;
   3589	struct ata_eh_context *ehc = &link->eh_context;
   3590	const unsigned long *timing = sata_ehc_deb_timing(ehc);
   3591	int rc;
   3592
   3593	/* if we're about to do hardreset, nothing more to do */
   3594	if (ehc->i.action & ATA_EH_HARDRESET)
   3595		return 0;
   3596
   3597	/* if SATA, resume link */
   3598	if (ap->flags & ATA_FLAG_SATA) {
   3599		rc = sata_link_resume(link, timing, deadline);
   3600		/* whine about phy resume failure but proceed */
   3601		if (rc && rc != -EOPNOTSUPP)
   3602			ata_link_warn(link,
   3603				      "failed to resume link for reset (errno=%d)\n",
   3604				      rc);
   3605	}
   3606
   3607	/* no point in trying softreset on offline link */
   3608	if (ata_phys_link_offline(link))
   3609		ehc->i.action &= ~ATA_EH_SOFTRESET;
   3610
   3611	return 0;
   3612}
   3613EXPORT_SYMBOL_GPL(ata_std_prereset);
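
/*
 * Usage sketch (illustrative; "example_" names are hypothetical): LLDs
 * commonly wrap ata_std_prereset() to add controller-specific
 * preparation while keeping the standard link-resume behavior.
 */
#if 0
static int example_prereset(struct ata_link *link, unsigned long deadline)
{
	example_enable_phy(link->ap);	/* hypothetical controller hook */
	return ata_std_prereset(link, deadline);
}
#endif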
   3614
   3615/**
   3616 *	sata_std_hardreset - COMRESET w/o waiting or classification
   3617 *	@link: link to reset
   3618 *	@class: resulting class of attached device
   3619 *	@deadline: deadline jiffies for the operation
   3620 *
   3621 *	Standard SATA COMRESET w/o waiting or classification.
   3622 *
   3623 *	LOCKING:
   3624 *	Kernel thread context (may sleep)
   3625 *
   3626 *	RETURNS:
   3627 *	0 if link offline, -EAGAIN if link online, -errno on errors.
   3628 */
   3629int sata_std_hardreset(struct ata_link *link, unsigned int *class,
   3630		       unsigned long deadline)
   3631{
   3632	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
   3633	bool online;
   3634	int rc;
   3635
   3636	/* do hardreset */
   3637	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
   3638	return online ? -EAGAIN : rc;
   3639}
   3640EXPORT_SYMBOL_GPL(sata_std_hardreset);
   3641
   3642/**
   3643 *	ata_std_postreset - standard postreset callback
   3644 *	@link: the target ata_link
   3645 *	@classes: classes of attached devices
   3646 *
   3647 *	This function is invoked after a successful reset.  Note that
   3648 *	the device might have been reset more than once using
   3649 *	different reset methods before postreset is invoked.
   3650 *
   3651 *	LOCKING:
   3652 *	Kernel thread context (may sleep)
   3653 */
   3654void ata_std_postreset(struct ata_link *link, unsigned int *classes)
   3655{
   3656	u32 serror;
   3657
   3658	/* reset complete, clear SError */
   3659	if (!sata_scr_read(link, SCR_ERROR, &serror))
   3660		sata_scr_write(link, SCR_ERROR, serror);
   3661
   3662	/* print link status */
   3663	sata_print_link_status(link);
   3664}
   3665EXPORT_SYMBOL_GPL(ata_std_postreset);
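
/*
 * Wiring sketch: how the reset callbacks above typically appear in a
 * driver's port operations.  This mirrors what inheriting from
 * sata_port_ops/ata_base_port_ops already provides; it is shown only
 * to make the prereset/hardreset/postreset sequence explicit.
 */
#if 0
static struct ata_port_operations example_port_ops = {
	.inherits	= &sata_port_ops,
	.prereset	= ata_std_prereset,
	.hardreset	= sata_std_hardreset,
	.postreset	= ata_std_postreset,
};
#endif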
   3666
   3667/**
   3668 *	ata_dev_same_device - Determine whether new ID matches configured device
   3669 *	@dev: device to compare against
   3670 *	@new_class: class of the new device
   3671 *	@new_id: IDENTIFY page of the new device
   3672 *
   3673 *	Compare @new_class and @new_id against @dev and determine
   3674 *	whether @dev is the device indicated by @new_class and
   3675 *	@new_id.
   3676 *
   3677 *	LOCKING:
   3678 *	None.
   3679 *
   3680 *	RETURNS:
   3681 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
   3682 */
   3683static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
   3684			       const u16 *new_id)
   3685{
   3686	const u16 *old_id = dev->id;
   3687	unsigned char model[2][ATA_ID_PROD_LEN + 1];
   3688	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
   3689
   3690	if (dev->class != new_class) {
   3691		ata_dev_info(dev, "class mismatch %d != %d\n",
   3692			     dev->class, new_class);
   3693		return 0;
   3694	}
   3695
   3696	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
   3697	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
   3698	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
   3699	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
   3700
   3701	if (strcmp(model[0], model[1])) {
   3702		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
   3703			     model[0], model[1]);
   3704		return 0;
   3705	}
   3706
   3707	if (strcmp(serial[0], serial[1])) {
   3708		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
   3709			     serial[0], serial[1]);
   3710		return 0;
   3711	}
   3712
   3713	return 1;
   3714}
   3715
   3716/**
   3717 *	ata_dev_reread_id - Re-read IDENTIFY data
   3718 *	@dev: target ATA device
   3719 *	@readid_flags: read ID flags
   3720 *
   3721 *	Re-read IDENTIFY page and make sure @dev is still attached to
   3722 *	the port.
   3723 *
   3724 *	LOCKING:
   3725 *	Kernel thread context (may sleep)
   3726 *
   3727 *	RETURNS:
   3728 *	0 on success, negative errno otherwise
   3729 */
   3730int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
   3731{
   3732	unsigned int class = dev->class;
   3733	u16 *id = (void *)dev->link->ap->sector_buf;
   3734	int rc;
   3735
   3736	/* read ID data */
   3737	rc = ata_dev_read_id(dev, &class, readid_flags, id);
   3738	if (rc)
   3739		return rc;
   3740
   3741	/* is the device still there? */
   3742	if (!ata_dev_same_device(dev, class, id))
   3743		return -ENODEV;
   3744
   3745	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
   3746	return 0;
   3747}
   3748
   3749/**
   3750 *	ata_dev_revalidate - Revalidate ATA device
   3751 *	@dev: device to revalidate
   3752 *	@new_class: new class code
   3753 *	@readid_flags: read ID flags
   3754 *
   3755 *	Re-read IDENTIFY page, make sure @dev is still attached to the
   3756 *	port and reconfigure it according to the new IDENTIFY page.
   3757 *
   3758 *	LOCKING:
   3759 *	Kernel thread context (may sleep)
   3760 *
   3761 *	RETURNS:
   3762 *	0 on success, negative errno otherwise
   3763 */
   3764int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
   3765		       unsigned int readid_flags)
   3766{
   3767	u64 n_sectors = dev->n_sectors;
   3768	u64 n_native_sectors = dev->n_native_sectors;
   3769	int rc;
   3770
   3771	if (!ata_dev_enabled(dev))
   3772		return -ENODEV;
   3773
   3774	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
   3775	if (ata_class_enabled(new_class) &&
   3776	    new_class != ATA_DEV_ATA &&
   3777	    new_class != ATA_DEV_ATAPI &&
   3778	    new_class != ATA_DEV_ZAC &&
   3779	    new_class != ATA_DEV_SEMB) {
   3780		ata_dev_info(dev, "class mismatch %u != %u\n",
   3781			     dev->class, new_class);
   3782		rc = -ENODEV;
   3783		goto fail;
   3784	}
   3785
   3786	/* re-read ID */
   3787	rc = ata_dev_reread_id(dev, readid_flags);
   3788	if (rc)
   3789		goto fail;
   3790
   3791	/* configure device according to the new ID */
   3792	rc = ata_dev_configure(dev);
   3793	if (rc)
   3794		goto fail;
   3795
   3796	/* verify n_sectors hasn't changed */
   3797	if (dev->class != ATA_DEV_ATA || !n_sectors ||
   3798	    dev->n_sectors == n_sectors)
   3799		return 0;
   3800
   3801	/* n_sectors has changed */
   3802	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
   3803		     (unsigned long long)n_sectors,
   3804		     (unsigned long long)dev->n_sectors);
   3805
   3806	/*
   3807	 * Something could have caused HPA to be unlocked
   3808	 * involuntarily.  If n_native_sectors hasn't changed and the
   3809	 * new size matches it, keep the device.
   3810	 */
   3811	if (dev->n_native_sectors == n_native_sectors &&
   3812	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
   3813		ata_dev_warn(dev,
   3814			     "new n_sectors matches native, probably "
   3815			     "late HPA unlock, n_sectors updated\n");
   3816		/* use the larger n_sectors */
   3817		return 0;
   3818	}
   3819
   3820	/*
   3821	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
   3822	 * unlocking HPA in those cases.
   3823	 *
   3824	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
   3825	 */
   3826	if (dev->n_native_sectors == n_native_sectors &&
   3827	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
   3828	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
   3829		ata_dev_warn(dev,
   3830			     "old n_sectors matches native, probably "
   3831			     "late HPA lock, will try to unlock HPA\n");
   3832		/* try unlocking HPA */
   3833		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
   3834		rc = -EIO;
   3835	} else
   3836		rc = -ENODEV;
   3837
   3838	/* restore original n_[native_]sectors and fail */
   3839	dev->n_native_sectors = n_native_sectors;
   3840	dev->n_sectors = n_sectors;
   3841 fail:
   3842	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
   3843	return rc;
   3844}
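
/*
 * Usage sketch (illustrative): EH revalidates a device after commands
 * that may have changed its configuration.  ATA_READID_POSTRESET is
 * the readid flag used on the post-reset path; the error handling
 * shown is a stub.
 */
#if 0
	rc = ata_dev_revalidate(dev, dev->class, ATA_READID_POSTRESET);
	if (rc)
		/* IDENTIFY mismatch or read failure; EH disables the device */
		example_disable_dev(dev);	/* hypothetical */
#endif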
   3845
   3846struct ata_blacklist_entry {
   3847	const char *model_num;
   3848	const char *model_rev;
   3849	unsigned long horkage;
   3850};
   3851
   3852static const struct ata_blacklist_entry ata_device_blacklist [] = {
   3853	/* Devices with DMA related problems under Linux */
   3854	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
   3855	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
   3856	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
   3857	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
   3858	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
   3859	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
   3860	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
   3861	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
   3862	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
   3863	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
   3864	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
   3865	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
   3866	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
   3867	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
   3868	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
   3869	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
   3870	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
   3871	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
   3872	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
   3873	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
   3874	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
   3875	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
   3876	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
   3877	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
   3878	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
   3879	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
   3880	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
   3881	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
   3882	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
   3883	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
   3884	/* Odd clown on sil3726/4726 PMPs */
   3885	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
   3886	/* Similar story with ASMedia 1092 */
   3887	{ "ASMT109x- Config",	NULL,		ATA_HORKAGE_DISABLE },
   3888
   3889	/* Weird ATAPI devices */
   3890	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
   3891	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
   3892	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
   3893	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
   3894
   3895	/*
   3896	 * Causes silent data corruption with higher max sects.
   3897	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
   3898	 */
   3899	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
   3900
   3901	/*
   3902	 * These devices time out with higher max sects.
   3903	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
   3904	 */
   3905	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
   3906	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
   3907
   3908	/* Devices we expect to fail diagnostics */
   3909
   3910	/* Devices where NCQ should be avoided */
   3911	/* NCQ is slow */
   3912	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
   3913	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ },
   3914	/* http://thread.gmane.org/gmane.linux.ide/14907 */
   3915	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
   3916	/* NCQ is broken */
   3917	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
   3918	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
   3919	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
   3920	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
   3921	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
   3922
   3923	/* Seagate NCQ + FLUSH CACHE firmware bug */
   3924	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
   3925						ATA_HORKAGE_FIRMWARE_WARN },
   3926
   3927	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
   3928						ATA_HORKAGE_FIRMWARE_WARN },
   3929
   3930	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
   3931						ATA_HORKAGE_FIRMWARE_WARN },
   3932
   3933	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
   3934						ATA_HORKAGE_FIRMWARE_WARN },
   3935
    3936	/* drives which fail FPDMA_AA activation (some may freeze afterwards);
    3937	   the ST disks also have LPM issues */
   3938	{ "ST1000LM024 HN-M101MBB", NULL,	ATA_HORKAGE_BROKEN_FPDMA_AA |
   3939						ATA_HORKAGE_NOLPM },
   3940	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
   3941
   3942	/* Blacklist entries taken from Silicon Image 3124/3132
   3943	   Windows driver .inf file - also several Linux problem reports */
   3944	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ },
   3945	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ },
   3946	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ },
   3947
   3948	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
   3949	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ },
   3950
   3951	/* Sandisk SD7/8/9s lock up hard on large trims */
   3952	{ "SanDisk SD[789]*",	NULL,		ATA_HORKAGE_MAX_TRIM_128M },
   3953
   3954	/* devices which puke on READ_NATIVE_MAX */
   3955	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA },
   3956	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
   3957	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
   3958	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
   3959
   3960	/* this one allows HPA unlocking but fails IOs on the area */
   3961	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
   3962
   3963	/* Devices which report 1 sector over size HPA */
   3964	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE },
   3965	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE },
   3966	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE },
   3967
   3968	/* Devices which get the IVB wrong */
   3969	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
   3970	/* Maybe we should just blacklist TSSTcorp... */
   3971	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB },
   3972
   3973	/* Devices that do not need bridging limits applied */
   3974	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK },
   3975	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK },
   3976
   3977	/* Devices which aren't very happy with higher link speeds */
   3978	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS },
   3979	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS },
   3980
   3981	/*
   3982	 * Devices which choke on SETXFER.  Applies only if both the
   3983	 * device and controller are SATA.
   3984	 */
   3985	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
   3986	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
   3987	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
   3988	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
   3989	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
   3990
   3991	/* Crucial BX100 SSD 500GB has broken LPM support */
   3992	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
   3993
   3994	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
   3995	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
   3996						ATA_HORKAGE_ZERO_AFTER_TRIM |
   3997						ATA_HORKAGE_NOLPM },
   3998	/* 512GB MX100 with newer firmware has only LPM issues */
   3999	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
   4000						ATA_HORKAGE_NOLPM },
   4001
   4002	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
   4003	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
   4004						ATA_HORKAGE_ZERO_AFTER_TRIM |
   4005						ATA_HORKAGE_NOLPM },
   4006	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
   4007						ATA_HORKAGE_ZERO_AFTER_TRIM |
   4008						ATA_HORKAGE_NOLPM },
   4009
   4010	/* These specific Samsung models/firmware-revs do not handle LPM well */
   4011	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
   4012	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM },
   4013	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM },
   4014	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },
   4015
   4016	/* devices that don't properly handle queued TRIM commands */
   4017	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
   4018						ATA_HORKAGE_ZERO_AFTER_TRIM },
   4019	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
   4020						ATA_HORKAGE_ZERO_AFTER_TRIM },
   4021	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
   4022						ATA_HORKAGE_ZERO_AFTER_TRIM },
   4023	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
   4024						ATA_HORKAGE_ZERO_AFTER_TRIM },
   4025	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
   4026						ATA_HORKAGE_ZERO_AFTER_TRIM },
   4027	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
   4028						ATA_HORKAGE_ZERO_AFTER_TRIM },
   4029	{ "Samsung SSD 840 EVO*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
   4030						ATA_HORKAGE_NO_DMA_LOG |
   4031						ATA_HORKAGE_ZERO_AFTER_TRIM },
   4032	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
   4033						ATA_HORKAGE_ZERO_AFTER_TRIM },
   4034	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
   4035						ATA_HORKAGE_ZERO_AFTER_TRIM },
   4036	{ "Samsung SSD 860*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
   4037						ATA_HORKAGE_ZERO_AFTER_TRIM |
   4038						ATA_HORKAGE_NO_NCQ_ON_ATI },
   4039	{ "Samsung SSD 870*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
   4040						ATA_HORKAGE_ZERO_AFTER_TRIM |
   4041						ATA_HORKAGE_NO_NCQ_ON_ATI },
   4042	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
   4043						ATA_HORKAGE_ZERO_AFTER_TRIM },
   4044
   4045	/* devices that don't properly handle TRIM commands */
   4046	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM },
   4047	{ "M88V29*",			NULL,	ATA_HORKAGE_NOTRIM },
   4048
   4049	/*
   4050	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
   4051	 * (Return Zero After Trim) flags in the ATA Command Set are
   4052	 * unreliable in the sense that they only define what happens if
   4053	 * the device successfully executed the DSM TRIM command. TRIM
   4054	 * is only advisory, however, and the device is free to silently
   4055	 * ignore all or parts of the request.
   4056	 *
   4057	 * Whitelist drives that are known to reliably return zeroes
   4058	 * after TRIM.
   4059	 */
   4060
   4061	/*
   4062	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
   4063	 * that model before whitelisting all other intel SSDs.
   4064	 */
   4065	{ "INTEL*SSDSC2MH*",		NULL,	0 },
   4066
   4067	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
   4068	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
   4069	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
   4070	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
   4071	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
   4072	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
   4073	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
   4074	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
   4075
   4076	/*
   4077	 * Some WD SATA-I drives spin up and down erratically when the link
   4078	 * is put into the slumber mode.  We don't have full list of the
   4079	 * affected devices.  Disable LPM if the device matches one of the
   4080	 * known prefixes and is SATA-1.  As a side effect LPM partial is
   4081	 * lost too.
   4082	 *
   4083	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
   4084	 */
   4085	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
   4086	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
   4087	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
   4088	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
   4089	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
   4090	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
   4091	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
   4092
   4093	/*
    4094	 * This SATA DOM device goes on a walkabout when the ATA_LOG_DIRECTORY
   4095	 * log page is accessed. Ensure we never ask for this log page with
   4096	 * these devices.
   4097	 */
   4098	{ "SATADOM-ML 3ME",		NULL,	ATA_HORKAGE_NO_LOG_DIR },
   4099
   4100	/* End Marker */
   4101	{ }
   4102};
   4103
   4104static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
   4105{
   4106	unsigned char model_num[ATA_ID_PROD_LEN + 1];
   4107	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
   4108	const struct ata_blacklist_entry *ad = ata_device_blacklist;
   4109
   4110	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
   4111	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
   4112
   4113	while (ad->model_num) {
   4114		if (glob_match(ad->model_num, model_num)) {
   4115			if (ad->model_rev == NULL)
   4116				return ad->horkage;
   4117			if (glob_match(ad->model_rev, model_rev))
   4118				return ad->horkage;
   4119		}
   4120		ad++;
   4121	}
   4122	return 0;
   4123}
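
/*
 * Matching sketch: entries are matched with glob_match() from
 * linux/glob.h, so one pattern can cover a whole family of models or
 * firmware revisions.  Illustrative calls:
 */
#if 0
	/* true: trailing wildcard covers every capacity variant */
	glob_match("Samsung SSD 840 EVO*", "Samsung SSD 840 EVO 250GB");
	/* true: character class, as in the "CRD-848[02]B" entry above */
	glob_match("CRD-848[02]B", "CRD-8482B");
#endif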
   4124
   4125static int ata_dma_blacklisted(const struct ata_device *dev)
   4126{
    4127	/* We don't support polling DMA.  Blacklist DMA for ATAPI
    4128	 * devices with CDB-intr (and use PIO instead) if the LLDD
    4129	 * handles interrupts only in the HSM_ST_LAST state.
    4130	 */
   4131	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
   4132	    (dev->flags & ATA_DFLAG_CDB_INTR))
   4133		return 1;
   4134	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
   4135}
   4136
   4137/**
   4138 *	ata_is_40wire		-	check drive side detection
   4139 *	@dev: device
   4140 *
   4141 *	Perform drive side detection decoding, allowing for device vendors
   4142 *	who can't follow the documentation.
   4143 */
   4144
   4145static int ata_is_40wire(struct ata_device *dev)
   4146{
   4147	if (dev->horkage & ATA_HORKAGE_IVB)
   4148		return ata_drive_40wire_relaxed(dev->id);
   4149	return ata_drive_40wire(dev->id);
   4150}
   4151
   4152/**
   4153 *	cable_is_40wire		-	40/80/SATA decider
   4154 *	@ap: port to consider
   4155 *
   4156 *	This function encapsulates the policy for speed management
   4157 *	in one place. At the moment we don't cache the result but
   4158 *	there is a good case for setting ap->cbl to the result when
   4159 *	we are called with unknown cables (and figuring out if it
   4160 *	impacts hotplug at all).
   4161 *
   4162 *	Return 1 if the cable appears to be 40 wire.
   4163 */
   4164
   4165static int cable_is_40wire(struct ata_port *ap)
   4166{
   4167	struct ata_link *link;
   4168	struct ata_device *dev;
   4169
   4170	/* If the controller thinks we are 40 wire, we are. */
   4171	if (ap->cbl == ATA_CBL_PATA40)
   4172		return 1;
   4173
   4174	/* If the controller thinks we are 80 wire, we are. */
   4175	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
   4176		return 0;
   4177
    4178	/* If the system is known to use a short 40 wire cable (e.g.
    4179	 * a laptop), then we allow 80 wire modes even if the drive
    4180	 * isn't sure.
   4181	 */
   4182	if (ap->cbl == ATA_CBL_PATA40_SHORT)
   4183		return 0;
   4184
   4185	/* If the controller doesn't know, we scan.
   4186	 *
   4187	 * Note: We look for all 40 wire detects at this point.  Any
   4188	 *       80 wire detect is taken to be 80 wire cable because
   4189	 * - in many setups only the one drive (slave if present) will
   4190	 *   give a valid detect
    4191	 * - if you have a non-detect-capable drive you don't want it
   4192	 *   to colour the choice
   4193	 */
   4194	ata_for_each_link(link, ap, EDGE) {
   4195		ata_for_each_dev(dev, link, ENABLED) {
   4196			if (!ata_is_40wire(dev))
   4197				return 0;
   4198		}
   4199	}
   4200	return 1;
   4201}
   4202
   4203/**
   4204 *	ata_dev_xfermask - Compute supported xfermask of the given device
   4205 *	@dev: Device to compute xfermask for
   4206 *
   4207 *	Compute supported xfermask of @dev and store it in
   4208 *	dev->*_mask.  This function is responsible for applying all
   4209 *	known limits including host controller limits, device
   4210 *	blacklist, etc...
   4211 *
   4212 *	LOCKING:
   4213 *	None.
   4214 */
   4215static void ata_dev_xfermask(struct ata_device *dev)
   4216{
   4217	struct ata_link *link = dev->link;
   4218	struct ata_port *ap = link->ap;
   4219	struct ata_host *host = ap->host;
   4220	unsigned long xfer_mask;
   4221
   4222	/* controller modes available */
   4223	xfer_mask = ata_pack_xfermask(ap->pio_mask,
   4224				      ap->mwdma_mask, ap->udma_mask);
   4225
   4226	/* drive modes available */
   4227	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
   4228				       dev->mwdma_mask, dev->udma_mask);
   4229	xfer_mask &= ata_id_xfermask(dev->id);
   4230
   4231	/*
   4232	 *	CFA Advanced TrueIDE timings are not allowed on a shared
   4233	 *	cable
   4234	 */
   4235	if (ata_dev_pair(dev)) {
   4236		/* No PIO5 or PIO6 */
   4237		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
   4238		/* No MWDMA3 or MWDMA 4 */
   4239		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
   4240	}
   4241
   4242	if (ata_dma_blacklisted(dev)) {
   4243		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
   4244		ata_dev_warn(dev,
   4245			     "device is on DMA blacklist, disabling DMA\n");
   4246	}
   4247
   4248	if ((host->flags & ATA_HOST_SIMPLEX) &&
   4249	    host->simplex_claimed && host->simplex_claimed != ap) {
   4250		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
   4251		ata_dev_warn(dev,
   4252			     "simplex DMA is claimed by other device, disabling DMA\n");
   4253	}
   4254
   4255	if (ap->flags & ATA_FLAG_NO_IORDY)
   4256		xfer_mask &= ata_pio_mask_no_iordy(dev);
   4257
   4258	if (ap->ops->mode_filter)
   4259		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
   4260
   4261	/* Apply cable rule here.  Don't apply it early because when
   4262	 * we handle hot plug the cable type can itself change.
   4263	 * Check this last so that we know if the transfer rate was
   4264	 * solely limited by the cable.
   4265	 * Unknown or 80 wire cables reported host side are checked
   4266	 * drive side as well. Cases where we know a 40wire cable
   4267	 * is used safely for 80 are not checked here.
   4268	 */
   4269	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
   4270		/* UDMA/44 or higher would be available */
   4271		if (cable_is_40wire(ap)) {
   4272			ata_dev_warn(dev,
   4273				     "limited to UDMA/33 due to 40-wire cable\n");
   4274			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
   4275		}
   4276
   4277	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
   4278			    &dev->mwdma_mask, &dev->udma_mask);
   4279}
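
/*
 * Mask plumbing sketch: ata_pack_xfermask() merges the PIO/MWDMA/UDMA
 * masks into a single bitmap (shifted by ATA_SHIFT_*) so that limits
 * can be applied with plain bitwise ops; ata_unpack_xfermask() splits
 * the result back out, as done at the end of the function above.
 */
#if 0
	unsigned long mask = ata_pack_xfermask(dev->pio_mask,
					       dev->mwdma_mask,
					       dev->udma_mask);

	mask &= ~ATA_MASK_UDMA;		/* e.g. drop all UDMA modes */
	ata_unpack_xfermask(mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
#endif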
   4280
   4281/**
   4282 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
   4283 *	@dev: Device to which command will be sent
   4284 *
    4285 *	Issue the SET FEATURES - XFER MODE command to device @dev.
   4287 *
   4288 *	LOCKING:
   4289 *	PCI/etc. bus probe sem.
   4290 *
   4291 *	RETURNS:
   4292 *	0 on success, AC_ERR_* mask otherwise.
   4293 */
   4294
   4295static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
   4296{
   4297	struct ata_taskfile tf;
   4298	unsigned int err_mask;
   4299
   4300	/* set up set-features taskfile */
   4301	ata_dev_dbg(dev, "set features - xfer mode\n");
   4302
   4303	/* Some controllers and ATAPI devices show flaky interrupt
   4304	 * behavior after setting xfer mode.  Use polling instead.
   4305	 */
   4306	ata_tf_init(dev, &tf);
   4307	tf.command = ATA_CMD_SET_FEATURES;
   4308	tf.feature = SETFEATURES_XFER;
   4309	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
   4310	tf.protocol = ATA_PROT_NODATA;
   4311	/* If we are using IORDY we must send the mode setting command */
   4312	if (ata_pio_need_iordy(dev))
   4313		tf.nsect = dev->xfer_mode;
   4314	/* If the device has IORDY and the controller does not - turn it off */
    4315	else if (ata_id_has_iordy(dev->id))
   4316		tf.nsect = 0x01;
   4317	else /* In the ancient relic department - skip all of this */
   4318		return 0;
   4319
   4320	/* On some disks, this command causes spin-up, so we need longer timeout */
   4321	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
   4322
   4323	return err_mask;
   4324}
   4325
   4326/**
   4327 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
   4328 *	@dev: Device to which command will be sent
   4329 *	@enable: Whether to enable or disable the feature
    4330 *	@feature: The sector count value representing the feature to set
    4331 *
    4332 *	Issue the SET FEATURES - SATA FEATURES command to device @dev
    4333 *	with the sector count set to @feature.
   4334 *
   4335 *	LOCKING:
   4336 *	PCI/etc. bus probe sem.
   4337 *
   4338 *	RETURNS:
   4339 *	0 on success, AC_ERR_* mask otherwise.
   4340 */
   4341unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
   4342{
   4343	struct ata_taskfile tf;
   4344	unsigned int err_mask;
   4345	unsigned long timeout = 0;
   4346
   4347	/* set up set-features taskfile */
   4348	ata_dev_dbg(dev, "set features - SATA features\n");
   4349
   4350	ata_tf_init(dev, &tf);
   4351	tf.command = ATA_CMD_SET_FEATURES;
   4352	tf.feature = enable;
   4353	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
   4354	tf.protocol = ATA_PROT_NODATA;
   4355	tf.nsect = feature;
   4356
   4357	if (enable == SETFEATURES_SPINUP)
   4358		timeout = ata_probe_timeout ?
   4359			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
   4360	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
   4361
   4362	return err_mask;
   4363}
   4364EXPORT_SYMBOL_GPL(ata_dev_set_feature);
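
/*
 * Usage sketch: requesting spin-up of a power-up-in-standby drive.
 * This mirrors the SETFEATURES_SPINUP call made during device probe;
 * the warning is illustrative.
 */
#if 0
	err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
	if (err_mask)
		ata_dev_warn(dev, "spin-up failed (err_mask=0x%x)\n",
			     err_mask);
#endif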
   4365
   4366/**
   4367 *	ata_dev_init_params - Issue INIT DEV PARAMS command
   4368 *	@dev: Device to which command will be sent
   4369 *	@heads: Number of heads (taskfile parameter)
   4370 *	@sectors: Number of sectors (taskfile parameter)
   4371 *
   4372 *	LOCKING:
   4373 *	Kernel thread context (may sleep)
   4374 *
   4375 *	RETURNS:
   4376 *	0 on success, AC_ERR_* mask otherwise.
   4377 */
   4378static unsigned int ata_dev_init_params(struct ata_device *dev,
   4379					u16 heads, u16 sectors)
   4380{
   4381	struct ata_taskfile tf;
   4382	unsigned int err_mask;
   4383
   4384	/* Number of sectors per track 1-255. Number of heads 1-16 */
   4385	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
   4386		return AC_ERR_INVALID;
   4387
   4388	/* set up init dev params taskfile */
    4389	ata_dev_dbg(dev, "init dev params\n");
   4390
   4391	ata_tf_init(dev, &tf);
   4392	tf.command = ATA_CMD_INIT_DEV_PARAMS;
   4393	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
   4394	tf.protocol = ATA_PROT_NODATA;
   4395	tf.nsect = sectors;
   4396	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
   4397
   4398	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
    4399	/* A clean abort indicates an ancient or just out-of-spec drive,
    4400	   and we should continue as we issue the setup based on the
    4401	   geometry the drive reports as working */
   4402	if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
   4403		err_mask = 0;
   4404
   4405	return err_mask;
   4406}
   4407
   4408/**
   4409 *	atapi_check_dma - Check whether ATAPI DMA can be supported
   4410 *	@qc: Metadata associated with taskfile to check
   4411 *
   4412 *	Allow low-level driver to filter ATA PACKET commands, returning
   4413 *	a status indicating whether or not it is OK to use DMA for the
   4414 *	supplied PACKET command.
   4415 *
   4416 *	LOCKING:
   4417 *	spin_lock_irqsave(host lock)
   4418 *
   4419 *	RETURNS: 0 when ATAPI DMA can be used
   4420 *               nonzero otherwise
   4421 */
   4422int atapi_check_dma(struct ata_queued_cmd *qc)
   4423{
   4424	struct ata_port *ap = qc->ap;
   4425
    4426	/* Don't allow DMA if the transfer isn't a multiple of 16 bytes.
    4427	 * Quite a few ATAPI devices choke on such DMA requests.
   4428	 */
   4429	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
   4430	    unlikely(qc->nbytes & 15))
   4431		return 1;
   4432
   4433	if (ap->ops->check_atapi_dma)
   4434		return ap->ops->check_atapi_dma(qc);
   4435
   4436	return 0;
   4437}
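
/*
 * Callback sketch (hypothetical): an LLD that cannot DMA certain
 * PACKET commands returns nonzero from ->check_atapi_dma to force
 * PIO for just those commands.
 */
#if 0
static int example_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* e.g. this controller cannot DMA odd-sized ATAPI transfers */
	return qc->nbytes & 1;
}
#endif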
   4438
   4439/**
   4440 *	ata_std_qc_defer - Check whether a qc needs to be deferred
   4441 *	@qc: ATA command in question
   4442 *
   4443 *	Non-NCQ commands cannot run with any other command, NCQ or
    4444 *	not.  As the upper layer only knows the queue depth, we are
   4445 *	responsible for maintaining exclusion.  This function checks
   4446 *	whether a new command @qc can be issued.
   4447 *
   4448 *	LOCKING:
   4449 *	spin_lock_irqsave(host lock)
   4450 *
   4451 *	RETURNS:
   4452 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
   4453 */
   4454int ata_std_qc_defer(struct ata_queued_cmd *qc)
   4455{
   4456	struct ata_link *link = qc->dev->link;
   4457
   4458	if (ata_is_ncq(qc->tf.protocol)) {
   4459		if (!ata_tag_valid(link->active_tag))
   4460			return 0;
   4461	} else {
   4462		if (!ata_tag_valid(link->active_tag) && !link->sactive)
   4463			return 0;
   4464	}
   4465
   4466	return ATA_DEFER_LINK;
   4467}
   4468EXPORT_SYMBOL_GPL(ata_std_qc_defer);
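
/*
 * Callback sketch (hypothetical "example_" names): a driver with an
 * extra hardware constraint can defer on its own condition first and
 * then fall back to the standard NCQ/non-NCQ exclusion rule.
 */
#if 0
static int example_qc_defer(struct ata_queued_cmd *qc)
{
	if (example_controller_busy(qc->ap))	/* hypothetical check */
		return ATA_DEFER_PORT;
	return ata_std_qc_defer(qc);
}
#endif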
   4469
   4470enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
   4471{
   4472	return AC_ERR_OK;
   4473}
   4474EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
   4475
   4476/**
   4477 *	ata_sg_init - Associate command with scatter-gather table.
   4478 *	@qc: Command to be associated
   4479 *	@sg: Scatter-gather table.
   4480 *	@n_elem: Number of elements in s/g table.
   4481 *
   4482 *	Initialize the data-related elements of queued_cmd @qc
   4483 *	to point to a scatter-gather table @sg, containing @n_elem
   4484 *	elements.
   4485 *
   4486 *	LOCKING:
   4487 *	spin_lock_irqsave(host lock)
   4488 */
   4489void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
   4490		 unsigned int n_elem)
   4491{
   4492	qc->sg = sg;
   4493	qc->n_elem = n_elem;
   4494	qc->cursg = qc->sg;
   4495}
   4496
   4497#ifdef CONFIG_HAS_DMA
   4498
   4499/**
   4500 *	ata_sg_clean - Unmap DMA memory associated with command
   4501 *	@qc: Command containing DMA memory to be released
   4502 *
   4503 *	Unmap all mapped DMA memory associated with this command.
   4504 *
   4505 *	LOCKING:
   4506 *	spin_lock_irqsave(host lock)
   4507 */
   4508static void ata_sg_clean(struct ata_queued_cmd *qc)
   4509{
   4510	struct ata_port *ap = qc->ap;
   4511	struct scatterlist *sg = qc->sg;
   4512	int dir = qc->dma_dir;
   4513
   4514	WARN_ON_ONCE(sg == NULL);
   4515
   4516	if (qc->n_elem)
   4517		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
   4518
   4519	qc->flags &= ~ATA_QCFLAG_DMAMAP;
   4520	qc->sg = NULL;
   4521}
   4522
   4523/**
   4524 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
   4525 *	@qc: Command with scatter-gather table to be mapped.
   4526 *
   4527 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
   4528 *
   4529 *	LOCKING:
   4530 *	spin_lock_irqsave(host lock)
   4531 *
   4532 *	RETURNS:
   4533 *	Zero on success, negative on error.
   4534 *
   4535 */
   4536static int ata_sg_setup(struct ata_queued_cmd *qc)
   4537{
   4538	struct ata_port *ap = qc->ap;
   4539	unsigned int n_elem;
   4540
   4541	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
   4542	if (n_elem < 1)
   4543		return -1;
   4544
   4545	qc->orig_n_elem = qc->n_elem;
   4546	qc->n_elem = n_elem;
   4547	qc->flags |= ATA_QCFLAG_DMAMAP;
   4548
   4549	return 0;
   4550}
   4551
   4552#else /* !CONFIG_HAS_DMA */
   4553
   4554static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
   4555static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
   4556
   4557#endif /* !CONFIG_HAS_DMA */
   4558
   4559/**
   4560 *	swap_buf_le16 - swap halves of 16-bit words in place
   4561 *	@buf:  Buffer to swap
   4562 *	@buf_words:  Number of 16-bit words in buffer.
   4563 *
   4564 *	Swap halves of 16-bit words if needed to convert from
   4565 *	little-endian byte order to native cpu byte order, or
   4566 *	vice-versa.
   4567 *
   4568 *	LOCKING:
   4569 *	Inherited from caller.
   4570 */
   4571void swap_buf_le16(u16 *buf, unsigned int buf_words)
   4572{
   4573#ifdef __BIG_ENDIAN
   4574	unsigned int i;
   4575
   4576	for (i = 0; i < buf_words; i++)
   4577		buf[i] = le16_to_cpu(buf[i]);
   4578#endif /* __BIG_ENDIAN */
   4579}
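
/*
 * Usage sketch: IDENTIFY data is little-endian on the wire, so PIO-in
 * paths byte-swap the buffer in place; on little-endian hosts this is
 * a no-op, as the #ifdef above shows.
 */
#if 0
	swap_buf_le16(id, ATA_ID_WORDS);	/* id is u16[ATA_ID_WORDS] */
#endif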
   4580
   4581/**
   4582 *	ata_qc_free - free unused ata_queued_cmd
   4583 *	@qc: Command to complete
   4584 *
    4585 *	Designed to free an unused ata_queued_cmd object
    4586 *	in case something prevents it from being used.
   4587 *
   4588 *	LOCKING:
   4589 *	spin_lock_irqsave(host lock)
   4590 */
   4591void ata_qc_free(struct ata_queued_cmd *qc)
   4592{
   4593	qc->flags = 0;
   4594	if (ata_tag_valid(qc->tag))
   4595		qc->tag = ATA_TAG_POISON;
   4596}
   4597
   4598void __ata_qc_complete(struct ata_queued_cmd *qc)
   4599{
   4600	struct ata_port *ap;
   4601	struct ata_link *link;
   4602
   4603	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
   4604	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
   4605	ap = qc->ap;
   4606	link = qc->dev->link;
   4607
   4608	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
   4609		ata_sg_clean(qc);
   4610
   4611	/* command should be marked inactive atomically with qc completion */
   4612	if (ata_is_ncq(qc->tf.protocol)) {
   4613		link->sactive &= ~(1 << qc->hw_tag);
   4614		if (!link->sactive)
   4615			ap->nr_active_links--;
   4616	} else {
   4617		link->active_tag = ATA_TAG_POISON;
   4618		ap->nr_active_links--;
   4619	}
   4620
   4621	/* clear exclusive status */
   4622	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
   4623		     ap->excl_link == link))
   4624		ap->excl_link = NULL;
   4625
   4626	/* atapi: mark qc as inactive to prevent the interrupt handler
   4627	 * from completing the command twice later, before the error handler
   4628	 * is called. (when rc != 0 and atapi request sense is needed)
   4629	 */
   4630	qc->flags &= ~ATA_QCFLAG_ACTIVE;
   4631	ap->qc_active &= ~(1ULL << qc->tag);
   4632
   4633	/* call completion callback */
   4634	qc->complete_fn(qc);
   4635}
   4636
   4637static void fill_result_tf(struct ata_queued_cmd *qc)
   4638{
   4639	struct ata_port *ap = qc->ap;
   4640
   4641	qc->result_tf.flags = qc->tf.flags;
   4642	ap->ops->qc_fill_rtf(qc);
   4643}
   4644
   4645static void ata_verify_xfer(struct ata_queued_cmd *qc)
   4646{
   4647	struct ata_device *dev = qc->dev;
   4648
   4649	if (!ata_is_data(qc->tf.protocol))
   4650		return;
   4651
   4652	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
   4653		return;
   4654
   4655	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
   4656}
   4657
   4658/**
   4659 *	ata_qc_complete - Complete an active ATA command
   4660 *	@qc: Command to complete
   4661 *
   4662 *	Indicate to the mid and upper layers that an ATA command has
   4663 *	completed, with either an ok or not-ok status.
   4664 *
   4665 *	Refrain from calling this function multiple times when
   4666 *	successfully completing multiple NCQ commands.
   4667 *	ata_qc_complete_multiple() should be used instead, which will
   4668 *	properly update IRQ expect state.
   4669 *
   4670 *	LOCKING:
   4671 *	spin_lock_irqsave(host lock)
   4672 */
   4673void ata_qc_complete(struct ata_queued_cmd *qc)
   4674{
   4675	struct ata_port *ap = qc->ap;
   4676
   4677	/* Trigger the LED (if available) */
   4678	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
   4679
   4680	/* XXX: New EH and old EH use different mechanisms to
   4681	 * synchronize EH with regular execution path.
   4682	 *
   4683	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
   4684	 * Normal execution path is responsible for not accessing a
   4685	 * failed qc.  libata core enforces the rule by returning NULL
   4686	 * from ata_qc_from_tag() for failed qcs.
   4687	 *
   4688	 * Old EH depends on ata_qc_complete() nullifying completion
   4689	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
   4690	 * not synchronize with interrupt handler.  Only PIO task is
   4691	 * taken care of.
   4692	 */
   4693	if (ap->ops->error_handler) {
   4694		struct ata_device *dev = qc->dev;
   4695		struct ata_eh_info *ehi = &dev->link->eh_info;
   4696
   4697		if (unlikely(qc->err_mask))
   4698			qc->flags |= ATA_QCFLAG_FAILED;
   4699
   4700		/*
   4701		 * Finish internal commands without any further processing
   4702		 * and always with the result TF filled.
   4703		 */
   4704		if (unlikely(ata_tag_internal(qc->tag))) {
   4705			fill_result_tf(qc);
   4706			trace_ata_qc_complete_internal(qc);
   4707			__ata_qc_complete(qc);
   4708			return;
   4709		}
   4710
   4711		/*
   4712		 * Non-internal qc has failed.  Fill the result TF and
   4713		 * summon EH.
   4714		 */
   4715		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
   4716			fill_result_tf(qc);
   4717			trace_ata_qc_complete_failed(qc);
   4718			ata_qc_schedule_eh(qc);
   4719			return;
   4720		}
   4721
   4722		WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
   4723
   4724		/* read result TF if requested */
   4725		if (qc->flags & ATA_QCFLAG_RESULT_TF)
   4726			fill_result_tf(qc);
   4727
   4728		trace_ata_qc_complete_done(qc);
   4729		/* Some commands need post-processing after successful
   4730		 * completion.
   4731		 */
   4732		switch (qc->tf.command) {
   4733		case ATA_CMD_SET_FEATURES:
   4734			if (qc->tf.feature != SETFEATURES_WC_ON &&
   4735			    qc->tf.feature != SETFEATURES_WC_OFF &&
   4736			    qc->tf.feature != SETFEATURES_RA_ON &&
   4737			    qc->tf.feature != SETFEATURES_RA_OFF)
   4738				break;
   4739			fallthrough;
   4740		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
   4741		case ATA_CMD_SET_MULTI: /* multi_count changed */
   4742			/* revalidate device */
   4743			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
   4744			ata_port_schedule_eh(ap);
   4745			break;
   4746
   4747		case ATA_CMD_SLEEP:
   4748			dev->flags |= ATA_DFLAG_SLEEPING;
   4749			break;
   4750		}
   4751
   4752		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
   4753			ata_verify_xfer(qc);
   4754
   4755		__ata_qc_complete(qc);
   4756	} else {
   4757		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
   4758			return;
   4759
   4760		/* read result TF if failed or requested */
   4761		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
   4762			fill_result_tf(qc);
   4763
   4764		__ata_qc_complete(qc);
   4765	}
   4766}
   4767EXPORT_SYMBOL_GPL(ata_qc_complete);
   4768
   4769/**
   4770 *	ata_qc_get_active - get bitmask of active qcs
   4771 *	@ap: port in question
   4772 *
   4773 *	LOCKING:
   4774 *	spin_lock_irqsave(host lock)
   4775 *
   4776 *	RETURNS:
   4777 *	Bitmask of active qcs
   4778 */
   4779u64 ata_qc_get_active(struct ata_port *ap)
   4780{
   4781	u64 qc_active = ap->qc_active;
   4782
   4783	/* ATA_TAG_INTERNAL is sent to hw as tag 0 */
   4784	if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
   4785		qc_active |= (1 << 0);
   4786		qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
   4787	}
   4788
   4789	return qc_active;
   4790}
   4791EXPORT_SYMBOL_GPL(ata_qc_get_active);
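
/*
 * Usage sketch (hypothetical register read): an NCQ-capable LLD's
 * completion path XORs the tags the hardware has finished against
 * ata_qc_get_active() and hands the still-active mask to
 * ata_qc_complete_multiple(), which completes the rest.
 */
#if 0
	u64 done_mask = example_read_completed_tags(ap);	/* hypothetical */

	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
#endif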
   4792
   4793/**
   4794 *	ata_qc_issue - issue taskfile to device
   4795 *	@qc: command to issue to device
   4796 *
    4797 *	Prepare an ATA command for submission to the device.
   4798 *	This includes mapping the data into a DMA-able
   4799 *	area, filling in the S/G table, and finally
   4800 *	writing the taskfile to hardware, starting the command.
   4801 *
   4802 *	LOCKING:
   4803 *	spin_lock_irqsave(host lock)
   4804 */
   4805void ata_qc_issue(struct ata_queued_cmd *qc)
   4806{
   4807	struct ata_port *ap = qc->ap;
   4808	struct ata_link *link = qc->dev->link;
   4809	u8 prot = qc->tf.protocol;
   4810
   4811	/* Make sure only one non-NCQ command is outstanding.  The
   4812	 * check is skipped for old EH because it reuses active qc to
   4813	 * request ATAPI sense.
   4814	 */
   4815	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
   4816
   4817	if (ata_is_ncq(prot)) {
   4818		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
   4819
   4820		if (!link->sactive)
   4821			ap->nr_active_links++;
   4822		link->sactive |= 1 << qc->hw_tag;
   4823	} else {
   4824		WARN_ON_ONCE(link->sactive);
   4825
   4826		ap->nr_active_links++;
   4827		link->active_tag = qc->tag;
   4828	}
   4829
   4830	qc->flags |= ATA_QCFLAG_ACTIVE;
   4831	ap->qc_active |= 1ULL << qc->tag;
   4832
   4833	/*
    4834	 * We guarantee to LLDs that they will have at least one
    4835	 * non-empty sg entry if the command is a data command.
   4836	 */
   4837	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
   4838		goto sys_err;
   4839
   4840	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
   4841				 (ap->flags & ATA_FLAG_PIO_DMA)))
   4842		if (ata_sg_setup(qc))
   4843			goto sys_err;
   4844
   4845	/* if device is sleeping, schedule reset and abort the link */
   4846	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
   4847		link->eh_info.action |= ATA_EH_RESET;
   4848		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
   4849		ata_link_abort(link);
   4850		return;
   4851	}
   4852
   4853	trace_ata_qc_prep(qc);
   4854	qc->err_mask |= ap->ops->qc_prep(qc);
   4855	if (unlikely(qc->err_mask))
   4856		goto err;
   4857	trace_ata_qc_issue(qc);
   4858	qc->err_mask |= ap->ops->qc_issue(qc);
   4859	if (unlikely(qc->err_mask))
   4860		goto err;
   4861	return;
   4862
   4863sys_err:
   4864	qc->err_mask |= AC_ERR_SYSTEM;
   4865err:
   4866	ata_qc_complete(qc);
   4867}
   4868
   4869/**
   4870 *	ata_phys_link_online - test whether the given link is online
   4871 *	@link: ATA link to test
   4872 *
   4873 *	Test whether @link is online.  Note that this function returns
   4874 *	0 if online status of @link cannot be obtained, so
   4875 *	ata_link_online(link) != !ata_link_offline(link).
   4876 *
   4877 *	LOCKING:
   4878 *	None.
   4879 *
   4880 *	RETURNS:
   4881 *	True if the port online status is available and online.
   4882 */
   4883bool ata_phys_link_online(struct ata_link *link)
   4884{
   4885	u32 sstatus;
   4886
   4887	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
   4888	    ata_sstatus_online(sstatus))
   4889		return true;
   4890	return false;
   4891}
   4892
   4893/**
   4894 *	ata_phys_link_offline - test whether the given link is offline
   4895 *	@link: ATA link to test
   4896 *
   4897 *	Test whether @link is offline.  Note that this function
   4898 *	returns 0 if offline status of @link cannot be obtained, so
   4899 *	ata_link_online(link) != !ata_link_offline(link).
   4900 *
   4901 *	LOCKING:
   4902 *	None.
   4903 *
   4904 *	RETURNS:
   4905 *	True if the port offline status is available and offline.
   4906 */
   4907bool ata_phys_link_offline(struct ata_link *link)
   4908{
   4909	u32 sstatus;
   4910
   4911	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
   4912	    !ata_sstatus_online(sstatus))
   4913		return true;
   4914	return false;
   4915}
   4916
   4917/**
   4918 *	ata_link_online - test whether the given link is online
   4919 *	@link: ATA link to test
   4920 *
   4921 *	Test whether @link is online.  This is identical to
   4922 *	ata_phys_link_online() when there's no slave link.  When
   4923 *	there's a slave link, this function should only be called on
   4924 *	the master link and will return true if any of M/S links is
   4925 *	online.
   4926 *
   4927 *	LOCKING:
   4928 *	None.
   4929 *
   4930 *	RETURNS:
   4931 *	True if the port online status is available and online.
   4932 */
   4933bool ata_link_online(struct ata_link *link)
   4934{
   4935	struct ata_link *slave = link->ap->slave_link;
   4936
   4937	WARN_ON(link == slave);	/* shouldn't be called on slave link */
   4938
   4939	return ata_phys_link_online(link) ||
   4940		(slave && ata_phys_link_online(slave));
   4941}
   4942EXPORT_SYMBOL_GPL(ata_link_online);
   4943
   4944/**
   4945 *	ata_link_offline - test whether the given link is offline
   4946 *	@link: ATA link to test
   4947 *
   4948 *	Test whether @link is offline.  This is identical to
   4949 *	ata_phys_link_offline() when there's no slave link.  When
   4950 *	there's a slave link, this function should only be called on
   4951 *	the master link and will return true if both M/S links are
   4952 *	offline.
   4953 *
   4954 *	LOCKING:
   4955 *	None.
   4956 *
   4957 *	RETURNS:
   4958 *	True if the port offline status is available and offline.
   4959 */
   4960bool ata_link_offline(struct ata_link *link)
   4961{
   4962	struct ata_link *slave = link->ap->slave_link;
   4963
   4964	WARN_ON(link == slave);	/* shouldn't be called on slave link */
   4965
   4966	return ata_phys_link_offline(link) &&
   4967		(!slave || ata_phys_link_offline(slave));
   4968}
   4969EXPORT_SYMBOL_GPL(ata_link_offline);
   4970
   4971#ifdef CONFIG_PM
   4972static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
   4973				unsigned int action, unsigned int ehi_flags,
   4974				bool async)
   4975{
   4976	struct ata_link *link;
   4977	unsigned long flags;
   4978
   4979	/* Previous resume operation might still be in
   4980	 * progress.  Wait for PM_PENDING to clear.
   4981	 */
   4982	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
   4983		ata_port_wait_eh(ap);
   4984		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
   4985	}
   4986
   4987	/* request PM ops to EH */
   4988	spin_lock_irqsave(ap->lock, flags);
   4989
   4990	ap->pm_mesg = mesg;
   4991	ap->pflags |= ATA_PFLAG_PM_PENDING;
   4992	ata_for_each_link(link, ap, HOST_FIRST) {
   4993		link->eh_info.action |= action;
   4994		link->eh_info.flags |= ehi_flags;
   4995	}
   4996
   4997	ata_port_schedule_eh(ap);
   4998
   4999	spin_unlock_irqrestore(ap->lock, flags);
   5000
   5001	if (!async) {
   5002		ata_port_wait_eh(ap);
   5003		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
   5004	}
   5005}
   5006
   5007/*
    5008 * On some hardware, the device fails to respond after being spun down for
    5009 * suspend.  As it won't be used before being resumed, we don't need to
    5010 * touch it.  Ask EH to skip the usual stuff and proceed directly to suspend.
   5011 *
   5012 * http://thread.gmane.org/gmane.linux.ide/46764
   5013 */
   5014static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
   5015						 | ATA_EHI_NO_AUTOPSY
   5016						 | ATA_EHI_NO_RECOVERY;
   5017
   5018static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
   5019{
   5020	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
   5021}
   5022
   5023static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
   5024{
   5025	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
   5026}
   5027
   5028static int ata_port_pm_suspend(struct device *dev)
   5029{
   5030	struct ata_port *ap = to_ata_port(dev);
   5031
   5032	if (pm_runtime_suspended(dev))
   5033		return 0;
   5034
   5035	ata_port_suspend(ap, PMSG_SUSPEND);
   5036	return 0;
   5037}
   5038
   5039static int ata_port_pm_freeze(struct device *dev)
   5040{
   5041	struct ata_port *ap = to_ata_port(dev);
   5042
   5043	if (pm_runtime_suspended(dev))
   5044		return 0;
   5045
   5046	ata_port_suspend(ap, PMSG_FREEZE);
   5047	return 0;
   5048}
   5049
   5050static int ata_port_pm_poweroff(struct device *dev)
   5051{
   5052	ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
   5053	return 0;
   5054}
   5055
   5056static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
   5057						| ATA_EHI_QUIET;
   5058
   5059static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
   5060{
   5061	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
   5062}
   5063
   5064static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
   5065{
   5066	ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
   5067}
   5068
   5069static int ata_port_pm_resume(struct device *dev)
   5070{
   5071	ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
   5072	pm_runtime_disable(dev);
   5073	pm_runtime_set_active(dev);
   5074	pm_runtime_enable(dev);
   5075	return 0;
   5076}
   5077
   5078/*
    5079 * For ODDs, the upper layer polls for media change every few seconds,
    5080 * which makes the device enter and leave suspend every few seconds.  As
    5081 * each suspend causes a hard/soft reset, the gain from runtime suspend
    5082 * is very small and the ODD may malfunction after being reset constantly.
    5083 * So the idle callback here will not proceed to suspend if a non-ZPODD
    5084 * capable ODD is attached to the port.
   5085 */
   5086static int ata_port_runtime_idle(struct device *dev)
   5087{
   5088	struct ata_port *ap = to_ata_port(dev);
   5089	struct ata_link *link;
   5090	struct ata_device *adev;
   5091
   5092	ata_for_each_link(link, ap, HOST_FIRST) {
   5093		ata_for_each_dev(adev, link, ENABLED)
   5094			if (adev->class == ATA_DEV_ATAPI &&
   5095			    !zpodd_dev_enabled(adev))
   5096				return -EBUSY;
   5097	}
   5098
   5099	return 0;
   5100}
   5101
   5102static int ata_port_runtime_suspend(struct device *dev)
   5103{
   5104	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
   5105	return 0;
   5106}
   5107
   5108static int ata_port_runtime_resume(struct device *dev)
   5109{
   5110	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
   5111	return 0;
   5112}
   5113
   5114static const struct dev_pm_ops ata_port_pm_ops = {
   5115	.suspend = ata_port_pm_suspend,
   5116	.resume = ata_port_pm_resume,
   5117	.freeze = ata_port_pm_freeze,
   5118	.thaw = ata_port_pm_resume,
   5119	.poweroff = ata_port_pm_poweroff,
   5120	.restore = ata_port_pm_resume,
   5121
   5122	.runtime_suspend = ata_port_runtime_suspend,
   5123	.runtime_resume = ata_port_runtime_resume,
   5124	.runtime_idle = ata_port_runtime_idle,
   5125};
   5126
   5127/* sas ports don't participate in pm runtime management of ata_ports,
   5128 * and need to resume ata devices at the domain level, not the per-port
   5129 * level. sas suspend/resume is async to allow parallel port recovery
   5130 * since sas has multiple ata_port instances per Scsi_Host.
   5131 */
   5132void ata_sas_port_suspend(struct ata_port *ap)
   5133{
   5134	ata_port_suspend_async(ap, PMSG_SUSPEND);
   5135}
   5136EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
   5137
   5138void ata_sas_port_resume(struct ata_port *ap)
   5139{
   5140	ata_port_resume_async(ap, PMSG_RESUME);
   5141}
   5142EXPORT_SYMBOL_GPL(ata_sas_port_resume);
   5143
   5144/**
   5145 *	ata_host_suspend - suspend host
   5146 *	@host: host to suspend
   5147 *	@mesg: PM message
   5148 *
   5149 *	Suspend @host.  Actual operation is performed by port suspend.
   5150 */
   5151void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
   5152{
   5153	host->dev->power.power_state = mesg;
   5154}
   5155EXPORT_SYMBOL_GPL(ata_host_suspend);
   5156
   5157/**
   5158 *	ata_host_resume - resume host
   5159 *	@host: host to resume
   5160 *
   5161 *	Resume @host.  Actual operation is performed by port resume.
   5162 */
   5163void ata_host_resume(struct ata_host *host)
   5164{
   5165	host->dev->power.power_state = PMSG_ON;
   5166}
   5167EXPORT_SYMBOL_GPL(ata_host_resume);
   5168#endif
   5169
   5170const struct device_type ata_port_type = {
   5171	.name = "ata_port",
   5172#ifdef CONFIG_PM
   5173	.pm = &ata_port_pm_ops,
   5174#endif
   5175};
   5176
   5177/**
   5178 *	ata_dev_init - Initialize an ata_device structure
   5179 *	@dev: Device structure to initialize
   5180 *
   5181 *	Initialize @dev in preparation for probing.
   5182 *
   5183 *	LOCKING:
   5184 *	Inherited from caller.
   5185 */
   5186void ata_dev_init(struct ata_device *dev)
   5187{
   5188	struct ata_link *link = ata_dev_phys_link(dev);
   5189	struct ata_port *ap = link->ap;
   5190	unsigned long flags;
   5191
   5192	/* SATA spd limit is bound to the attached device, reset together */
   5193	link->sata_spd_limit = link->hw_sata_spd_limit;
   5194	link->sata_spd = 0;
   5195
   5196	/* High bits of dev->flags are used to record warm plug
   5197	 * requests which occur asynchronously.  Synchronize using
   5198	 * host lock.
   5199	 */
   5200	spin_lock_irqsave(ap->lock, flags);
   5201	dev->flags &= ~ATA_DFLAG_INIT_MASK;
   5202	dev->horkage = 0;
   5203	spin_unlock_irqrestore(ap->lock, flags);
   5204
   5205	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
   5206	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
   5207	dev->pio_mask = UINT_MAX;
   5208	dev->mwdma_mask = UINT_MAX;
   5209	dev->udma_mask = UINT_MAX;
   5210}
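
        /*
         * A note on the memset() above: ATA_DEVICE_CLEAR_BEGIN/END bracket the
         * part of struct ata_device that is wiped on each (re-)init, so fields
         * that must survive (e.g. the link back-pointer and devno set up in
         * ata_link_init()) live outside that window.  A minimal sketch of the
         * same idiom, using a hypothetical struct that is not part of libata:
         *
         *	struct foo {
         *		int keep;		// survives re-init
         *		int clear_begin[0];	// start-of-clear marker
         *		int scratch;		// wiped on re-init
         *		int clear_end[0];	// end-of-clear marker
         *	};
         *
         *	memset(f->clear_begin, 0,
         *	       offsetof(struct foo, clear_end) -
         *	       offsetof(struct foo, clear_begin));
         */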
   5211
   5212/**
   5213 *	ata_link_init - Initialize an ata_link structure
   5214 *	@ap: ATA port link is attached to
   5215 *	@link: Link structure to initialize
   5216 *	@pmp: Port multiplier port number
   5217 *
   5218 *	Initialize @link.
   5219 *
   5220 *	LOCKING:
   5221 *	Kernel thread context (may sleep)
   5222 */
   5223void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
   5224{
   5225	int i;
   5226
   5227	/* clear everything except for devices */
   5228	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
   5229	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
   5230
   5231	link->ap = ap;
   5232	link->pmp = pmp;
   5233	link->active_tag = ATA_TAG_POISON;
   5234	link->hw_sata_spd_limit = UINT_MAX;
   5235
   5236	/* can't use iterator, ap isn't initialized yet */
   5237	for (i = 0; i < ATA_MAX_DEVICES; i++) {
   5238		struct ata_device *dev = &link->device[i];
   5239
   5240		dev->link = link;
   5241		dev->devno = dev - link->device;
   5242#ifdef CONFIG_ATA_ACPI
   5243		dev->gtf_filter = ata_acpi_gtf_filter;
   5244#endif
   5245		ata_dev_init(dev);
   5246	}
   5247}
   5248
   5249/**
   5250 *	sata_link_init_spd - Initialize link->sata_spd_limit
   5251 *	@link: Link to configure sata_spd_limit for
   5252 *
   5253 *	Initialize ``link->[hw_]sata_spd_limit`` to the currently
   5254 *	configured value.
   5255 *
   5256 *	LOCKING:
   5257 *	Kernel thread context (may sleep).
   5258 *
   5259 *	RETURNS:
   5260 *	0 on success, -errno on failure.
   5261 */
   5262int sata_link_init_spd(struct ata_link *link)
   5263{
   5264	u8 spd;
   5265	int rc;
   5266
   5267	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
   5268	if (rc)
   5269		return rc;
   5270
   5271	spd = (link->saved_scontrol >> 4) & 0xf;
   5272	if (spd)
   5273		link->hw_sata_spd_limit &= (1 << spd) - 1;
   5274
   5275	ata_force_link_limits(link);
   5276
   5277	link->sata_spd_limit = link->hw_sata_spd_limit;
   5278
   5279	return 0;
   5280}
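
        /*
         * Worked example (illustrative): if SCR_CONTROL reads 0x0020, the SPD
         * field is (0x0020 >> 4) & 0xf == 2, i.e. the link is capped at Gen2
         * (3.0 Gbps), and hw_sata_spd_limit is masked with (1 << 2) - 1 == 0x3,
         * keeping only the Gen1 and Gen2 bits.  An SPD field of 0 means "no
         * restriction", in which case the mask is left untouched.
         */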
   5281
   5282/**
   5283 *	ata_port_alloc - allocate and initialize basic ATA port resources
   5284 *	@host: ATA host this allocated port belongs to
   5285 *
   5286 *	Allocate and initialize basic ATA port resources.
   5287 *
   5288 *	RETURNS:
    5289 *	Allocated ATA port on success, NULL on failure.
   5290 *
   5291 *	LOCKING:
   5292 *	Inherited from calling layer (may sleep).
   5293 */
   5294struct ata_port *ata_port_alloc(struct ata_host *host)
   5295{
   5296	struct ata_port *ap;
   5297
   5298	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
   5299	if (!ap)
   5300		return NULL;
   5301
   5302	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
   5303	ap->lock = &host->lock;
   5304	ap->print_id = -1;
   5305	ap->local_port_no = -1;
   5306	ap->host = host;
   5307	ap->dev = host->dev;
   5308
   5309	mutex_init(&ap->scsi_scan_mutex);
   5310	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
   5311	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
   5312	INIT_LIST_HEAD(&ap->eh_done_q);
   5313	init_waitqueue_head(&ap->eh_wait_q);
   5314	init_completion(&ap->park_req_pending);
   5315	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
   5316		    TIMER_DEFERRABLE);
   5317
   5318	ap->cbl = ATA_CBL_NONE;
   5319
   5320	ata_link_init(ap, &ap->link, 0);
   5321
   5322#ifdef ATA_IRQ_TRAP
   5323	ap->stats.unhandled_irq = 1;
   5324	ap->stats.idle_irq = 1;
   5325#endif
   5326	ata_sff_port_init(ap);
   5327
   5328	return ap;
   5329}
   5330
   5331static void ata_devres_release(struct device *gendev, void *res)
   5332{
   5333	struct ata_host *host = dev_get_drvdata(gendev);
   5334	int i;
   5335
   5336	for (i = 0; i < host->n_ports; i++) {
   5337		struct ata_port *ap = host->ports[i];
   5338
   5339		if (!ap)
   5340			continue;
   5341
   5342		if (ap->scsi_host)
   5343			scsi_host_put(ap->scsi_host);
   5344
   5345	}
   5346
   5347	dev_set_drvdata(gendev, NULL);
   5348	ata_host_put(host);
   5349}
   5350
   5351static void ata_host_release(struct kref *kref)
   5352{
   5353	struct ata_host *host = container_of(kref, struct ata_host, kref);
   5354	int i;
   5355
   5356	for (i = 0; i < host->n_ports; i++) {
   5357		struct ata_port *ap = host->ports[i];
   5358
   5359		kfree(ap->pmp_link);
   5360		kfree(ap->slave_link);
   5361		kfree(ap);
   5362		host->ports[i] = NULL;
   5363	}
   5364	kfree(host);
   5365}
   5366
   5367void ata_host_get(struct ata_host *host)
   5368{
   5369	kref_get(&host->kref);
   5370}
   5371
   5372void ata_host_put(struct ata_host *host)
   5373{
   5374	kref_put(&host->kref, ata_host_release);
   5375}
   5376EXPORT_SYMBOL_GPL(ata_host_put);
   5377
   5378/**
   5379 *	ata_host_alloc - allocate and init basic ATA host resources
   5380 *	@dev: generic device this host is associated with
   5381 *	@max_ports: maximum number of ATA ports associated with this host
   5382 *
    5383 *	Allocate and initialize basic ATA host resources.  An LLD calls
    5384 *	this function to allocate a host, initializes it fully, and then
    5385 *	attaches it using ata_host_register().
   5386 *
   5387 *	@max_ports ports are allocated and host->n_ports is
   5388 *	initialized to @max_ports.  The caller is allowed to decrease
   5389 *	host->n_ports before calling ata_host_register().  The unused
   5390 *	ports will be automatically freed on registration.
   5391 *
   5392 *	RETURNS:
    5393 *	Allocated ATA host on success, NULL on failure.
   5394 *
   5395 *	LOCKING:
   5396 *	Inherited from calling layer (may sleep).
   5397 */
   5398struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
   5399{
   5400	struct ata_host *host;
   5401	size_t sz;
   5402	int i;
   5403	void *dr;
   5404
   5405	/* alloc a container for our list of ATA ports (buses) */
   5406	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
   5407	host = kzalloc(sz, GFP_KERNEL);
   5408	if (!host)
   5409		return NULL;
   5410
   5411	if (!devres_open_group(dev, NULL, GFP_KERNEL))
   5412		goto err_free;
   5413
   5414	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
   5415	if (!dr)
   5416		goto err_out;
   5417
   5418	devres_add(dev, dr);
   5419	dev_set_drvdata(dev, host);
   5420
   5421	spin_lock_init(&host->lock);
   5422	mutex_init(&host->eh_mutex);
   5423	host->dev = dev;
   5424	host->n_ports = max_ports;
   5425	kref_init(&host->kref);
   5426
   5427	/* allocate ports bound to this host */
   5428	for (i = 0; i < max_ports; i++) {
   5429		struct ata_port *ap;
   5430
   5431		ap = ata_port_alloc(host);
   5432		if (!ap)
   5433			goto err_out;
   5434
   5435		ap->port_no = i;
   5436		host->ports[i] = ap;
   5437	}
   5438
   5439	devres_remove_group(dev, NULL);
   5440	return host;
   5441
   5442 err_out:
   5443	devres_release_group(dev, NULL);
   5444 err_free:
   5445	kfree(host);
   5446	return NULL;
   5447}
   5448EXPORT_SYMBOL_GPL(ata_host_alloc);
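
        /*
         * Illustrative sketch (hypothetical driver code, not part of this
         * file): an LLD that only learns the real port count after allocation
         * may shrink host->n_ports before registering; ata_host_register()
         * then frees the trailing, unused ports.  my_count_ports() and
         * MY_MAX_PORTS are assumed names for the example.
         *
         *	host = ata_host_alloc(&pdev->dev, MY_MAX_PORTS);
         *	if (!host)
         *		return -ENOMEM;
         *	host->n_ports = my_count_ports(pdev);
         */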
   5449
   5450/**
   5451 *	ata_host_alloc_pinfo - alloc host and init with port_info array
   5452 *	@dev: generic device this host is associated with
   5453 *	@ppi: array of ATA port_info to initialize host with
   5454 *	@n_ports: number of ATA ports attached to this host
   5455 *
    5456 *	Allocate an ATA host and initialize it with info from @ppi.  If
    5457 *	@ppi is NULL terminated, it may contain fewer entries than
    5458 *	@n_ports; the last entry is then used for the remaining ports.
   5459 *
   5460 *	RETURNS:
    5461 *	Allocated ATA host on success, NULL on failure.
   5462 *
   5463 *	LOCKING:
   5464 *	Inherited from calling layer (may sleep).
   5465 */
   5466struct ata_host *ata_host_alloc_pinfo(struct device *dev,
   5467				      const struct ata_port_info * const * ppi,
   5468				      int n_ports)
   5469{
   5470	const struct ata_port_info *pi = &ata_dummy_port_info;
   5471	struct ata_host *host;
   5472	int i, j;
   5473
   5474	host = ata_host_alloc(dev, n_ports);
   5475	if (!host)
   5476		return NULL;
   5477
   5478	for (i = 0, j = 0; i < host->n_ports; i++) {
   5479		struct ata_port *ap = host->ports[i];
   5480
   5481		if (ppi[j])
   5482			pi = ppi[j++];
   5483
   5484		ap->pio_mask = pi->pio_mask;
   5485		ap->mwdma_mask = pi->mwdma_mask;
   5486		ap->udma_mask = pi->udma_mask;
   5487		ap->flags |= pi->flags;
   5488		ap->link.flags |= pi->link_flags;
   5489		ap->ops = pi->port_ops;
   5490
   5491		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
   5492			host->ops = pi->port_ops;
   5493	}
   5494
   5495	return host;
   5496}
   5497EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
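
        /*
         * Illustrative sketch: since @ppi may be NULL terminated, a driver
         * with four identical ports can pass a single entry and let it apply
         * to all of them.  my_port_info is a hypothetical ata_port_info.
         *
         *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
         *
         *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
         */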
   5498
   5499static void ata_host_stop(struct device *gendev, void *res)
   5500{
   5501	struct ata_host *host = dev_get_drvdata(gendev);
   5502	int i;
   5503
   5504	WARN_ON(!(host->flags & ATA_HOST_STARTED));
   5505
   5506	for (i = 0; i < host->n_ports; i++) {
   5507		struct ata_port *ap = host->ports[i];
   5508
   5509		if (ap->ops->port_stop)
   5510			ap->ops->port_stop(ap);
   5511	}
   5512
   5513	if (host->ops->host_stop)
   5514		host->ops->host_stop(host);
   5515}
   5516
   5517/**
   5518 *	ata_finalize_port_ops - finalize ata_port_operations
   5519 *	@ops: ata_port_operations to finalize
   5520 *
   5521 *	An ata_port_operations can inherit from another ops and that
   5522 *	ops can again inherit from another.  This can go on as many
   5523 *	times as necessary as long as there is no loop in the
   5524 *	inheritance chain.
   5525 *
   5526 *	Ops tables are finalized when the host is started.  NULL or
    5527 *	unspecified entries are inherited from the closest ancestor
    5528 *	that has the method, and the entry is populated with it.
   5529 *	After finalization, the ops table directly points to all the
   5530 *	methods and ->inherits is no longer necessary and cleared.
   5531 *
   5532 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
   5533 *
   5534 *	LOCKING:
   5535 *	None.
   5536 */
   5537static void ata_finalize_port_ops(struct ata_port_operations *ops)
   5538{
   5539	static DEFINE_SPINLOCK(lock);
   5540	const struct ata_port_operations *cur;
   5541	void **begin = (void **)ops;
   5542	void **end = (void **)&ops->inherits;
   5543	void **pp;
   5544
   5545	if (!ops || !ops->inherits)
   5546		return;
   5547
   5548	spin_lock(&lock);
   5549
   5550	for (cur = ops->inherits; cur; cur = cur->inherits) {
   5551		void **inherit = (void **)cur;
   5552
   5553		for (pp = begin; pp < end; pp++, inherit++)
   5554			if (!*pp)
   5555				*pp = *inherit;
   5556	}
   5557
   5558	for (pp = begin; pp < end; pp++)
   5559		if (IS_ERR(*pp))
   5560			*pp = NULL;
   5561
   5562	ops->inherits = NULL;
   5563
   5564	spin_unlock(&lock);
   5565}
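
        /*
         * Illustrative sketch of the inheritance being finalized: a
         * hypothetical ops table inherits from sata_port_ops, overrides one
         * method and forces another back to NULL via ATA_OP_NULL.  After
         * finalization every unset slot holds the closest ancestor's method
         * and ->inherits is cleared.
         *
         *	static struct ata_port_operations my_ops = {
         *		.inherits	= &sata_port_ops,
         *		.hardreset	= my_hardreset,	// override
         *		.softreset	= ATA_OP_NULL,	// force "no method"
         *	};
         */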
   5566
   5567/**
   5568 *	ata_host_start - start and freeze ports of an ATA host
   5569 *	@host: ATA host to start ports for
   5570 *
   5571 *	Start and then freeze ports of @host.  Started status is
   5572 *	recorded in host->flags, so this function can be called
   5573 *	multiple times.  Ports are guaranteed to get started only
   5574 *	once.  If host->ops is not initialized yet, it is set to the
   5575 *	first non-dummy port ops.
   5576 *
   5577 *	LOCKING:
   5578 *	Inherited from calling layer (may sleep).
   5579 *
   5580 *	RETURNS:
   5581 *	0 if all ports are started successfully, -errno otherwise.
   5582 */
   5583int ata_host_start(struct ata_host *host)
   5584{
   5585	int have_stop = 0;
   5586	void *start_dr = NULL;
   5587	int i, rc;
   5588
   5589	if (host->flags & ATA_HOST_STARTED)
   5590		return 0;
   5591
   5592	ata_finalize_port_ops(host->ops);
   5593
   5594	for (i = 0; i < host->n_ports; i++) {
   5595		struct ata_port *ap = host->ports[i];
   5596
   5597		ata_finalize_port_ops(ap->ops);
   5598
   5599		if (!host->ops && !ata_port_is_dummy(ap))
   5600			host->ops = ap->ops;
   5601
   5602		if (ap->ops->port_stop)
   5603			have_stop = 1;
   5604	}
   5605
   5606	if (host->ops && host->ops->host_stop)
   5607		have_stop = 1;
   5608
   5609	if (have_stop) {
   5610		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
   5611		if (!start_dr)
   5612			return -ENOMEM;
   5613	}
   5614
   5615	for (i = 0; i < host->n_ports; i++) {
   5616		struct ata_port *ap = host->ports[i];
   5617
   5618		if (ap->ops->port_start) {
   5619			rc = ap->ops->port_start(ap);
   5620			if (rc) {
   5621				if (rc != -ENODEV)
   5622					dev_err(host->dev,
   5623						"failed to start port %d (errno=%d)\n",
   5624						i, rc);
   5625				goto err_out;
   5626			}
   5627		}
   5628		ata_eh_freeze_port(ap);
   5629	}
   5630
   5631	if (start_dr)
   5632		devres_add(host->dev, start_dr);
   5633	host->flags |= ATA_HOST_STARTED;
   5634	return 0;
   5635
   5636 err_out:
   5637	while (--i >= 0) {
   5638		struct ata_port *ap = host->ports[i];
   5639
   5640		if (ap->ops->port_stop)
   5641			ap->ops->port_stop(ap);
   5642	}
   5643	devres_free(start_dr);
   5644	return rc;
   5645}
   5646EXPORT_SYMBOL_GPL(ata_host_start);
   5647
   5648/**
   5649 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
   5650 *	@host:	host to initialize
   5651 *	@dev:	device host is attached to
   5652 *	@ops:	port_ops
   5653 *
   5654 */
   5655void ata_host_init(struct ata_host *host, struct device *dev,
   5656		   struct ata_port_operations *ops)
   5657{
   5658	spin_lock_init(&host->lock);
   5659	mutex_init(&host->eh_mutex);
   5660	host->n_tags = ATA_MAX_QUEUE;
   5661	host->dev = dev;
   5662	host->ops = ops;
   5663	kref_init(&host->kref);
   5664}
   5665EXPORT_SYMBOL_GPL(ata_host_init);
   5666
   5667void __ata_port_probe(struct ata_port *ap)
   5668{
   5669	struct ata_eh_info *ehi = &ap->link.eh_info;
   5670	unsigned long flags;
   5671
   5672	/* kick EH for boot probing */
   5673	spin_lock_irqsave(ap->lock, flags);
   5674
   5675	ehi->probe_mask |= ATA_ALL_DEVICES;
   5676	ehi->action |= ATA_EH_RESET;
   5677	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
   5678
   5679	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
   5680	ap->pflags |= ATA_PFLAG_LOADING;
   5681	ata_port_schedule_eh(ap);
   5682
   5683	spin_unlock_irqrestore(ap->lock, flags);
   5684}
   5685
   5686int ata_port_probe(struct ata_port *ap)
   5687{
   5688	int rc = 0;
   5689
   5690	if (ap->ops->error_handler) {
   5691		__ata_port_probe(ap);
   5692		ata_port_wait_eh(ap);
   5693	} else {
   5694		rc = ata_bus_probe(ap);
   5695	}
   5696	return rc;
   5697}
    5698
   5700static void async_port_probe(void *data, async_cookie_t cookie)
   5701{
   5702	struct ata_port *ap = data;
   5703
   5704	/*
   5705	 * If we're not allowed to scan this host in parallel,
   5706	 * we need to wait until all previous scans have completed
   5707	 * before going further.
   5708	 * Jeff Garzik says this is only within a controller, so we
   5709	 * don't need to wait for port 0, only for later ports.
   5710	 */
   5711	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
   5712		async_synchronize_cookie(cookie);
   5713
   5714	(void)ata_port_probe(ap);
   5715
   5716	/* in order to keep device order, we need to synchronize at this point */
   5717	async_synchronize_cookie(cookie);
   5718
   5719	ata_scsi_scan_host(ap, 1);
   5720}
   5721
   5722/**
   5723 *	ata_host_register - register initialized ATA host
   5724 *	@host: ATA host to register
   5725 *	@sht: template for SCSI host
   5726 *
    5727 *	Register an initialized ATA host.  @host is allocated using
    5728 *	ata_host_alloc() and fully initialized by the LLD.  This function
    5729 *	starts the ports, registers @host with the ATA and SCSI layers, and
    5730 *	probes registered devices.
   5731 *
   5732 *	LOCKING:
   5733 *	Inherited from calling layer (may sleep).
   5734 *
   5735 *	RETURNS:
   5736 *	0 on success, -errno otherwise.
   5737 */
   5738int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
   5739{
   5740	int i, rc;
   5741
   5742	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
   5743
   5744	/* host must have been started */
   5745	if (!(host->flags & ATA_HOST_STARTED)) {
   5746		dev_err(host->dev, "BUG: trying to register unstarted host\n");
   5747		WARN_ON(1);
   5748		return -EINVAL;
   5749	}
   5750
    5751	/* Blow away unused ports.  This happens when the LLD can't
   5752	 * determine the exact number of ports to allocate at
   5753	 * allocation time.
   5754	 */
   5755	for (i = host->n_ports; host->ports[i]; i++)
   5756		kfree(host->ports[i]);
   5757
   5758	/* give ports names and add SCSI hosts */
   5759	for (i = 0; i < host->n_ports; i++) {
   5760		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
   5761		host->ports[i]->local_port_no = i + 1;
   5762	}
   5763
   5764	/* Create associated sysfs transport objects  */
   5765	for (i = 0; i < host->n_ports; i++) {
    5766		rc = ata_tport_add(host->dev, host->ports[i]);
    5767		if (rc)
    5768			goto err_tadd;
    5770	}
   5771
   5772	rc = ata_scsi_add_hosts(host, sht);
   5773	if (rc)
   5774		goto err_tadd;
   5775
   5776	/* set cable, sata_spd_limit and report */
   5777	for (i = 0; i < host->n_ports; i++) {
   5778		struct ata_port *ap = host->ports[i];
   5779		unsigned long xfer_mask;
   5780
   5781		/* set SATA cable type if still unset */
   5782		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
   5783			ap->cbl = ATA_CBL_SATA;
   5784
   5785		/* init sata_spd_limit to the current value */
   5786		sata_link_init_spd(&ap->link);
   5787		if (ap->slave_link)
   5788			sata_link_init_spd(ap->slave_link);
   5789
   5790		/* print per-port info to dmesg */
   5791		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
   5792					      ap->udma_mask);
   5793
   5794		if (!ata_port_is_dummy(ap)) {
   5795			ata_port_info(ap, "%cATA max %s %s\n",
   5796				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
   5797				      ata_mode_string(xfer_mask),
   5798				      ap->link.eh_info.desc);
   5799			ata_ehi_clear_desc(&ap->link.eh_info);
   5800		} else
   5801			ata_port_info(ap, "DUMMY\n");
   5802	}
   5803
   5804	/* perform each probe asynchronously */
   5805	for (i = 0; i < host->n_ports; i++) {
   5806		struct ata_port *ap = host->ports[i];
   5807		ap->cookie = async_schedule(async_port_probe, ap);
   5808	}
   5809
   5810	return 0;
   5811
   5812 err_tadd:
   5813	while (--i >= 0) {
   5814		ata_tport_delete(host->ports[i]);
   5815	}
    5816	return rc;
    5818}
   5819EXPORT_SYMBOL_GPL(ata_host_register);
   5820
   5821/**
   5822 *	ata_host_activate - start host, request IRQ and register it
   5823 *	@host: target ATA host
   5824 *	@irq: IRQ to request
   5825 *	@irq_handler: irq_handler used when requesting IRQ
   5826 *	@irq_flags: irq_flags used when requesting IRQ
   5827 *	@sht: scsi_host_template to use when registering the host
   5828 *
   5829 *	After allocating an ATA host and initializing it, most libata
   5830 *	LLDs perform three steps to activate the host - start host,
   5831 *	request IRQ and register it.  This helper takes necessary
   5832 *	arguments and performs the three steps in one go.
   5833 *
   5834 *	An invalid IRQ skips the IRQ registration and expects the host to
   5835 *	have set polling mode on the port. In this case, @irq_handler
   5836 *	should be NULL.
   5837 *
   5838 *	LOCKING:
   5839 *	Inherited from calling layer (may sleep).
   5840 *
   5841 *	RETURNS:
   5842 *	0 on success, -errno otherwise.
   5843 */
   5844int ata_host_activate(struct ata_host *host, int irq,
   5845		      irq_handler_t irq_handler, unsigned long irq_flags,
   5846		      struct scsi_host_template *sht)
   5847{
   5848	int i, rc;
   5849	char *irq_desc;
   5850
   5851	rc = ata_host_start(host);
   5852	if (rc)
   5853		return rc;
   5854
   5855	/* Special case for polling mode */
   5856	if (!irq) {
   5857		WARN_ON(irq_handler);
   5858		return ata_host_register(host, sht);
   5859	}
   5860
   5861	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
   5862				  dev_driver_string(host->dev),
   5863				  dev_name(host->dev));
   5864	if (!irq_desc)
   5865		return -ENOMEM;
   5866
   5867	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
   5868			      irq_desc, host);
   5869	if (rc)
   5870		return rc;
   5871
   5872	for (i = 0; i < host->n_ports; i++)
   5873		ata_port_desc(host->ports[i], "irq %d", irq);
   5874
   5875	rc = ata_host_register(host, sht);
   5876	/* if failed, just free the IRQ and leave ports alone */
   5877	if (rc)
   5878		devm_free_irq(host->dev, irq, host);
   5879
   5880	return rc;
   5881}
   5882EXPORT_SYMBOL_GPL(ata_host_activate);
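
        /*
         * Illustrative probe sketch (hypothetical PCI driver, not part of
         * this file): the usual LLD flow is allocate, set up the hardware,
         * then activate.  ppi, my_interrupt and my_sht are assumptions for
         * the example.
         *
         *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
         *	if (!host)
         *		return -ENOMEM;
         *	// ... map BARs and fill host->ports[0]->ioaddr here ...
         *	return ata_host_activate(host, pdev->irq, my_interrupt,
         *				 IRQF_SHARED, &my_sht);
         */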
   5883
   5884/**
   5885 *	ata_port_detach - Detach ATA port in preparation of device removal
   5886 *	@ap: ATA port to be detached
   5887 *
   5888 *	Detach all ATA devices and the associated SCSI devices of @ap;
   5889 *	then, remove the associated SCSI host.  @ap is guaranteed to
   5890 *	be quiescent on return from this function.
   5891 *
   5892 *	LOCKING:
   5893 *	Kernel thread context (may sleep).
   5894 */
   5895static void ata_port_detach(struct ata_port *ap)
   5896{
   5897	unsigned long flags;
   5898	struct ata_link *link;
   5899	struct ata_device *dev;
   5900
   5901	if (!ap->ops->error_handler)
   5902		goto skip_eh;
   5903
   5904	/* tell EH we're leaving & flush EH */
   5905	spin_lock_irqsave(ap->lock, flags);
   5906	ap->pflags |= ATA_PFLAG_UNLOADING;
   5907	ata_port_schedule_eh(ap);
   5908	spin_unlock_irqrestore(ap->lock, flags);
   5909
   5910	/* wait till EH commits suicide */
   5911	ata_port_wait_eh(ap);
   5912
   5913	/* it better be dead now */
   5914	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
   5915
   5916	cancel_delayed_work_sync(&ap->hotplug_task);
   5917
   5918 skip_eh:
   5919	/* clean up zpodd on port removal */
   5920	ata_for_each_link(link, ap, HOST_FIRST) {
   5921		ata_for_each_dev(dev, link, ALL) {
   5922			if (zpodd_dev_enabled(dev))
   5923				zpodd_exit(dev);
   5924		}
   5925	}
   5926	if (ap->pmp_link) {
   5927		int i;
   5928		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
   5929			ata_tlink_delete(&ap->pmp_link[i]);
   5930	}
   5931	/* remove the associated SCSI host */
   5932	scsi_remove_host(ap->scsi_host);
   5933	ata_tport_delete(ap);
   5934}
   5935
   5936/**
   5937 *	ata_host_detach - Detach all ports of an ATA host
   5938 *	@host: Host to detach
   5939 *
   5940 *	Detach all ports of @host.
   5941 *
   5942 *	LOCKING:
   5943 *	Kernel thread context (may sleep).
   5944 */
   5945void ata_host_detach(struct ata_host *host)
   5946{
   5947	int i;
   5948
   5949	for (i = 0; i < host->n_ports; i++) {
   5950		/* Ensure ata_port probe has completed */
   5951		async_synchronize_cookie(host->ports[i]->cookie + 1);
   5952		ata_port_detach(host->ports[i]);
   5953	}
   5954
   5955	/* the host is dead now, dissociate ACPI */
   5956	ata_acpi_dissociate(host);
   5957}
   5958EXPORT_SYMBOL_GPL(ata_host_detach);
   5959
   5960#ifdef CONFIG_PCI
   5961
   5962/**
   5963 *	ata_pci_remove_one - PCI layer callback for device removal
   5964 *	@pdev: PCI device that was removed
   5965 *
    5966 *	PCI layer indicates to libata via this hook that a hot-unplug or
   5967 *	module unload event has occurred.  Detach all ports.  Resource
   5968 *	release is handled via devres.
   5969 *
   5970 *	LOCKING:
   5971 *	Inherited from PCI layer (may sleep).
   5972 */
   5973void ata_pci_remove_one(struct pci_dev *pdev)
   5974{
   5975	struct ata_host *host = pci_get_drvdata(pdev);
   5976
   5977	ata_host_detach(host);
   5978}
   5979EXPORT_SYMBOL_GPL(ata_pci_remove_one);
   5980
   5981void ata_pci_shutdown_one(struct pci_dev *pdev)
   5982{
   5983	struct ata_host *host = pci_get_drvdata(pdev);
   5984	int i;
   5985
   5986	for (i = 0; i < host->n_ports; i++) {
   5987		struct ata_port *ap = host->ports[i];
   5988
   5989		ap->pflags |= ATA_PFLAG_FROZEN;
   5990
   5991		/* Disable port interrupts */
   5992		if (ap->ops->freeze)
   5993			ap->ops->freeze(ap);
   5994
   5995		/* Stop the port DMA engines */
   5996		if (ap->ops->port_stop)
   5997			ap->ops->port_stop(ap);
   5998	}
   5999}
   6000EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
   6001
   6002/* move to PCI subsystem */
   6003int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
   6004{
   6005	unsigned long tmp = 0;
   6006
   6007	switch (bits->width) {
   6008	case 1: {
   6009		u8 tmp8 = 0;
   6010		pci_read_config_byte(pdev, bits->reg, &tmp8);
   6011		tmp = tmp8;
   6012		break;
   6013	}
   6014	case 2: {
   6015		u16 tmp16 = 0;
   6016		pci_read_config_word(pdev, bits->reg, &tmp16);
   6017		tmp = tmp16;
   6018		break;
   6019	}
   6020	case 4: {
   6021		u32 tmp32 = 0;
   6022		pci_read_config_dword(pdev, bits->reg, &tmp32);
   6023		tmp = tmp32;
   6024		break;
   6025	}
   6026
   6027	default:
   6028		return -EINVAL;
   6029	}
   6030
   6031	tmp &= bits->mask;
   6032
   6033	return (tmp == bits->val) ? 1 : 0;
   6034}
   6035EXPORT_SYMBOL_GPL(pci_test_config_bits);
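
        /*
         * Illustrative use (values are hypothetical): PATA drivers typically
         * test a channel-enable bit in PCI config space this way.  The struct
         * pci_bits fields are { reg, width, mask, val }: read 1 byte at config
         * offset 0x41, mask with 0x80 and compare against 0x80.
         *
         *	static const struct pci_bits enable_bits = { 0x41, 1, 0x80, 0x80 };
         *
         *	if (!pci_test_config_bits(pdev, &enable_bits))
         *		return -ENOENT;	// channel disabled by firmware
         */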
   6036
   6037#ifdef CONFIG_PM
   6038void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
   6039{
   6040	pci_save_state(pdev);
   6041	pci_disable_device(pdev);
   6042
   6043	if (mesg.event & PM_EVENT_SLEEP)
   6044		pci_set_power_state(pdev, PCI_D3hot);
   6045}
   6046EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
   6047
   6048int ata_pci_device_do_resume(struct pci_dev *pdev)
   6049{
   6050	int rc;
   6051
   6052	pci_set_power_state(pdev, PCI_D0);
   6053	pci_restore_state(pdev);
   6054
   6055	rc = pcim_enable_device(pdev);
   6056	if (rc) {
   6057		dev_err(&pdev->dev,
   6058			"failed to enable device after resume (%d)\n", rc);
   6059		return rc;
   6060	}
   6061
   6062	pci_set_master(pdev);
   6063	return 0;
   6064}
   6065EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
   6066
   6067int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
   6068{
   6069	struct ata_host *host = pci_get_drvdata(pdev);
   6070
   6071	ata_host_suspend(host, mesg);
   6072
   6073	ata_pci_device_do_suspend(pdev, mesg);
   6074
   6075	return 0;
   6076}
   6077EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
   6078
   6079int ata_pci_device_resume(struct pci_dev *pdev)
   6080{
   6081	struct ata_host *host = pci_get_drvdata(pdev);
   6082	int rc;
   6083
   6084	rc = ata_pci_device_do_resume(pdev);
   6085	if (rc == 0)
   6086		ata_host_resume(host);
   6087	return rc;
   6088}
   6089EXPORT_SYMBOL_GPL(ata_pci_device_resume);
   6090#endif /* CONFIG_PM */
   6091#endif /* CONFIG_PCI */
   6092
   6093/**
   6094 *	ata_platform_remove_one - Platform layer callback for device removal
   6095 *	@pdev: Platform device that was removed
   6096 *
    6097 *	Platform layer indicates to libata via this hook that a hot-unplug or
   6098 *	module unload event has occurred.  Detach all ports.  Resource
   6099 *	release is handled via devres.
   6100 *
   6101 *	LOCKING:
   6102 *	Inherited from platform layer (may sleep).
   6103 */
   6104int ata_platform_remove_one(struct platform_device *pdev)
   6105{
   6106	struct ata_host *host = platform_get_drvdata(pdev);
   6107
   6108	ata_host_detach(host);
   6109
   6110	return 0;
   6111}
   6112EXPORT_SYMBOL_GPL(ata_platform_remove_one);
   6113
   6114#ifdef CONFIG_ATA_FORCE
   6115
   6116#define force_cbl(name, flag)				\
   6117	{ #name,	.cbl		= (flag) }
   6118
   6119#define force_spd_limit(spd, val)			\
   6120	{ #spd,	.spd_limit		= (val) }
   6121
   6122#define force_xfer(mode, shift)				\
   6123	{ #mode,	.xfer_mask	= (1UL << (shift)) }
   6124
   6125#define force_lflag_on(name, flags)			\
   6126	{ #name,	.lflags_on	= (flags) }
   6127
   6128#define force_lflag_onoff(name, flags)			\
   6129	{ "no" #name,	.lflags_on	= (flags) },	\
   6130	{ #name,	.lflags_off	= (flags) }
   6131
   6132#define force_horkage_on(name, flag)			\
   6133	{ #name,	.horkage_on	= (flag) }
   6134
   6135#define force_horkage_onoff(name, flag)			\
   6136	{ "no" #name,	.horkage_on	= (flag) },	\
   6137	{ #name,	.horkage_off	= (flag) }
   6138
   6139static const struct ata_force_param force_tbl[] __initconst = {
   6140	force_cbl(40c,			ATA_CBL_PATA40),
   6141	force_cbl(80c,			ATA_CBL_PATA80),
   6142	force_cbl(short40c,		ATA_CBL_PATA40_SHORT),
   6143	force_cbl(unk,			ATA_CBL_PATA_UNK),
   6144	force_cbl(ign,			ATA_CBL_PATA_IGN),
   6145	force_cbl(sata,			ATA_CBL_SATA),
   6146
   6147	force_spd_limit(1.5Gbps,	1),
   6148	force_spd_limit(3.0Gbps,	2),
   6149
   6150	force_xfer(pio0,		ATA_SHIFT_PIO + 0),
   6151	force_xfer(pio1,		ATA_SHIFT_PIO + 1),
   6152	force_xfer(pio2,		ATA_SHIFT_PIO + 2),
   6153	force_xfer(pio3,		ATA_SHIFT_PIO + 3),
   6154	force_xfer(pio4,		ATA_SHIFT_PIO + 4),
   6155	force_xfer(pio5,		ATA_SHIFT_PIO + 5),
   6156	force_xfer(pio6,		ATA_SHIFT_PIO + 6),
   6157	force_xfer(mwdma0,		ATA_SHIFT_MWDMA + 0),
   6158	force_xfer(mwdma1,		ATA_SHIFT_MWDMA + 1),
   6159	force_xfer(mwdma2,		ATA_SHIFT_MWDMA + 2),
   6160	force_xfer(mwdma3,		ATA_SHIFT_MWDMA + 3),
   6161	force_xfer(mwdma4,		ATA_SHIFT_MWDMA + 4),
   6162	force_xfer(udma0,		ATA_SHIFT_UDMA + 0),
   6163	force_xfer(udma16,		ATA_SHIFT_UDMA + 0),
   6164	force_xfer(udma/16,		ATA_SHIFT_UDMA + 0),
   6165	force_xfer(udma1,		ATA_SHIFT_UDMA + 1),
   6166	force_xfer(udma25,		ATA_SHIFT_UDMA + 1),
   6167	force_xfer(udma/25,		ATA_SHIFT_UDMA + 1),
   6168	force_xfer(udma2,		ATA_SHIFT_UDMA + 2),
   6169	force_xfer(udma33,		ATA_SHIFT_UDMA + 2),
   6170	force_xfer(udma/33,		ATA_SHIFT_UDMA + 2),
   6171	force_xfer(udma3,		ATA_SHIFT_UDMA + 3),
   6172	force_xfer(udma44,		ATA_SHIFT_UDMA + 3),
   6173	force_xfer(udma/44,		ATA_SHIFT_UDMA + 3),
   6174	force_xfer(udma4,		ATA_SHIFT_UDMA + 4),
   6175	force_xfer(udma66,		ATA_SHIFT_UDMA + 4),
   6176	force_xfer(udma/66,		ATA_SHIFT_UDMA + 4),
   6177	force_xfer(udma5,		ATA_SHIFT_UDMA + 5),
   6178	force_xfer(udma100,		ATA_SHIFT_UDMA + 5),
   6179	force_xfer(udma/100,		ATA_SHIFT_UDMA + 5),
   6180	force_xfer(udma6,		ATA_SHIFT_UDMA + 6),
   6181	force_xfer(udma133,		ATA_SHIFT_UDMA + 6),
   6182	force_xfer(udma/133,		ATA_SHIFT_UDMA + 6),
   6183	force_xfer(udma7,		ATA_SHIFT_UDMA + 7),
   6184
   6185	force_lflag_on(nohrst,		ATA_LFLAG_NO_HRST),
   6186	force_lflag_on(nosrst,		ATA_LFLAG_NO_SRST),
   6187	force_lflag_on(norst,		ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST),
   6188	force_lflag_on(rstonce,		ATA_LFLAG_RST_ONCE),
   6189	force_lflag_onoff(dbdelay,	ATA_LFLAG_NO_DEBOUNCE_DELAY),
   6190
   6191	force_horkage_onoff(ncq,	ATA_HORKAGE_NONCQ),
   6192	force_horkage_onoff(ncqtrim,	ATA_HORKAGE_NO_NCQ_TRIM),
   6193	force_horkage_onoff(ncqati,	ATA_HORKAGE_NO_NCQ_ON_ATI),
   6194
   6195	force_horkage_onoff(trim,	ATA_HORKAGE_NOTRIM),
   6196	force_horkage_on(trim_zero,	ATA_HORKAGE_ZERO_AFTER_TRIM),
   6197	force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),
   6198
   6199	force_horkage_onoff(dma,	ATA_HORKAGE_NODMA),
   6200	force_horkage_on(atapi_dmadir,	ATA_HORKAGE_ATAPI_DMADIR),
   6201	force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),
   6202
   6203	force_horkage_onoff(dmalog,	ATA_HORKAGE_NO_DMA_LOG),
   6204	force_horkage_onoff(iddevlog,	ATA_HORKAGE_NO_ID_DEV_LOG),
   6205	force_horkage_onoff(logdir,	ATA_HORKAGE_NO_LOG_DIR),
   6206
   6207	force_horkage_on(max_sec_128,	ATA_HORKAGE_MAX_SEC_128),
   6208	force_horkage_on(max_sec_1024,	ATA_HORKAGE_MAX_SEC_1024),
   6209	force_horkage_on(max_sec_lba48,	ATA_HORKAGE_MAX_SEC_LBA48),
   6210
   6211	force_horkage_onoff(lpm,	ATA_HORKAGE_NOLPM),
   6212	force_horkage_onoff(setxfer,	ATA_HORKAGE_NOSETXFER),
   6213	force_horkage_on(dump_id,	ATA_HORKAGE_DUMP_ID),
   6214
   6215	force_horkage_on(disable,	ATA_HORKAGE_DISABLE),
   6216};
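
        /*
         * The table above backs the libata.force= kernel parameter.  Entries
         * are a comma separated list of [PORT[.DEVICE]:]VALUE; some
         * illustrative examples:
         *
         *	libata.force=1.5Gbps		// limit all links to Gen1
         *	libata.force=2:noncq		// disable NCQ on port 2
         *	libata.force=3.00:udma100	// UDMA/100 for device 0 on port 3
         */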
   6217
   6218static int __init ata_parse_force_one(char **cur,
   6219				      struct ata_force_ent *force_ent,
   6220				      const char **reason)
   6221{
   6222	char *start = *cur, *p = *cur;
   6223	char *id, *val, *endp;
   6224	const struct ata_force_param *match_fp = NULL;
   6225	int nr_matches = 0, i;
   6226
   6227	/* find where this param ends and update *cur */
   6228	while (*p != '\0' && *p != ',')
   6229		p++;
   6230
   6231	if (*p == '\0')
   6232		*cur = p;
   6233	else
   6234		*cur = p + 1;
   6235
   6236	*p = '\0';
   6237
   6238	/* parse */
   6239	p = strchr(start, ':');
   6240	if (!p) {
   6241		val = strstrip(start);
   6242		goto parse_val;
   6243	}
   6244	*p = '\0';
   6245
   6246	id = strstrip(start);
   6247	val = strstrip(p + 1);
   6248
   6249	/* parse id */
   6250	p = strchr(id, '.');
   6251	if (p) {
   6252		*p++ = '\0';
   6253		force_ent->device = simple_strtoul(p, &endp, 10);
   6254		if (p == endp || *endp != '\0') {
   6255			*reason = "invalid device";
   6256			return -EINVAL;
   6257		}
   6258	}
   6259
   6260	force_ent->port = simple_strtoul(id, &endp, 10);
   6261	if (id == endp || *endp != '\0') {
   6262		*reason = "invalid port/link";
   6263		return -EINVAL;
   6264	}
   6265
   6266 parse_val:
   6267	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
   6268	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
   6269		const struct ata_force_param *fp = &force_tbl[i];
   6270
   6271		if (strncasecmp(val, fp->name, strlen(val)))
   6272			continue;
   6273
   6274		nr_matches++;
   6275		match_fp = fp;
   6276
   6277		if (strcasecmp(val, fp->name) == 0) {
   6278			nr_matches = 1;
   6279			break;
   6280		}
   6281	}
   6282
   6283	if (!nr_matches) {
   6284		*reason = "unknown value";
   6285		return -EINVAL;
   6286	}
   6287	if (nr_matches > 1) {
   6288		*reason = "ambiguous value";
   6289		return -EINVAL;
   6290	}
   6291
   6292	force_ent->param = *match_fp;
   6293
   6294	return 0;
   6295}
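
        /*
         * Worked example (illustrative): given "3.00:udma100", the code above
         * splits the id "3.00" from the value "udma100", parses device 0 and
         * port 3, and then prefix-matches the value against force_tbl.  The
         * strncasecmp() prefix test is what lets shortcuts such as "1.5" stand
         * in for "1.5Gbps", as long as no other entry shares the prefix.
         */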
   6296
   6297static void __init ata_parse_force_param(void)
   6298{
   6299	int idx = 0, size = 1;
   6300	int last_port = -1, last_device = -1;
   6301	char *p, *cur, *next;
   6302
   6303	/* Calculate maximum number of params and allocate ata_force_tbl */
   6304	for (p = ata_force_param_buf; *p; p++)
   6305		if (*p == ',')
   6306			size++;
   6307
   6308	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
   6309	if (!ata_force_tbl) {
   6310		printk(KERN_WARNING "ata: failed to extend force table, "
   6311		       "libata.force ignored\n");
   6312		return;
   6313	}
   6314
   6315	/* parse and populate the table */
   6316	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
   6317		const char *reason = "";
   6318		struct ata_force_ent te = { .port = -1, .device = -1 };
   6319
   6320		next = cur;
   6321		if (ata_parse_force_one(&next, &te, &reason)) {
   6322			printk(KERN_WARNING "ata: failed to parse force "
   6323			       "parameter \"%s\" (%s)\n",
   6324			       cur, reason);
   6325			continue;
   6326		}
   6327
   6328		if (te.port == -1) {
   6329			te.port = last_port;
   6330			te.device = last_device;
   6331		}
   6332
   6333		ata_force_tbl[idx++] = te;
   6334
   6335		last_port = te.port;
   6336		last_device = te.device;
   6337	}
   6338
   6339	ata_force_tbl_size = idx;
   6340}
   6341
   6342static void ata_free_force_param(void)
   6343{
   6344	kfree(ata_force_tbl);
   6345}
   6346#else
   6347static inline void ata_parse_force_param(void) { }
   6348static inline void ata_free_force_param(void) { }
   6349#endif
   6350
   6351static int __init ata_init(void)
   6352{
   6353	int rc;
   6354
   6355	ata_parse_force_param();
   6356
   6357	rc = ata_sff_init();
   6358	if (rc) {
   6359		ata_free_force_param();
   6360		return rc;
   6361	}
   6362
   6363	libata_transport_init();
   6364	ata_scsi_transport_template = ata_attach_transport();
   6365	if (!ata_scsi_transport_template) {
   6366		ata_sff_exit();
   6367		rc = -ENOMEM;
   6368		goto err_out;
   6369	}
   6370
   6371	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
   6372	return 0;
   6373
   6374err_out:
   6375	return rc;
   6376}
   6377
   6378static void __exit ata_exit(void)
   6379{
   6380	ata_release_transport(ata_scsi_transport_template);
   6381	libata_transport_exit();
   6382	ata_sff_exit();
   6383	ata_free_force_param();
   6384}
   6385
   6386subsys_initcall(ata_init);
   6387module_exit(ata_exit);
   6388
   6389static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
   6390
   6391int ata_ratelimit(void)
   6392{
   6393	return __ratelimit(&ratelimit);
   6394}
   6395EXPORT_SYMBOL_GPL(ata_ratelimit);
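
        /*
         * Typical (illustrative) use: gating noisy messages in interrupt
         * paths.  With the state defined above, at most one message is let
         * through per HZ/5 jiffies, i.e. one every fifth of a second.
         *
         *	if (ata_ratelimit())
         *		ata_port_warn(ap, "spurious interrupt\n");
         */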
   6396
   6397/**
   6398 *	ata_msleep - ATA EH owner aware msleep
   6399 *	@ap: ATA port to attribute the sleep to
   6400 *	@msecs: duration to sleep in milliseconds
   6401 *
    6402 *	Sleeps @msecs.  If the current task is the owner of @ap's EH, the
   6403 *	ownership is released before going to sleep and reacquired
   6404 *	after the sleep is complete.  IOW, other ports sharing the
   6405 *	@ap->host will be allowed to own the EH while this task is
   6406 *	sleeping.
   6407 *
   6408 *	LOCKING:
   6409 *	Might sleep.
   6410 */
   6411void ata_msleep(struct ata_port *ap, unsigned int msecs)
   6412{
   6413	bool owns_eh = ap && ap->host->eh_owner == current;
   6414
   6415	if (owns_eh)
   6416		ata_eh_release(ap);
   6417
   6418	if (msecs < 20) {
   6419		unsigned long usecs = msecs * USEC_PER_MSEC;
   6420		usleep_range(usecs, usecs + 50);
   6421	} else {
   6422		msleep(msecs);
   6423	}
   6424
   6425	if (owns_eh)
   6426		ata_eh_acquire(ap);
   6427}
   6428EXPORT_SYMBOL_GPL(ata_msleep);
   6429
   6430/**
   6431 *	ata_wait_register - wait until register value changes
   6432 *	@ap: ATA port to wait register for, can be NULL
   6433 *	@reg: IO-mapped register
   6434 *	@mask: Mask to apply to read register value
   6435 *	@val: Wait condition
   6436 *	@interval: polling interval in milliseconds
   6437 *	@timeout: timeout in milliseconds
   6438 *
   6439 *	Waiting for some bits of register to change is a common
    6440 *	operation for ATA controllers.  This function reads the 32-bit LE
   6441 *	IO-mapped register @reg and tests for the following condition.
   6442 *
   6443 *	(*@reg & mask) != val
   6444 *
   6445 *	If the condition is met, it returns; otherwise, the process is
    6446 *	repeated after @interval until timeout.
   6447 *
   6448 *	LOCKING:
   6449 *	Kernel thread context (may sleep)
   6450 *
   6451 *	RETURNS:
   6452 *	The final register value.
   6453 */
   6454u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
   6455		      unsigned long interval, unsigned long timeout)
   6456{
   6457	unsigned long deadline;
   6458	u32 tmp;
   6459
   6460	tmp = ioread32(reg);
   6461
   6462	/* Calculate timeout _after_ the first read to make sure
   6463	 * preceding writes reach the controller before starting to
   6464	 * eat away the timeout.
   6465	 */
   6466	deadline = ata_deadline(jiffies, timeout);
   6467
   6468	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
   6469		ata_msleep(ap, interval);
   6470		tmp = ioread32(reg);
   6471	}
   6472
   6473	return tmp;
   6474}
   6475EXPORT_SYMBOL_GPL(ata_wait_register);
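
        /*
         * Illustrative use: poll a hypothetical iomapped status register
         * "mmio" until its BSY bit drops, checking every 10 ms for up to
         * 1000 ms.  The loop runs while (reg & mask) == val, so passing the
         * busy bit for both mask and val waits for it to clear.
         *
         *	status = ata_wait_register(ap, mmio, ATA_BUSY, ATA_BUSY, 10, 1000);
         *	if (status & ATA_BUSY)
         *		return -EBUSY;	// timed out, device still busy
         */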
   6476
   6477/*
   6478 * Dummy port_ops
   6479 */
   6480static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
   6481{
   6482	return AC_ERR_SYSTEM;
   6483}
   6484
   6485static void ata_dummy_error_handler(struct ata_port *ap)
   6486{
   6487	/* truly dummy */
   6488}
   6489
   6490struct ata_port_operations ata_dummy_port_ops = {
   6491	.qc_prep		= ata_noop_qc_prep,
   6492	.qc_issue		= ata_dummy_qc_issue,
   6493	.error_handler		= ata_dummy_error_handler,
   6494	.sched_eh		= ata_std_sched_eh,
   6495	.end_eh			= ata_std_end_eh,
   6496};
   6497EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
   6498
   6499const struct ata_port_info ata_dummy_port_info = {
   6500	.port_ops		= &ata_dummy_port_ops,
   6501};
   6502EXPORT_SYMBOL_GPL(ata_dummy_port_info);
   6503
   6504void ata_print_version(const struct device *dev, const char *version)
   6505{
   6506	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
   6507}
   6508EXPORT_SYMBOL(ata_print_version);
   6509
   6510EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
   6511EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
   6512EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
   6513EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
   6514EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);