cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sata_mv.c (125247B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * sata_mv.c - Marvell SATA support
      4 *
      5 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
      6 * Copyright 2005: EMC Corporation, all rights reserved.
      7 * Copyright 2005 Red Hat, Inc.  All rights reserved.
      8 *
      9 * Originally written by Brett Russ.
     10 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
     11 *
     12 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
     13 */
     14
     15/*
     16 * sata_mv TODO list:
     17 *
     18 * --> Develop a low-power-consumption strategy, and implement it.
     19 *
     20 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
     21 *
     22 * --> [Experiment, Marvell value added] Is it possible to use target
     23 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
     24 *       creating LibATA target mode support would be very interesting.
     25 *
     26 *       Target mode, for those without docs, is the ability to directly
     27 *       connect two SATA ports.
     28 */
     29
     30/*
     31 * 80x1-B2 errata PCI#11:
     32 *
     33 * Users of the 6041/6081 Rev.B2 chips (current is C0)
     34 * should be careful to insert those cards only onto PCI-X bus #0,
     35 * and only in device slots 0..7, not higher.  The chips may not
     36 * work correctly otherwise  (note: this is a pretty rare condition).
     37 */
     38
     39#include <linux/kernel.h>
     40#include <linux/module.h>
     41#include <linux/pci.h>
     42#include <linux/init.h>
     43#include <linux/blkdev.h>
     44#include <linux/delay.h>
     45#include <linux/interrupt.h>
     46#include <linux/dmapool.h>
     47#include <linux/dma-mapping.h>
     48#include <linux/device.h>
     49#include <linux/clk.h>
     50#include <linux/phy/phy.h>
     51#include <linux/platform_device.h>
     52#include <linux/ata_platform.h>
     53#include <linux/mbus.h>
     54#include <linux/bitops.h>
     55#include <linux/gfp.h>
     56#include <linux/of.h>
     57#include <linux/of_irq.h>
     58#include <scsi/scsi_host.h>
     59#include <scsi/scsi_cmnd.h>
     60#include <scsi/scsi_device.h>
     61#include <linux/libata.h>
     62
     63#define DRV_NAME	"sata_mv"
     64#define DRV_VERSION	"1.28"
     65
     66/*
     67 * module options
     68 */
     69
     70#ifdef CONFIG_PCI
     71static int msi;
     72module_param(msi, int, S_IRUGO);
     73MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
     74#endif
     75
     76static int irq_coalescing_io_count;
     77module_param(irq_coalescing_io_count, int, S_IRUGO);
     78MODULE_PARM_DESC(irq_coalescing_io_count,
     79		 "IRQ coalescing I/O count threshold (0..255)");
     80
     81static int irq_coalescing_usecs;
     82module_param(irq_coalescing_usecs, int, S_IRUGO);
     83MODULE_PARM_DESC(irq_coalescing_usecs,
     84		 "IRQ coalescing time threshold in usecs");
     85
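/*
 * Illustrative usage of the two coalescing parameters above (example
 * values only, not recommendations):
 *
 *	modprobe sata_mv irq_coalescing_io_count=4 irq_coalescing_usecs=100
 *
 * Both parameters are exposed as read-only sysfs attributes (S_IRUGO),
 * so they are chosen at module load time.  Leaving either one at 0
 * disables IRQ coalescing entirely (see mv_set_irq_coalescing() below).
 */
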
     86enum {
      87	/* BARs are enumerated in terms of pci_resource_start() */
     88	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
     89	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
     90	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
     91
     92	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
     93	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
     94
      95	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
     96	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
     97	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
     98	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */
     99
    100	MV_PCI_REG_BASE		= 0,
    101
    102	/*
    103	 * Per-chip ("all ports") interrupt coalescing feature.
    104	 * This is only for GEN_II / GEN_IIE hardware.
    105	 *
    106	 * Coalescing defers the interrupt until either the IO_THRESHOLD
    107	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
    108	 */
    109	COAL_REG_BASE		= 0x18000,
    110	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
    111	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */
    112
    113	IRQ_COAL_IO_THRESHOLD   = (COAL_REG_BASE + 0xcc),
    114	IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
    115
    116	/*
    117	 * Registers for the (unused here) transaction coalescing feature:
    118	 */
    119	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
    120	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),
    121
    122	SATAHC0_REG_BASE	= 0x20000,
    123	FLASH_CTL		= 0x1046c,
    124	GPIO_PORT_CTL		= 0x104f0,
    125	RESET_CFG		= 0x180d8,
    126
    127	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
    128	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
    129	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
    130	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
    131
    132	MV_MAX_Q_DEPTH		= 32,
    133	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
    134
    135	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
    136	 * CRPB needs alignment on a 256B boundary. Size == 256B
    137	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
    138	 */
    139	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
    140	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
    141	MV_MAX_SG_CT		= 256,
    142	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
    143
    144	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
    145	MV_PORT_HC_SHIFT	= 2,
    146	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
    147	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
    148	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */
    149
    150	/* Host Flags */
    151	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
    152
    153	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
    154
    155	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
    156
    157	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
    158				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
    159
    160	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,
    161
    162	CRQB_FLAG_READ		= (1 << 0),
    163	CRQB_TAG_SHIFT		= 1,
    164	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
    165	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
    166	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
    167	CRQB_CMD_ADDR_SHIFT	= 8,
    168	CRQB_CMD_CS		= (0x2 << 11),
    169	CRQB_CMD_LAST		= (1 << 15),
    170
    171	CRPB_FLAG_STATUS_SHIFT	= 8,
    172	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
    173	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
    174
    175	EPRD_FLAG_END_OF_TBL	= (1 << 31),
    176
    177	/* PCI interface registers */
    178
    179	MV_PCI_COMMAND		= 0xc00,
    180	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
    181	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */
    182
    183	PCI_MAIN_CMD_STS	= 0xd30,
    184	STOP_PCI_MASTER		= (1 << 2),
    185	PCI_MASTER_EMPTY	= (1 << 3),
    186	GLOB_SFT_RST		= (1 << 4),
    187
    188	MV_PCI_MODE		= 0xd00,
    189	MV_PCI_MODE_MASK	= 0x30,
    190
    191	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
    192	MV_PCI_DISC_TIMER	= 0xd04,
    193	MV_PCI_MSI_TRIGGER	= 0xc38,
    194	MV_PCI_SERR_MASK	= 0xc28,
    195	MV_PCI_XBAR_TMOUT	= 0x1d04,
    196	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
    197	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
    198	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
    199	MV_PCI_ERR_COMMAND	= 0x1d50,
    200
    201	PCI_IRQ_CAUSE		= 0x1d58,
    202	PCI_IRQ_MASK		= 0x1d5c,
    203	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
    204
    205	PCIE_IRQ_CAUSE		= 0x1900,
    206	PCIE_IRQ_MASK		= 0x1910,
    207	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
    208
    209	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
    210	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
    211	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
    212	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
    213	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
    214	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
    215	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
    216	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
    217	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
    218	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
    219	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
    220	PCI_ERR			= (1 << 18),
    221	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
    222	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
    223	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
    224	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
    225	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
    226	GPIO_INT		= (1 << 22),
    227	SELF_INT		= (1 << 23),
    228	TWSI_INT		= (1 << 24),
    229	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
    230	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
    231	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
    232
    233	/* SATAHC registers */
    234	HC_CFG			= 0x00,
    235
    236	HC_IRQ_CAUSE		= 0x14,
    237	DMA_IRQ			= (1 << 0),	/* shift by port # */
    238	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
    239	DEV_IRQ			= (1 << 8),	/* shift by port # */
    240
    241	/*
    242	 * Per-HC (Host-Controller) interrupt coalescing feature.
    243	 * This is present on all chip generations.
    244	 *
    245	 * Coalescing defers the interrupt until either the IO_THRESHOLD
    246	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
    247	 */
    248	HC_IRQ_COAL_IO_THRESHOLD	= 0x000c,
    249	HC_IRQ_COAL_TIME_THRESHOLD	= 0x0010,
    250
    251	SOC_LED_CTRL		= 0x2c,
    252	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
    253	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
    254						/*  with dev activity LED */
    255
    256	/* Shadow block registers */
    257	SHD_BLK			= 0x100,
    258	SHD_CTL_AST		= 0x20,		/* ofs from SHD_BLK */
    259
    260	/* SATA registers */
    261	SATA_STATUS		= 0x300,  /* ctrl, err regs follow status */
    262	SATA_ACTIVE		= 0x350,
    263	FIS_IRQ_CAUSE		= 0x364,
    264	FIS_IRQ_CAUSE_AN	= (1 << 9),	/* async notification */
    265
    266	LTMODE			= 0x30c,	/* requires read-after-write */
    267	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
    268
    269	PHY_MODE2		= 0x330,
    270	PHY_MODE3		= 0x310,
    271
    272	PHY_MODE4		= 0x314,	/* requires read-after-write */
    273	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
    274	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
    275	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
    276	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */
    277
    278	SATA_IFCTL		= 0x344,
    279	SATA_TESTCTL		= 0x348,
    280	SATA_IFSTAT		= 0x34c,
    281	VENDOR_UNIQUE_FIS	= 0x35c,
    282
    283	FISCFG			= 0x360,
    284	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
    285	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
    286
    287	PHY_MODE9_GEN2		= 0x398,
    288	PHY_MODE9_GEN1		= 0x39c,
    289	PHYCFG_OFS		= 0x3a0,	/* only in 65n devices */
    290
    291	MV5_PHY_MODE		= 0x74,
    292	MV5_LTMODE		= 0x30,
    293	MV5_PHY_CTL		= 0x0C,
    294	SATA_IFCFG		= 0x050,
    295	LP_PHY_CTL		= 0x058,
    296	LP_PHY_CTL_PIN_PU_PLL   = (1 << 0),
    297	LP_PHY_CTL_PIN_PU_RX    = (1 << 1),
    298	LP_PHY_CTL_PIN_PU_TX    = (1 << 2),
    299	LP_PHY_CTL_GEN_TX_3G    = (1 << 5),
    300	LP_PHY_CTL_GEN_RX_3G    = (1 << 9),
    301
    302	MV_M2_PREAMP_MASK	= 0x7e0,
    303
    304	/* Port registers */
    305	EDMA_CFG		= 0,
    306	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
    307	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
    308	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
    309	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
    310	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
    311	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
    312	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
    313
    314	EDMA_ERR_IRQ_CAUSE	= 0x8,
    315	EDMA_ERR_IRQ_MASK	= 0xc,
    316	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
    317	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
    318	EDMA_ERR_DEV		= (1 << 2),	/* device error */
    319	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
    320	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
    321	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
    322	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
    323	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
    324	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
     325	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
    326	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
    327	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
    328	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
    329	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
    330
    331	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
    332	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
    333	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
    334	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
    335	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */
    336
    337	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
    338
    339	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
    340	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
    341	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
    342	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
    343	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
    344	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */
    345
    346	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
    347
    348	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
    349	EDMA_ERR_OVERRUN_5	= (1 << 5),
    350	EDMA_ERR_UNDERRUN_5	= (1 << 6),
    351
    352	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
    353				  EDMA_ERR_LNK_CTRL_RX_1 |
    354				  EDMA_ERR_LNK_CTRL_RX_3 |
    355				  EDMA_ERR_LNK_CTRL_TX,
    356
    357	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
    358				  EDMA_ERR_PRD_PAR |
    359				  EDMA_ERR_DEV_DCON |
    360				  EDMA_ERR_DEV_CON |
    361				  EDMA_ERR_SERR |
    362				  EDMA_ERR_SELF_DIS |
    363				  EDMA_ERR_CRQB_PAR |
    364				  EDMA_ERR_CRPB_PAR |
    365				  EDMA_ERR_INTRL_PAR |
    366				  EDMA_ERR_IORDY |
    367				  EDMA_ERR_LNK_CTRL_RX_2 |
    368				  EDMA_ERR_LNK_DATA_RX |
    369				  EDMA_ERR_LNK_DATA_TX |
    370				  EDMA_ERR_TRANS_PROTO,
    371
    372	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
    373				  EDMA_ERR_PRD_PAR |
    374				  EDMA_ERR_DEV_DCON |
    375				  EDMA_ERR_DEV_CON |
    376				  EDMA_ERR_OVERRUN_5 |
    377				  EDMA_ERR_UNDERRUN_5 |
    378				  EDMA_ERR_SELF_DIS_5 |
    379				  EDMA_ERR_CRQB_PAR |
    380				  EDMA_ERR_CRPB_PAR |
    381				  EDMA_ERR_INTRL_PAR |
    382				  EDMA_ERR_IORDY,
    383
    384	EDMA_REQ_Q_BASE_HI	= 0x10,
    385	EDMA_REQ_Q_IN_PTR	= 0x14,		/* also contains BASE_LO */
    386
    387	EDMA_REQ_Q_OUT_PTR	= 0x18,
    388	EDMA_REQ_Q_PTR_SHIFT	= 5,
    389
    390	EDMA_RSP_Q_BASE_HI	= 0x1c,
    391	EDMA_RSP_Q_IN_PTR	= 0x20,
    392	EDMA_RSP_Q_OUT_PTR	= 0x24,		/* also contains BASE_LO */
    393	EDMA_RSP_Q_PTR_SHIFT	= 3,
    394
    395	EDMA_CMD		= 0x28,		/* EDMA command register */
    396	EDMA_EN			= (1 << 0),	/* enable EDMA */
    397	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
    398	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */
    399
    400	EDMA_STATUS		= 0x30,		/* EDMA engine status */
    401	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
    402	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */
    403
    404	EDMA_IORDY_TMOUT	= 0x34,
    405	EDMA_ARB_CFG		= 0x38,
    406
    407	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
    408	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */
    409
    410	BMDMA_CMD		= 0x224,	/* bmdma command register */
    411	BMDMA_STATUS		= 0x228,	/* bmdma status register */
    412	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
    413	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */
    414
    415	/* Host private flags (hp_flags) */
    416	MV_HP_FLAG_MSI		= (1 << 0),
    417	MV_HP_ERRATA_50XXB0	= (1 << 1),
    418	MV_HP_ERRATA_50XXB2	= (1 << 2),
    419	MV_HP_ERRATA_60X1B2	= (1 << 3),
    420	MV_HP_ERRATA_60X1C0	= (1 << 4),
    421	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
    422	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
    423	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
    424	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
    425	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
    426	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
    427	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */
    428	MV_HP_FIX_LP_PHY_CTL	= (1 << 13),	/* fix speed in LP_PHY_CTL ? */
    429
    430	/* Port private flags (pp_flags) */
    431	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
    432	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
    433	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
    434	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
    435	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
    436};
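
/*
 * Illustrative derivation of the DONE_IRQ_0_3 / DONE_IRQ_4_7 masks above:
 * DONE_IRQ is bit 1, shifted by (2 * port) for ports 0..3, which sets
 * bits 1, 3, 5 and 7, i.e. 0x000000aa.  DONE_IRQ_4_7 is the same pattern
 * shifted up by HC_SHIFT (9) to cover HC1's ports 4..7.
 */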
    437
    438#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
    439#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
    440#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
    441#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
    442#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
    443
    444#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
    445#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
    446
    447enum {
    448	/* DMA boundary 0xffff is required by the s/g splitting
     449	 * we need on /length/ in mv_fill_sg().
    450	 */
    451	MV_DMA_BOUNDARY		= 0xffffU,
    452
    453	/* mask of register bits containing lower 32 bits
    454	 * of EDMA request queue DMA address
    455	 */
    456	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,
    457
    458	/* ditto, for response queue */
    459	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
    460};
    461
    462enum chip_type {
    463	chip_504x,
    464	chip_508x,
    465	chip_5080,
    466	chip_604x,
    467	chip_608x,
    468	chip_6042,
    469	chip_7042,
    470	chip_soc,
    471};
    472
    473/* Command ReQuest Block: 32B */
    474struct mv_crqb {
    475	__le32			sg_addr;
    476	__le32			sg_addr_hi;
    477	__le16			ctrl_flags;
    478	__le16			ata_cmd[11];
    479};
    480
    481struct mv_crqb_iie {
    482	__le32			addr;
    483	__le32			addr_hi;
    484	__le32			flags;
    485	__le32			len;
    486	__le32			ata_cmd[4];
    487};
    488
    489/* Command ResPonse Block: 8B */
    490struct mv_crpb {
    491	__le16			id;
    492	__le16			flags;
    493	__le32			tmstmp;
    494};
    495
    496/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
    497struct mv_sg {
    498	__le32			addr;
    499	__le32			flags_size;
    500	__le32			addr_hi;
    501	__le32			reserved;
    502};
    503
    504/*
    505 * We keep a local cache of a few frequently accessed port
    506 * registers here, to avoid having to read them (very slow)
    507 * when switching between EDMA and non-EDMA modes.
    508 */
    509struct mv_cached_regs {
    510	u32			fiscfg;
    511	u32			ltmode;
    512	u32			haltcond;
    513	u32			unknown_rsvd;
    514};
    515
    516struct mv_port_priv {
    517	struct mv_crqb		*crqb;
    518	dma_addr_t		crqb_dma;
    519	struct mv_crpb		*crpb;
    520	dma_addr_t		crpb_dma;
    521	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
    522	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];
    523
    524	unsigned int		req_idx;
    525	unsigned int		resp_idx;
    526
    527	u32			pp_flags;
    528	struct mv_cached_regs	cached;
    529	unsigned int		delayed_eh_pmp_map;
    530};
    531
    532struct mv_port_signal {
    533	u32			amps;
    534	u32			pre;
    535};
    536
    537struct mv_host_priv {
    538	u32			hp_flags;
    539	unsigned int 		board_idx;
    540	u32			main_irq_mask;
    541	struct mv_port_signal	signal[8];
    542	const struct mv_hw_ops	*ops;
    543	int			n_ports;
    544	void __iomem		*base;
    545	void __iomem		*main_irq_cause_addr;
    546	void __iomem		*main_irq_mask_addr;
    547	u32			irq_cause_offset;
    548	u32			irq_mask_offset;
    549	u32			unmask_all_irqs;
    550
    551	/*
    552	 * Needed on some devices that require their clocks to be enabled.
    553	 * These are optional: if the platform device does not have any
    554	 * clocks, they won't be used.  Also, if the underlying hardware
    555	 * does not support the common clock framework (CONFIG_HAVE_CLK=n),
    556	 * all the clock operations become no-ops (see clk.h).
    557	 */
    558	struct clk		*clk;
    559	struct clk              **port_clks;
    560	/*
    561	 * Some devices have a SATA PHY which can be enabled/disabled
    562	 * in order to save power. These are optional: if the platform
     563	 * device does not have any phy, they won't be used.
    564	 */
    565	struct phy		**port_phys;
    566	/*
    567	 * These consistent DMA memory pools give us guaranteed
    568	 * alignment for hardware-accessed data structures,
    569	 * and less memory waste in accomplishing the alignment.
    570	 */
    571	struct dma_pool		*crqb_pool;
    572	struct dma_pool		*crpb_pool;
    573	struct dma_pool		*sg_tbl_pool;
    574};
    575
    576struct mv_hw_ops {
    577	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
    578			   unsigned int port);
    579	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
    580	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
    581			   void __iomem *mmio);
    582	int (*reset_hc)(struct ata_host *host, void __iomem *mmio,
    583			unsigned int n_hc);
    584	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
    585	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
    586};
    587
    588static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
    589static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
    590static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
    591static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
    592static int mv_port_start(struct ata_port *ap);
    593static void mv_port_stop(struct ata_port *ap);
    594static int mv_qc_defer(struct ata_queued_cmd *qc);
    595static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
    596static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
    597static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
    598static int mv_hardreset(struct ata_link *link, unsigned int *class,
    599			unsigned long deadline);
    600static void mv_eh_freeze(struct ata_port *ap);
    601static void mv_eh_thaw(struct ata_port *ap);
    602static void mv6_dev_config(struct ata_device *dev);
    603
    604static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
    605			   unsigned int port);
    606static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
    607static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
    608			   void __iomem *mmio);
    609static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
    610			unsigned int n_hc);
    611static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
    612static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
    613
    614static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
    615			   unsigned int port);
    616static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
    617static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
    618			   void __iomem *mmio);
    619static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
    620			unsigned int n_hc);
    621static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
    622static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
    623				      void __iomem *mmio);
    624static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
    625				      void __iomem *mmio);
    626static int mv_soc_reset_hc(struct ata_host *host,
    627				  void __iomem *mmio, unsigned int n_hc);
    628static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
    629				      void __iomem *mmio);
    630static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
    631static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
    632				  void __iomem *mmio, unsigned int port);
    633static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
    634static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
    635			     unsigned int port_no);
    636static int mv_stop_edma(struct ata_port *ap);
    637static int mv_stop_edma_engine(void __iomem *port_mmio);
    638static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
    639
    640static void mv_pmp_select(struct ata_port *ap, int pmp);
    641static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
    642				unsigned long deadline);
    643static int  mv_softreset(struct ata_link *link, unsigned int *class,
    644				unsigned long deadline);
    645static void mv_pmp_error_handler(struct ata_port *ap);
    646static void mv_process_crpb_entries(struct ata_port *ap,
    647					struct mv_port_priv *pp);
    648
    649static void mv_sff_irq_clear(struct ata_port *ap);
    650static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
    651static void mv_bmdma_setup(struct ata_queued_cmd *qc);
    652static void mv_bmdma_start(struct ata_queued_cmd *qc);
    653static void mv_bmdma_stop(struct ata_queued_cmd *qc);
    654static u8   mv_bmdma_status(struct ata_port *ap);
    655static u8 mv_sff_check_status(struct ata_port *ap);
    656
    657/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
    658 * because we have to allow room for worst case splitting of
    659 * PRDs for 64K boundaries in mv_fill_sg().
    660 */
    661#ifdef CONFIG_PCI
    662static struct scsi_host_template mv5_sht = {
    663	ATA_BASE_SHT(DRV_NAME),
    664	.sg_tablesize		= MV_MAX_SG_CT / 2,
    665	.dma_boundary		= MV_DMA_BOUNDARY,
    666};
    667#endif
    668static struct scsi_host_template mv6_sht = {
    669	__ATA_BASE_SHT(DRV_NAME),
    670	.can_queue		= MV_MAX_Q_DEPTH - 1,
    671	.sg_tablesize		= MV_MAX_SG_CT / 2,
    672	.dma_boundary		= MV_DMA_BOUNDARY,
    673	.sdev_groups		= ata_ncq_sdev_groups,
    674	.change_queue_depth	= ata_scsi_change_queue_depth,
    675	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
    676	.slave_configure	= ata_scsi_slave_config
    677};
    678
    679static struct ata_port_operations mv5_ops = {
    680	.inherits		= &ata_sff_port_ops,
    681
    682	.lost_interrupt		= ATA_OP_NULL,
    683
    684	.qc_defer		= mv_qc_defer,
    685	.qc_prep		= mv_qc_prep,
    686	.qc_issue		= mv_qc_issue,
    687
    688	.freeze			= mv_eh_freeze,
    689	.thaw			= mv_eh_thaw,
    690	.hardreset		= mv_hardreset,
    691
    692	.scr_read		= mv5_scr_read,
    693	.scr_write		= mv5_scr_write,
    694
    695	.port_start		= mv_port_start,
    696	.port_stop		= mv_port_stop,
    697};
    698
    699static struct ata_port_operations mv6_ops = {
    700	.inherits		= &ata_bmdma_port_ops,
    701
    702	.lost_interrupt		= ATA_OP_NULL,
    703
    704	.qc_defer		= mv_qc_defer,
    705	.qc_prep		= mv_qc_prep,
    706	.qc_issue		= mv_qc_issue,
    707
    708	.dev_config             = mv6_dev_config,
    709
    710	.freeze			= mv_eh_freeze,
    711	.thaw			= mv_eh_thaw,
    712	.hardreset		= mv_hardreset,
    713	.softreset		= mv_softreset,
    714	.pmp_hardreset		= mv_pmp_hardreset,
    715	.pmp_softreset		= mv_softreset,
    716	.error_handler		= mv_pmp_error_handler,
    717
    718	.scr_read		= mv_scr_read,
    719	.scr_write		= mv_scr_write,
    720
    721	.sff_check_status	= mv_sff_check_status,
    722	.sff_irq_clear		= mv_sff_irq_clear,
    723	.check_atapi_dma	= mv_check_atapi_dma,
    724	.bmdma_setup		= mv_bmdma_setup,
    725	.bmdma_start		= mv_bmdma_start,
    726	.bmdma_stop		= mv_bmdma_stop,
    727	.bmdma_status		= mv_bmdma_status,
    728
    729	.port_start		= mv_port_start,
    730	.port_stop		= mv_port_stop,
    731};
    732
    733static struct ata_port_operations mv_iie_ops = {
    734	.inherits		= &mv6_ops,
    735	.dev_config		= ATA_OP_NULL,
    736	.qc_prep		= mv_qc_prep_iie,
    737};
    738
    739static const struct ata_port_info mv_port_info[] = {
    740	{  /* chip_504x */
    741		.flags		= MV_GEN_I_FLAGS,
    742		.pio_mask	= ATA_PIO4,
    743		.udma_mask	= ATA_UDMA6,
    744		.port_ops	= &mv5_ops,
    745	},
    746	{  /* chip_508x */
    747		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
    748		.pio_mask	= ATA_PIO4,
    749		.udma_mask	= ATA_UDMA6,
    750		.port_ops	= &mv5_ops,
    751	},
    752	{  /* chip_5080 */
    753		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
    754		.pio_mask	= ATA_PIO4,
    755		.udma_mask	= ATA_UDMA6,
    756		.port_ops	= &mv5_ops,
    757	},
    758	{  /* chip_604x */
    759		.flags		= MV_GEN_II_FLAGS,
    760		.pio_mask	= ATA_PIO4,
    761		.udma_mask	= ATA_UDMA6,
    762		.port_ops	= &mv6_ops,
    763	},
    764	{  /* chip_608x */
    765		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
    766		.pio_mask	= ATA_PIO4,
    767		.udma_mask	= ATA_UDMA6,
    768		.port_ops	= &mv6_ops,
    769	},
    770	{  /* chip_6042 */
    771		.flags		= MV_GEN_IIE_FLAGS,
    772		.pio_mask	= ATA_PIO4,
    773		.udma_mask	= ATA_UDMA6,
    774		.port_ops	= &mv_iie_ops,
    775	},
    776	{  /* chip_7042 */
    777		.flags		= MV_GEN_IIE_FLAGS,
    778		.pio_mask	= ATA_PIO4,
    779		.udma_mask	= ATA_UDMA6,
    780		.port_ops	= &mv_iie_ops,
    781	},
    782	{  /* chip_soc */
    783		.flags		= MV_GEN_IIE_FLAGS,
    784		.pio_mask	= ATA_PIO4,
    785		.udma_mask	= ATA_UDMA6,
    786		.port_ops	= &mv_iie_ops,
    787	},
    788};
    789
    790static const struct pci_device_id mv_pci_tbl[] = {
    791	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
    792	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
    793	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
    794	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
    795	/* RocketRAID 1720/174x have different identifiers */
    796	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
    797	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
    798	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },
    799
    800	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
    801	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
    802	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
    803	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
    804	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
    805
    806	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
    807
    808	/* Adaptec 1430SA */
    809	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
    810
    811	/* Marvell 7042 support */
    812	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
    813
    814	/* Highpoint RocketRAID PCIe series */
    815	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
    816	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
    817
    818	{ }			/* terminate list */
    819};
    820
    821static const struct mv_hw_ops mv5xxx_ops = {
    822	.phy_errata		= mv5_phy_errata,
    823	.enable_leds		= mv5_enable_leds,
    824	.read_preamp		= mv5_read_preamp,
    825	.reset_hc		= mv5_reset_hc,
    826	.reset_flash		= mv5_reset_flash,
    827	.reset_bus		= mv5_reset_bus,
    828};
    829
    830static const struct mv_hw_ops mv6xxx_ops = {
    831	.phy_errata		= mv6_phy_errata,
    832	.enable_leds		= mv6_enable_leds,
    833	.read_preamp		= mv6_read_preamp,
    834	.reset_hc		= mv6_reset_hc,
    835	.reset_flash		= mv6_reset_flash,
    836	.reset_bus		= mv_reset_pci_bus,
    837};
    838
    839static const struct mv_hw_ops mv_soc_ops = {
    840	.phy_errata		= mv6_phy_errata,
    841	.enable_leds		= mv_soc_enable_leds,
    842	.read_preamp		= mv_soc_read_preamp,
    843	.reset_hc		= mv_soc_reset_hc,
    844	.reset_flash		= mv_soc_reset_flash,
    845	.reset_bus		= mv_soc_reset_bus,
    846};
    847
    848static const struct mv_hw_ops mv_soc_65n_ops = {
    849	.phy_errata		= mv_soc_65n_phy_errata,
    850	.enable_leds		= mv_soc_enable_leds,
    851	.reset_hc		= mv_soc_reset_hc,
    852	.reset_flash		= mv_soc_reset_flash,
    853	.reset_bus		= mv_soc_reset_bus,
    854};
    855
    856/*
    857 * Functions
    858 */
    859
    860static inline void writelfl(unsigned long data, void __iomem *addr)
    861{
    862	writel(data, addr);
    863	(void) readl(addr);	/* flush to avoid PCI posted write */
    864}
    865
    866static inline unsigned int mv_hc_from_port(unsigned int port)
    867{
    868	return port >> MV_PORT_HC_SHIFT;
    869}
    870
    871static inline unsigned int mv_hardport_from_port(unsigned int port)
    872{
    873	return port & MV_PORT_MASK;
    874}
    875
    876/*
    877 * Consolidate some rather tricky bit shift calculations.
    878 * This is hot-path stuff, so not a function.
    879 * Simple code, with two return values, so macro rather than inline.
    880 *
    881 * port is the sole input, in range 0..7.
    882 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
    883 * hardport is the other output, in range 0..3.
    884 *
    885 * Note that port and hardport may be the same variable in some cases.
    886 */
    887#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
    888{								\
    889	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
    890	hardport = mv_hardport_from_port(port);			\
    891	shift   += hardport * 2;				\
    892}
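
/*
 * Worked example (illustrative): for port 5,
 *	mv_hc_from_port(5)       = 5 >> 2 = 1		(second HC)
 *	mv_hardport_from_port(5) = 5 & 3  = 1
 *	shift = 1 * HC_SHIFT + 1 * 2 = 9 + 2 = 11
 * so (DONE_IRQ | ERR_IRQ) << shift selects port 5's bits in the
 * main_irq_cause / main_irq_mask registers.
 */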
    893
    894static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
    895{
    896	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
    897}
    898
    899static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
    900						 unsigned int port)
    901{
    902	return mv_hc_base(base, mv_hc_from_port(port));
    903}
    904
    905static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
    906{
    907	return  mv_hc_base_from_port(base, port) +
    908		MV_SATAHC_ARBTR_REG_SZ +
    909		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
    910}
    911
    912static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
    913{
    914	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
    915	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
    916
    917	return hc_mmio + ofs;
    918}
    919
    920static inline void __iomem *mv_host_base(struct ata_host *host)
    921{
    922	struct mv_host_priv *hpriv = host->private_data;
    923	return hpriv->base;
    924}
    925
    926static inline void __iomem *mv_ap_base(struct ata_port *ap)
    927{
    928	return mv_port_base(mv_host_base(ap->host), ap->port_no);
    929}
    930
    931static inline int mv_get_hc_count(unsigned long port_flags)
    932{
    933	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
    934}
    935
    936/**
    937 *      mv_save_cached_regs - (re-)initialize cached port registers
    938 *      @ap: the port whose registers we are caching
    939 *
    940 *	Initialize the local cache of port registers,
    941 *	so that reading them over and over again can
    942 *	be avoided on the hotter paths of this driver.
    943 *	This saves a few microseconds each time we switch
     944	 * to/from EDMA mode to perform (e.g.) a drive cache flush.
    945 */
    946static void mv_save_cached_regs(struct ata_port *ap)
    947{
    948	void __iomem *port_mmio = mv_ap_base(ap);
    949	struct mv_port_priv *pp = ap->private_data;
    950
    951	pp->cached.fiscfg = readl(port_mmio + FISCFG);
    952	pp->cached.ltmode = readl(port_mmio + LTMODE);
    953	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
    954	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
    955}
    956
    957/**
    958 *      mv_write_cached_reg - write to a cached port register
    959 *      @addr: hardware address of the register
    960 *      @old: pointer to cached value of the register
    961 *      @new: new value for the register
    962 *
    963 *	Write a new value to a cached register,
    964 *	but only if the value is different from before.
    965 */
    966static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
    967{
    968	if (new != *old) {
    969		unsigned long laddr;
    970		*old = new;
    971		/*
    972		 * Workaround for 88SX60x1-B2 FEr SATA#13:
    973		 * Read-after-write is needed to prevent generating 64-bit
    974		 * write cycles on the PCI bus for SATA interface registers
    975		 * at offsets ending in 0x4 or 0xc.
    976		 *
    977		 * Looks like a lot of fuss, but it avoids an unnecessary
    978		 * +1 usec read-after-write delay for unaffected registers.
    979		 */
    980		laddr = (unsigned long)addr & 0xffff;
    981		if (laddr >= 0x300 && laddr <= 0x33c) {
    982			laddr &= 0x000f;
    983			if (laddr == 0x4 || laddr == 0xc) {
    984				writelfl(new, addr); /* read after write */
    985				return;
    986			}
    987		}
    988		writel(new, addr); /* unaffected by the errata */
    989	}
    990}
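
/*
 * Example of the errata check above (illustrative): LTMODE (0x30c) and
 * PHY_MODE4 (0x314) fall inside the 0x300..0x33c window and end in
 * 0xc/0x4, so they take the flushing writelfl() path; registers such as
 * FISCFG (0x360) or EDMA_HALTCOND (0x60) lie outside that window and
 * are written with a plain writel().
 */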
    991
    992static void mv_set_edma_ptrs(void __iomem *port_mmio,
    993			     struct mv_host_priv *hpriv,
    994			     struct mv_port_priv *pp)
    995{
    996	u32 index;
    997
    998	/*
    999	 * initialize request queue
   1000	 */
   1001	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
   1002	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
   1003
   1004	WARN_ON(pp->crqb_dma & 0x3ff);
   1005	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
   1006	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
   1007		 port_mmio + EDMA_REQ_Q_IN_PTR);
   1008	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
   1009
   1010	/*
   1011	 * initialize response queue
   1012	 */
   1013	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
   1014	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
   1015
   1016	WARN_ON(pp->crpb_dma & 0xff);
   1017	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
   1018	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
   1019	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
   1020		 port_mmio + EDMA_RSP_Q_OUT_PTR);
   1021}
   1022
   1023static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
   1024{
   1025	/*
   1026	 * When writing to the main_irq_mask in hardware,
   1027	 * we must ensure exclusivity between the interrupt coalescing bits
   1028	 * and the corresponding individual port DONE_IRQ bits.
   1029	 *
   1030	 * Note that this register is really an "IRQ enable" register,
   1031	 * not an "IRQ mask" register as Marvell's naming might suggest.
   1032	 */
   1033	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
   1034		mask &= ~DONE_IRQ_0_3;
   1035	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
   1036		mask &= ~DONE_IRQ_4_7;
   1037	writelfl(mask, hpriv->main_irq_mask_addr);
   1038}
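
/*
 * Example (illustrative): when ALL_PORTS_COAL_DONE is being enabled, the
 * per-port DONE_IRQ bits for ports 0..7 are stripped from the mask above,
 * so completions surface only through the coalescing interrupt instead of
 * also raising one DONE interrupt per command.
 */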
   1039
   1040static void mv_set_main_irq_mask(struct ata_host *host,
   1041				 u32 disable_bits, u32 enable_bits)
   1042{
   1043	struct mv_host_priv *hpriv = host->private_data;
   1044	u32 old_mask, new_mask;
   1045
   1046	old_mask = hpriv->main_irq_mask;
   1047	new_mask = (old_mask & ~disable_bits) | enable_bits;
   1048	if (new_mask != old_mask) {
   1049		hpriv->main_irq_mask = new_mask;
   1050		mv_write_main_irq_mask(new_mask, hpriv);
   1051	}
   1052}
   1053
   1054static void mv_enable_port_irqs(struct ata_port *ap,
   1055				     unsigned int port_bits)
   1056{
   1057	unsigned int shift, hardport, port = ap->port_no;
   1058	u32 disable_bits, enable_bits;
   1059
   1060	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
   1061
   1062	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
   1063	enable_bits  = port_bits << shift;
   1064	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
   1065}
   1066
   1067static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
   1068					  void __iomem *port_mmio,
   1069					  unsigned int port_irqs)
   1070{
   1071	struct mv_host_priv *hpriv = ap->host->private_data;
   1072	int hardport = mv_hardport_from_port(ap->port_no);
   1073	void __iomem *hc_mmio = mv_hc_base_from_port(
   1074				mv_host_base(ap->host), ap->port_no);
   1075	u32 hc_irq_cause;
   1076
   1077	/* clear EDMA event indicators, if any */
   1078	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
   1079
   1080	/* clear pending irq events */
   1081	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
   1082	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
   1083
   1084	/* clear FIS IRQ Cause */
   1085	if (IS_GEN_IIE(hpriv))
   1086		writelfl(0, port_mmio + FIS_IRQ_CAUSE);
   1087
   1088	mv_enable_port_irqs(ap, port_irqs);
   1089}
   1090
   1091static void mv_set_irq_coalescing(struct ata_host *host,
   1092				  unsigned int count, unsigned int usecs)
   1093{
   1094	struct mv_host_priv *hpriv = host->private_data;
   1095	void __iomem *mmio = hpriv->base, *hc_mmio;
   1096	u32 coal_enable = 0;
   1097	unsigned long flags;
   1098	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
   1099	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
   1100							ALL_PORTS_COAL_DONE;
   1101
   1102	/* Disable IRQ coalescing if either threshold is zero */
   1103	if (!usecs || !count) {
   1104		clks = count = 0;
   1105	} else {
   1106		/* Respect maximum limits of the hardware */
   1107		clks = usecs * COAL_CLOCKS_PER_USEC;
   1108		if (clks > MAX_COAL_TIME_THRESHOLD)
   1109			clks = MAX_COAL_TIME_THRESHOLD;
   1110		if (count > MAX_COAL_IO_COUNT)
   1111			count = MAX_COAL_IO_COUNT;
   1112	}
   1113
   1114	spin_lock_irqsave(&host->lock, flags);
   1115	mv_set_main_irq_mask(host, coal_disable, 0);
   1116
   1117	if (is_dual_hc && !IS_GEN_I(hpriv)) {
   1118		/*
   1119		 * GEN_II/GEN_IIE with dual host controllers:
   1120		 * one set of global thresholds for the entire chip.
   1121		 */
   1122		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
   1123		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
   1124		/* clear leftover coal IRQ bit */
   1125		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
   1126		if (count)
   1127			coal_enable = ALL_PORTS_COAL_DONE;
   1128		clks = count = 0; /* force clearing of regular regs below */
   1129	}
   1130
   1131	/*
   1132	 * All chips: independent thresholds for each HC on the chip.
   1133	 */
   1134	hc_mmio = mv_hc_base_from_port(mmio, 0);
   1135	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
   1136	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
   1137	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
   1138	if (count)
   1139		coal_enable |= PORTS_0_3_COAL_DONE;
   1140	if (is_dual_hc) {
   1141		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
   1142		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
   1143		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
   1144		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
   1145		if (count)
   1146			coal_enable |= PORTS_4_7_COAL_DONE;
   1147	}
   1148
   1149	mv_set_main_irq_mask(host, 0, coal_enable);
   1150	spin_unlock_irqrestore(&host->lock, flags);
   1151}
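
/*
 * Worked example (illustrative): usecs=100 and count=4 give
 * clks = 100 * COAL_CLOCKS_PER_USEC = 15000 (well below
 * MAX_COAL_TIME_THRESHOLD), so a DONE interrupt is raised after 4
 * completed commands or roughly 100us, whichever threshold is reached
 * first.  Either value being 0 leaves coalescing disabled, and
 * completions raise individual DONE interrupts again.
 */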
   1152
   1153/*
   1154 *      mv_start_edma - Enable eDMA engine
   1155 *      @pp: port private data
   1156 *
   1157 *      Verify the local cache of the eDMA state is accurate with a
   1158 *      WARN_ON.
   1159 *
   1160 *      LOCKING:
   1161 *      Inherited from caller.
   1162 */
   1163static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
   1164			 struct mv_port_priv *pp, u8 protocol)
   1165{
   1166	int want_ncq = (protocol == ATA_PROT_NCQ);
   1167
   1168	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
   1169		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
   1170		if (want_ncq != using_ncq)
   1171			mv_stop_edma(ap);
   1172	}
   1173	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
   1174		struct mv_host_priv *hpriv = ap->host->private_data;
   1175
   1176		mv_edma_cfg(ap, want_ncq, 1);
   1177
   1178		mv_set_edma_ptrs(port_mmio, hpriv, pp);
   1179		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
   1180
   1181		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
   1182		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
   1183	}
   1184}
   1185
   1186static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
   1187{
   1188	void __iomem *port_mmio = mv_ap_base(ap);
   1189	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
   1190	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
   1191	int i;
   1192
   1193	/*
   1194	 * Wait for the EDMA engine to finish transactions in progress.
   1195	 * No idea what a good "timeout" value might be, but measurements
   1196	 * indicate that it often requires hundreds of microseconds
   1197	 * with two drives in-use.  So we use the 15msec value above
   1198	 * as a rough guess at what even more drives might require.
   1199	 */
   1200	for (i = 0; i < timeout; ++i) {
   1201		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
   1202		if ((edma_stat & empty_idle) == empty_idle)
   1203			break;
   1204		udelay(per_loop);
   1205	}
   1206	/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
   1207}
   1208
   1209/**
   1210 *      mv_stop_edma_engine - Disable eDMA engine
   1211 *      @port_mmio: io base address
   1212 *
   1213 *      LOCKING:
   1214 *      Inherited from caller.
   1215 */
   1216static int mv_stop_edma_engine(void __iomem *port_mmio)
   1217{
   1218	int i;
   1219
   1220	/* Disable eDMA.  The disable bit auto clears. */
   1221	writelfl(EDMA_DS, port_mmio + EDMA_CMD);
   1222
   1223	/* Wait for the chip to confirm eDMA is off. */
   1224	for (i = 10000; i > 0; i--) {
   1225		u32 reg = readl(port_mmio + EDMA_CMD);
   1226		if (!(reg & EDMA_EN))
   1227			return 0;
   1228		udelay(10);
   1229	}
   1230	return -EIO;
   1231}
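
/*
 * Illustrative bound: the polling loop above allows up to
 * 10000 * 10us = ~100ms for EDMA_EN to clear before giving up
 * with -EIO.
 */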
   1232
   1233static int mv_stop_edma(struct ata_port *ap)
   1234{
   1235	void __iomem *port_mmio = mv_ap_base(ap);
   1236	struct mv_port_priv *pp = ap->private_data;
   1237	int err = 0;
   1238
   1239	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
   1240		return 0;
   1241	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
   1242	mv_wait_for_edma_empty_idle(ap);
   1243	if (mv_stop_edma_engine(port_mmio)) {
   1244		ata_port_err(ap, "Unable to stop eDMA\n");
   1245		err = -EIO;
   1246	}
   1247	mv_edma_cfg(ap, 0, 0);
   1248	return err;
   1249}
   1250
   1251static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes)
   1252{
   1253	int b, w, o;
   1254	unsigned char linebuf[38];
   1255
   1256	for (b = 0; b < bytes; ) {
   1257		for (w = 0, o = 0; b < bytes && w < 4; w++) {
   1258			o += snprintf(linebuf + o, sizeof(linebuf) - o,
   1259				      "%08x ", readl(start + b));
   1260			b += sizeof(u32);
   1261		}
   1262		dev_dbg(dev, "%s: %p: %s\n",
   1263			__func__, start + b, linebuf);
   1264	}
   1265}
   1266
   1267static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
   1268{
   1269	int b, w, o;
   1270	u32 dw = 0;
   1271	unsigned char linebuf[38];
   1272
   1273	for (b = 0; b < bytes; ) {
   1274		for (w = 0, o = 0; b < bytes && w < 4; w++) {
   1275			(void) pci_read_config_dword(pdev, b, &dw);
   1276			o += snprintf(linebuf + o, sizeof(linebuf) - o,
   1277				      "%08x ", dw);
   1278			b += sizeof(u32);
   1279		}
   1280		dev_dbg(&pdev->dev, "%s: %02x: %s\n",
   1281			__func__, b, linebuf);
   1282	}
   1283}
   1284
   1285static void mv_dump_all_regs(void __iomem *mmio_base,
   1286			     struct pci_dev *pdev)
   1287{
   1288	void __iomem *hc_base;
   1289	void __iomem *port_base;
   1290	int start_port, num_ports, p, start_hc, num_hcs, hc;
   1291
   1292	start_hc = start_port = 0;
   1293	num_ports = 8;		/* should be benign for 4 port devs */
   1294	num_hcs = 2;
   1295	dev_dbg(&pdev->dev,
   1296		"%s: All registers for port(s) %u-%u:\n", __func__,
   1297		start_port, num_ports > 1 ? num_ports - 1 : start_port);
   1298
   1299	dev_dbg(&pdev->dev, "%s: PCI config space regs:\n", __func__);
   1300	mv_dump_pci_cfg(pdev, 0x68);
   1301
   1302	dev_dbg(&pdev->dev, "%s: PCI regs:\n", __func__);
   1303	mv_dump_mem(&pdev->dev, mmio_base+0xc00, 0x3c);
   1304	mv_dump_mem(&pdev->dev, mmio_base+0xd00, 0x34);
   1305	mv_dump_mem(&pdev->dev, mmio_base+0xf00, 0x4);
   1306	mv_dump_mem(&pdev->dev, mmio_base+0x1d00, 0x6c);
   1307	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
   1308		hc_base = mv_hc_base(mmio_base, hc);
   1309		dev_dbg(&pdev->dev, "%s: HC regs (HC %i):\n", __func__, hc);
   1310		mv_dump_mem(&pdev->dev, hc_base, 0x1c);
   1311	}
   1312	for (p = start_port; p < start_port + num_ports; p++) {
   1313		port_base = mv_port_base(mmio_base, p);
   1314		dev_dbg(&pdev->dev, "%s: EDMA regs (port %i):\n", __func__, p);
   1315		mv_dump_mem(&pdev->dev, port_base, 0x54);
   1316		dev_dbg(&pdev->dev, "%s: SATA regs (port %i):\n", __func__, p);
   1317		mv_dump_mem(&pdev->dev, port_base+0x300, 0x60);
   1318	}
   1319}
   1320
   1321static unsigned int mv_scr_offset(unsigned int sc_reg_in)
   1322{
   1323	unsigned int ofs;
   1324
   1325	switch (sc_reg_in) {
   1326	case SCR_STATUS:
   1327	case SCR_CONTROL:
   1328	case SCR_ERROR:
   1329		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
   1330		break;
   1331	case SCR_ACTIVE:
   1332		ofs = SATA_ACTIVE;   /* active is not with the others */
   1333		break;
   1334	default:
   1335		ofs = 0xffffffffU;
   1336		break;
   1337	}
   1338	return ofs;
   1339}
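
/*
 * Illustrative mapping (assuming libata's standard SCR_* numbering of
 * 0/1/2/3): SCR_STATUS -> 0x300, SCR_ERROR -> 0x304, SCR_CONTROL -> 0x308,
 * while SCR_ACTIVE is special-cased to SATA_ACTIVE (0x350).  Anything else
 * returns the 0xffffffffU sentinel, which mv_scr_read() and mv_scr_write()
 * below translate into -EINVAL.
 */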
   1340
   1341static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
   1342{
   1343	unsigned int ofs = mv_scr_offset(sc_reg_in);
   1344
   1345	if (ofs != 0xffffffffU) {
   1346		*val = readl(mv_ap_base(link->ap) + ofs);
   1347		return 0;
   1348	} else
   1349		return -EINVAL;
   1350}
   1351
   1352static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
   1353{
   1354	unsigned int ofs = mv_scr_offset(sc_reg_in);
   1355
   1356	if (ofs != 0xffffffffU) {
   1357		void __iomem *addr = mv_ap_base(link->ap) + ofs;
   1358		struct mv_host_priv *hpriv = link->ap->host->private_data;
   1359		if (sc_reg_in == SCR_CONTROL) {
   1360			/*
   1361			 * Workaround for 88SX60x1 FEr SATA#26:
   1362			 *
   1363			 * COMRESETs have to take care not to accidentally
   1364			 * put the drive to sleep when writing SCR_CONTROL.
   1365			 * Setting bits 12..15 prevents this problem.
   1366			 *
    1367			 * So if we see an outbound COMRESET, set those bits.
   1368			 * Ditto for the followup write that clears the reset.
   1369			 *
   1370			 * The proprietary driver does this for
   1371			 * all chip versions, and so do we.
   1372			 */
   1373			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
   1374				val |= 0xf000;
   1375
   1376			if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
   1377				void __iomem *lp_phy_addr =
   1378					mv_ap_base(link->ap) + LP_PHY_CTL;
   1379				/*
   1380				 * Set PHY speed according to SControl speed.
   1381				 */
   1382				u32 lp_phy_val =
   1383					LP_PHY_CTL_PIN_PU_PLL |
   1384					LP_PHY_CTL_PIN_PU_RX  |
   1385					LP_PHY_CTL_PIN_PU_TX;
   1386
   1387				if ((val & 0xf0) != 0x10)
   1388					lp_phy_val |=
   1389						LP_PHY_CTL_GEN_TX_3G |
   1390						LP_PHY_CTL_GEN_RX_3G;
   1391
   1392				writelfl(lp_phy_val, lp_phy_addr);
   1393			}
   1394		}
   1395		writelfl(val, addr);
   1396		return 0;
   1397	} else
   1398		return -EINVAL;
   1399}
   1400
   1401static void mv6_dev_config(struct ata_device *adev)
   1402{
   1403	/*
   1404	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
   1405	 *
   1406	 * Gen-II does not support NCQ over a port multiplier
   1407	 *  (no FIS-based switching).
   1408	 */
   1409	if (adev->flags & ATA_DFLAG_NCQ) {
   1410		if (sata_pmp_attached(adev->link->ap)) {
   1411			adev->flags &= ~ATA_DFLAG_NCQ;
   1412			ata_dev_info(adev,
   1413				"NCQ disabled for command-based switching\n");
   1414		}
   1415	}
   1416}
   1417
   1418static int mv_qc_defer(struct ata_queued_cmd *qc)
   1419{
   1420	struct ata_link *link = qc->dev->link;
   1421	struct ata_port *ap = link->ap;
   1422	struct mv_port_priv *pp = ap->private_data;
   1423
   1424	/*
   1425	 * Don't allow new commands if we're in a delayed EH state
   1426	 * for NCQ and/or FIS-based switching.
   1427	 */
   1428	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
   1429		return ATA_DEFER_PORT;
   1430
   1431	/* PIO commands need exclusive link: no other commands [DMA or PIO]
   1432	 * can run concurrently.
    1433	 * Set excl_link when we want to send a PIO command in DMA mode
   1434	 * or a non-NCQ command in NCQ mode.
   1435	 * When we receive a command from that link, and there are no
   1436	 * outstanding commands, mark a flag to clear excl_link and let
   1437	 * the command go through.
   1438	 */
   1439	if (unlikely(ap->excl_link)) {
   1440		if (link == ap->excl_link) {
   1441			if (ap->nr_active_links)
   1442				return ATA_DEFER_PORT;
   1443			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
   1444			return 0;
   1445		} else
   1446			return ATA_DEFER_PORT;
   1447	}
   1448
   1449	/*
   1450	 * If the port is completely idle, then allow the new qc.
   1451	 */
   1452	if (ap->nr_active_links == 0)
   1453		return 0;
   1454
   1455	/*
   1456	 * The port is operating in host queuing mode (EDMA) with NCQ
   1457	 * enabled, allow multiple NCQ commands.  EDMA also allows
   1458	 * queueing multiple DMA commands but libata core currently
   1459	 * doesn't allow it.
   1460	 */
   1461	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
   1462	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
   1463		if (ata_is_ncq(qc->tf.protocol))
   1464			return 0;
   1465		else {
   1466			ap->excl_link = link;
   1467			return ATA_DEFER_PORT;
   1468		}
   1469	}
   1470
   1471	return ATA_DEFER_PORT;
   1472}
   1473
   1474static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
   1475{
   1476	struct mv_port_priv *pp = ap->private_data;
   1477	void __iomem *port_mmio;
   1478
   1479	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
   1480	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
   1481	u32 haltcond, *old_haltcond = &pp->cached.haltcond;
   1482
   1483	ltmode   = *old_ltmode & ~LTMODE_BIT8;
   1484	haltcond = *old_haltcond | EDMA_ERR_DEV;
   1485
   1486	if (want_fbs) {
   1487		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
   1488		ltmode = *old_ltmode | LTMODE_BIT8;
   1489		if (want_ncq)
   1490			haltcond &= ~EDMA_ERR_DEV;
   1491		else
   1492			fiscfg |=  FISCFG_WAIT_DEV_ERR;
   1493	} else {
   1494		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
   1495	}
   1496
   1497	port_mmio = mv_ap_base(ap);
   1498	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
   1499	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
   1500	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
   1501}
   1502
   1503static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
   1504{
   1505	struct mv_host_priv *hpriv = ap->host->private_data;
   1506	u32 old, new;
   1507
   1508	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
   1509	old = readl(hpriv->base + GPIO_PORT_CTL);
   1510	if (want_ncq)
   1511		new = old | (1 << 22);
   1512	else
   1513		new = old & ~(1 << 22);
   1514	if (new != old)
   1515		writel(new, hpriv->base + GPIO_PORT_CTL);
   1516}
   1517
   1518/*
   1519 *	mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
   1520 *	@ap: Port being initialized
   1521 *
   1522 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
   1523 *
   1524 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
   1525 *	of basic DMA on the GEN_IIE versions of the chips.
   1526 *
   1527 *	This bit survives EDMA resets; it must be set for basic DMA
   1528 *	to function, and should be cleared while EDMA is active.
   1529 */
   1530static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
   1531{
   1532	struct mv_port_priv *pp = ap->private_data;
   1533	u32 new, *old = &pp->cached.unknown_rsvd;
   1534
   1535	if (enable_bmdma)
   1536		new = *old | 1;
   1537	else
   1538		new = *old & ~1;
   1539	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
   1540}
   1541
   1542/*
   1543 * SOC chips have an issue whereby the HDD LEDs don't always blink
   1544 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
   1545 * of the SOC takes care of it, generating a steady blink rate when
   1546 * any drive on the chip is active.
   1547 *
   1548 * Unfortunately, the blink mode is a global hardware setting for the SOC,
   1549 * so we must use it whenever at least one port on the SOC has NCQ enabled.
   1550 *
   1551 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
   1552 * LED operation works then, and provides better (more accurate) feedback.
   1553 *
   1554 * Note that this code assumes that an SOC never has more than one HC onboard.
   1555 */
   1556static void mv_soc_led_blink_enable(struct ata_port *ap)
   1557{
   1558	struct ata_host *host = ap->host;
   1559	struct mv_host_priv *hpriv = host->private_data;
   1560	void __iomem *hc_mmio;
   1561	u32 led_ctrl;
   1562
   1563	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
   1564		return;
   1565	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
   1566	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
   1567	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
   1568	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
   1569}
   1570
   1571static void mv_soc_led_blink_disable(struct ata_port *ap)
   1572{
   1573	struct ata_host *host = ap->host;
   1574	struct mv_host_priv *hpriv = host->private_data;
   1575	void __iomem *hc_mmio;
   1576	u32 led_ctrl;
   1577	unsigned int port;
   1578
   1579	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
   1580		return;
   1581
   1582	/* disable led-blink only if no ports are using NCQ */
   1583	for (port = 0; port < hpriv->n_ports; port++) {
   1584		struct ata_port *this_ap = host->ports[port];
   1585		struct mv_port_priv *pp = this_ap->private_data;
   1586
   1587		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
   1588			return;
   1589	}
   1590
   1591	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
   1592	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
   1593	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
   1594	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
   1595}
   1596
   1597static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
   1598{
   1599	u32 cfg;
   1600	struct mv_port_priv *pp    = ap->private_data;
   1601	struct mv_host_priv *hpriv = ap->host->private_data;
   1602	void __iomem *port_mmio    = mv_ap_base(ap);
   1603
   1604	/* set up non-NCQ EDMA configuration */
   1605	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
   1606	pp->pp_flags &=
   1607	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
   1608
   1609	if (IS_GEN_I(hpriv))
   1610		cfg |= (1 << 8);	/* enab config burst size mask */
   1611
   1612	else if (IS_GEN_II(hpriv)) {
   1613		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
   1614		mv_60x1_errata_sata25(ap, want_ncq);
   1615
   1616	} else if (IS_GEN_IIE(hpriv)) {
   1617		int want_fbs = sata_pmp_attached(ap);
   1618		/*
   1619		 * Possible future enhancement:
   1620		 *
   1621		 * The chip can use FBS with non-NCQ, if we allow it.
   1622		 * But first we need to have the error handling in place
   1623		 * for this mode (datasheet section 7.3.15.4.2.3).
   1624		 * So disallow non-NCQ FBS for now.
   1625		 */
   1626		want_fbs &= want_ncq;
   1627
   1628		mv_config_fbs(ap, want_ncq, want_fbs);
   1629
   1630		if (want_fbs) {
   1631			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
   1632			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
   1633		}
   1634
   1635		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
   1636		if (want_edma) {
   1637			cfg |= (1 << 22); /* enab 4-entry host queue cache */
   1638			if (!IS_SOC(hpriv))
   1639				cfg |= (1 << 18); /* enab early completion */
   1640		}
   1641		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
   1642			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
   1643		mv_bmdma_enable_iie(ap, !want_edma);
   1644
   1645		if (IS_SOC(hpriv)) {
   1646			if (want_ncq)
   1647				mv_soc_led_blink_enable(ap);
   1648			else
   1649				mv_soc_led_blink_disable(ap);
   1650		}
   1651	}
   1652
   1653	if (want_ncq) {
   1654		cfg |= EDMA_CFG_NCQ;
   1655		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
   1656	}
   1657
   1658	writelfl(cfg, port_mmio + EDMA_CFG);
   1659}
   1660
   1661static void mv_port_free_dma_mem(struct ata_port *ap)
   1662{
   1663	struct mv_host_priv *hpriv = ap->host->private_data;
   1664	struct mv_port_priv *pp = ap->private_data;
   1665	int tag;
   1666
   1667	if (pp->crqb) {
   1668		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
   1669		pp->crqb = NULL;
   1670	}
   1671	if (pp->crpb) {
   1672		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
   1673		pp->crpb = NULL;
   1674	}
   1675	/*
   1676	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
   1677	 * For later hardware, we have one unique sg_tbl per NCQ tag.
   1678	 */
   1679	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
   1680		if (pp->sg_tbl[tag]) {
   1681			if (tag == 0 || !IS_GEN_I(hpriv))
   1682				dma_pool_free(hpriv->sg_tbl_pool,
   1683					      pp->sg_tbl[tag],
   1684					      pp->sg_tbl_dma[tag]);
   1685			pp->sg_tbl[tag] = NULL;
   1686		}
   1687	}
   1688}
   1689
   1690/**
   1691 *      mv_port_start - Port specific init/start routine.
   1692 *      @ap: ATA channel to manipulate
   1693 *
   1694 *      Allocate and point to DMA memory, init port private memory,
   1695 *      zero indices.
   1696 *
   1697 *      LOCKING:
   1698 *      Inherited from caller.
   1699 */
   1700static int mv_port_start(struct ata_port *ap)
   1701{
   1702	struct device *dev = ap->host->dev;
   1703	struct mv_host_priv *hpriv = ap->host->private_data;
   1704	struct mv_port_priv *pp;
   1705	unsigned long flags;
   1706	int tag;
   1707
   1708	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
   1709	if (!pp)
   1710		return -ENOMEM;
   1711	ap->private_data = pp;
   1712
   1713	pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
   1714	if (!pp->crqb)
   1715		return -ENOMEM;
   1716
   1717	pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
   1718	if (!pp->crpb)
   1719		goto out_port_free_dma_mem;
   1720
   1721	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
   1722	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
   1723		ap->flags |= ATA_FLAG_AN;
   1724	/*
   1725	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
   1726	 * For later hardware, we need one unique sg_tbl per NCQ tag.
   1727	 */
   1728	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
   1729		if (tag == 0 || !IS_GEN_I(hpriv)) {
   1730			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
   1731					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
   1732			if (!pp->sg_tbl[tag])
   1733				goto out_port_free_dma_mem;
   1734		} else {
   1735			pp->sg_tbl[tag]     = pp->sg_tbl[0];
   1736			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
   1737		}
   1738	}
   1739
   1740	spin_lock_irqsave(ap->lock, flags);
   1741	mv_save_cached_regs(ap);
   1742	mv_edma_cfg(ap, 0, 0);
   1743	spin_unlock_irqrestore(ap->lock, flags);
   1744
   1745	return 0;
   1746
   1747out_port_free_dma_mem:
   1748	mv_port_free_dma_mem(ap);
   1749	return -ENOMEM;
   1750}
   1751
   1752/**
   1753 *      mv_port_stop - Port specific cleanup/stop routine.
   1754 *      @ap: ATA channel to manipulate
   1755 *
   1756 *      Stop DMA, cleanup port memory.
   1757 *
   1758 *      LOCKING:
   1759 *      This routine uses the host lock to protect the DMA stop.
   1760 */
   1761static void mv_port_stop(struct ata_port *ap)
   1762{
   1763	unsigned long flags;
   1764
   1765	spin_lock_irqsave(ap->lock, flags);
   1766	mv_stop_edma(ap);
   1767	mv_enable_port_irqs(ap, 0);
   1768	spin_unlock_irqrestore(ap->lock, flags);
   1769	mv_port_free_dma_mem(ap);
   1770}
   1771
   1772/**
   1773 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
   1774 *      @qc: queued command whose SG list to source from
   1775 *
   1776 *      Populate the SG list and mark the last entry.
   1777 *
   1778 *      LOCKING:
   1779 *      Inherited from caller.
   1780 */
   1781static void mv_fill_sg(struct ata_queued_cmd *qc)
   1782{
   1783	struct mv_port_priv *pp = qc->ap->private_data;
   1784	struct scatterlist *sg;
   1785	struct mv_sg *mv_sg, *last_sg = NULL;
   1786	unsigned int si;
   1787
   1788	mv_sg = pp->sg_tbl[qc->hw_tag];
   1789	for_each_sg(qc->sg, sg, qc->n_elem, si) {
   1790		dma_addr_t addr = sg_dma_address(sg);
   1791		u32 sg_len = sg_dma_len(sg);
   1792
   1793		while (sg_len) {
   1794			u32 offset = addr & 0xffff;
   1795			u32 len = sg_len;
   1796
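       			/* Clip the entry so it does not cross a 64KB boundary:
       			 * the ePRD byte-count field is only 16 bits wide.
       			 */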
   1797			if (offset + len > 0x10000)
   1798				len = 0x10000 - offset;
   1799
   1800			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
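       			/* (addr >> 16) >> 16 yields the upper 32 bits while
       			 * avoiding an out-of-range shift when dma_addr_t is
       			 * only 32 bits wide.
       			 */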
   1801			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
   1802			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
   1803			mv_sg->reserved = 0;
   1804
   1805			sg_len -= len;
   1806			addr += len;
   1807
   1808			last_sg = mv_sg;
   1809			mv_sg++;
   1810		}
   1811	}
   1812
   1813	if (likely(last_sg))
   1814		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
   1815	mb(); /* ensure data structure is visible to the chipset */
   1816}
   1817
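       /*
        *	mv_crqb_pack_cmd - pack one shadow register write into a CRQB word
        *	@cmdw: CRQB command word to fill in
        *	@data: value destined for the register
        *	@addr: ATA shadow register address
        *	@last: non-zero to mark this as the final word of the request
        */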
   1818static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
   1819{
   1820	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
   1821		(last ? CRQB_CMD_LAST : 0);
   1822	*cmdw = cpu_to_le16(tmp);
   1823}
   1824
   1825/**
   1826 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
   1827 *	@ap: Port associated with this ATA transaction.
   1828 *
   1829 *	We need this only for ATAPI bmdma transactions,
   1830 *	as otherwise we experience spurious interrupts
   1831 *	after libata-sff handles the bmdma interrupts.
   1832 */
   1833static void mv_sff_irq_clear(struct ata_port *ap)
   1834{
   1835	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
   1836}
   1837
   1838/**
   1839 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
   1840 *	@qc: queued command to check for chipset/DMA compatibility.
   1841 *
   1842 *	The bmdma engines cannot handle speculative data sizes
   1843 *	(bytecount under/over flow).  So only allow DMA for
   1844 *	data transfer commands with known data sizes.
   1845 *
   1846 *	LOCKING:
   1847 *	Inherited from caller.
   1848 */
   1849static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
   1850{
   1851	struct scsi_cmnd *scmd = qc->scsicmd;
   1852
   1853	if (scmd) {
   1854		switch (scmd->cmnd[0]) {
   1855		case READ_6:
   1856		case READ_10:
   1857		case READ_12:
   1858		case WRITE_6:
   1859		case WRITE_10:
   1860		case WRITE_12:
   1861		case GPCMD_READ_CD:
   1862		case GPCMD_SEND_DVD_STRUCTURE:
   1863		case GPCMD_SEND_CUE_SHEET:
   1864			return 0; /* DMA is safe */
   1865		}
   1866	}
   1867	return -EOPNOTSUPP; /* use PIO instead */
   1868}
   1869
   1870/**
   1871 *	mv_bmdma_setup - Set up BMDMA transaction
   1872 *	@qc: queued command to prepare DMA for.
   1873 *
   1874 *	LOCKING:
   1875 *	Inherited from caller.
   1876 */
   1877static void mv_bmdma_setup(struct ata_queued_cmd *qc)
   1878{
   1879	struct ata_port *ap = qc->ap;
   1880	void __iomem *port_mmio = mv_ap_base(ap);
   1881	struct mv_port_priv *pp = ap->private_data;
   1882
   1883	mv_fill_sg(qc);
   1884
   1885	/* clear all DMA cmd bits */
   1886	writel(0, port_mmio + BMDMA_CMD);
   1887
   1888	/* load PRD table addr. */
   1889	writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
   1890		port_mmio + BMDMA_PRD_HIGH);
   1891	writelfl(pp->sg_tbl_dma[qc->hw_tag],
   1892		port_mmio + BMDMA_PRD_LOW);
   1893
   1894	/* issue r/w command */
   1895	ap->ops->sff_exec_command(ap, &qc->tf);
   1896}
   1897
   1898/**
   1899 *	mv_bmdma_start - Start a BMDMA transaction
   1900 *	@qc: queued command to start DMA on.
   1901 *
   1902 *	LOCKING:
   1903 *	Inherited from caller.
   1904 */
   1905static void mv_bmdma_start(struct ata_queued_cmd *qc)
   1906{
   1907	struct ata_port *ap = qc->ap;
   1908	void __iomem *port_mmio = mv_ap_base(ap);
   1909	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
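       	/* For BMDMA, ATA_DMA_WR means "write to memory", i.e. a device read */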
   1910	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
   1911
   1912	/* start host DMA transaction */
   1913	writelfl(cmd, port_mmio + BMDMA_CMD);
   1914}
   1915
   1916/**
   1917 *	mv_bmdma_stop_ap - Stop BMDMA transfer
   1918 *	@ap: port to stop
   1919 *
   1920 *	Clears the ATA_DMA_START flag in the bmdma control register
   1921 *
   1922 *	LOCKING:
   1923 *	Inherited from caller.
   1924 */
   1925static void mv_bmdma_stop_ap(struct ata_port *ap)
   1926{
   1927	void __iomem *port_mmio = mv_ap_base(ap);
   1928	u32 cmd;
   1929
   1930	/* clear start/stop bit */
   1931	cmd = readl(port_mmio + BMDMA_CMD);
   1932	if (cmd & ATA_DMA_START) {
   1933		cmd &= ~ATA_DMA_START;
   1934		writelfl(cmd, port_mmio + BMDMA_CMD);
   1935
   1936		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
   1937		ata_sff_dma_pause(ap);
   1938	}
   1939}
   1940
   1941static void mv_bmdma_stop(struct ata_queued_cmd *qc)
   1942{
   1943	mv_bmdma_stop_ap(qc->ap);
   1944}
   1945
   1946/**
   1947 *	mv_bmdma_status - Read BMDMA status
   1948 *	@ap: port for which to retrieve DMA status.
   1949 *
   1950 *	Read and return equivalent of the sff BMDMA status register.
   1951 *
   1952 *	LOCKING:
   1953 *	Inherited from caller.
   1954 */
   1955static u8 mv_bmdma_status(struct ata_port *ap)
   1956{
   1957	void __iomem *port_mmio = mv_ap_base(ap);
   1958	u32 reg, status;
   1959
   1960	/*
   1961	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
   1962	 * and the ATA_DMA_INTR bit doesn't exist.
   1963	 */
   1964	reg = readl(port_mmio + BMDMA_STATUS);
   1965	if (reg & ATA_DMA_ACTIVE)
   1966		status = ATA_DMA_ACTIVE;
   1967	else if (reg & ATA_DMA_ERR)
   1968		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
   1969	else {
   1970		/*
   1971		 * DMA_ACTIVE being 0 (DMA completed) does _not_
   1972		 * mean the device is "done".
   1973		 * So we should not yet signal ATA_DMA_INTR in some
   1974		 * cases, e.g. DSM/TRIM, and perhaps others.
   1975		 */
   1976		mv_bmdma_stop_ap(ap);
   1977		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
   1978			status = 0;
   1979		else
   1980			status = ATA_DMA_INTR;
   1981	}
   1982	return status;
   1983}
   1984
   1985static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
   1986{
   1987	struct ata_taskfile *tf = &qc->tf;
   1988	/*
   1989	 * Workaround for 88SX60x1 FEr SATA#24.
   1990	 *
   1991	 * Chip may corrupt WRITEs if multi_count >= 4kB.
   1992	 * Note that READs are unaffected.
   1993	 *
   1994	 * It's not clear if this errata really means "4K bytes",
   1995	 * or if it always happens for multi_count > 7
   1996	 * regardless of device sector_size.
   1997	 *
   1998	 * So, for safety, any write with multi_count > 7
   1999	 * gets converted here into a regular PIO write instead:
   2000	 */
   2001	if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
   2002		if (qc->dev->multi_count > 7) {
   2003			switch (tf->command) {
   2004			case ATA_CMD_WRITE_MULTI:
   2005				tf->command = ATA_CMD_PIO_WRITE;
   2006				break;
   2007			case ATA_CMD_WRITE_MULTI_FUA_EXT:
   2008				tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
   2009				fallthrough;
   2010			case ATA_CMD_WRITE_MULTI_EXT:
   2011				tf->command = ATA_CMD_PIO_WRITE_EXT;
   2012				break;
   2013			}
   2014		}
   2015	}
   2016}
   2017
   2018/**
   2019 *      mv_qc_prep - Host specific command preparation.
   2020 *      @qc: queued command to prepare
   2021 *
   2022 *      This routine simply redirects to the general purpose routine
   2023 *      if command is not DMA.  Else, it handles prep of the CRQB
   2024 *      (command request block), does some sanity checking, and calls
   2025 *      the SG load routine.
   2026 *
   2027 *      LOCKING:
   2028 *      Inherited from caller.
   2029 */
   2030static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
   2031{
   2032	struct ata_port *ap = qc->ap;
   2033	struct mv_port_priv *pp = ap->private_data;
   2034	__le16 *cw;
   2035	struct ata_taskfile *tf = &qc->tf;
   2036	u16 flags = 0;
   2037	unsigned in_index;
   2038
   2039	switch (tf->protocol) {
   2040	case ATA_PROT_DMA:
   2041		if (tf->command == ATA_CMD_DSM)
   2042			return AC_ERR_OK;
   2043		fallthrough;
   2044	case ATA_PROT_NCQ:
   2045		break;	/* continue below */
   2046	case ATA_PROT_PIO:
   2047		mv_rw_multi_errata_sata24(qc);
   2048		return AC_ERR_OK;
   2049	default:
   2050		return AC_ERR_OK;
   2051	}
   2052
   2053	/* Fill in command request block
   2054	 */
   2055	if (!(tf->flags & ATA_TFLAG_WRITE))
   2056		flags |= CRQB_FLAG_READ;
   2057	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
   2058	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
   2059	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
   2060
   2061	/* get current queue index from software */
   2062	in_index = pp->req_idx;
   2063
   2064	pp->crqb[in_index].sg_addr =
   2065		cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
   2066	pp->crqb[in_index].sg_addr_hi =
   2067		cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
   2068	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
   2069
   2070	cw = &pp->crqb[in_index].ata_cmd[0];
   2071
   2072	/* Sadly, the CRQB cannot accommodate all registers--there are
   2073	 * only 11 bytes...so we must pick and choose required
   2074	 * registers based on the command.  So, we drop feature and
   2075	 * hob_feature for [RW] DMA commands, but they are needed for
   2076	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
   2077	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
   2078	 */
   2079	switch (tf->command) {
   2080	case ATA_CMD_READ:
   2081	case ATA_CMD_READ_EXT:
   2082	case ATA_CMD_WRITE:
   2083	case ATA_CMD_WRITE_EXT:
   2084	case ATA_CMD_WRITE_FUA_EXT:
   2085		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
   2086		break;
   2087	case ATA_CMD_FPDMA_READ:
   2088	case ATA_CMD_FPDMA_WRITE:
   2089		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
   2090		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
   2091		break;
   2092	default:
   2093		/* The only other commands EDMA supports in non-queued and
   2094		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
   2095		 * of which are defined/used by Linux.  If we get here, this
   2096		 * driver needs work.
   2097		 */
   2098		ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
   2099				tf->command);
   2100		return AC_ERR_INVALID;
   2101	}
   2102	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
   2103	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
   2104	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
   2105	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
   2106	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
   2107	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
   2108	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
   2109	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
   2110	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
   2111
   2112	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
   2113		return AC_ERR_OK;
   2114	mv_fill_sg(qc);
   2115
   2116	return AC_ERR_OK;
   2117}
   2118
   2119/**
   2120 *      mv_qc_prep_iie - Host specific command preparation.
   2121 *      @qc: queued command to prepare
   2122 *
   2123 *      This routine simply redirects to the general purpose routine
   2124 *      if command is not DMA.  Else, it handles prep of the CRQB
   2125 *      (command request block), does some sanity checking, and calls
   2126 *      the SG load routine.
   2127 *
   2128 *      LOCKING:
   2129 *      Inherited from caller.
   2130 */
   2131static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
   2132{
   2133	struct ata_port *ap = qc->ap;
   2134	struct mv_port_priv *pp = ap->private_data;
   2135	struct mv_crqb_iie *crqb;
   2136	struct ata_taskfile *tf = &qc->tf;
   2137	unsigned in_index;
   2138	u32 flags = 0;
   2139
   2140	if ((tf->protocol != ATA_PROT_DMA) &&
   2141	    (tf->protocol != ATA_PROT_NCQ))
   2142		return AC_ERR_OK;
   2143	if (tf->command == ATA_CMD_DSM)
   2144		return AC_ERR_OK;  /* use bmdma for this */
   2145
   2146	/* Fill in Gen IIE command request block */
   2147	if (!(tf->flags & ATA_TFLAG_WRITE))
   2148		flags |= CRQB_FLAG_READ;
   2149
   2150	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
   2151	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
   2152	flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT;
   2153	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
   2154
   2155	/* get current queue index from software */
   2156	in_index = pp->req_idx;
   2157
   2158	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
   2159	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
   2160	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
   2161	crqb->flags = cpu_to_le32(flags);
   2162
   2163	crqb->ata_cmd[0] = cpu_to_le32(
   2164			(tf->command << 16) |
   2165			(tf->feature << 24)
   2166		);
   2167	crqb->ata_cmd[1] = cpu_to_le32(
   2168			(tf->lbal << 0) |
   2169			(tf->lbam << 8) |
   2170			(tf->lbah << 16) |
   2171			(tf->device << 24)
   2172		);
   2173	crqb->ata_cmd[2] = cpu_to_le32(
   2174			(tf->hob_lbal << 0) |
   2175			(tf->hob_lbam << 8) |
   2176			(tf->hob_lbah << 16) |
   2177			(tf->hob_feature << 24)
   2178		);
   2179	crqb->ata_cmd[3] = cpu_to_le32(
   2180			(tf->nsect << 0) |
   2181			(tf->hob_nsect << 8)
   2182		);
   2183
   2184	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
   2185		return AC_ERR_OK;
   2186	mv_fill_sg(qc);
   2187
   2188	return AC_ERR_OK;
   2189}
   2190
   2191/**
   2192 *	mv_sff_check_status - fetch device status, if valid
   2193 *	@ap: ATA port to fetch status from
   2194 *
   2195 *	When using command issue via mv_qc_issue_fis(),
   2196 *	the initial ATA_BUSY state does not show up in the
   2197 *	ATA status (shadow) register.  This can confuse libata!
   2198 *
   2199 *	So we have a hook here to fake ATA_BUSY for that situation,
   2200 *	until the first time a BUSY, DRQ, or ERR bit is seen.
   2201 *
   2202 *	The rest of the time, it simply returns the ATA status register.
   2203 */
   2204static u8 mv_sff_check_status(struct ata_port *ap)
   2205{
   2206	u8 stat = ioread8(ap->ioaddr.status_addr);
   2207	struct mv_port_priv *pp = ap->private_data;
   2208
   2209	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
   2210		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
   2211			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
   2212		else
   2213			stat = ATA_BUSY;
   2214	}
   2215	return stat;
   2216}
   2217
   2218/**
   2219 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
   2220 *	@ap: ATA port to send a FIS
   2221 *	@fis: fis to be sent
   2222 *	@nwords: number of 32-bit words in the fis
   2223 */
   2224static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
   2225{
   2226	void __iomem *port_mmio = mv_ap_base(ap);
   2227	u32 ifctl, old_ifctl, ifstat;
   2228	int i, timeout = 200, final_word = nwords - 1;
   2229
   2230	/* Initiate FIS transmission mode */
   2231	old_ifctl = readl(port_mmio + SATA_IFCTL);
   2232	ifctl = 0x100 | (old_ifctl & 0xf);
   2233	writelfl(ifctl, port_mmio + SATA_IFCTL);
   2234
   2235	/* Send all words of the FIS except for the final word */
   2236	for (i = 0; i < final_word; ++i)
   2237		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
   2238
   2239	/* Flag end-of-transmission, and then send the final word */
   2240	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
   2241	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
   2242
   2243	/*
   2244	 * Wait for FIS transmission to complete.
   2245	 * This typically takes just a single iteration.
   2246	 */
   2247	do {
   2248		ifstat = readl(port_mmio + SATA_IFSTAT);
   2249	} while (!(ifstat & 0x1000) && --timeout);
   2250
   2251	/* Restore original port configuration */
   2252	writelfl(old_ifctl, port_mmio + SATA_IFCTL);
   2253
   2254	/* See if it worked */
   2255	if ((ifstat & 0x3000) != 0x1000) {
   2256		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
   2257			      __func__, ifstat);
   2258		return AC_ERR_OTHER;
   2259	}
   2260	return 0;
   2261}
   2262
   2263/**
   2264 *	mv_qc_issue_fis - Issue a command directly as a FIS
   2265 *	@qc: queued command to start
   2266 *
   2267 *	Note that the ATA shadow registers are not updated
   2268 *	after command issue, so the device will appear "READY"
   2269 *	if polled, even while it is BUSY processing the command.
   2270 *
   2271 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
   2272 *
   2273 *	Note: we don't get updated shadow regs on *completion*
   2274 *	of non-data commands. So avoid sending them via this function,
   2275 *	as they will appear to have completed immediately.
   2276 *
   2277 *	GEN_IIE has special registers that we could get the result tf from,
   2278 *	but earlier chipsets do not.  For now, we ignore those registers.
   2279 */
   2280static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
   2281{
   2282	struct ata_port *ap = qc->ap;
   2283	struct mv_port_priv *pp = ap->private_data;
   2284	struct ata_link *link = qc->dev->link;
   2285	u32 fis[5];
   2286	int err = 0;
   2287
   2288	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
   2289	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
   2290	if (err)
   2291		return err;
   2292
   2293	switch (qc->tf.protocol) {
   2294	case ATAPI_PROT_PIO:
   2295		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
   2296		fallthrough;
   2297	case ATAPI_PROT_NODATA:
   2298		ap->hsm_task_state = HSM_ST_FIRST;
   2299		break;
   2300	case ATA_PROT_PIO:
   2301		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
   2302		if (qc->tf.flags & ATA_TFLAG_WRITE)
   2303			ap->hsm_task_state = HSM_ST_FIRST;
   2304		else
   2305			ap->hsm_task_state = HSM_ST;
   2306		break;
   2307	default:
   2308		ap->hsm_task_state = HSM_ST_LAST;
   2309		break;
   2310	}
   2311
   2312	if (qc->tf.flags & ATA_TFLAG_POLLING)
   2313		ata_sff_queue_pio_task(link, 0);
   2314	return 0;
   2315}
   2316
   2317/**
   2318 *      mv_qc_issue - Initiate a command to the host
   2319 *      @qc: queued command to start
   2320 *
   2321 *      This routine simply redirects to the general purpose routine
   2322 *      if command is not DMA.  Else, it sanity checks our local
   2323 *      caches of the request producer/consumer indices then enables
   2324 *      DMA and bumps the request producer index.
   2325 *
   2326 *      LOCKING:
   2327 *      Inherited from caller.
   2328 */
   2329static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
   2330{
   2331	static int limit_warnings = 10;
   2332	struct ata_port *ap = qc->ap;
   2333	void __iomem *port_mmio = mv_ap_base(ap);
   2334	struct mv_port_priv *pp = ap->private_data;
   2335	u32 in_index;
   2336	unsigned int port_irqs;
   2337
   2338	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
   2339
   2340	switch (qc->tf.protocol) {
   2341	case ATA_PROT_DMA:
   2342		if (qc->tf.command == ATA_CMD_DSM) {
   2343			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
   2344				return AC_ERR_OTHER;
   2345			break;  /* use bmdma for this */
   2346		}
   2347		fallthrough;
   2348	case ATA_PROT_NCQ:
   2349		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
   2350		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
   2351		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
   2352
   2353		/* Write the request in pointer to kick the EDMA to life */
   2354		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
   2355					port_mmio + EDMA_REQ_Q_IN_PTR);
   2356		return 0;
   2357
   2358	case ATA_PROT_PIO:
   2359		/*
   2360		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
   2361		 *
   2362		 * Someday, we might implement special polling workarounds
   2363		 * for these, but it all seems rather unnecessary since we
   2364		 * normally use only DMA for commands which transfer more
   2365		 * than a single block of data.
   2366		 *
   2367		 * Much of the time, this could just work regardless.
   2368		 * So for now, just log the incident, and allow the attempt.
   2369		 */
   2370		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
   2371			--limit_warnings;
   2372			ata_link_warn(qc->dev->link, DRV_NAME
   2373				      ": attempting PIO w/multiple DRQ: "
   2374				      "this may fail due to h/w errata\n");
   2375		}
   2376		fallthrough;
   2377	case ATA_PROT_NODATA:
   2378	case ATAPI_PROT_PIO:
   2379	case ATAPI_PROT_NODATA:
   2380		if (ap->flags & ATA_FLAG_PIO_POLLING)
   2381			qc->tf.flags |= ATA_TFLAG_POLLING;
   2382		break;
   2383	}
   2384
   2385	if (qc->tf.flags & ATA_TFLAG_POLLING)
   2386		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
   2387	else
   2388		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */
   2389
   2390	/*
   2391	 * We're about to send a non-EDMA capable command to the
   2392	 * port.  Turn off EDMA so there won't be problems accessing
   2393	 * the shadow block and other registers.
   2394	 */
   2395	mv_stop_edma(ap);
   2396	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
   2397	mv_pmp_select(ap, qc->dev->link->pmp);
   2398
   2399	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
   2400		struct mv_host_priv *hpriv = ap->host->private_data;
   2401		/*
   2402		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
   2403		 *
   2404		 * After any NCQ error, the READ_LOG_EXT command
   2405		 * from libata-eh *must* use mv_qc_issue_fis().
   2406		 * Otherwise it might fail, due to chip errata.
   2407		 *
   2408		 * Rather than special-case it, we'll just *always*
   2409		 * use this method here for READ_LOG_EXT, making for
   2410		 * easier testing.
   2411		 */
   2412		if (IS_GEN_II(hpriv))
   2413			return mv_qc_issue_fis(qc);
   2414	}
   2415	return ata_bmdma_qc_issue(qc);
   2416}
   2417
   2418static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
   2419{
   2420	struct mv_port_priv *pp = ap->private_data;
   2421	struct ata_queued_cmd *qc;
   2422
   2423	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
   2424		return NULL;
   2425	qc = ata_qc_from_tag(ap, ap->link.active_tag);
   2426	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
   2427		return qc;
   2428	return NULL;
   2429}
   2430
   2431static void mv_pmp_error_handler(struct ata_port *ap)
   2432{
   2433	unsigned int pmp, pmp_map;
   2434	struct mv_port_priv *pp = ap->private_data;
   2435
   2436	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
   2437		/*
   2438		 * Perform NCQ error analysis on failed PMPs
   2439		 * before we freeze the port entirely.
   2440		 *
   2441		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
   2442		 */
   2443		pmp_map = pp->delayed_eh_pmp_map;
   2444		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
   2445		for (pmp = 0; pmp_map != 0; pmp++) {
   2446			unsigned int this_pmp = (1 << pmp);
   2447			if (pmp_map & this_pmp) {
   2448				struct ata_link *link = &ap->pmp_link[pmp];
   2449				pmp_map &= ~this_pmp;
   2450				ata_eh_analyze_ncq_error(link);
   2451			}
   2452		}
   2453		ata_port_freeze(ap);
   2454	}
   2455	sata_pmp_error_handler(ap);
   2456}
   2457
   2458static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
   2459{
   2460	void __iomem *port_mmio = mv_ap_base(ap);
   2461
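       	/* The driver treats bits 31:16 of SATA_TESTCTL as a bitmap of
       	 * PMP links that reported device errors.
       	 */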
   2462	return readl(port_mmio + SATA_TESTCTL) >> 16;
   2463}
   2464
   2465static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
   2466{
   2467	unsigned int pmp;
   2468
   2469	/*
   2470	 * Initialize EH info for PMPs which saw device errors
   2471	 */
   2472	for (pmp = 0; pmp_map != 0; pmp++) {
   2473		unsigned int this_pmp = (1 << pmp);
   2474		if (pmp_map & this_pmp) {
   2475			struct ata_link *link = &ap->pmp_link[pmp];
   2476			struct ata_eh_info *ehi = &link->eh_info;
   2477
   2478			pmp_map &= ~this_pmp;
   2479			ata_ehi_clear_desc(ehi);
   2480			ata_ehi_push_desc(ehi, "dev err");
   2481			ehi->err_mask |= AC_ERR_DEV;
   2482			ehi->action |= ATA_EH_RESET;
   2483			ata_link_abort(link);
   2484		}
   2485	}
   2486}
   2487
   2488static int mv_req_q_empty(struct ata_port *ap)
   2489{
   2490	void __iomem *port_mmio = mv_ap_base(ap);
   2491	u32 in_ptr, out_ptr;
   2492
   2493	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
   2494			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
   2495	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
   2496			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
   2497	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
   2498}
   2499
   2500static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
   2501{
   2502	struct mv_port_priv *pp = ap->private_data;
   2503	int failed_links;
   2504	unsigned int old_map, new_map;
   2505
   2506	/*
   2507	 * Device error during FBS+NCQ operation:
   2508	 *
   2509	 * Set a port flag to prevent further I/O being enqueued.
   2510	 * Leave the EDMA running to drain outstanding commands from this port.
   2511	 * Perform the post-mortem/EH only when all responses are complete.
   2512	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
   2513	 */
   2514	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
   2515		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
   2516		pp->delayed_eh_pmp_map = 0;
   2517	}
   2518	old_map = pp->delayed_eh_pmp_map;
   2519	new_map = old_map | mv_get_err_pmp_map(ap);
   2520
   2521	if (old_map != new_map) {
   2522		pp->delayed_eh_pmp_map = new_map;
   2523		mv_pmp_eh_prep(ap, new_map & ~old_map);
   2524	}
   2525	failed_links = hweight16(new_map);
   2526
   2527	ata_port_info(ap,
   2528		      "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
   2529		      __func__, pp->delayed_eh_pmp_map,
   2530		      ap->qc_active, failed_links,
   2531		      ap->nr_active_links);
   2532
   2533	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
   2534		mv_process_crpb_entries(ap, pp);
   2535		mv_stop_edma(ap);
   2536		mv_eh_freeze(ap);
   2537		ata_port_info(ap, "%s: done\n", __func__);
   2538		return 1;	/* handled */
   2539	}
   2540	ata_port_info(ap, "%s: waiting\n", __func__);
   2541	return 1;	/* handled */
   2542}
   2543
   2544static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
   2545{
   2546	/*
   2547	 * Possible future enhancement:
   2548	 *
   2549	 * FBS+non-NCQ operation is not yet implemented.
   2550	 * See related notes in mv_edma_cfg().
   2551	 *
   2552	 * Device error during FBS+non-NCQ operation:
   2553	 *
   2554	 * We need to snapshot the shadow registers for each failed command.
   2555	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
   2556	 */
   2557	return 0;	/* not handled */
   2558}
   2559
   2560static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
   2561{
   2562	struct mv_port_priv *pp = ap->private_data;
   2563
   2564	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
   2565		return 0;	/* EDMA was not active: not handled */
   2566	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
   2567		return 0;	/* FBS was not active: not handled */
   2568
   2569	if (!(edma_err_cause & EDMA_ERR_DEV))
   2570		return 0;	/* non DEV error: not handled */
   2571	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
   2572	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
   2573		return 0;	/* other problems: not handled */
   2574
   2575	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
   2576		/*
   2577		 * EDMA should NOT have self-disabled for this case.
   2578		 * If it did, then something is wrong elsewhere,
   2579		 * and we cannot handle it here.
   2580		 */
   2581		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
   2582			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
   2583				      __func__, edma_err_cause, pp->pp_flags);
   2584			return 0; /* not handled */
   2585		}
   2586		return mv_handle_fbs_ncq_dev_err(ap);
   2587	} else {
   2588		/*
   2589		 * EDMA should have self-disabled for this case.
   2590		 * If it did not, then something is wrong elsewhere,
   2591		 * and we cannot handle it here.
   2592		 */
   2593		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
   2594			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
   2595				      __func__, edma_err_cause, pp->pp_flags);
   2596			return 0; /* not handled */
   2597		}
   2598		return mv_handle_fbs_non_ncq_dev_err(ap);
   2599	}
   2600	return 0;	/* not handled */
   2601}
   2602
   2603static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
   2604{
   2605	struct ata_eh_info *ehi = &ap->link.eh_info;
   2606	char *when = "idle";
   2607
   2608	ata_ehi_clear_desc(ehi);
   2609	if (edma_was_enabled) {
   2610		when = "EDMA enabled";
   2611	} else {
   2612		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
   2613		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
   2614			when = "polling";
   2615	}
   2616	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
   2617	ehi->err_mask |= AC_ERR_OTHER;
   2618	ehi->action   |= ATA_EH_RESET;
   2619	ata_port_freeze(ap);
   2620}
   2621
   2622/**
   2623 *      mv_err_intr - Handle error interrupts on the port
   2624 *      @ap: ATA channel to manipulate
   2625 *
   2626 *      Most cases require a full reset of the chip's state machine,
   2627 *      which also performs a COMRESET.
   2628 *      Also, if the port disabled DMA, update our cached copy to match.
   2629 *
   2630 *      LOCKING:
   2631 *      Inherited from caller.
   2632 */
   2633static void mv_err_intr(struct ata_port *ap)
   2634{
   2635	void __iomem *port_mmio = mv_ap_base(ap);
   2636	u32 edma_err_cause, eh_freeze_mask, serr = 0;
   2637	u32 fis_cause = 0;
   2638	struct mv_port_priv *pp = ap->private_data;
   2639	struct mv_host_priv *hpriv = ap->host->private_data;
   2640	unsigned int action = 0, err_mask = 0;
   2641	struct ata_eh_info *ehi = &ap->link.eh_info;
   2642	struct ata_queued_cmd *qc;
   2643	int abort = 0;
   2644
   2645	/*
   2646	 * Read and clear the SError and err_cause bits.
   2647	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
   2648	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
   2649	 */
   2650	sata_scr_read(&ap->link, SCR_ERROR, &serr);
   2651	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
   2652
   2653	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
   2654	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
   2655		fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
   2656		writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
   2657	}
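       	/* These cause registers are cleared by writing 0: writing ~cause
       	 * acks exactly the bits observed above and leaves the rest alone.
       	 */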
   2658	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
   2659
   2660	if (edma_err_cause & EDMA_ERR_DEV) {
   2661		/*
   2662		 * Device errors during FIS-based switching operation
   2663		 * require special handling.
   2664		 */
   2665		if (mv_handle_dev_err(ap, edma_err_cause))
   2666			return;
   2667	}
   2668
   2669	qc = mv_get_active_qc(ap);
   2670	ata_ehi_clear_desc(ehi);
   2671	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
   2672			  edma_err_cause, pp->pp_flags);
   2673
   2674	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
   2675		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
   2676		if (fis_cause & FIS_IRQ_CAUSE_AN) {
   2677			u32 ec = edma_err_cause &
   2678			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
   2679			sata_async_notification(ap);
   2680			if (!ec)
   2681				return; /* Just an AN; no need for the nukes */
   2682			ata_ehi_push_desc(ehi, "SDB notify");
   2683		}
   2684	}
   2685	/*
   2686	 * All generations share these EDMA error cause bits:
   2687	 */
   2688	if (edma_err_cause & EDMA_ERR_DEV) {
   2689		err_mask |= AC_ERR_DEV;
   2690		action |= ATA_EH_RESET;
   2691		ata_ehi_push_desc(ehi, "dev error");
   2692	}
   2693	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
   2694			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
   2695			EDMA_ERR_INTRL_PAR)) {
   2696		err_mask |= AC_ERR_ATA_BUS;
   2697		action |= ATA_EH_RESET;
   2698		ata_ehi_push_desc(ehi, "parity error");
   2699	}
   2700	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
   2701		ata_ehi_hotplugged(ehi);
   2702		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
   2703			"dev disconnect" : "dev connect");
   2704		action |= ATA_EH_RESET;
   2705	}
   2706
   2707	/*
   2708	 * Gen-I has a different SELF_DIS bit,
   2709	 * different FREEZE bits, and no SERR bit:
   2710	 */
   2711	if (IS_GEN_I(hpriv)) {
   2712		eh_freeze_mask = EDMA_EH_FREEZE_5;
   2713		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
   2714			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
   2715			ata_ehi_push_desc(ehi, "EDMA self-disable");
   2716		}
   2717	} else {
   2718		eh_freeze_mask = EDMA_EH_FREEZE;
   2719		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
   2720			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
   2721			ata_ehi_push_desc(ehi, "EDMA self-disable");
   2722		}
   2723		if (edma_err_cause & EDMA_ERR_SERR) {
   2724			ata_ehi_push_desc(ehi, "SError=%08x", serr);
   2725			err_mask |= AC_ERR_ATA_BUS;
   2726			action |= ATA_EH_RESET;
   2727		}
   2728	}
   2729
   2730	if (!err_mask) {
   2731		err_mask = AC_ERR_OTHER;
   2732		action |= ATA_EH_RESET;
   2733	}
   2734
   2735	ehi->serror |= serr;
   2736	ehi->action |= action;
   2737
   2738	if (qc)
   2739		qc->err_mask |= err_mask;
   2740	else
   2741		ehi->err_mask |= err_mask;
   2742
   2743	if (err_mask == AC_ERR_DEV) {
   2744		/*
   2745		 * Cannot do ata_port_freeze() here,
   2746		 * because it would kill PIO access,
   2747		 * which is needed for further diagnosis.
   2748		 */
   2749		mv_eh_freeze(ap);
   2750		abort = 1;
   2751	} else if (edma_err_cause & eh_freeze_mask) {
   2752		/*
   2753		 * Note to self: ata_port_freeze() calls ata_port_abort()
   2754		 */
   2755		ata_port_freeze(ap);
   2756	} else {
   2757		abort = 1;
   2758	}
   2759
   2760	if (abort) {
   2761		if (qc)
   2762			ata_link_abort(qc->dev->link);
   2763		else
   2764			ata_port_abort(ap);
   2765	}
   2766}
   2767
   2768static bool mv_process_crpb_response(struct ata_port *ap,
   2769		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
   2770{
   2771	u8 ata_status;
   2772	u16 edma_status = le16_to_cpu(response->flags);
   2773
   2774	/*
   2775	 * edma_status from a response queue entry:
   2776	 *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
   2777	 *   MSB is saved ATA status from command completion.
   2778	 */
   2779	if (!ncq_enabled) {
   2780		u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
   2781		if (err_cause) {
   2782			/*
   2783			 * Error will be seen/handled by
   2784			 * mv_err_intr().  So do nothing at all here.
   2785			 */
   2786			return false;
   2787		}
   2788	}
   2789	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
   2790	if (!ac_err_mask(ata_status))
   2791		return true;
   2792	/* else: leave it for mv_err_intr() */
   2793	return false;
   2794}
   2795
   2796static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
   2797{
   2798	void __iomem *port_mmio = mv_ap_base(ap);
   2799	struct mv_host_priv *hpriv = ap->host->private_data;
   2800	u32 in_index;
   2801	bool work_done = false;
   2802	u32 done_mask = 0;
   2803	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
   2804
   2805	/* Get the hardware queue position index */
   2806	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
   2807			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
   2808
   2809	/* Process new responses since the last time we looked */
   2810	while (in_index != pp->resp_idx) {
   2811		unsigned int tag;
   2812		struct mv_crpb *response = &pp->crpb[pp->resp_idx];
   2813
   2814		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
   2815
   2816		if (IS_GEN_I(hpriv)) {
   2817			/* 50xx: no NCQ, only one command active at a time */
   2818			tag = ap->link.active_tag;
   2819		} else {
   2820			/* Gen II/IIE: get command tag from CRPB entry */
   2821			tag = le16_to_cpu(response->id) & 0x1f;
   2822		}
   2823		if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
   2824			done_mask |= 1 << tag;
   2825		work_done = true;
   2826	}
   2827
   2828	if (work_done) {
   2829		ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
   2830
   2831		/* Update the software queue position index in hardware */
   2832		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
   2833			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
   2834			 port_mmio + EDMA_RSP_Q_OUT_PTR);
   2835	}
   2836}
   2837
   2838static void mv_port_intr(struct ata_port *ap, u32 port_cause)
   2839{
   2840	struct mv_port_priv *pp;
   2841	int edma_was_enabled;
   2842
   2843	/*
   2844	 * Grab a snapshot of the EDMA_EN flag setting,
   2845	 * so that we have a consistent view for this port,
   2846	 * even if one of the routines we call changes it.
   2847	 */
   2848	pp = ap->private_data;
   2849	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
   2850	/*
   2851	 * Process completed CRPB response(s) before other events.
   2852	 */
   2853	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
   2854		mv_process_crpb_entries(ap, pp);
   2855		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
   2856			mv_handle_fbs_ncq_dev_err(ap);
   2857	}
   2858	/*
   2859	 * Handle chip-reported errors, or continue on to handle PIO.
   2860	 */
   2861	if (unlikely(port_cause & ERR_IRQ)) {
   2862		mv_err_intr(ap);
   2863	} else if (!edma_was_enabled) {
   2864		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
   2865		if (qc)
   2866			ata_bmdma_port_intr(ap, qc);
   2867		else
   2868			mv_unexpected_intr(ap, edma_was_enabled);
   2869	}
   2870}
   2871
   2872/**
   2873 *      mv_host_intr - Handle all interrupts on the given host controller
   2874 *      @host: host specific structure
   2875 *      @main_irq_cause: Main interrupt cause register for the chip.
   2876 *
   2877 *      LOCKING:
   2878 *      Inherited from caller.
   2879 */
   2880static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
   2881{
   2882	struct mv_host_priv *hpriv = host->private_data;
   2883	void __iomem *mmio = hpriv->base, *hc_mmio;
   2884	unsigned int handled = 0, port;
   2885
   2886	/* If asserted, clear the "all ports" IRQ coalescing bit */
   2887	if (main_irq_cause & ALL_PORTS_COAL_DONE)
   2888		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
   2889
   2890	for (port = 0; port < hpriv->n_ports; port++) {
   2891		struct ata_port *ap = host->ports[port];
   2892		unsigned int p, shift, hardport, port_cause;
   2893
   2894		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
   2895		/*
   2896		 * Each hc within the host has its own hc_irq_cause register,
   2897		 * where the interrupting ports' bits get ack'd.
   2898		 */
   2899		if (hardport == 0) {	/* first port on this hc ? */
   2900			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
   2901			u32 port_mask, ack_irqs;
   2902			/*
   2903			 * Skip this entire hc if nothing pending for any ports
   2904			 */
   2905			if (!hc_cause) {
   2906				port += MV_PORTS_PER_HC - 1;
   2907				continue;
   2908			}
   2909			/*
   2910			 * We don't need/want to read the hc_irq_cause register,
   2911			 * because doing so hurts performance, and
   2912			 * main_irq_cause already gives us everything we need.
   2913			 *
   2914			 * But we do have to *write* to the hc_irq_cause to ack
   2915			 * the ports that we are handling this time through.
   2916			 *
   2917			 * This requires that we create a bitmap for those
   2918			 * ports which interrupted us, and use that bitmap
   2919			 * to ack (only) those ports via hc_irq_cause.
   2920			 */
   2921			ack_irqs = 0;
   2922			if (hc_cause & PORTS_0_3_COAL_DONE)
   2923				ack_irqs = HC_COAL_IRQ;
   2924			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
   2925				if ((port + p) >= hpriv->n_ports)
   2926					break;
   2927				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
   2928				if (hc_cause & port_mask)
   2929					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
   2930			}
   2931			hc_mmio = mv_hc_base_from_port(mmio, port);
   2932			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
   2933			handled = 1;
   2934		}
   2935		/*
   2936		 * Handle interrupts signalled for this port:
   2937		 */
   2938		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
   2939		if (port_cause)
   2940			mv_port_intr(ap, port_cause);
   2941	}
   2942	return handled;
   2943}
   2944
   2945static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
   2946{
   2947	struct mv_host_priv *hpriv = host->private_data;
   2948	struct ata_port *ap;
   2949	struct ata_queued_cmd *qc;
   2950	struct ata_eh_info *ehi;
   2951	unsigned int i, err_mask, printed = 0;
   2952	u32 err_cause;
   2953
   2954	err_cause = readl(mmio + hpriv->irq_cause_offset);
   2955
   2956	dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
   2957
   2958	dev_dbg(host->dev, "%s: All regs @ PCI error\n", __func__);
   2959	mv_dump_all_regs(mmio, to_pci_dev(host->dev));
   2960
   2961	writelfl(0, mmio + hpriv->irq_cause_offset);
   2962
   2963	for (i = 0; i < host->n_ports; i++) {
   2964		ap = host->ports[i];
   2965		if (!ata_link_offline(&ap->link)) {
   2966			ehi = &ap->link.eh_info;
   2967			ata_ehi_clear_desc(ehi);
   2968			if (!printed++)
   2969				ata_ehi_push_desc(ehi,
   2970					"PCI err cause 0x%08x", err_cause);
   2971			err_mask = AC_ERR_HOST_BUS;
   2972			ehi->action = ATA_EH_RESET;
   2973			qc = ata_qc_from_tag(ap, ap->link.active_tag);
   2974			if (qc)
   2975				qc->err_mask |= err_mask;
   2976			else
   2977				ehi->err_mask |= err_mask;
   2978
   2979			ata_port_freeze(ap);
   2980		}
   2981	}
   2982	return 1;	/* handled */
   2983}
   2984
   2985/**
   2986 *      mv_interrupt - Main interrupt event handler
   2987 *      @irq: unused
   2988 *      @dev_instance: private data; in this case the host structure
   2989 *
   2990 *      Read the read-only register to determine if any host
   2991 *      controllers have pending interrupts.  If so, call lower level
   2992 *      routine to handle.  Also check for PCI errors which are only
   2993 *      reported here.
   2994 *
   2995 *      LOCKING:
   2996 *      This routine holds the host lock while processing pending
   2997 *      interrupts.
   2998 */
   2999static irqreturn_t mv_interrupt(int irq, void *dev_instance)
   3000{
   3001	struct ata_host *host = dev_instance;
   3002	struct mv_host_priv *hpriv = host->private_data;
   3003	unsigned int handled = 0;
   3004	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
   3005	u32 main_irq_cause, pending_irqs;
   3006
   3007	spin_lock(&host->lock);
   3008
   3009	/* for MSI:  block new interrupts while in here */
   3010	if (using_msi)
   3011		mv_write_main_irq_mask(0, hpriv);
   3012
   3013	main_irq_cause = readl(hpriv->main_irq_cause_addr);
   3014	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
   3015	/*
   3016	 * Deal with cases where we either have nothing pending, or have read
   3017	 * a bogus register value which can indicate HW removal or PCI fault.
   3018	 */
   3019	if (pending_irqs && main_irq_cause != 0xffffffffU) {
   3020		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
   3021			handled = mv_pci_error(host, hpriv->base);
   3022		else
   3023			handled = mv_host_intr(host, pending_irqs);
   3024	}
   3025
   3026	/* for MSI: unmask; interrupt cause bits will retrigger now */
   3027	if (using_msi)
   3028		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
   3029
   3030	spin_unlock(&host->lock);
   3031
   3032	return IRQ_RETVAL(handled);
   3033}
   3034
   3035static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
   3036{
   3037	unsigned int ofs;
   3038
   3039	switch (sc_reg_in) {
   3040	case SCR_STATUS:
   3041	case SCR_ERROR:
   3042	case SCR_CONTROL:
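       		/* These three SCRs live at consecutive 32-bit offsets */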
   3043		ofs = sc_reg_in * sizeof(u32);
   3044		break;
   3045	default:
   3046		ofs = 0xffffffffU;
   3047		break;
   3048	}
   3049	return ofs;
   3050}
   3051
   3052static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
   3053{
   3054	struct mv_host_priv *hpriv = link->ap->host->private_data;
   3055	void __iomem *mmio = hpriv->base;
   3056	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
   3057	unsigned int ofs = mv5_scr_offset(sc_reg_in);
   3058
   3059	if (ofs != 0xffffffffU) {
   3060		*val = readl(addr + ofs);
   3061		return 0;
   3062	} else
   3063		return -EINVAL;
   3064}
   3065
   3066static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
   3067{
   3068	struct mv_host_priv *hpriv = link->ap->host->private_data;
   3069	void __iomem *mmio = hpriv->base;
   3070	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
   3071	unsigned int ofs = mv5_scr_offset(sc_reg_in);
   3072
   3073	if (ofs != 0xffffffffU) {
   3074		writelfl(val, addr + ofs);
   3075		return 0;
   3076	} else
   3077		return -EINVAL;
   3078}
   3079
   3080static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
   3081{
   3082	struct pci_dev *pdev = to_pci_dev(host->dev);
   3083	int early_5080;
   3084
   3085	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
   3086
   3087	if (!early_5080) {
   3088		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
   3089		tmp |= (1 << 0);
   3090		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
   3091	}
   3092
   3093	mv_reset_pci_bus(host, mmio);
   3094}
   3095
   3096static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
   3097{
   3098	writel(0x0fcfffff, mmio + FLASH_CTL);
   3099}
   3100
   3101static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
   3102			   void __iomem *mmio)
   3103{
   3104	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
   3105	u32 tmp;
   3106
   3107	tmp = readl(phy_mmio + MV5_PHY_MODE);
   3108
   3109	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
   3110	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
   3111}
   3112
   3113static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
   3114{
   3115	u32 tmp;
   3116
   3117	writel(0, mmio + GPIO_PORT_CTL);
   3118
   3119	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
   3120
   3121	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
   3122	tmp |= ~(1 << 0);
   3123	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
   3124}
   3125
   3126static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
   3127			   unsigned int port)
   3128{
   3129	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
   3130	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
   3131	u32 tmp;
   3132	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
   3133
   3134	if (fix_apm_sq) {
   3135		tmp = readl(phy_mmio + MV5_LTMODE);
   3136		tmp |= (1 << 19);
   3137		writel(tmp, phy_mmio + MV5_LTMODE);
   3138
   3139		tmp = readl(phy_mmio + MV5_PHY_CTL);
   3140		tmp &= ~0x3;
   3141		tmp |= 0x1;
   3142		writel(tmp, phy_mmio + MV5_PHY_CTL);
   3143	}
   3144
   3145	tmp = readl(phy_mmio + MV5_PHY_MODE);
   3146	tmp &= ~mask;
   3147	tmp |= hpriv->signal[port].pre;
   3148	tmp |= hpriv->signal[port].amps;
   3149	writel(tmp, phy_mmio + MV5_PHY_MODE);
   3150}
   3151
   3152
   3153#undef ZERO
   3154#define ZERO(reg) writel(0, port_mmio + (reg))
   3155static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
   3156			     unsigned int port)
   3157{
   3158	void __iomem *port_mmio = mv_port_base(mmio, port);
   3159
   3160	mv_reset_channel(hpriv, mmio, port);
   3161
   3162	ZERO(0x028);	/* command */
   3163	writel(0x11f, port_mmio + EDMA_CFG);
   3164	ZERO(0x004);	/* timer */
   3165	ZERO(0x008);	/* irq err cause */
   3166	ZERO(0x00c);	/* irq err mask */
   3167	ZERO(0x010);	/* rq bah */
   3168	ZERO(0x014);	/* rq inp */
   3169	ZERO(0x018);	/* rq outp */
   3170	ZERO(0x01c);	/* respq bah */
   3171	ZERO(0x024);	/* respq outp */
   3172	ZERO(0x020);	/* respq inp */
   3173	ZERO(0x02c);	/* test control */
   3174	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
   3175}
   3176#undef ZERO
   3177
   3178#define ZERO(reg) writel(0, hc_mmio + (reg))
   3179static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
   3180			unsigned int hc)
   3181{
   3182	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
   3183	u32 tmp;
   3184
   3185	ZERO(0x00c);
   3186	ZERO(0x010);
   3187	ZERO(0x014);
   3188	ZERO(0x018);
   3189
   3190	tmp = readl(hc_mmio + 0x20);
   3191	tmp &= 0x1c1c1c1c;
   3192	tmp |= 0x03030303;
   3193	writel(tmp, hc_mmio + 0x20);
   3194}
   3195#undef ZERO
   3196
   3197static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
   3198			unsigned int n_hc)
   3199{
   3200	struct mv_host_priv *hpriv = host->private_data;
   3201	unsigned int hc, port;
   3202
   3203	for (hc = 0; hc < n_hc; hc++) {
   3204		for (port = 0; port < MV_PORTS_PER_HC; port++)
   3205			mv5_reset_hc_port(hpriv, mmio,
   3206					  (hc * MV_PORTS_PER_HC) + port);
   3207
   3208		mv5_reset_one_hc(hpriv, mmio, hc);
   3209	}
   3210
   3211	return 0;
   3212}
   3213
   3214#undef ZERO
   3215#define ZERO(reg) writel(0, mmio + (reg))
   3216static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
   3217{
   3218	struct mv_host_priv *hpriv = host->private_data;
   3219	u32 tmp;
   3220
   3221	tmp = readl(mmio + MV_PCI_MODE);
   3222	tmp &= 0xff00ffff;
   3223	writel(tmp, mmio + MV_PCI_MODE);
   3224
   3225	ZERO(MV_PCI_DISC_TIMER);
   3226	ZERO(MV_PCI_MSI_TRIGGER);
   3227	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
   3228	ZERO(MV_PCI_SERR_MASK);
   3229	ZERO(hpriv->irq_cause_offset);
   3230	ZERO(hpriv->irq_mask_offset);
   3231	ZERO(MV_PCI_ERR_LOW_ADDRESS);
   3232	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
   3233	ZERO(MV_PCI_ERR_ATTRIBUTE);
   3234	ZERO(MV_PCI_ERR_COMMAND);
   3235}
   3236#undef ZERO
   3237
   3238static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
   3239{
   3240	u32 tmp;
   3241
   3242	mv5_reset_flash(hpriv, mmio);
   3243
   3244	tmp = readl(mmio + GPIO_PORT_CTL);
   3245	tmp &= 0x3;
   3246	tmp |= (1 << 5) | (1 << 6);
   3247	writel(tmp, mmio + GPIO_PORT_CTL);
   3248}
   3249
/*
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @host: ATA host being reset
 *      @mmio: base address of the HBA
 *      @n_hc: number of SATA host controllers on the chip (unused here;
 *             part of the reset_hc method signature)
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
   3259static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
   3260			unsigned int n_hc)
   3261{
   3262	void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
   3263	int i, rc = 0;
   3264	u32 t;
   3265
   3266	/* Following procedure defined in PCI "main command and status
   3267	 * register" table.
   3268	 */
   3269	t = readl(reg);
   3270	writel(t | STOP_PCI_MASTER, reg);
   3271
   3272	for (i = 0; i < 1000; i++) {
   3273		udelay(1);
   3274		t = readl(reg);
   3275		if (PCI_MASTER_EMPTY & t)
   3276			break;
   3277	}
   3278	if (!(PCI_MASTER_EMPTY & t)) {
   3279		dev_err(host->dev, "PCI master won't flush\n");
   3280		rc = 1;
   3281		goto done;
   3282	}
   3283
   3284	/* set reset */
   3285	i = 5;
   3286	do {
   3287		writel(t | GLOB_SFT_RST, reg);
   3288		t = readl(reg);
   3289		udelay(1);
   3290	} while (!(GLOB_SFT_RST & t) && (i-- > 0));
   3291
   3292	if (!(GLOB_SFT_RST & t)) {
   3293		dev_err(host->dev, "can't set global reset\n");
   3294		rc = 1;
   3295		goto done;
   3296	}
   3297
   3298	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
   3299	i = 5;
   3300	do {
   3301		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
   3302		t = readl(reg);
   3303		udelay(1);
   3304	} while ((GLOB_SFT_RST & t) && (i-- > 0));
   3305
   3306	if (GLOB_SFT_RST & t) {
   3307		dev_err(host->dev, "can't clear global reset\n");
   3308		rc = 1;
   3309	}
   3310done:
   3311	return rc;
   3312}
   3313
   3314static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
   3315			   void __iomem *mmio)
   3316{
   3317	void __iomem *port_mmio;
   3318	u32 tmp;
   3319
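	/*
	 * If bit 0 of RESET_CFG is clear, use fixed default
	 * amplitude/pre-emphasis values; otherwise read them from this
	 * port's PHY_MODE2 register (bits 10:8 and 7:5).
	 */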
   3320	tmp = readl(mmio + RESET_CFG);
   3321	if ((tmp & (1 << 0)) == 0) {
   3322		hpriv->signal[idx].amps = 0x7 << 8;
   3323		hpriv->signal[idx].pre = 0x1 << 5;
   3324		return;
   3325	}
   3326
   3327	port_mmio = mv_port_base(mmio, idx);
   3328	tmp = readl(port_mmio + PHY_MODE2);
   3329
   3330	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
   3331	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
   3332}
   3333
   3334static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
   3335{
   3336	writel(0x00000060, mmio + GPIO_PORT_CTL);
   3337}
   3338
   3339static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
   3340			   unsigned int port)
   3341{
   3342	void __iomem *port_mmio = mv_port_base(mmio, port);
   3343
   3344	u32 hp_flags = hpriv->hp_flags;
   3345	int fix_phy_mode2 =
   3346		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
   3347	int fix_phy_mode4 =
   3348		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
   3349	u32 m2, m3;
   3350
   3351	if (fix_phy_mode2) {
   3352		m2 = readl(port_mmio + PHY_MODE2);
   3353		m2 &= ~(1 << 16);
   3354		m2 |= (1 << 31);
   3355		writel(m2, port_mmio + PHY_MODE2);
   3356
   3357		udelay(200);
   3358
   3359		m2 = readl(port_mmio + PHY_MODE2);
   3360		m2 &= ~((1 << 16) | (1 << 31));
   3361		writel(m2, port_mmio + PHY_MODE2);
   3362
   3363		udelay(200);
   3364	}
   3365
   3366	/*
   3367	 * Gen-II/IIe PHY_MODE3 errata RM#2:
   3368	 * Achieves better receiver noise performance than the h/w default:
   3369	 */
   3370	m3 = readl(port_mmio + PHY_MODE3);
   3371	m3 = (m3 & 0x1f) | (0x5555601 << 5);
   3372
   3373	/* Guideline 88F5182 (GL# SATA-S11) */
   3374	if (IS_SOC(hpriv))
   3375		m3 &= ~0x1c;
   3376
   3377	if (fix_phy_mode4) {
   3378		u32 m4 = readl(port_mmio + PHY_MODE4);
   3379		/*
   3380		 * Enforce reserved-bit restrictions on GenIIe devices only.
   3381		 * For earlier chipsets, force only the internal config field
   3382		 *  (workaround for errata FEr SATA#10 part 1).
   3383		 */
   3384		if (IS_GEN_IIE(hpriv))
   3385			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
   3386		else
   3387			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
   3388		writel(m4, port_mmio + PHY_MODE4);
   3389	}
   3390	/*
   3391	 * Workaround for 60x1-B2 errata SATA#13:
   3392	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
   3393	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
   3394	 * Or ensure we use writelfl() when writing PHY_MODE4.
   3395	 */
   3396	writel(m3, port_mmio + PHY_MODE3);
   3397
   3398	/* Revert values of pre-emphasis and signal amps to the saved ones */
   3399	m2 = readl(port_mmio + PHY_MODE2);
   3400
   3401	m2 &= ~MV_M2_PREAMP_MASK;
   3402	m2 |= hpriv->signal[port].amps;
   3403	m2 |= hpriv->signal[port].pre;
   3404	m2 &= ~(1 << 16);
   3405
   3406	/* according to mvSata 3.6.1, some IIE values are fixed */
   3407	if (IS_GEN_IIE(hpriv)) {
   3408		m2 &= ~0xC30FF01F;
   3409		m2 |= 0x0000900F;
   3410	}
   3411
   3412	writel(m2, port_mmio + PHY_MODE2);
   3413}
   3414
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
   3417static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
   3418				      void __iomem *mmio)
   3419{
   3420	return;
   3421}
   3422
   3423static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
   3424			   void __iomem *mmio)
   3425{
   3426	void __iomem *port_mmio;
   3427	u32 tmp;
   3428
   3429	port_mmio = mv_port_base(mmio, idx);
   3430	tmp = readl(port_mmio + PHY_MODE2);
   3431
   3432	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
   3433	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
   3434}
   3435
   3436#undef ZERO
   3437#define ZERO(reg) writel(0, port_mmio + (reg))
   3438static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
   3439					void __iomem *mmio, unsigned int port)
   3440{
   3441	void __iomem *port_mmio = mv_port_base(mmio, port);
   3442
   3443	mv_reset_channel(hpriv, mmio, port);
   3444
   3445	ZERO(0x028);		/* command */
   3446	writel(0x101f, port_mmio + EDMA_CFG);
   3447	ZERO(0x004);		/* timer */
   3448	ZERO(0x008);		/* irq err cause */
   3449	ZERO(0x00c);		/* irq err mask */
   3450	ZERO(0x010);		/* rq bah */
   3451	ZERO(0x014);		/* rq inp */
   3452	ZERO(0x018);		/* rq outp */
   3453	ZERO(0x01c);		/* respq bah */
   3454	ZERO(0x024);		/* respq outp */
   3455	ZERO(0x020);		/* respq inp */
   3456	ZERO(0x02c);		/* test control */
   3457	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
   3458}
   3459
   3460#undef ZERO
   3461
   3462#define ZERO(reg) writel(0, hc_mmio + (reg))
   3463static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
   3464				       void __iomem *mmio)
   3465{
   3466	void __iomem *hc_mmio = mv_hc_base(mmio, 0);
   3467
   3468	ZERO(0x00c);
   3469	ZERO(0x010);
   3470	ZERO(0x014);
}
   3473
   3474#undef ZERO
   3475
   3476static int mv_soc_reset_hc(struct ata_host *host,
   3477				  void __iomem *mmio, unsigned int n_hc)
   3478{
   3479	struct mv_host_priv *hpriv = host->private_data;
   3480	unsigned int port;
   3481
   3482	for (port = 0; port < hpriv->n_ports; port++)
   3483		mv_soc_reset_hc_port(hpriv, mmio, port);
   3484
   3485	mv_soc_reset_one_hc(hpriv, mmio);
   3486
   3487	return 0;
   3488}
   3489
   3490static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
   3491				      void __iomem *mmio)
   3492{
   3493	return;
   3494}
   3495
   3496static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
   3497{
   3498	return;
   3499}
   3500
   3501static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
   3502				  void __iomem *mmio, unsigned int port)
   3503{
   3504	void __iomem *port_mmio = mv_port_base(mmio, port);
   3505	u32	reg;
   3506
   3507	reg = readl(port_mmio + PHY_MODE3);
   3508	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
   3509	reg |= (0x1 << 27);
   3510	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
   3511	reg |= (0x1 << 29);
   3512	writel(reg, port_mmio + PHY_MODE3);
   3513
   3514	reg = readl(port_mmio + PHY_MODE4);
   3515	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
   3516	reg |= (0x1 << 16);
   3517	writel(reg, port_mmio + PHY_MODE4);
   3518
   3519	reg = readl(port_mmio + PHY_MODE9_GEN2);
   3520	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
   3521	reg |= 0x8;
   3522	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
   3523	writel(reg, port_mmio + PHY_MODE9_GEN2);
   3524
   3525	reg = readl(port_mmio + PHY_MODE9_GEN1);
   3526	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
   3527	reg |= 0x8;
   3528	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
   3529	writel(reg, port_mmio + PHY_MODE9_GEN1);
   3530}
   3531
/*
 *	soc_is_65n - check whether the SoC is a 65 nm device
 *
 *	Detect the SoC type by reading the PHYCFG_OFS register.  This register
 *	exists only on 65 nm devices and should read back non-zero there;
 *	on older devices it reads as 0.
 */
   3539static bool soc_is_65n(struct mv_host_priv *hpriv)
   3540{
   3541	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
   3542
   3543	if (readl(port0_mmio + PHYCFG_OFS))
   3544		return true;
   3545	return false;
   3546}
   3547
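/*
 * Called with want_gen2i=1 from mv_reset_channel() to allow 3.0 Gb/s,
 * and with want_gen2i=0 from mv_hardreset() to force 1.5 Gb/s when link
 * negotiation keeps failing (errata FEr SATA#10 part 2).
 */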
   3548static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
   3549{
   3550	u32 ifcfg = readl(port_mmio + SATA_IFCFG);
   3551
   3552	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
   3553	if (want_gen2i)
   3554		ifcfg |= (1 << 7);		/* enable gen2i speed */
   3555	writelfl(ifcfg, port_mmio + SATA_IFCFG);
   3556}
   3557
   3558static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
   3559			     unsigned int port_no)
   3560{
   3561	void __iomem *port_mmio = mv_port_base(mmio, port_no);
   3562
   3563	/*
   3564	 * The datasheet warns against setting EDMA_RESET when EDMA is active
   3565	 * (but doesn't say what the problem might be).  So we first try
   3566	 * to disable the EDMA engine before doing the EDMA_RESET operation.
   3567	 */
   3568	mv_stop_edma_engine(port_mmio);
   3569	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
   3570
   3571	if (!IS_GEN_I(hpriv)) {
   3572		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
   3573		mv_setup_ifcfg(port_mmio, 1);
   3574	}
   3575	/*
   3576	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
   3577	 * link, and physical layers.  It resets all SATA interface registers
   3578	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
   3579	 */
   3580	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
   3581	udelay(25);	/* allow reset propagation */
   3582	writelfl(0, port_mmio + EDMA_CMD);
   3583
   3584	hpriv->ops->phy_errata(hpriv, mmio, port_no);
   3585
   3586	if (IS_GEN_I(hpriv))
   3587		usleep_range(500, 1000);
   3588}
   3589
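/*
 * Select which port-multiplier device port subsequent resets talk to,
 * by updating the PMP field (bits 3:0) of SATA_IFCTL.
 */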
   3590static void mv_pmp_select(struct ata_port *ap, int pmp)
   3591{
   3592	if (sata_pmp_supported(ap)) {
   3593		void __iomem *port_mmio = mv_ap_base(ap);
   3594		u32 reg = readl(port_mmio + SATA_IFCTL);
   3595		int old = reg & 0xf;
   3596
   3597		if (old != pmp) {
   3598			reg = (reg & ~0xf) | pmp;
   3599			writelfl(reg, port_mmio + SATA_IFCTL);
   3600		}
   3601	}
   3602}
   3603
   3604static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
   3605				unsigned long deadline)
   3606{
   3607	mv_pmp_select(link->ap, sata_srst_pmp(link));
   3608	return sata_std_hardreset(link, class, deadline);
   3609}
   3610
   3611static int mv_softreset(struct ata_link *link, unsigned int *class,
   3612				unsigned long deadline)
   3613{
   3614	mv_pmp_select(link->ap, sata_srst_pmp(link));
   3615	return ata_sff_softreset(link, class, deadline);
   3616}
   3617
   3618static int mv_hardreset(struct ata_link *link, unsigned int *class,
   3619			unsigned long deadline)
   3620{
   3621	struct ata_port *ap = link->ap;
   3622	struct mv_host_priv *hpriv = ap->host->private_data;
   3623	struct mv_port_priv *pp = ap->private_data;
   3624	void __iomem *mmio = hpriv->base;
   3625	int rc, attempts = 0, extra = 0;
   3626	u32 sstatus;
   3627	bool online;
   3628
   3629	mv_reset_channel(hpriv, mmio, ap->port_no);
   3630	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
   3631	pp->pp_flags &=
   3632	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
   3633
   3634	/* Workaround for errata FEr SATA#10 (part 2) */
   3635	do {
   3636		const unsigned long *timing =
   3637				sata_ehc_deb_timing(&link->eh_context);
   3638
   3639		rc = sata_link_hardreset(link, timing, deadline + extra,
   3640					 &online, NULL);
   3641		rc = online ? -EAGAIN : rc;
   3642		if (rc)
   3643			return rc;
   3644		sata_scr_read(link, SCR_STATUS, &sstatus);
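		/*
		 * SStatus 0x121: device detected (DET=1) at Gen2 speed
		 * (SPD=2) but communication not established.  Keep retrying
		 * until we see 0x0 (no device), 0x113 (Gen1 link up) or
		 * 0x123 (Gen2 link up).
		 */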
   3645		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
   3646			/* Force 1.5gb/s link speed and try again */
   3647			mv_setup_ifcfg(mv_ap_base(ap), 0);
   3648			if (time_after(jiffies + HZ, deadline))
   3649				extra = HZ; /* only extend it once, max */
   3650		}
   3651	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
   3652	mv_save_cached_regs(ap);
   3653	mv_edma_cfg(ap, 0, 0);
   3654
   3655	return rc;
   3656}
   3657
   3658static void mv_eh_freeze(struct ata_port *ap)
   3659{
   3660	mv_stop_edma(ap);
   3661	mv_enable_port_irqs(ap, 0);
   3662}
   3663
   3664static void mv_eh_thaw(struct ata_port *ap)
   3665{
   3666	struct mv_host_priv *hpriv = ap->host->private_data;
   3667	unsigned int port = ap->port_no;
   3668	unsigned int hardport = mv_hardport_from_port(port);
   3669	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
   3670	void __iomem *port_mmio = mv_ap_base(ap);
   3671	u32 hc_irq_cause;
   3672
   3673	/* clear EDMA errors on this port */
   3674	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
   3675
	/*
	 * Clear pending DEV/DMA irq events for this port only: HC_IRQ_CAUSE
	 * bits are cleared by writing 0 (as elsewhere in this driver), so
	 * writing the complement leaves the other ports' bits untouched.
	 */
   3677	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
   3678	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
   3679
   3680	mv_enable_port_irqs(ap, ERR_IRQ);
   3681}
   3682
   3683/**
   3684 *      mv_port_init - Perform some early initialization on a single port.
   3685 *      @port: libata data structure storing shadow register addresses
   3686 *      @port_mmio: base address of the port
   3687 *
   3688 *      Initialize shadow register mmio addresses, clear outstanding
   3689 *      interrupts on the port, and unmask interrupts for the future
   3690 *      start of the port.
   3691 *
   3692 *      LOCKING:
   3693 *      Inherited from caller.
   3694 */
   3695static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
   3696{
   3697	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
   3698
   3699	/* PIO related setup
   3700	 */
   3701	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
   3702	port->error_addr =
   3703		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
   3704	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
   3705	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
   3706	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
   3707	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
   3708	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
   3709	port->status_addr =
   3710		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
   3711	/* special case: control/altstatus doesn't have ATA_REG_ address */
   3712	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
   3713
   3714	/* Clear any currently outstanding port interrupt conditions */
   3715	serr = port_mmio + mv_scr_offset(SCR_ERROR);
   3716	writelfl(readl(serr), serr);
   3717	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
   3718
   3719	/* unmask all non-transient EDMA error interrupts */
   3720	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
   3721}
   3722
   3723static unsigned int mv_in_pcix_mode(struct ata_host *host)
   3724{
   3725	struct mv_host_priv *hpriv = host->private_data;
   3726	void __iomem *mmio = hpriv->base;
   3727	u32 reg;
   3728
   3729	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
   3730		return 0;	/* not PCI-X capable */
   3731	reg = readl(mmio + MV_PCI_MODE);
   3732	if ((reg & MV_PCI_MODE_MASK) == 0)
   3733		return 0;	/* conventional PCI mode */
   3734	return 1;	/* chip is in PCI-X mode */
   3735}
   3736
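/*
 * PCI "cut-through" is only treated as safe when the chip is not in
 * PCI-X mode and the MV_PCI_COMMAND_MRDTRIG bit is clear; mv_chip_id()
 * uses this to decide whether 6042 parts get MV_HP_CUT_THROUGH.
 */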
   3737static int mv_pci_cut_through_okay(struct ata_host *host)
   3738{
   3739	struct mv_host_priv *hpriv = host->private_data;
   3740	void __iomem *mmio = hpriv->base;
   3741	u32 reg;
   3742
   3743	if (!mv_in_pcix_mode(host)) {
   3744		reg = readl(mmio + MV_PCI_COMMAND);
   3745		if (reg & MV_PCI_COMMAND_MRDTRIG)
   3746			return 0; /* not okay */
   3747	}
   3748	return 1; /* okay */
   3749}
   3750
   3751static void mv_60x1b2_errata_pci7(struct ata_host *host)
   3752{
   3753	struct mv_host_priv *hpriv = host->private_data;
   3754	void __iomem *mmio = hpriv->base;
   3755
   3756	/* workaround for 60x1-B2 errata PCI#7 */
   3757	if (mv_in_pcix_mode(host)) {
   3758		u32 reg = readl(mmio + MV_PCI_COMMAND);
   3759		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
   3760	}
   3761}
   3762
   3763static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
   3764{
   3765	struct pci_dev *pdev = to_pci_dev(host->dev);
   3766	struct mv_host_priv *hpriv = host->private_data;
   3767	u32 hp_flags = hpriv->hp_flags;
   3768
   3769	switch (board_idx) {
   3770	case chip_5080:
   3771		hpriv->ops = &mv5xxx_ops;
   3772		hp_flags |= MV_HP_GEN_I;
   3773
   3774		switch (pdev->revision) {
   3775		case 0x1:
   3776			hp_flags |= MV_HP_ERRATA_50XXB0;
   3777			break;
   3778		case 0x3:
   3779			hp_flags |= MV_HP_ERRATA_50XXB2;
   3780			break;
   3781		default:
   3782			dev_warn(&pdev->dev,
   3783				 "Applying 50XXB2 workarounds to unknown rev\n");
   3784			hp_flags |= MV_HP_ERRATA_50XXB2;
   3785			break;
   3786		}
   3787		break;
   3788
   3789	case chip_504x:
   3790	case chip_508x:
   3791		hpriv->ops = &mv5xxx_ops;
   3792		hp_flags |= MV_HP_GEN_I;
   3793
   3794		switch (pdev->revision) {
   3795		case 0x0:
   3796			hp_flags |= MV_HP_ERRATA_50XXB0;
   3797			break;
   3798		case 0x3:
   3799			hp_flags |= MV_HP_ERRATA_50XXB2;
   3800			break;
   3801		default:
   3802			dev_warn(&pdev->dev,
   3803				 "Applying B2 workarounds to unknown rev\n");
   3804			hp_flags |= MV_HP_ERRATA_50XXB2;
   3805			break;
   3806		}
   3807		break;
   3808
   3809	case chip_604x:
   3810	case chip_608x:
   3811		hpriv->ops = &mv6xxx_ops;
   3812		hp_flags |= MV_HP_GEN_II;
   3813
   3814		switch (pdev->revision) {
   3815		case 0x7:
   3816			mv_60x1b2_errata_pci7(host);
   3817			hp_flags |= MV_HP_ERRATA_60X1B2;
   3818			break;
   3819		case 0x9:
   3820			hp_flags |= MV_HP_ERRATA_60X1C0;
   3821			break;
   3822		default:
   3823			dev_warn(&pdev->dev,
   3824				 "Applying B2 workarounds to unknown rev\n");
   3825			hp_flags |= MV_HP_ERRATA_60X1B2;
   3826			break;
   3827		}
   3828		break;
   3829
   3830	case chip_7042:
   3831		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
   3832		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
   3833		    (pdev->device == 0x2300 || pdev->device == 0x2310))
   3834		{
   3835			/*
   3836			 * Highpoint RocketRAID PCIe 23xx series cards:
   3837			 *
   3838			 * Unconfigured drives are treated as "Legacy"
   3839			 * by the BIOS, and it overwrites sector 8 with
   3840			 * a "Lgcy" metadata block prior to Linux boot.
   3841			 *
   3842			 * Configured drives (RAID or JBOD) leave sector 8
   3843			 * alone, but instead overwrite a high numbered
   3844			 * sector for the RAID metadata.  This sector can
   3845			 * be determined exactly, by truncating the physical
   3846			 * drive capacity to a nice even GB value.
   3847			 *
   3848			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
   3849			 *
   3850			 * Warn the user, lest they think we're just buggy.
   3851			 */
   3852			dev_warn(&pdev->dev, "Highpoint RocketRAID"
   3853				" BIOS CORRUPTS DATA on all attached drives,"
   3854				" regardless of if/how they are configured."
   3855				" BEWARE!\n");
   3856			dev_warn(&pdev->dev, "For data safety, do not"
   3857				" use sectors 8-9 on \"Legacy\" drives,"
   3858				" and avoid the final two gigabytes on"
   3859				" all RocketRAID BIOS initialized drives.\n");
   3860		}
   3861		fallthrough;
   3862	case chip_6042:
   3863		hpriv->ops = &mv6xxx_ops;
   3864		hp_flags |= MV_HP_GEN_IIE;
   3865		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
   3866			hp_flags |= MV_HP_CUT_THROUGH;
   3867
   3868		switch (pdev->revision) {
   3869		case 0x2: /* Rev.B0: the first/only public release */
   3870			hp_flags |= MV_HP_ERRATA_60X1C0;
   3871			break;
   3872		default:
   3873			dev_warn(&pdev->dev,
   3874				 "Applying 60X1C0 workarounds to unknown rev\n");
   3875			hp_flags |= MV_HP_ERRATA_60X1C0;
   3876			break;
   3877		}
   3878		break;
   3879	case chip_soc:
   3880		if (soc_is_65n(hpriv))
   3881			hpriv->ops = &mv_soc_65n_ops;
   3882		else
   3883			hpriv->ops = &mv_soc_ops;
   3884		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
   3885			MV_HP_ERRATA_60X1C0;
   3886		break;
   3887
   3888	default:
   3889		dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
   3890		return -EINVAL;
   3891	}
   3892
   3893	hpriv->hp_flags = hp_flags;
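	/* PCIe parts (7042) use a different PCI IRQ cause/mask register pair */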
   3894	if (hp_flags & MV_HP_PCIE) {
   3895		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
   3896		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
   3897		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
   3898	} else {
   3899		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
   3900		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
   3901		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
   3902	}
   3903
   3904	return 0;
   3905}
   3906
   3907/**
   3908 *      mv_init_host - Perform some early initialization of the host.
   3909 *	@host: ATA host to initialize
   3910 *
   3911 *      If possible, do an early global reset of the host.  Then do
   3912 *      our port init and clear/unmask all/relevant host interrupts.
   3913 *
   3914 *      LOCKING:
   3915 *      Inherited from caller.
   3916 */
   3917static int mv_init_host(struct ata_host *host)
   3918{
   3919	int rc = 0, n_hc, port, hc;
   3920	struct mv_host_priv *hpriv = host->private_data;
   3921	void __iomem *mmio = hpriv->base;
   3922
   3923	rc = mv_chip_id(host, hpriv->board_idx);
   3924	if (rc)
   3925		goto done;
   3926
   3927	if (IS_SOC(hpriv)) {
   3928		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
   3929		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
   3930	} else {
   3931		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
   3932		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
   3933	}
   3934
   3935	/* initialize shadow irq mask with register's value */
   3936	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
   3937
   3938	/* global interrupt mask: 0 == mask everything */
   3939	mv_set_main_irq_mask(host, ~0, 0);
   3940
   3941	n_hc = mv_get_hc_count(host->ports[0]->flags);
   3942
   3943	for (port = 0; port < host->n_ports; port++)
   3944		if (hpriv->ops->read_preamp)
   3945			hpriv->ops->read_preamp(hpriv, port, mmio);
   3946
   3947	rc = hpriv->ops->reset_hc(host, mmio, n_hc);
   3948	if (rc)
   3949		goto done;
   3950
   3951	hpriv->ops->reset_flash(hpriv, mmio);
   3952	hpriv->ops->reset_bus(host, mmio);
   3953	hpriv->ops->enable_leds(hpriv, mmio);
   3954
   3955	for (port = 0; port < host->n_ports; port++) {
   3956		struct ata_port *ap = host->ports[port];
   3957		void __iomem *port_mmio = mv_port_base(mmio, port);
   3958
   3959		mv_port_init(&ap->ioaddr, port_mmio);
   3960	}
   3961
   3962	for (hc = 0; hc < n_hc; hc++) {
   3963		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
   3964
   3965		dev_dbg(host->dev, "HC%i: HC config=0x%08x HC IRQ cause "
   3966			"(before clear)=0x%08x\n", hc,
   3967			readl(hc_mmio + HC_CFG),
   3968			readl(hc_mmio + HC_IRQ_CAUSE));
   3969
   3970		/* Clear any currently outstanding hc interrupt conditions */
   3971		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
   3972	}
   3973
   3974	if (!IS_SOC(hpriv)) {
   3975		/* Clear any currently outstanding host interrupt conditions */
   3976		writelfl(0, mmio + hpriv->irq_cause_offset);
   3977
   3978		/* and unmask interrupt generation for host regs */
   3979		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
   3980	}
   3981
   3982	/*
   3983	 * enable only global host interrupts for now.
   3984	 * The per-port interrupts get done later as ports are set up.
   3985	 */
   3986	mv_set_main_irq_mask(host, 0, PCI_ERR);
   3987	mv_set_irq_coalescing(host, irq_coalescing_io_count,
   3988				    irq_coalescing_usecs);
   3989done:
   3990	return rc;
   3991}
   3992
   3993static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
   3994{
   3995	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
   3996							     MV_CRQB_Q_SZ, 0);
   3997	if (!hpriv->crqb_pool)
   3998		return -ENOMEM;
   3999
   4000	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
   4001							     MV_CRPB_Q_SZ, 0);
   4002	if (!hpriv->crpb_pool)
   4003		return -ENOMEM;
   4004
   4005	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
   4006							     MV_SG_TBL_SZ, 0);
   4007	if (!hpriv->sg_tbl_pool)
   4008		return -ENOMEM;
   4009
   4010	return 0;
   4011}
   4012
   4013static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
   4014				 const struct mbus_dram_target_info *dram)
   4015{
   4016	int i;
   4017
   4018	for (i = 0; i < 4; i++) {
   4019		writel(0, hpriv->base + WINDOW_CTRL(i));
   4020		writel(0, hpriv->base + WINDOW_BASE(i));
   4021	}
   4022
   4023	for (i = 0; i < dram->num_cs; i++) {
   4024		const struct mbus_dram_window *cs = dram->cs + i;
   4025
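		/*
		 * Window control word: size mask in bits 31:16 (64 KB
		 * granularity), attribute in bits 15:8, target id in
		 * bits 7:4, window enable in bit 0.
		 */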
   4026		writel(((cs->size - 1) & 0xffff0000) |
   4027			(cs->mbus_attr << 8) |
   4028			(dram->mbus_dram_target_id << 4) | 1,
   4029			hpriv->base + WINDOW_CTRL(i));
   4030		writel(cs->base, hpriv->base + WINDOW_BASE(i));
   4031	}
   4032}
   4033
   4034/**
 *      mv_platform_probe - handle a positive probe of an SoC Marvell host
   4037 *      @pdev: platform device found
   4038 *
   4039 *      LOCKING:
   4040 *      Inherited from caller.
   4041 */
   4042static int mv_platform_probe(struct platform_device *pdev)
   4043{
   4044	const struct mv_sata_platform_data *mv_platform_data;
   4045	const struct mbus_dram_target_info *dram;
   4046	const struct ata_port_info *ppi[] =
   4047	    { &mv_port_info[chip_soc], NULL };
   4048	struct ata_host *host;
   4049	struct mv_host_priv *hpriv;
   4050	struct resource *res;
   4051	int n_ports = 0, irq = 0;
   4052	int rc;
   4053	int port;
   4054
   4055	ata_print_version_once(&pdev->dev, DRV_VERSION);
   4056
   4057	/*
   4058	 * Simple resource validation ..
   4059	 */
   4060	if (unlikely(pdev->num_resources != 2)) {
   4061		dev_err(&pdev->dev, "invalid number of resources\n");
   4062		return -EINVAL;
   4063	}
   4064
   4065	/*
   4066	 * Get the register base first
   4067	 */
   4068	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   4069	if (res == NULL)
   4070		return -EINVAL;
   4071
   4072	/* allocate host */
   4073	if (pdev->dev.of_node) {
   4074		rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
   4075					   &n_ports);
   4076		if (rc) {
   4077			dev_err(&pdev->dev,
   4078				"error parsing nr-ports property: %d\n", rc);
   4079			return rc;
   4080		}
   4081
   4082		if (n_ports <= 0) {
   4083			dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
   4084				n_ports);
   4085			return -EINVAL;
   4086		}
   4087
   4088		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
   4089	} else {
   4090		mv_platform_data = dev_get_platdata(&pdev->dev);
   4091		n_ports = mv_platform_data->n_ports;
   4092		irq = platform_get_irq(pdev, 0);
   4093	}
   4094	if (irq < 0)
   4095		return irq;
   4096	if (!irq)
   4097		return -EINVAL;
   4098
   4099	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
   4100	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
   4101
   4102	if (!host || !hpriv)
   4103		return -ENOMEM;
   4104	hpriv->port_clks = devm_kcalloc(&pdev->dev,
   4105					n_ports, sizeof(struct clk *),
   4106					GFP_KERNEL);
   4107	if (!hpriv->port_clks)
   4108		return -ENOMEM;
   4109	hpriv->port_phys = devm_kcalloc(&pdev->dev,
   4110					n_ports, sizeof(struct phy *),
   4111					GFP_KERNEL);
   4112	if (!hpriv->port_phys)
   4113		return -ENOMEM;
   4114	host->private_data = hpriv;
   4115	hpriv->board_idx = chip_soc;
   4116
   4117	host->iomap = NULL;
   4118	hpriv->base = devm_ioremap(&pdev->dev, res->start,
   4119				   resource_size(res));
   4120	if (!hpriv->base)
   4121		return -ENOMEM;
   4122
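	/*
	 * The platform resource maps the SATAHC register block, but the
	 * offsets used throughout this driver (mv_port_base(), mv_hc_base(),
	 * ...) are relative to the chip's full register space, so bias the
	 * base pointer back by SATAHC0_REG_BASE.
	 */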
   4123	hpriv->base -= SATAHC0_REG_BASE;
   4124
   4125	hpriv->clk = clk_get(&pdev->dev, NULL);
   4126	if (IS_ERR(hpriv->clk))
   4127		dev_notice(&pdev->dev, "cannot get optional clkdev\n");
   4128	else
   4129		clk_prepare_enable(hpriv->clk);
   4130
   4131	for (port = 0; port < n_ports; port++) {
   4132		char port_number[16];
   4133		sprintf(port_number, "%d", port);
   4134		hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
   4135		if (!IS_ERR(hpriv->port_clks[port]))
   4136			clk_prepare_enable(hpriv->port_clks[port]);
   4137
   4138		sprintf(port_number, "port%d", port);
   4139		hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
   4140							       port_number);
   4141		if (IS_ERR(hpriv->port_phys[port])) {
   4142			rc = PTR_ERR(hpriv->port_phys[port]);
   4143			hpriv->port_phys[port] = NULL;
   4144			if (rc != -EPROBE_DEFER)
   4145				dev_warn(&pdev->dev, "error getting phy %d", rc);
   4146
   4147			/* Cleanup only the initialized ports */
   4148			hpriv->n_ports = port;
   4149			goto err;
   4150		} else
   4151			phy_power_on(hpriv->port_phys[port]);
   4152	}
   4153
   4154	/* All the ports have been initialized */
   4155	hpriv->n_ports = n_ports;
   4156
   4157	/*
   4158	 * (Re-)program MBUS remapping windows if we are asked to.
   4159	 */
   4160	dram = mv_mbus_dram_info();
   4161	if (dram)
   4162		mv_conf_mbus_windows(hpriv, dram);
   4163
   4164	rc = mv_create_dma_pools(hpriv, &pdev->dev);
   4165	if (rc)
   4166		goto err;
   4167
   4168	/*
   4169	 * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
   4170	 * updated in the LP_PHY_CTL register.
   4171	 */
   4172	if (pdev->dev.of_node &&
   4173		of_device_is_compatible(pdev->dev.of_node,
   4174					"marvell,armada-370-sata"))
   4175		hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
   4176
   4177	/* initialize adapter */
   4178	rc = mv_init_host(host);
   4179	if (rc)
   4180		goto err;
   4181
   4182	dev_info(&pdev->dev, "slots %u ports %d\n",
   4183		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
   4184
   4185	rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
   4186	if (!rc)
   4187		return 0;
   4188
   4189err:
   4190	if (!IS_ERR(hpriv->clk)) {
   4191		clk_disable_unprepare(hpriv->clk);
   4192		clk_put(hpriv->clk);
   4193	}
   4194	for (port = 0; port < hpriv->n_ports; port++) {
   4195		if (!IS_ERR(hpriv->port_clks[port])) {
   4196			clk_disable_unprepare(hpriv->port_clks[port]);
   4197			clk_put(hpriv->port_clks[port]);
   4198		}
   4199		phy_power_off(hpriv->port_phys[port]);
   4200	}
   4201
   4202	return rc;
   4203}
   4204
/*
 *      mv_platform_remove - unplug a platform interface
   4208 *      @pdev: platform device
   4209 *
   4210 *      A platform bus SATA device has been unplugged. Perform the needed
   4211 *      cleanup. Also called on module unload for any active devices.
   4212 */
   4213static int mv_platform_remove(struct platform_device *pdev)
   4214{
   4215	struct ata_host *host = platform_get_drvdata(pdev);
   4216	struct mv_host_priv *hpriv = host->private_data;
   4217	int port;
   4218	ata_host_detach(host);
   4219
   4220	if (!IS_ERR(hpriv->clk)) {
   4221		clk_disable_unprepare(hpriv->clk);
   4222		clk_put(hpriv->clk);
   4223	}
   4224	for (port = 0; port < host->n_ports; port++) {
   4225		if (!IS_ERR(hpriv->port_clks[port])) {
   4226			clk_disable_unprepare(hpriv->port_clks[port]);
   4227			clk_put(hpriv->port_clks[port]);
   4228		}
   4229		phy_power_off(hpriv->port_phys[port]);
   4230	}
   4231	return 0;
   4232}
   4233
   4234#ifdef CONFIG_PM_SLEEP
   4235static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
   4236{
   4237	struct ata_host *host = platform_get_drvdata(pdev);
   4238
   4239	if (host)
   4240		ata_host_suspend(host, state);
   4241	return 0;
   4242}
   4243
   4244static int mv_platform_resume(struct platform_device *pdev)
   4245{
   4246	struct ata_host *host = platform_get_drvdata(pdev);
   4247	const struct mbus_dram_target_info *dram;
   4248	int ret;
   4249
   4250	if (host) {
   4251		struct mv_host_priv *hpriv = host->private_data;
   4252
   4253		/*
   4254		 * (Re-)program MBUS remapping windows if we are asked to.
   4255		 */
   4256		dram = mv_mbus_dram_info();
   4257		if (dram)
   4258			mv_conf_mbus_windows(hpriv, dram);
   4259
   4260		/* initialize adapter */
   4261		ret = mv_init_host(host);
   4262		if (ret) {
   4263			dev_err(&pdev->dev, "Error during HW init\n");
   4264			return ret;
   4265		}
   4266		ata_host_resume(host);
   4267	}
   4268
   4269	return 0;
   4270}
   4271#else
   4272#define mv_platform_suspend NULL
   4273#define mv_platform_resume NULL
   4274#endif
   4275
   4276#ifdef CONFIG_OF
   4277static const struct of_device_id mv_sata_dt_ids[] = {
   4278	{ .compatible = "marvell,armada-370-sata", },
   4279	{ .compatible = "marvell,orion-sata", },
   4280	{ /* sentinel */ }
   4281};
   4282MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
   4283#endif
   4284
   4285static struct platform_driver mv_platform_driver = {
   4286	.probe		= mv_platform_probe,
   4287	.remove		= mv_platform_remove,
   4288	.suspend	= mv_platform_suspend,
   4289	.resume		= mv_platform_resume,
   4290	.driver		= {
   4291		.name = DRV_NAME,
   4292		.of_match_table = of_match_ptr(mv_sata_dt_ids),
   4293	},
   4294};
   4295
   4296
   4297#ifdef CONFIG_PCI
   4298static int mv_pci_init_one(struct pci_dev *pdev,
   4299			   const struct pci_device_id *ent);
   4300#ifdef CONFIG_PM_SLEEP
   4301static int mv_pci_device_resume(struct pci_dev *pdev);
   4302#endif
   4303
   4304
   4305static struct pci_driver mv_pci_driver = {
   4306	.name			= DRV_NAME,
   4307	.id_table		= mv_pci_tbl,
   4308	.probe			= mv_pci_init_one,
   4309	.remove			= ata_pci_remove_one,
   4310#ifdef CONFIG_PM_SLEEP
   4311	.suspend		= ata_pci_device_suspend,
   4312	.resume			= mv_pci_device_resume,
   4313#endif
   4314
   4315};
   4316
   4317/**
   4318 *      mv_print_info - Dump key info to kernel log for perusal.
   4319 *      @host: ATA host to print info about
   4320 *
   4321 *      FIXME: complete this.
   4322 *
   4323 *      LOCKING:
   4324 *      Inherited from caller.
   4325 */
   4326static void mv_print_info(struct ata_host *host)
   4327{
   4328	struct pci_dev *pdev = to_pci_dev(host->dev);
   4329	struct mv_host_priv *hpriv = host->private_data;
   4330	u8 scc;
   4331	const char *scc_s, *gen;
   4332
	/* Read the PCI class code to report whether the chip presents
	 * itself as a plain SCSI or as a RAID controller.
	 */
   4336	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
   4337	if (scc == 0)
   4338		scc_s = "SCSI";
   4339	else if (scc == 0x01)
   4340		scc_s = "RAID";
   4341	else
   4342		scc_s = "?";
   4343
   4344	if (IS_GEN_I(hpriv))
   4345		gen = "I";
   4346	else if (IS_GEN_II(hpriv))
   4347		gen = "II";
   4348	else if (IS_GEN_IIE(hpriv))
   4349		gen = "IIE";
   4350	else
   4351		gen = "?";
   4352
   4353	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
   4354		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
   4355		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
   4356}
   4357
   4358/**
   4359 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
   4360 *      @pdev: PCI device found
   4361 *      @ent: PCI device ID entry for the matched host
   4362 *
   4363 *      LOCKING:
   4364 *      Inherited from caller.
   4365 */
   4366static int mv_pci_init_one(struct pci_dev *pdev,
   4367			   const struct pci_device_id *ent)
   4368{
   4369	unsigned int board_idx = (unsigned int)ent->driver_data;
   4370	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
   4371	struct ata_host *host;
   4372	struct mv_host_priv *hpriv;
   4373	int n_ports, port, rc;
   4374
   4375	ata_print_version_once(&pdev->dev, DRV_VERSION);
   4376
   4377	/* allocate host */
   4378	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
   4379
   4380	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
   4381	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
   4382	if (!host || !hpriv)
   4383		return -ENOMEM;
   4384	host->private_data = hpriv;
   4385	hpriv->n_ports = n_ports;
   4386	hpriv->board_idx = board_idx;
   4387
   4388	/* acquire resources */
   4389	rc = pcim_enable_device(pdev);
   4390	if (rc)
   4391		return rc;
   4392
   4393	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
   4394	if (rc == -EBUSY)
   4395		pcim_pin_device(pdev);
   4396	if (rc)
   4397		return rc;
   4398	host->iomap = pcim_iomap_table(pdev);
   4399	hpriv->base = host->iomap[MV_PRIMARY_BAR];
   4400
   4401	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
   4402	if (rc) {
   4403		dev_err(&pdev->dev, "DMA enable failed\n");
   4404		return rc;
   4405	}
   4406
   4407	rc = mv_create_dma_pools(hpriv, &pdev->dev);
   4408	if (rc)
   4409		return rc;
   4410
   4411	for (port = 0; port < host->n_ports; port++) {
   4412		struct ata_port *ap = host->ports[port];
   4413		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
   4414		unsigned int offset = port_mmio - hpriv->base;
   4415
   4416		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
   4417		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
   4418	}
   4419
   4420	/* initialize adapter */
   4421	rc = mv_init_host(host);
   4422	if (rc)
   4423		return rc;
   4424
   4425	/* Enable message-switched interrupts, if requested */
   4426	if (msi && pci_enable_msi(pdev) == 0)
   4427		hpriv->hp_flags |= MV_HP_FLAG_MSI;
   4428
   4429	mv_dump_pci_cfg(pdev, 0x68);
   4430	mv_print_info(host);
   4431
   4432	pci_set_master(pdev);
   4433	pci_try_set_mwi(pdev);
   4434	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
   4435				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
   4436}
   4437
   4438#ifdef CONFIG_PM_SLEEP
   4439static int mv_pci_device_resume(struct pci_dev *pdev)
   4440{
   4441	struct ata_host *host = pci_get_drvdata(pdev);
   4442	int rc;
   4443
   4444	rc = ata_pci_device_do_resume(pdev);
   4445	if (rc)
   4446		return rc;
   4447
   4448	/* initialize adapter */
   4449	rc = mv_init_host(host);
   4450	if (rc)
   4451		return rc;
   4452
   4453	ata_host_resume(host);
   4454
   4455	return 0;
   4456}
   4457#endif
   4458#endif
   4459
   4460static int __init mv_init(void)
   4461{
   4462	int rc = -ENODEV;
   4463#ifdef CONFIG_PCI
   4464	rc = pci_register_driver(&mv_pci_driver);
   4465	if (rc < 0)
   4466		return rc;
   4467#endif
   4468	rc = platform_driver_register(&mv_platform_driver);
   4469
   4470#ifdef CONFIG_PCI
   4471	if (rc < 0)
   4472		pci_unregister_driver(&mv_pci_driver);
   4473#endif
   4474	return rc;
   4475}
   4476
   4477static void __exit mv_exit(void)
   4478{
   4479#ifdef CONFIG_PCI
   4480	pci_unregister_driver(&mv_pci_driver);
   4481#endif
   4482	platform_driver_unregister(&mv_platform_driver);
   4483}
   4484
   4485MODULE_AUTHOR("Brett Russ");
   4486MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
   4487MODULE_LICENSE("GPL v2");
   4488MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
   4489MODULE_VERSION(DRV_VERSION);
   4490MODULE_ALIAS("platform:" DRV_NAME);
   4491
   4492module_init(mv_init);
   4493module_exit(mv_exit);