cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

quirks.c (216657B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * This file contains work-arounds for many known PCI hardware bugs.
      4 * Devices present only on certain architectures (host bridges et cetera)
      5 * should be handled in arch-specific code.
      6 *
      7 * Note: any quirks for hotpluggable devices must _NOT_ be declared __init.
      8 *
      9 * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
     10 *
     11 * Init/reset quirks for USB host controllers should be in the USB quirks
     12 * file, where their drivers can use them.
     13 */
     14
     15#include <linux/bitfield.h>
     16#include <linux/types.h>
     17#include <linux/kernel.h>
     18#include <linux/export.h>
     19#include <linux/pci.h>
     20#include <linux/init.h>
     21#include <linux/delay.h>
     22#include <linux/acpi.h>
     23#include <linux/dmi.h>
     24#include <linux/ioport.h>
     25#include <linux/sched.h>
     26#include <linux/ktime.h>
     27#include <linux/mm.h>
     28#include <linux/nvme.h>
     29#include <linux/platform_data/x86/apple.h>
     30#include <linux/pm_runtime.h>
     31#include <linux/suspend.h>
     32#include <linux/switchtec.h>
     33#include <asm/dma.h>	/* isa_dma_bridge_buggy */
     34#include "pci.h"
     35
     36static ktime_t fixup_debug_start(struct pci_dev *dev,
     37				 void (*fn)(struct pci_dev *dev))
     38{
     39	if (initcall_debug)
     40		pci_info(dev, "calling  %pS @ %i\n", fn, task_pid_nr(current));
     41
     42	return ktime_get();
     43}
     44
     45static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
     46			       void (*fn)(struct pci_dev *dev))
     47{
     48	ktime_t delta, rettime;
     49	unsigned long long duration;
     50
     51	rettime = ktime_get();
     52	delta = ktime_sub(rettime, calltime);
      53	duration = (unsigned long long) ktime_to_ns(delta) >> 10;	/* ns to ~usecs */
     54	if (initcall_debug || duration > 10000)
     55		pci_info(dev, "%pS took %lld usecs\n", fn, duration);
     56}
     57
     58static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
     59			  struct pci_fixup *end)
     60{
     61	ktime_t calltime;
     62
     63	for (; f < end; f++)
     64		if ((f->class == (u32) (dev->class >> f->class_shift) ||
     65		     f->class == (u32) PCI_ANY_ID) &&
     66		    (f->vendor == dev->vendor ||
     67		     f->vendor == (u16) PCI_ANY_ID) &&
     68		    (f->device == dev->device ||
     69		     f->device == (u16) PCI_ANY_ID)) {
     70			void (*hook)(struct pci_dev *dev);
     71#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
     72			hook = offset_to_ptr(&f->hook_offset);
     73#else
     74			hook = f->hook;
     75#endif
     76			calltime = fixup_debug_start(dev, hook);
     77			hook(dev);
     78			fixup_debug_report(dev, calltime, hook);
     79		}
     80}
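/*
 * Background note on the hook lookup above: with
 * CONFIG_HAVE_ARCH_PREL32_RELOCATIONS the fixup tables store each hook as a
 * 32-bit offset relative to the table entry itself (f->hook_offset), and
 * offset_to_ptr() adds that offset back to the field's own address to recover
 * the absolute function pointer, keeping the tables free of absolute
 * relocations.  Without that option the hook is a plain function pointer.
 */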
     81
     82extern struct pci_fixup __start_pci_fixups_early[];
     83extern struct pci_fixup __end_pci_fixups_early[];
     84extern struct pci_fixup __start_pci_fixups_header[];
     85extern struct pci_fixup __end_pci_fixups_header[];
     86extern struct pci_fixup __start_pci_fixups_final[];
     87extern struct pci_fixup __end_pci_fixups_final[];
     88extern struct pci_fixup __start_pci_fixups_enable[];
     89extern struct pci_fixup __end_pci_fixups_enable[];
     90extern struct pci_fixup __start_pci_fixups_resume[];
     91extern struct pci_fixup __end_pci_fixups_resume[];
     92extern struct pci_fixup __start_pci_fixups_resume_early[];
     93extern struct pci_fixup __end_pci_fixups_resume_early[];
     94extern struct pci_fixup __start_pci_fixups_suspend[];
     95extern struct pci_fixup __end_pci_fixups_suspend[];
     96extern struct pci_fixup __start_pci_fixups_suspend_late[];
     97extern struct pci_fixup __end_pci_fixups_suspend_late[];
     98
     99static bool pci_apply_fixup_final_quirks;
    100
    101void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
    102{
    103	struct pci_fixup *start, *end;
    104
    105	switch (pass) {
    106	case pci_fixup_early:
    107		start = __start_pci_fixups_early;
    108		end = __end_pci_fixups_early;
    109		break;
    110
    111	case pci_fixup_header:
    112		start = __start_pci_fixups_header;
    113		end = __end_pci_fixups_header;
    114		break;
    115
    116	case pci_fixup_final:
    117		if (!pci_apply_fixup_final_quirks)
    118			return;
    119		start = __start_pci_fixups_final;
    120		end = __end_pci_fixups_final;
    121		break;
    122
    123	case pci_fixup_enable:
    124		start = __start_pci_fixups_enable;
    125		end = __end_pci_fixups_enable;
    126		break;
    127
    128	case pci_fixup_resume:
    129		start = __start_pci_fixups_resume;
    130		end = __end_pci_fixups_resume;
    131		break;
    132
    133	case pci_fixup_resume_early:
    134		start = __start_pci_fixups_resume_early;
    135		end = __end_pci_fixups_resume_early;
    136		break;
    137
    138	case pci_fixup_suspend:
    139		start = __start_pci_fixups_suspend;
    140		end = __end_pci_fixups_suspend;
    141		break;
    142
    143	case pci_fixup_suspend_late:
    144		start = __start_pci_fixups_suspend_late;
    145		end = __end_pci_fixups_suspend_late;
    146		break;
    147
    148	default:
    149		/* stupid compiler warning, you would think with an enum... */
    150		return;
    151	}
    152	pci_do_fixups(dev, start, end);
    153}
    154EXPORT_SYMBOL(pci_fixup_device);
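/*
 * Illustrative sketch of the declaration convention used throughout this
 * file.  It is kept inside a comment so nothing is actually registered; the
 * IDs and the function name are placeholders, not a real quirk:
 *
 *	static void quirk_example(struct pci_dev *dev)
 *	{
 *		pci_info(dev, "example quirk applied\n");
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example);
 *
 * The macro places a struct pci_fixup entry in the final-pass fixup table,
 * and pci_do_fixups() above would call quirk_example() during the final pass
 * for every device whose vendor/device IDs match; PCI_ANY_ID acts as a
 * wildcard for either field.
 */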
    155
    156static int __init pci_apply_final_quirks(void)
    157{
    158	struct pci_dev *dev = NULL;
    159	u8 cls = 0;
    160	u8 tmp;
    161
    162	if (pci_cache_line_size)
    163		pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2);
    164
    165	pci_apply_fixup_final_quirks = true;
    166	for_each_pci_dev(dev) {
    167		pci_fixup_device(pci_fixup_final, dev);
    168		/*
    169		 * If arch hasn't set it explicitly yet, use the CLS
    170		 * value shared by all PCI devices.  If there's a
    171		 * mismatch, fall back to the default value.
    172		 */
    173		if (!pci_cache_line_size) {
    174			pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
    175			if (!cls)
    176				cls = tmp;
    177			if (!tmp || cls == tmp)
    178				continue;
    179
    180			pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n",
    181			         cls << 2, tmp << 2,
    182				 pci_dfl_cache_line_size << 2);
    183			pci_cache_line_size = pci_dfl_cache_line_size;
    184		}
    185	}
    186
    187	if (!pci_cache_line_size) {
    188		pr_info("PCI: CLS %u bytes, default %u\n", cls << 2,
    189			pci_dfl_cache_line_size << 2);
    190		pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
    191	}
    192
    193	return 0;
    194}
    195fs_initcall_sync(pci_apply_final_quirks);
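/*
 * Unit note for the CLS messages above: the PCI_CACHE_LINE_SIZE register
 * holds the cache line size in 32-bit dwords, which is why each report
 * shifts the value left by 2 to print bytes; e.g. a register value of 0x10
 * (16 dwords) corresponds to a 64-byte cache line.
 */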
    196
    197/*
    198 * Decoding should be disabled for a PCI device during BAR sizing to avoid
    199 * conflict. But doing so may cause problems on host bridge and perhaps other
    200 * key system devices. For devices that need to have mmio decoding always-on,
    201 * we need to set the dev->mmio_always_on bit.
    202 */
    203static void quirk_mmio_always_on(struct pci_dev *dev)
    204{
    205	dev->mmio_always_on = 1;
    206}
    207DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
    208				PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
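/*
 * Worked example of the class matching used by the declaration above: a
 * class_shift of 8 drops the low 8 bits (the programming interface) from
 * dev->class before comparing, so a host bridge with dev->class == 0x060000
 * matches PCI_CLASS_BRIDGE_HOST (0x0600) regardless of its prog-if, and the
 * quirk runs for every host bridge from any vendor (PCI_ANY_ID).
 */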
    209
    210/*
    211 * The Mellanox Tavor device gives false positive parity errors.  Disable
    212 * parity error reporting.
    213 */
    214DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, pci_disable_parity);
    215DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, pci_disable_parity);
    216
    217/*
    218 * Deal with broken BIOSes that neglect to enable passive release,
    219 * which can cause problems in combination with the 82441FX/PPro MTRRs
    220 */
    221static void quirk_passive_release(struct pci_dev *dev)
    222{
    223	struct pci_dev *d = NULL;
    224	unsigned char dlc;
    225
    226	/*
    227	 * We have to make sure a particular bit is set in the PIIX3
    228	 * ISA bridge, so we have to go out and find it.
    229	 */
    230	while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
    231		pci_read_config_byte(d, 0x82, &dlc);
    232		if (!(dlc & 1<<1)) {
    233			pci_info(d, "PIIX3: Enabling Passive Release\n");
    234			dlc |= 1<<1;
    235			pci_write_config_byte(d, 0x82, dlc);
    236		}
    237	}
    238}
    239DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82441,	quirk_passive_release);
    240DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82441,	quirk_passive_release);
    241
    242/*
    243 * The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a
    244 * workaround but VIA don't answer queries. If you happen to have good
    245 * contacts at VIA ask them for me please -- Alan
    246 *
     247 * This appears to be BIOS, not version, dependent. So presumably there is a
     248 * chipset-level fix.
    249 */
    250static void quirk_isa_dma_hangs(struct pci_dev *dev)
    251{
    252	if (!isa_dma_bridge_buggy) {
    253		isa_dma_bridge_buggy = 1;
    254		pci_info(dev, "Activating ISA DMA hang workarounds\n");
    255	}
    256}
    257/*
    258 * It's not totally clear which chipsets are the problematic ones.  We know
    259 * 82C586 and 82C596 variants are affected.
    260 */
    261DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_0,	quirk_isa_dma_hangs);
    262DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C596,	quirk_isa_dma_hangs);
    263DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82371SB_0,  quirk_isa_dma_hangs);
    264DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M1533,		quirk_isa_dma_hangs);
    265DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_1,	quirk_isa_dma_hangs);
    266DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_2,	quirk_isa_dma_hangs);
    267DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_3,	quirk_isa_dma_hangs);
    268
    269/*
    270 * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
    271 * for some HT machines to use C4 w/o hanging.
    272 */
    273static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
    274{
    275	u32 pmbase;
    276	u16 pm1a;
    277
    278	pci_read_config_dword(dev, 0x40, &pmbase);
    279	pmbase = pmbase & 0xff80;
    280	pm1a = inw(pmbase);
    281
    282	if (pm1a & 0x10) {
    283		pci_info(dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
    284		outw(0x10, pmbase);
    285	}
    286}
    287DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
    288
    289/* Chipsets where PCI->PCI transfers vanish or hang */
    290static void quirk_nopcipci(struct pci_dev *dev)
    291{
    292	if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
    293		pci_info(dev, "Disabling direct PCI/PCI transfers\n");
    294		pci_pci_problems |= PCIPCI_FAIL;
    295	}
    296}
    297DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_5597,		quirk_nopcipci);
    298DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_496,		quirk_nopcipci);
    299
    300static void quirk_nopciamd(struct pci_dev *dev)
    301{
    302	u8 rev;
    303	pci_read_config_byte(dev, 0x08, &rev);
    304	if (rev == 0x13) {
    305		/* Erratum 24 */
    306		pci_info(dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
    307		pci_pci_problems |= PCIAGP_FAIL;
    308	}
    309}
    310DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8151_0,	quirk_nopciamd);
    311
    312/* Triton requires workarounds to be used by the drivers */
    313static void quirk_triton(struct pci_dev *dev)
    314{
    315	if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
    316		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
    317		pci_pci_problems |= PCIPCI_TRITON;
    318	}
    319}
    320DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82437,	quirk_triton);
    321DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82437VX,	quirk_triton);
    322DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82439,	quirk_triton);
    323DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82439TX,	quirk_triton);
    324
    325/*
    326 * VIA Apollo KT133 needs PCI latency patch
    327 * Made according to a Windows driver-based patch by George E. Breese;
    328 * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
    329 * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for the info on
    330 * which Mr Breese based his work.
    331 *
    332 * Updated based on further information from the site and also on
    333 * information provided by VIA
    334 */
    335static void quirk_vialatency(struct pci_dev *dev)
    336{
    337	struct pci_dev *p;
    338	u8 busarb;
    339
    340	/*
    341	 * Ok, we have a potential problem chipset here. Now see if we have
    342	 * a buggy southbridge.
    343	 */
    344	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
    345	if (p != NULL) {
    346
    347		/*
    348		 * 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A;
    349		 * thanks Dan Hollis.
    350		 * Check for buggy part revisions
    351		 */
    352		if (p->revision < 0x40 || p->revision > 0x42)
    353			goto exit;
    354	} else {
    355		p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
    356		if (p == NULL)	/* No problem parts */
    357			goto exit;
    358
    359		/* Check for buggy part revisions */
    360		if (p->revision < 0x10 || p->revision > 0x12)
    361			goto exit;
    362	}
    363
    364	/*
    365	 * Ok we have the problem. Now set the PCI master grant to occur
    366	 * every master grant. The apparent bug is that under high PCI load
    367	 * (quite common in Linux of course) you can get data loss when the
    368	 * CPU is held off the bus for 3 bus master requests.  This happens
    369	 * to include the IDE controllers....
    370	 *
    371	 * VIA only apply this fix when an SB Live! is present but under
    372	 * both Linux and Windows this isn't enough, and we have seen
    373	 * corruption without SB Live! but with things like 3 UDMA IDE
    374	 * controllers. So we ignore that bit of the VIA recommendation..
    375	 */
    376	pci_read_config_byte(dev, 0x76, &busarb);
    377
    378	/*
     379	 * Set bits 5:4 of register 0x76 to 01b (clear bit 5, set bit 4):
    380	 * "Master priority rotation on every PCI master grant"
    381	 */
    382	busarb &= ~(1<<5);
    383	busarb |= (1<<4);
    384	pci_write_config_byte(dev, 0x76, busarb);
    385	pci_info(dev, "Applying VIA southbridge workaround\n");
    386exit:
    387	pci_dev_put(p);
    388}
    389DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8363_0,	quirk_vialatency);
    390DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8371_1,	quirk_vialatency);
    391DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8361,		quirk_vialatency);
    392/* Must restore this on a resume from RAM */
    393DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8363_0,	quirk_vialatency);
    394DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8371_1,	quirk_vialatency);
    395DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8361,		quirk_vialatency);
    396
    397/* VIA Apollo VP3 needs ETBF on BT848/878 */
    398static void quirk_viaetbf(struct pci_dev *dev)
    399{
    400	if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
    401		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
    402		pci_pci_problems |= PCIPCI_VIAETBF;
    403	}
    404}
    405DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C597_0,	quirk_viaetbf);
    406
    407static void quirk_vsfx(struct pci_dev *dev)
    408{
    409	if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
    410		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
    411		pci_pci_problems |= PCIPCI_VSFX;
    412	}
    413}
    414DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C576,	quirk_vsfx);
    415
    416/*
    417 * ALi Magik requires workarounds to be used by the drivers that DMA to AGP
    418 * space. Latency must be set to 0xA and Triton workaround applied too.
    419 * [Info kindly provided by ALi]
    420 */
    421static void quirk_alimagik(struct pci_dev *dev)
    422{
    423	if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
    424		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
    425		pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
    426	}
    427}
    428DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M1647,		quirk_alimagik);
    429DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M1651,		quirk_alimagik);
    430
    431/* Natoma has some interesting boundary conditions with Zoran stuff at least */
    432static void quirk_natoma(struct pci_dev *dev)
    433{
    434	if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
    435		pci_info(dev, "Limiting direct PCI/PCI transfers\n");
    436		pci_pci_problems |= PCIPCI_NATOMA;
    437	}
    438}
    439DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82441,	quirk_natoma);
    440DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443LX_0,	quirk_natoma);
    441DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443LX_1,	quirk_natoma);
    442DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443BX_0,	quirk_natoma);
    443DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443BX_1,	quirk_natoma);
    444DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443BX_2,	quirk_natoma);
    445
    446/*
    447 * This chip can cause PCI parity errors if config register 0xA0 is read
    448 * while DMAs are occurring.
    449 */
    450static void quirk_citrine(struct pci_dev *dev)
    451{
    452	dev->cfg_size = 0xA0;
    453}
    454DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM,	PCI_DEVICE_ID_IBM_CITRINE,	quirk_citrine);
    455
    456/*
    457 * This chip can cause bus lockups if config addresses above 0x600
    458 * are read or written.
    459 */
    460static void quirk_nfp6000(struct pci_dev *dev)
    461{
    462	dev->cfg_size = 0x600;
    463}
    464DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME,	PCI_DEVICE_ID_NETRONOME_NFP4000,	quirk_nfp6000);
    465DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME,	PCI_DEVICE_ID_NETRONOME_NFP6000,	quirk_nfp6000);
    466DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME,	PCI_DEVICE_ID_NETRONOME_NFP5000,	quirk_nfp6000);
    467DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME,	PCI_DEVICE_ID_NETRONOME_NFP6000_VF,	quirk_nfp6000);
    468
    469/*  On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
    470static void quirk_extend_bar_to_page(struct pci_dev *dev)
    471{
    472	int i;
    473
    474	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
    475		struct resource *r = &dev->resource[i];
    476
    477		if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
    478			r->end = PAGE_SIZE - 1;
    479			r->start = 0;
    480			r->flags |= IORESOURCE_UNSET;
    481			pci_info(dev, "expanded BAR %d to page size: %pR\n",
    482				 i, r);
    483		}
    484	}
    485}
    486DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
    487
    488/*
    489 * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
    490 * If it's needed, re-allocate the region.
    491 */
    492static void quirk_s3_64M(struct pci_dev *dev)
    493{
    494	struct resource *r = &dev->resource[0];
    495
    496	if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
    497		r->flags |= IORESOURCE_UNSET;
    498		r->start = 0;
    499		r->end = 0x3ffffff;
    500	}
    501}
    502DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,	PCI_DEVICE_ID_S3_868,		quirk_s3_64M);
    503DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,	PCI_DEVICE_ID_S3_968,		quirk_s3_64M);
    504
    505static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
    506		     const char *name)
    507{
    508	u32 region;
    509	struct pci_bus_region bus_region;
    510	struct resource *res = dev->resource + pos;
    511
    512	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
    513
    514	if (!region)
    515		return;
    516
    517	res->name = pci_name(dev);
    518	res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
    519	res->flags |=
    520		(IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
    521	region &= ~(size - 1);
    522
    523	/* Convert from PCI bus to resource space */
    524	bus_region.start = region;
    525	bus_region.end = region + size - 1;
    526	pcibios_bus_to_resource(dev->bus, res, &bus_region);
    527
    528	pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
    529		 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
    530}
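/*
 * Note on the conversion above: BAR values are bus addresses, which behind
 * some host bridges differ from CPU (resource) addresses, so the raw
 * register value is wrapped in a struct pci_bus_region and translated with
 * pcibios_bus_to_resource() instead of being stored into the resource
 * directly.
 */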
    531
    532/*
    533 * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
    534 * ver. 1.33  20070103) don't set the correct ISA PCI region header info.
    535 * BAR0 should be 8 bytes; instead, it may be set to something like 8k
    536 * (which conflicts w/ BAR1's memory range).
    537 *
    538 * CS553x's ISA PCI BARs may also be read-only (ref:
    539 * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
    540 */
    541static void quirk_cs5536_vsa(struct pci_dev *dev)
    542{
    543	static char *name = "CS5536 ISA bridge";
    544
    545	if (pci_resource_len(dev, 0) != 8) {
    546		quirk_io(dev, 0,   8, name);	/* SMB */
    547		quirk_io(dev, 1, 256, name);	/* GPIO */
    548		quirk_io(dev, 2,  64, name);	/* MFGPT */
    549		pci_info(dev, "%s bug detected (incorrect header); workaround applied\n",
    550			 name);
    551	}
    552}
    553DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
    554
    555static void quirk_io_region(struct pci_dev *dev, int port,
    556			    unsigned int size, int nr, const char *name)
    557{
    558	u16 region;
    559	struct pci_bus_region bus_region;
    560	struct resource *res = dev->resource + nr;
    561
    562	pci_read_config_word(dev, port, &region);
    563	region &= ~(size - 1);
    564
    565	if (!region)
    566		return;
    567
    568	res->name = pci_name(dev);
    569	res->flags = IORESOURCE_IO;
    570
    571	/* Convert from PCI bus to resource space */
    572	bus_region.start = region;
    573	bus_region.end = region + size - 1;
    574	pcibios_bus_to_resource(dev->bus, res, &bus_region);
    575
    576	if (!pci_claim_resource(dev, nr))
    577		pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
    578}
    579
    580/*
     581 * The ATI Northbridge triggers an MCE on the processor if you even read
     582 * anywhere between 0x3b0 and 0x3bb, or read 0x3d3
    583 */
    584static void quirk_ati_exploding_mce(struct pci_dev *dev)
    585{
    586	pci_info(dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
    587	/* Mae rhaid i ni beidio ag edrych ar y lleoliadiau I/O hyn */
    588	request_region(0x3b0, 0x0C, "RadeonIGP");
    589	request_region(0x3d3, 0x01, "RadeonIGP");
    590}
    591DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI,	PCI_DEVICE_ID_ATI_RS100,   quirk_ati_exploding_mce);
    592
    593/*
    594 * In the AMD NL platform, this device ([1022:7912]) has a class code of
    595 * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
    596 * claim it.
    597 *
    598 * But the dwc3 driver is a more specific driver for this device, and we'd
    599 * prefer to use it instead of xhci. To prevent xhci from claiming the
    600 * device, change the class code to 0x0c03fe, which the PCI r3.0 spec
    601 * defines as "USB device (not host controller)". The dwc3 driver can then
    602 * claim it based on its Vendor and Device ID.
    603 */
    604static void quirk_amd_nl_class(struct pci_dev *pdev)
    605{
    606	u32 class = pdev->class;
    607
    608	/* Use "USB Device (not host controller)" class */
    609	pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
    610	pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
    611		 class, pdev->class);
    612}
    613DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
    614		quirk_amd_nl_class);
    615
    616/*
    617 * Synopsys USB 3.x host HAPS platform has a class code of
    618 * PCI_CLASS_SERIAL_USB_XHCI, and xhci driver can claim it.  However, these
    619 * devices should use dwc3-haps driver.  Change these devices' class code to
    620 * PCI_CLASS_SERIAL_USB_DEVICE to prevent the xhci-pci driver from claiming
    621 * them.
    622 */
    623static void quirk_synopsys_haps(struct pci_dev *pdev)
    624{
    625	u32 class = pdev->class;
    626
    627	switch (pdev->device) {
    628	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3:
    629	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI:
    630	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31:
    631		pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
    632		pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
    633			 class, pdev->class);
    634		break;
    635	}
    636}
    637DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
    638			       PCI_CLASS_SERIAL_USB_XHCI, 0,
    639			       quirk_synopsys_haps);
    640
    641/*
    642 * Let's make the southbridge information explicit instead of having to
    643 * worry about people probing the ACPI areas, for example.. (Yes, it
    644 * happens, and if you read the wrong ACPI register it will put the machine
    645 * to sleep with no way of waking it up again. Bummer).
    646 *
    647 * ALI M7101: Two IO regions pointed to by words at
    648 *	0xE0 (64 bytes of ACPI registers)
    649 *	0xE2 (32 bytes of SMB registers)
    650 */
    651static void quirk_ali7101_acpi(struct pci_dev *dev)
    652{
    653	quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
    654	quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
    655}
    656DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M7101,		quirk_ali7101_acpi);
    657
    658static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
    659{
    660	u32 devres;
    661	u32 mask, size, base;
    662
    663	pci_read_config_dword(dev, port, &devres);
    664	if ((devres & enable) != enable)
    665		return;
    666	mask = (devres >> 16) & 15;
    667	base = devres & 0xffff;
    668	size = 16;
    669	for (;;) {
    670		unsigned int bit = size >> 1;
    671		if ((bit & mask) == bit)
    672			break;
    673		size = bit;
    674	}
    675	/*
    676	 * For now we only print it out. Eventually we'll want to
    677	 * reserve it (at least if it's in the 0x1000+ range), but
    678	 * let's get enough confirmation reports first.
    679	 */
    680	base &= -size;
    681	pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
    682}
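/*
 * Worked example for the size loop above (the register value is
 * hypothetical): assuming the enable bits for devres B (3 << 21) are set, a
 * value of 0x00670041 gives mask = (devres >> 16) & 15 = 0x7 and
 * base = 0x0041.  The loop halves size from 16 until the next lower power of
 * two is fully contained in the mask, settling on size = 8, and
 * base &= -size rounds the base down to 0x0040, so the window reported is
 * 0x0040-0x0047.
 */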
    683
    684static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
    685{
    686	u32 devres;
    687	u32 mask, size, base;
    688
    689	pci_read_config_dword(dev, port, &devres);
    690	if ((devres & enable) != enable)
    691		return;
    692	base = devres & 0xffff0000;
    693	mask = (devres & 0x3f) << 16;
    694	size = 128 << 16;
    695	for (;;) {
    696		unsigned int bit = size >> 1;
    697		if ((bit & mask) == bit)
    698			break;
    699		size = bit;
    700	}
    701
    702	/*
    703	 * For now we only print it out. Eventually we'll want to
    704	 * reserve it, but let's get enough confirmation reports first.
    705	 */
    706	base &= -size;
    707	pci_info(dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
    708}
    709
    710/*
    711 * PIIX4 ACPI: Two IO regions pointed to by longwords at
    712 *	0x40 (64 bytes of ACPI registers)
    713 *	0x90 (16 bytes of SMB registers)
    714 * and a few strange programmable PIIX4 device resources.
    715 */
    716static void quirk_piix4_acpi(struct pci_dev *dev)
    717{
    718	u32 res_a;
    719
    720	quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
    721	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
    722
    723	/* Device resource A has enables for some of the other ones */
    724	pci_read_config_dword(dev, 0x5c, &res_a);
    725
    726	piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
    727	piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);
    728
    729	/* Device resource D is just bitfields for static resources */
    730
    731	/* Device 12 enabled? */
    732	if (res_a & (1 << 29)) {
    733		piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
    734		piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
    735	}
    736	/* Device 13 enabled? */
    737	if (res_a & (1 << 30)) {
    738		piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
    739		piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
    740	}
    741	piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
    742	piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
    743}
    744DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82371AB_3,	quirk_piix4_acpi);
    745DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443MX_3,	quirk_piix4_acpi);
    746
    747#define ICH_PMBASE	0x40
    748#define ICH_ACPI_CNTL	0x44
    749#define  ICH4_ACPI_EN	0x10
    750#define  ICH6_ACPI_EN	0x80
    751#define ICH4_GPIOBASE	0x58
    752#define ICH4_GPIO_CNTL	0x5c
    753#define  ICH4_GPIO_EN	0x10
    754#define ICH6_GPIOBASE	0x48
    755#define ICH6_GPIO_CNTL	0x4c
    756#define  ICH6_GPIO_EN	0x10
    757
    758/*
    759 * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at
    760 *	0x40 (128 bytes of ACPI, GPIO & TCO registers)
    761 *	0x58 (64 bytes of GPIO I/O space)
    762 */
    763static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
    764{
    765	u8 enable;
    766
    767	/*
    768	 * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict
    769	 * with low legacy (and fixed) ports. We don't know the decoding
    770	 * priority and can't tell whether the legacy device or the one created
    771	 * here is really at that address.  This happens on boards with broken
    772	 * BIOSes.
    773	 */
    774	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
    775	if (enable & ICH4_ACPI_EN)
    776		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
    777				 "ICH4 ACPI/GPIO/TCO");
    778
    779	pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
    780	if (enable & ICH4_GPIO_EN)
    781		quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
    782				"ICH4 GPIO");
    783}
    784DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801AA_0,		quirk_ich4_lpc_acpi);
    785DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801AB_0,		quirk_ich4_lpc_acpi);
    786DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801BA_0,		quirk_ich4_lpc_acpi);
    787DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801BA_10,	quirk_ich4_lpc_acpi);
    788DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801CA_0,		quirk_ich4_lpc_acpi);
    789DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801CA_12,	quirk_ich4_lpc_acpi);
    790DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801DB_0,		quirk_ich4_lpc_acpi);
    791DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801DB_12,	quirk_ich4_lpc_acpi);
    792DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801EB_0,		quirk_ich4_lpc_acpi);
    793DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_ESB_1,		quirk_ich4_lpc_acpi);
    794
    795static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
    796{
    797	u8 enable;
    798
    799	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
    800	if (enable & ICH6_ACPI_EN)
    801		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
    802				 "ICH6 ACPI/GPIO/TCO");
    803
    804	pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
    805	if (enable & ICH6_GPIO_EN)
    806		quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
    807				"ICH6 GPIO");
    808}
    809
    810static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned int reg,
    811				    const char *name, int dynsize)
    812{
    813	u32 val;
    814	u32 size, base;
    815
    816	pci_read_config_dword(dev, reg, &val);
    817
    818	/* Enabled? */
    819	if (!(val & 1))
    820		return;
    821	base = val & 0xfffc;
    822	if (dynsize) {
    823		/*
    824		 * This is not correct. It is 16, 32 or 64 bytes depending on
    825		 * register D31:F0:ADh bits 5:4.
    826		 *
    827		 * But this gets us at least _part_ of it.
    828		 */
    829		size = 16;
    830	} else {
    831		size = 128;
    832	}
    833	base &= ~(size-1);
    834
    835	/*
    836	 * Just print it out for now. We should reserve it after more
    837	 * debugging.
    838	 */
    839	pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
    840}
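/*
 * Worked example for the fixed-size case above (the register value is
 * hypothetical): reading 0x00000681 gives an enabled range with
 * base = 0x0681 & 0xfffc = 0x0680; with size = 128 the reported window is
 * 0x0680-0x06ff.
 */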
    841
    842static void quirk_ich6_lpc(struct pci_dev *dev)
    843{
    844	/* Shared ACPI/GPIO decode with all ICH6+ */
    845	ich6_lpc_acpi_gpio(dev);
    846
    847	/* ICH6-specific generic IO decode */
    848	ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
    849	ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
    850}
    851DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
    852DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
    853
    854static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned int reg,
    855				    const char *name)
    856{
    857	u32 val;
    858	u32 mask, base;
    859
    860	pci_read_config_dword(dev, reg, &val);
    861
    862	/* Enabled? */
    863	if (!(val & 1))
    864		return;
    865
    866	/* IO base in bits 15:2, mask in bits 23:18, both are dword-based */
    867	base = val & 0xfffc;
    868	mask = (val >> 16) & 0xfc;
    869	mask |= 3;
    870
    871	/*
    872	 * Just print it out for now. We should reserve it after more
    873	 * debugging.
    874	 */
    875	pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
    876}
    877
    878/* ICH7-10 has the same common LPC generic IO decode registers */
    879static void quirk_ich7_lpc(struct pci_dev *dev)
    880{
    881	/* We share the common ACPI/GPIO decode with ICH6 */
    882	ich6_lpc_acpi_gpio(dev);
    883
    884	/* And have 4 ICH7+ generic decodes */
    885	ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
    886	ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
    887	ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
    888	ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
    889}
    890DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
    891DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
    892DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
    893DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
    894DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
    895DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
    896DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
    897DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
    898DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
    899DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
    900DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
    901DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
    902DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
    903
    904/*
    905 * VIA ACPI: One IO region pointed to by longword at
    906 *	0x48 or 0x20 (256 bytes of ACPI registers)
    907 */
    908static void quirk_vt82c586_acpi(struct pci_dev *dev)
    909{
    910	if (dev->revision & 0x10)
    911		quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
    912				"vt82c586 ACPI");
    913}
    914DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_3,	quirk_vt82c586_acpi);
    915
    916/*
     917 * VIA VT82C686 ACPI: Three IO regions pointed to by (long)words at
    918 *	0x48 (256 bytes of ACPI registers)
    919 *	0x70 (128 bytes of hardware monitoring register)
    920 *	0x90 (16 bytes of SMB registers)
    921 */
    922static void quirk_vt82c686_acpi(struct pci_dev *dev)
    923{
    924	quirk_vt82c586_acpi(dev);
    925
    926	quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
    927				 "vt82c686 HW-mon");
    928
    929	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
    930}
    931DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686_4,	quirk_vt82c686_acpi);
    932
    933/*
    934 * VIA VT8235 ISA Bridge: Two IO regions pointed to by words at
    935 *	0x88 (128 bytes of power management registers)
    936 *	0xd0 (16 bytes of SMB registers)
    937 */
    938static void quirk_vt8235_acpi(struct pci_dev *dev)
    939{
    940	quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
    941	quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
    942}
    943DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8235,	quirk_vt8235_acpi);
    944
    945/*
    946 * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast
    947 * back-to-back: Disable fast back-to-back on the secondary bus segment
    948 */
    949static void quirk_xio2000a(struct pci_dev *dev)
    950{
    951	struct pci_dev *pdev;
    952	u16 command;
    953
    954	pci_warn(dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
    955	list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
    956		pci_read_config_word(pdev, PCI_COMMAND, &command);
    957		if (command & PCI_COMMAND_FAST_BACK)
    958			pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
    959	}
    960}
    961DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
    962			quirk_xio2000a);
    963
    964#ifdef CONFIG_X86_IO_APIC
    965
    966#include <asm/io_apic.h>
    967
    968/*
    969 * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
    970 * devices to the external APIC.
    971 *
    972 * TODO: When we have device-specific interrupt routers, this code will go
    973 * away from quirks.
    974 */
    975static void quirk_via_ioapic(struct pci_dev *dev)
    976{
    977	u8 tmp;
    978
    979	if (nr_ioapics < 1)
    980		tmp = 0;    /* nothing routed to external APIC */
    981	else
    982		tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
    983
    984	pci_info(dev, "%s VIA external APIC routing\n",
    985		 tmp ? "Enabling" : "Disabling");
    986
    987	/* Offset 0x58: External APIC IRQ output control */
    988	pci_write_config_byte(dev, 0x58, tmp);
    989}
    990DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686,	quirk_via_ioapic);
    991DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686,	quirk_via_ioapic);
    992
    993/*
    994 * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit.
    995 * This leads to doubled level interrupt rates.
    996 * Set this bit to get rid of cycle wastage.
    997 * Otherwise uncritical.
    998 */
    999static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
   1000{
   1001	u8 misc_control2;
   1002#define BYPASS_APIC_DEASSERT 8
   1003
   1004	pci_read_config_byte(dev, 0x5B, &misc_control2);
   1005	if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
   1006		pci_info(dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
   1007		pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
   1008	}
   1009}
   1010DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237,		quirk_via_vt8237_bypass_apic_deassert);
   1011DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237,		quirk_via_vt8237_bypass_apic_deassert);
   1012
   1013/*
   1014 * The AMD IO-APIC can hang the box when an APIC IRQ is masked.
    1015 * We check all revs >= B0 (but not pre-production parts) as the bug
   1016 * is currently marked NoFix
   1017 *
   1018 * We have multiple reports of hangs with this chipset that went away with
   1019 * noapic specified. For the moment we assume it's the erratum. We may be wrong
   1020 * of course. However the advice is demonstrably good even if so.
   1021 */
   1022static void quirk_amd_ioapic(struct pci_dev *dev)
   1023{
   1024	if (dev->revision >= 0x02) {
   1025		pci_warn(dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
   1026		pci_warn(dev, "        : booting with the \"noapic\" option\n");
   1027	}
   1028}
   1029DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_VIPER_7410,	quirk_amd_ioapic);
   1030#endif /* CONFIG_X86_IO_APIC */
   1031
   1032#if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
   1033
   1034static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
   1035{
   1036	/* Fix for improper SR-IOV configuration on Cavium cn88xx RNM device */
   1037	if (dev->subsystem_device == 0xa118)
   1038		dev->sriov->link = dev->devfn;
   1039}
   1040DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link);
   1041#endif
   1042
   1043/*
   1044 * Some settings of MMRBC can lead to data corruption so block changes.
   1045 * See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide
   1046 */
   1047static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
   1048{
   1049	if (dev->subordinate && dev->revision <= 0x12) {
   1050		pci_info(dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
   1051			 dev->revision);
   1052		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
   1053	}
   1054}
   1055DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
   1056
   1057/*
   1058 * FIXME: it is questionable that quirk_via_acpi() is needed.  It shows up
   1059 * as an ISA bridge, and does not support the PCI_INTERRUPT_LINE register
   1060 * at all.  Therefore it seems like setting the pci_dev's IRQ to the value
   1061 * of the ACPI SCI interrupt is only done for convenience.
   1062 *	-jgarzik
   1063 */
   1064static void quirk_via_acpi(struct pci_dev *d)
   1065{
   1066	u8 irq;
   1067
   1068	/* VIA ACPI device: SCI IRQ line in PCI config byte 0x42 */
   1069	pci_read_config_byte(d, 0x42, &irq);
   1070	irq &= 0xf;
   1071	if (irq && (irq != 2))
   1072		d->irq = irq;
   1073}
   1074DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_3,	quirk_via_acpi);
   1075DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686_4,	quirk_via_acpi);
   1076
   1077/* VIA bridges which have VLink */
   1078static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;
   1079
   1080static void quirk_via_bridge(struct pci_dev *dev)
   1081{
   1082	/* See what bridge we have and find the device ranges */
   1083	switch (dev->device) {
   1084	case PCI_DEVICE_ID_VIA_82C686:
   1085		/*
   1086		 * The VT82C686 is special; it attaches to PCI and can have
   1087		 * any device number. All its subdevices are functions of
   1088		 * that single device.
   1089		 */
   1090		via_vlink_dev_lo = PCI_SLOT(dev->devfn);
   1091		via_vlink_dev_hi = PCI_SLOT(dev->devfn);
   1092		break;
   1093	case PCI_DEVICE_ID_VIA_8237:
   1094	case PCI_DEVICE_ID_VIA_8237A:
   1095		via_vlink_dev_lo = 15;
   1096		break;
   1097	case PCI_DEVICE_ID_VIA_8235:
   1098		via_vlink_dev_lo = 16;
   1099		break;
   1100	case PCI_DEVICE_ID_VIA_8231:
   1101	case PCI_DEVICE_ID_VIA_8233_0:
   1102	case PCI_DEVICE_ID_VIA_8233A:
   1103	case PCI_DEVICE_ID_VIA_8233C_0:
   1104		via_vlink_dev_lo = 17;
   1105		break;
   1106	}
   1107}
   1108DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686,	quirk_via_bridge);
   1109DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8231,		quirk_via_bridge);
   1110DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8233_0,	quirk_via_bridge);
   1111DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8233A,	quirk_via_bridge);
   1112DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8233C_0,	quirk_via_bridge);
   1113DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8235,		quirk_via_bridge);
   1114DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237,		quirk_via_bridge);
   1115DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237A,	quirk_via_bridge);
   1116
   1117/*
   1118 * quirk_via_vlink		-	VIA VLink IRQ number update
   1119 * @dev: PCI device
   1120 *
   1121 * If the device we are dealing with is on a PIC IRQ we need to ensure that
    1122 * the IRQ line register, which usually is not relevant for PCI cards, is
   1123 * actually written so that interrupts get sent to the right place.
   1124 *
   1125 * We only do this on systems where a VIA south bridge was detected, and
   1126 * only for VIA devices on the motherboard (see quirk_via_bridge above).
   1127 */
   1128static void quirk_via_vlink(struct pci_dev *dev)
   1129{
   1130	u8 irq, new_irq;
   1131
   1132	/* Check if we have VLink at all */
   1133	if (via_vlink_dev_lo == -1)
   1134		return;
   1135
   1136	new_irq = dev->irq;
   1137
   1138	/* Don't quirk interrupts outside the legacy IRQ range */
   1139	if (!new_irq || new_irq > 15)
   1140		return;
   1141
   1142	/* Internal device ? */
   1143	if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
   1144	    PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
   1145		return;
   1146
   1147	/*
   1148	 * This is an internal VLink device on a PIC interrupt. The BIOS
   1149	 * ought to have set this but may not have, so we redo it.
   1150	 */
   1151	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
   1152	if (new_irq != irq) {
   1153		pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
   1154			irq, new_irq);
   1155		udelay(15);	/* unknown if delay really needed */
   1156		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
   1157	}
   1158}
   1159DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
   1160
   1161/*
   1162 * VIA VT82C598 has its device ID settable and many BIOSes set it to the ID
   1163 * of VT82C597 for backward compatibility.  We need to switch it off to be
   1164 * able to recognize the real type of the chip.
   1165 */
   1166static void quirk_vt82c598_id(struct pci_dev *dev)
   1167{
   1168	pci_write_config_byte(dev, 0xfc, 0);
   1169	pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
   1170}
   1171DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C597_0,	quirk_vt82c598_id);
   1172
   1173/*
   1174 * CardBus controllers have a legacy base address that enables them to
   1175 * respond as i82365 pcmcia controllers.  We don't want them to do this
   1176 * even if the Linux CardBus driver is not loaded, because the Linux i82365
   1177 * driver does not (and should not) handle CardBus.
   1178 */
   1179static void quirk_cardbus_legacy(struct pci_dev *dev)
   1180{
   1181	pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
   1182}
   1183DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
   1184			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
   1185DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
   1186			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
   1187
   1188/*
   1189 * Following the PCI ordering rules is optional on the AMD762. I'm not sure
   1190 * what the designers were smoking but let's not inhale...
   1191 *
   1192 * To be fair to AMD, it follows the spec by default, it's BIOS people who
   1193 * turn it off!
   1194 */
   1195static void quirk_amd_ordering(struct pci_dev *dev)
   1196{
   1197	u32 pcic;
   1198	pci_read_config_dword(dev, 0x4C, &pcic);
   1199	if ((pcic & 6) != 6) {
   1200		pcic |= 6;
   1201		pci_warn(dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
   1202		pci_write_config_dword(dev, 0x4C, pcic);
   1203		pci_read_config_dword(dev, 0x84, &pcic);
   1204		pcic |= (1 << 23);	/* Required in this mode */
   1205		pci_write_config_dword(dev, 0x84, pcic);
   1206	}
   1207}
   1208DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
   1209DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
   1210
   1211/*
   1212 * DreamWorks-provided workaround for Dunord I-3000 problem
   1213 *
   1214 * This card decodes and responds to addresses not apparently assigned to
   1215 * it.  We force a larger allocation to ensure that nothing gets put too
   1216 * close to it.
   1217 */
   1218static void quirk_dunord(struct pci_dev *dev)
   1219{
   1220	struct resource *r = &dev->resource[1];
   1221
   1222	r->flags |= IORESOURCE_UNSET;
   1223	r->start = 0;
   1224	r->end = 0xffffff;
   1225}
   1226DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD,	PCI_DEVICE_ID_DUNORD_I3000,	quirk_dunord);
   1227
   1228/*
   1229 * i82380FB mobile docking controller: its PCI-to-PCI bridge is subtractive
   1230 * decoding (transparent), and does indicate this in the ProgIf.
   1231 * Unfortunately, the ProgIf value is wrong - 0x80 instead of 0x01.
   1232 */
   1233static void quirk_transparent_bridge(struct pci_dev *dev)
   1234{
   1235	dev->transparent = 1;
   1236}
   1237DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82380FB,	quirk_transparent_bridge);
   1238DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA,	0x605,	quirk_transparent_bridge);
   1239
   1240/*
   1241 * Common misconfiguration of the MediaGX/Geode PCI master that will reduce
   1242 * PCI bandwidth from 70MB/s to 25MB/s.  See the GXM/GXLV/GX1 datasheets
   1243 * found at http://www.national.com/analog for info on what these bits do.
   1244 * <christer@weinigel.se>
   1245 */
   1246static void quirk_mediagx_master(struct pci_dev *dev)
   1247{
   1248	u8 reg;
   1249
   1250	pci_read_config_byte(dev, 0x41, &reg);
   1251	if (reg & 2) {
   1252		reg &= ~2;
   1253		pci_info(dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
   1254			 reg);
   1255		pci_write_config_byte(dev, 0x41, reg);
   1256	}
   1257}
   1258DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX,	PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
   1259DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX,	PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
   1260
   1261/*
   1262 * Ensure C0 rev restreaming is off. This is normally done by the BIOS but
    1263 * in the odd case it is not, the result is corruption, hence the presence
    1264 * of a Linux check.
   1265 */
   1266static void quirk_disable_pxb(struct pci_dev *pdev)
   1267{
   1268	u16 config;
   1269
   1270	if (pdev->revision != 0x04)		/* Only C0 requires this */
   1271		return;
   1272	pci_read_config_word(pdev, 0x40, &config);
   1273	if (config & (1<<6)) {
   1274		config &= ~(1<<6);
   1275		pci_write_config_word(pdev, 0x40, config);
   1276		pci_info(pdev, "C0 revision 450NX. Disabling PCI restreaming\n");
   1277	}
   1278}
   1279DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82454NX,	quirk_disable_pxb);
   1280DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82454NX,	quirk_disable_pxb);
   1281
   1282static void quirk_amd_ide_mode(struct pci_dev *pdev)
   1283{
   1284	/* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */
   1285	u8 tmp;
   1286
   1287	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
   1288	if (tmp == 0x01) {
   1289		pci_read_config_byte(pdev, 0x40, &tmp);
   1290		pci_write_config_byte(pdev, 0x40, tmp|1);
   1291		pci_write_config_byte(pdev, 0x9, 1);
   1292		pci_write_config_byte(pdev, 0xa, 6);
   1293		pci_write_config_byte(pdev, 0x40, tmp);
   1294
   1295		pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
   1296		pci_info(pdev, "set SATA to AHCI mode\n");
   1297	}
   1298}
   1299DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
   1300DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
   1301DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
   1302DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
   1303DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
   1304DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
   1305DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
   1306DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
   1307
   1308/* Serverworks CSB5 IDE does not fully support native mode */
   1309static void quirk_svwks_csb5ide(struct pci_dev *pdev)
   1310{
   1311	u8 prog;
   1312	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
   1313	if (prog & 5) {
   1314		prog &= ~5;
   1315		pdev->class &= ~5;
   1316		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
   1317		/* PCI layer will sort out resources */
   1318	}
   1319}
   1320DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
   1321
   1322/* Intel 82801CAM ICH3-M datasheet says IDE modes must be the same */
   1323static void quirk_ide_samemode(struct pci_dev *pdev)
   1324{
   1325	u8 prog;
   1326
   1327	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
   1328
   1329	if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
   1330		pci_info(pdev, "IDE mode mismatch; forcing legacy mode\n");
   1331		prog &= ~5;
   1332		pdev->class &= ~5;
   1333		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
   1334	}
   1335}
   1336DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
   1337
   1338/* Some ATA devices break if put into D3 */
   1339static void quirk_no_ata_d3(struct pci_dev *pdev)
   1340{
   1341	pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
   1342}
   1343/* Quirk the legacy ATA devices only. The AHCI ones are ok */
   1344DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
   1345				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
   1346DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
   1347				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
   1348/* ALi loses some register settings that we cannot then restore */
   1349DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
   1350				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
   1351/* VIA comes back fine but we need to keep it alive or ACPI GTM failures
   1352   occur when mode detecting */
   1353DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
   1354				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
   1355
   1356/*
   1357 * This was originally an Alpha-specific thing, but it really fits here.
   1358 * The i82375 PCI/EISA bridge appears as non-classified. Fix that.
   1359 */
   1360static void quirk_eisa_bridge(struct pci_dev *dev)
   1361{
   1362	dev->class = PCI_CLASS_BRIDGE_EISA << 8;
   1363}
   1364DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82375,	quirk_eisa_bridge);
   1365
   1366/*
   1367 * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
   1368 * is not activated. The myth is that Asus said that they do not want the
   1369 * users to be irritated by just another PCI Device in the Win98 device
   1370 * manager. (see the file prog/hotplug/README.p4b in the lm_sensors
   1371 * package 2.7.0 for details)
   1372 *
   1373 * The SMBus PCI Device can be activated by setting a bit in the ICH LPC
   1374 * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
   1375 * becomes necessary to do this tweak in two steps -- the chosen trigger
   1376 * is either the Host bridge (preferred) or on-board VGA controller.
   1377 *
   1378 * Note that we used to unhide the SMBus that way on Toshiba laptops
   1379 * (Satellite A40 and Tecra M2) but then found that the thermal management
   1380 * was done by SMM code, which could cause unsynchronized concurrent
   1381 * accesses to the SMBus registers, with potentially bad effects. Thus you
   1382 * should be very careful when adding new entries: if SMM is accessing the
   1383 * Intel SMBus, this is a very good reason to leave it hidden.
   1384 *
   1385 * Likewise, many recent laptops use ACPI for thermal management. If the
   1386 * ACPI DSDT code accesses the SMBus, then Linux should not access it
   1387 * natively, and keeping the SMBus hidden is the right thing to do. If you
   1388 * are about to add an entry in the table below, please first disassemble
   1389 * the DSDT and double-check that there is no code accessing the SMBus.
   1390 */
   1391static int asus_hides_smbus;
   1392
   1393static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
   1394{
   1395	if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
   1396		if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
   1397			switch (dev->subsystem_device) {
   1398			case 0x8025: /* P4B-LX */
   1399			case 0x8070: /* P4B */
   1400			case 0x8088: /* P4B533 */
   1401			case 0x1626: /* L3C notebook */
   1402				asus_hides_smbus = 1;
   1403			}
   1404		else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
   1405			switch (dev->subsystem_device) {
   1406			case 0x80b1: /* P4GE-V */
   1407			case 0x80b2: /* P4PE */
   1408			case 0x8093: /* P4B533-V */
   1409				asus_hides_smbus = 1;
   1410			}
   1411		else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
   1412			switch (dev->subsystem_device) {
   1413			case 0x8030: /* P4T533 */
   1414				asus_hides_smbus = 1;
   1415			}
   1416		else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
   1417			switch (dev->subsystem_device) {
   1418			case 0x8070: /* P4G8X Deluxe */
   1419				asus_hides_smbus = 1;
   1420			}
   1421		else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
   1422			switch (dev->subsystem_device) {
   1423			case 0x80c9: /* PU-DLS */
   1424				asus_hides_smbus = 1;
   1425			}
   1426		else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
   1427			switch (dev->subsystem_device) {
   1428			case 0x1751: /* M2N notebook */
   1429			case 0x1821: /* M5N notebook */
   1430			case 0x1897: /* A6L notebook */
   1431				asus_hides_smbus = 1;
   1432			}
   1433		else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
   1434			switch (dev->subsystem_device) {
   1435			case 0x184b: /* W1N notebook */
   1436			case 0x186a: /* M6Ne notebook */
   1437				asus_hides_smbus = 1;
   1438			}
   1439		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
   1440			switch (dev->subsystem_device) {
   1441			case 0x80f2: /* P4P800-X */
   1442				asus_hides_smbus = 1;
   1443			}
   1444		else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
   1445			switch (dev->subsystem_device) {
   1446			case 0x1882: /* M6V notebook */
   1447			case 0x1977: /* A6VA notebook */
   1448				asus_hides_smbus = 1;
   1449			}
   1450	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
   1451		if (dev->device ==  PCI_DEVICE_ID_INTEL_82855PM_HB)
   1452			switch (dev->subsystem_device) {
   1453			case 0x088C: /* HP Compaq nc8000 */
   1454			case 0x0890: /* HP Compaq nc6000 */
   1455				asus_hides_smbus = 1;
   1456			}
   1457		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
   1458			switch (dev->subsystem_device) {
   1459			case 0x12bc: /* HP D330L */
   1460			case 0x12bd: /* HP D530 */
   1461			case 0x006a: /* HP Compaq nx9500 */
   1462				asus_hides_smbus = 1;
   1463			}
   1464		else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
   1465			switch (dev->subsystem_device) {
   1466			case 0x12bf: /* HP xw4100 */
   1467				asus_hides_smbus = 1;
   1468			}
   1469	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
   1470		if (dev->device ==  PCI_DEVICE_ID_INTEL_82855PM_HB)
   1471			switch (dev->subsystem_device) {
   1472			case 0xC00C: /* Samsung P35 notebook */
   1473				asus_hides_smbus = 1;
    1474			}
   1475	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
   1476		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
   1477			switch (dev->subsystem_device) {
   1478			case 0x0058: /* Compaq Evo N620c */
   1479				asus_hides_smbus = 1;
   1480			}
   1481		else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
   1482			switch (dev->subsystem_device) {
   1483			case 0xB16C: /* Compaq Deskpro EP 401963-001 (PCA# 010174) */
   1484				/* Motherboard doesn't have Host bridge
   1485				 * subvendor/subdevice IDs, therefore checking
   1486				 * its on-board VGA controller */
   1487				asus_hides_smbus = 1;
   1488			}
   1489		else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
   1490			switch (dev->subsystem_device) {
   1491			case 0x00b8: /* Compaq Evo D510 CMT */
   1492			case 0x00b9: /* Compaq Evo D510 SFF */
   1493			case 0x00ba: /* Compaq Evo D510 USDT */
   1494				/* Motherboard doesn't have Host bridge
   1495				 * subvendor/subdevice IDs and on-board VGA
   1496				 * controller is disabled if an AGP card is
   1497				 * inserted, therefore checking USB UHCI
   1498				 * Controller #1 */
   1499				asus_hides_smbus = 1;
   1500			}
   1501		else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
   1502			switch (dev->subsystem_device) {
   1503			case 0x001A: /* Compaq Deskpro EN SSF P667 815E */
   1504				/* Motherboard doesn't have host bridge
   1505				 * subvendor/subdevice IDs, therefore checking
   1506				 * its on-board VGA controller */
   1507				asus_hides_smbus = 1;
   1508			}
   1509	}
   1510}
   1511DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82845_HB,	asus_hides_smbus_hostbridge);
   1512DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82845G_HB,	asus_hides_smbus_hostbridge);
   1513DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82850_HB,	asus_hides_smbus_hostbridge);
   1514DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82865_HB,	asus_hides_smbus_hostbridge);
   1515DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82875_HB,	asus_hides_smbus_hostbridge);
   1516DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_7205_0,	asus_hides_smbus_hostbridge);
   1517DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7501_MCH,	asus_hides_smbus_hostbridge);
   1518DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82855PM_HB,	asus_hides_smbus_hostbridge);
   1519DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82855GM_HB,	asus_hides_smbus_hostbridge);
   1520DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
   1521
   1522DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82810_IG3,	asus_hides_smbus_hostbridge);
   1523DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_2,	asus_hides_smbus_hostbridge);
   1524DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82815_CGC,	asus_hides_smbus_hostbridge);
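        /*
         * The HEADER fixups above run on the trigger devices only (host
         * bridge, on-board VGA or USB controller, per the table in
         * asus_hides_smbus_hostbridge()) and merely record that the board is
         * affected; the actual unhiding is done by the LPC-bridge fixups
         * below once asus_hides_smbus has been set.
         */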
   1525
   1526static void asus_hides_smbus_lpc(struct pci_dev *dev)
   1527{
   1528	u16 val;
   1529
   1530	if (likely(!asus_hides_smbus))
   1531		return;
   1532
   1533	pci_read_config_word(dev, 0xF2, &val);
   1534	if (val & 0x8) {
   1535		pci_write_config_word(dev, 0xF2, val & (~0x8));
   1536		pci_read_config_word(dev, 0xF2, &val);
   1537		if (val & 0x8)
   1538			pci_info(dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
   1539				 val);
   1540		else
   1541			pci_info(dev, "Enabled i801 SMBus device\n");
   1542	}
   1543}
   1544DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801AA_0,	asus_hides_smbus_lpc);
   1545DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_0,	asus_hides_smbus_lpc);
   1546DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801BA_0,	asus_hides_smbus_lpc);
   1547DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801CA_0,	asus_hides_smbus_lpc);
   1548DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801CA_12,	asus_hides_smbus_lpc);
   1549DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_12,	asus_hides_smbus_lpc);
   1550DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801EB_0,	asus_hides_smbus_lpc);
   1551DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801AA_0,	asus_hides_smbus_lpc);
   1552DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_0,	asus_hides_smbus_lpc);
   1553DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801BA_0,	asus_hides_smbus_lpc);
   1554DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801CA_0,	asus_hides_smbus_lpc);
   1555DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801CA_12,	asus_hides_smbus_lpc);
   1556DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_12,	asus_hides_smbus_lpc);
   1557DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801EB_0,	asus_hides_smbus_lpc);
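        /*
         * The fixup above clears bit 3 of config register 0xF2 in the ICH LPC
         * bridge, which is what keeps the i801 SMBus function hidden from
         * config accesses, and re-reads the register to confirm the write
         * stuck.  The RESUME_EARLY variants repeat this because firmware
         * presumably re-hides the device across suspend/resume.
         */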
   1558
    1559/* It appears we have only one such device; if not, the WARN_ON() below will fire */
   1560static void __iomem *asus_rcba_base;
   1561static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
   1562{
   1563	u32 rcba;
   1564
   1565	if (likely(!asus_hides_smbus))
   1566		return;
   1567	WARN_ON(asus_rcba_base);
   1568
   1569	pci_read_config_dword(dev, 0xF0, &rcba);
   1570	/* use bits 31:14, 16 kB aligned */
   1571	asus_rcba_base = ioremap(rcba & 0xFFFFC000, 0x4000);
   1572	if (asus_rcba_base == NULL)
   1573		return;
   1574}
   1575
   1576static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
   1577{
   1578	u32 val;
   1579
   1580	if (likely(!asus_hides_smbus || !asus_rcba_base))
   1581		return;
   1582
   1583	/* read the Function Disable register, dword mode only */
   1584	val = readl(asus_rcba_base + 0x3418);
   1585
   1586	/* enable the SMBus device */
   1587	writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);
   1588}
   1589
   1590static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
   1591{
   1592	if (likely(!asus_hides_smbus || !asus_rcba_base))
   1593		return;
   1594
   1595	iounmap(asus_rcba_base);
   1596	asus_rcba_base = NULL;
   1597	pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
   1598}
   1599
   1600static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
   1601{
   1602	asus_hides_smbus_lpc_ich6_suspend(dev);
   1603	asus_hides_smbus_lpc_ich6_resume_early(dev);
   1604	asus_hides_smbus_lpc_ich6_resume(dev);
   1605}
   1606DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_hides_smbus_lpc_ich6);
   1607DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_hides_smbus_lpc_ich6_suspend);
   1608DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_hides_smbus_lpc_ich6_resume);
   1609DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_hides_smbus_lpc_ich6_resume_early);
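        /*
         * On ICH6 the hide bit no longer lives in LPC config space: offset
         * 0xF0 holds the Root Complex Base Address (RCBA), and the SMBus
         * function is re-enabled by clearing bit 3 of the Function Disable
         * register at RCBA + 0x3418.  The boot-time HEADER fixup above simply
         * chains the suspend (map RCBA), resume_early (clear the bit) and
         * resume (unmap and report) steps in one call.
         */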
   1610
   1611/* SiS 96x south bridge: BIOS typically hides SMBus device...  */
   1612static void quirk_sis_96x_smbus(struct pci_dev *dev)
   1613{
   1614	u8 val = 0;
   1615	pci_read_config_byte(dev, 0x77, &val);
   1616	if (val & 0x10) {
   1617		pci_info(dev, "Enabling SiS 96x SMBus\n");
   1618		pci_write_config_byte(dev, 0x77, val & ~0x10);
   1619	}
   1620}
   1621DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_961,		quirk_sis_96x_smbus);
   1622DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_962,		quirk_sis_96x_smbus);
   1623DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_963,		quirk_sis_96x_smbus);
   1624DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_LPC,		quirk_sis_96x_smbus);
   1625DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_961,		quirk_sis_96x_smbus);
   1626DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_962,		quirk_sis_96x_smbus);
   1627DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_963,		quirk_sis_96x_smbus);
   1628DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_LPC,		quirk_sis_96x_smbus);
   1629
   1630/*
   1631 * ... This is further complicated by the fact that some SiS96x south
   1632 * bridges pretend to be 85C503/5513 instead.  In that case see if we
   1633 * spotted a compatible north bridge to make sure.
   1634 * (pci_find_device() doesn't work yet)
   1635 *
   1636 * We can also enable the sis96x bit in the discovery register..
   1637 */
   1638#define SIS_DETECT_REGISTER 0x40
   1639
   1640static void quirk_sis_503(struct pci_dev *dev)
   1641{
   1642	u8 reg;
   1643	u16 devid;
   1644
   1645	pci_read_config_byte(dev, SIS_DETECT_REGISTER, &reg);
   1646	pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
   1647	pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
   1648	if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
   1649		pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
   1650		return;
   1651	}
   1652
   1653	/*
   1654	 * Ok, it now shows up as a 96x.  Run the 96x quirk by hand in case
   1655	 * it has already been processed.  (Depends on link order, which is
   1656	 * apparently not guaranteed)
   1657	 */
   1658	dev->device = devid;
   1659	quirk_sis_96x_smbus(dev);
   1660}
   1661DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_503,		quirk_sis_503);
   1662DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_503,		quirk_sis_503);
   1663
   1664/*
   1665 * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller
   1666 * and MC97 modem controller are disabled when a second PCI soundcard is
   1667 * present. This patch, tweaking the VT8237 ISA bridge, enables them.
   1668 * -- bjd
   1669 */
   1670static void asus_hides_ac97_lpc(struct pci_dev *dev)
   1671{
   1672	u8 val;
   1673	int asus_hides_ac97 = 0;
   1674
   1675	if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
   1676		if (dev->device == PCI_DEVICE_ID_VIA_8237)
   1677			asus_hides_ac97 = 1;
   1678	}
   1679
   1680	if (!asus_hides_ac97)
   1681		return;
   1682
   1683	pci_read_config_byte(dev, 0x50, &val);
   1684	if (val & 0xc0) {
   1685		pci_write_config_byte(dev, 0x50, val & (~0xc0));
   1686		pci_read_config_byte(dev, 0x50, &val);
   1687		if (val & 0xc0)
   1688			pci_info(dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
   1689				 val);
   1690		else
   1691			pci_info(dev, "Enabled onboard AC97/MC97 devices\n");
   1692	}
   1693}
   1694DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
   1695DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
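        /*
         * Bits 6 and 7 of config register 0x50 in the VT8237 ISA bridge hide
         * the on-board AC97 audio and MC97 modem functions; the fixup above
         * clears both bits and re-reads the register to verify the devices
         * are visible again.
         */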
   1696
   1697#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
   1698
   1699/*
   1700 * If we are using libata we can drive this chip properly but must do this
   1701 * early on to make the additional device appear during the PCI scanning.
   1702 */
   1703static void quirk_jmicron_ata(struct pci_dev *pdev)
   1704{
   1705	u32 conf1, conf5, class;
   1706	u8 hdr;
   1707
   1708	/* Only poke fn 0 */
   1709	if (PCI_FUNC(pdev->devfn))
   1710		return;
   1711
   1712	pci_read_config_dword(pdev, 0x40, &conf1);
   1713	pci_read_config_dword(pdev, 0x80, &conf5);
   1714
   1715	conf1 &= ~0x00CFF302; /* Clear bit 1, 8, 9, 12-19, 22, 23 */
   1716	conf5 &= ~(1 << 24);  /* Clear bit 24 */
   1717
   1718	switch (pdev->device) {
   1719	case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */
   1720	case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */
   1721	case PCI_DEVICE_ID_JMICRON_JMB364: /* SATA dual ports */
   1722		/* The controller should be in single function ahci mode */
   1723		conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
   1724		break;
   1725
   1726	case PCI_DEVICE_ID_JMICRON_JMB365:
   1727	case PCI_DEVICE_ID_JMICRON_JMB366:
   1728		/* Redirect IDE second PATA port to the right spot */
   1729		conf5 |= (1 << 24);
   1730		fallthrough;
   1731	case PCI_DEVICE_ID_JMICRON_JMB361:
   1732	case PCI_DEVICE_ID_JMICRON_JMB363:
   1733	case PCI_DEVICE_ID_JMICRON_JMB369:
   1734		/* Enable dual function mode, AHCI on fn 0, IDE fn1 */
   1735		/* Set the class codes correctly and then direct IDE 0 */
   1736		conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */
   1737		break;
   1738
   1739	case PCI_DEVICE_ID_JMICRON_JMB368:
   1740		/* The controller should be in single function IDE mode */
   1741		conf1 |= 0x00C00000; /* Set 22, 23 */
   1742		break;
   1743	}
   1744
   1745	pci_write_config_dword(pdev, 0x40, conf1);
   1746	pci_write_config_dword(pdev, 0x80, conf5);
   1747
   1748	/* Update pdev accordingly */
   1749	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
   1750	pdev->hdr_type = hdr & 0x7f;
   1751	pdev->multifunction = !!(hdr & 0x80);
   1752
   1753	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
   1754	pdev->class = class >> 8;
   1755}
   1756DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
   1757DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
   1758DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
   1759DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
   1760DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
   1761DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
   1762DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
   1763DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
   1764DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
   1765DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
   1766DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
   1767DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
   1768DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
   1769DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
   1770DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
   1771DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
   1772DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
   1773DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
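        /*
         * The JMicron fixup rewrites the controller "personality": the config
         * dwords at 0x40 and 0x80 select which AHCI/IDE engines are exposed
         * and on which PCI function, so after reprogramming them the fixup
         * re-reads PCI_HEADER_TYPE and PCI_CLASS_REVISION to bring the
         * in-kernel pci_dev (multifunction bit and class code) back in sync
         * with what the hardware now reports.
         */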
   1774
   1775#endif
   1776
   1777static void quirk_jmicron_async_suspend(struct pci_dev *dev)
   1778{
   1779	if (dev->multifunction) {
   1780		device_disable_async_suspend(&dev->dev);
   1781		pci_info(dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
   1782	}
   1783}
   1784DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
   1785DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
   1786DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
   1787DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
   1788
   1789#ifdef CONFIG_X86_IO_APIC
   1790static void quirk_alder_ioapic(struct pci_dev *pdev)
   1791{
   1792	int i;
   1793
   1794	if ((pdev->class >> 8) != 0xff00)
   1795		return;
   1796
   1797	/*
   1798	 * The first BAR is the location of the IO-APIC... we must
   1799	 * not touch this (and it's already covered by the fixmap), so
   1800	 * forcibly insert it into the resource tree.
   1801	 */
   1802	if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
   1803		insert_resource(&iomem_resource, &pdev->resource[0]);
   1804
   1805	/*
   1806	 * The next five BARs all seem to be rubbish, so just clean
   1807	 * them out.
   1808	 */
   1809	for (i = 1; i < PCI_STD_NUM_BARS; i++)
   1810		memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
   1811}
   1812DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_EESSC,	quirk_alder_ioapic);
   1813#endif
   1814
   1815static void quirk_no_msi(struct pci_dev *dev)
   1816{
   1817	pci_info(dev, "avoiding MSI to work around a hardware defect\n");
   1818	dev->no_msi = 1;
   1819}
   1820DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4386, quirk_no_msi);
   1821DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4387, quirk_no_msi);
   1822DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4388, quirk_no_msi);
   1823DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4389, quirk_no_msi);
   1824DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438a, quirk_no_msi);
   1825DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438b, quirk_no_msi);
   1826
   1827static void quirk_pcie_mch(struct pci_dev *pdev)
   1828{
   1829	pdev->no_msi = 1;
   1830}
   1831DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7520_MCH,	quirk_pcie_mch);
   1832DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7320_MCH,	quirk_pcie_mch);
   1833DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7525_MCH,	quirk_pcie_mch);
   1834
   1835DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
   1836
   1837/*
   1838 * HiSilicon KunPeng920 and KunPeng930 have devices appear as PCI but are
   1839 * actually on the AMBA bus. These fake PCI devices can support SVA via
   1840 * SMMU stall feature, by setting dma-can-stall for ACPI platforms.
   1841 *
   1842 * Normally stalling must not be enabled for PCI devices, since it would
   1843 * break the PCI requirement for free-flowing writes and may lead to
   1844 * deadlock.  We expect PCI devices to support ATS and PRI if they want to
   1845 * be fault-tolerant, so there's no ACPI binding to describe anything else,
   1846 * even when a "PCI" device turns out to be a regular old SoC device
   1847 * dressed up as a RCiEP and normal rules don't apply.
   1848 */
   1849static void quirk_huawei_pcie_sva(struct pci_dev *pdev)
   1850{
   1851	struct property_entry properties[] = {
   1852		PROPERTY_ENTRY_BOOL("dma-can-stall"),
   1853		{},
   1854	};
   1855
   1856	if (pdev->revision != 0x21 && pdev->revision != 0x30)
   1857		return;
   1858
   1859	pdev->pasid_no_tlp = 1;
   1860
   1861	/*
   1862	 * Set the dma-can-stall property on ACPI platforms. Device tree
   1863	 * can set it directly.
   1864	 */
   1865	if (!pdev->dev.of_node &&
   1866	    device_create_managed_software_node(&pdev->dev, properties, NULL))
    1867		pci_warn(pdev, "could not add stall property\n");
   1868}
   1869DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
   1870DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
   1871DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
   1872DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
   1873DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
   1874DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
   1875
   1876/*
   1877 * It's possible for the MSI to get corrupted if SHPC and ACPI are used
   1878 * together on certain PXH-based systems.
   1879 */
   1880static void quirk_pcie_pxh(struct pci_dev *dev)
   1881{
   1882	dev->no_msi = 1;
   1883	pci_warn(dev, "PXH quirk detected; SHPC device MSI disabled\n");
   1884}
   1885DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHD_0,	quirk_pcie_pxh);
   1886DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHD_1,	quirk_pcie_pxh);
   1887DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_0,	quirk_pcie_pxh);
   1888DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_1,	quirk_pcie_pxh);
   1889DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHV,	quirk_pcie_pxh);
   1890
   1891/*
   1892 * Some Intel PCI Express chipsets have trouble with downstream device
   1893 * power management.
   1894 */
   1895static void quirk_intel_pcie_pm(struct pci_dev *dev)
   1896{
   1897	pci_pm_d3hot_delay = 120;
   1898	dev->no_d1d2 = 1;
   1899}
   1900DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e2, quirk_intel_pcie_pm);
   1901DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e3, quirk_intel_pcie_pm);
   1902DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e4, quirk_intel_pcie_pm);
   1903DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e5, quirk_intel_pcie_pm);
   1904DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e6, quirk_intel_pcie_pm);
   1905DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e7, quirk_intel_pcie_pm);
   1906DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25f7, quirk_intel_pcie_pm);
   1907DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25f8, quirk_intel_pcie_pm);
   1908DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25f9, quirk_intel_pcie_pm);
   1909DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25fa, quirk_intel_pcie_pm);
   1910DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2601, quirk_intel_pcie_pm);
   1911DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2602, quirk_intel_pcie_pm);
   1912DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2603, quirk_intel_pcie_pm);
   1913DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2604, quirk_intel_pcie_pm);
   1914DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2605, quirk_intel_pcie_pm);
   1915DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2606, quirk_intel_pcie_pm);
   1916DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2607, quirk_intel_pcie_pm);
   1917DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2608, quirk_intel_pcie_pm);
   1918DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2609, quirk_intel_pcie_pm);
   1919DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x260a, quirk_intel_pcie_pm);
   1920DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x260b, quirk_intel_pcie_pm);
   1921
   1922static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
   1923{
   1924	if (dev->d3hot_delay >= delay)
   1925		return;
   1926
   1927	dev->d3hot_delay = delay;
   1928	pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
   1929		 dev->d3hot_delay);
   1930}
   1931
   1932static void quirk_radeon_pm(struct pci_dev *dev)
   1933{
   1934	if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
   1935	    dev->subsystem_device == 0x00e2)
   1936		quirk_d3hot_delay(dev, 20);
   1937}
   1938DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
   1939
   1940/*
   1941 * Ryzen5/7 XHCI controllers fail upon resume from runtime suspend or s2idle.
   1942 * https://bugzilla.kernel.org/show_bug.cgi?id=205587
   1943 *
   1944 * The kernel attempts to transition these devices to D3cold, but that seems
   1945 * to be ineffective on the platforms in question; the PCI device appears to
   1946 * remain on in D3hot state. The D3hot-to-D0 transition then requires an
   1947 * extended delay in order to succeed.
   1948 */
   1949static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
   1950{
   1951	quirk_d3hot_delay(dev, 20);
   1952}
   1953DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
   1954DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
   1955DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
   1956
   1957#ifdef CONFIG_X86_IO_APIC
   1958static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
   1959{
   1960	noioapicreroute = 1;
   1961	pr_info("%s detected: disable boot interrupt reroute\n", d->ident);
   1962
   1963	return 0;
   1964}
   1965
   1966static const struct dmi_system_id boot_interrupt_dmi_table[] = {
   1967	/*
   1968	 * Systems to exclude from boot interrupt reroute quirks
   1969	 */
   1970	{
   1971		.callback = dmi_disable_ioapicreroute,
   1972		.ident = "ASUSTek Computer INC. M2N-LR",
   1973		.matches = {
   1974			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."),
   1975			DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"),
   1976		},
   1977	},
   1978	{}
   1979};
   1980
   1981/*
   1982 * Boot interrupts on some chipsets cannot be turned off. For these chipsets,
   1983 * remap the original interrupt in the Linux kernel to the boot interrupt, so
   1984 * that a PCI device's interrupt handler is installed on the boot interrupt
   1985 * line instead.
   1986 */
   1987static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
   1988{
   1989	dmi_check_system(boot_interrupt_dmi_table);
   1990	if (noioapicquirk || noioapicreroute)
   1991		return;
   1992
   1993	dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
   1994	pci_info(dev, "rerouting interrupts for [%04x:%04x]\n",
   1995		 dev->vendor, dev->device);
   1996}
   1997DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80333_0,	quirk_reroute_to_boot_interrupts_intel);
   1998DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80333_1,	quirk_reroute_to_boot_interrupts_intel);
   1999DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ESB2_0,	quirk_reroute_to_boot_interrupts_intel);
   2000DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_0,	quirk_reroute_to_boot_interrupts_intel);
   2001DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_1,	quirk_reroute_to_boot_interrupts_intel);
   2002DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHV,	quirk_reroute_to_boot_interrupts_intel);
   2003DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80332_0,	quirk_reroute_to_boot_interrupts_intel);
   2004DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80332_1,	quirk_reroute_to_boot_interrupts_intel);
   2005DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80333_0,	quirk_reroute_to_boot_interrupts_intel);
   2006DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80333_1,	quirk_reroute_to_boot_interrupts_intel);
   2007DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ESB2_0,	quirk_reroute_to_boot_interrupts_intel);
   2008DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_0,	quirk_reroute_to_boot_interrupts_intel);
   2009DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_1,	quirk_reroute_to_boot_interrupts_intel);
   2010DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHV,	quirk_reroute_to_boot_interrupts_intel);
   2011DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80332_0,	quirk_reroute_to_boot_interrupts_intel);
   2012DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80332_1,	quirk_reroute_to_boot_interrupts_intel);
   2013
   2014/*
   2015 * On some chipsets we can disable the generation of legacy INTx boot
   2016 * interrupts.
   2017 */
   2018
   2019/*
   2020 * IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no
   2021 * 300641-004US, section 5.7.3.
   2022 *
   2023 * Core IO on Xeon E5 1600/2600/4600, see Intel order no 326509-003.
   2024 * Core IO on Xeon E5 v2, see Intel order no 329188-003.
   2025 * Core IO on Xeon E7 v2, see Intel order no 329595-002.
   2026 * Core IO on Xeon E5 v3, see Intel order no 330784-003.
   2027 * Core IO on Xeon E7 v3, see Intel order no 332315-001US.
   2028 * Core IO on Xeon E5 v4, see Intel order no 333810-002US.
   2029 * Core IO on Xeon E7 v4, see Intel order no 332315-001US.
   2030 * Core IO on Xeon D-1500, see Intel order no 332051-001.
   2031 * Core IO on Xeon Scalable, see Intel order no 610950.
   2032 */
   2033#define INTEL_6300_IOAPIC_ABAR		0x40	/* Bus 0, Dev 29, Func 5 */
   2034#define INTEL_6300_DISABLE_BOOT_IRQ	(1<<14)
   2035
   2036#define INTEL_CIPINTRC_CFG_OFFSET	0x14C	/* Bus 0, Dev 5, Func 0 */
   2037#define INTEL_CIPINTRC_DIS_INTX_ICH	(1<<25)
   2038
   2039static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
   2040{
   2041	u16 pci_config_word;
   2042	u32 pci_config_dword;
   2043
   2044	if (noioapicquirk)
   2045		return;
   2046
   2047	switch (dev->device) {
   2048	case PCI_DEVICE_ID_INTEL_ESB_10:
   2049		pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR,
   2050				     &pci_config_word);
   2051		pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
   2052		pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR,
   2053				      pci_config_word);
   2054		break;
   2055	case 0x3c28:	/* Xeon E5 1600/2600/4600	*/
   2056	case 0x0e28:	/* Xeon E5/E7 V2		*/
   2057	case 0x2f28:	/* Xeon E5/E7 V3,V4		*/
   2058	case 0x6f28:	/* Xeon D-1500			*/
   2059	case 0x2034:	/* Xeon Scalable Family		*/
   2060		pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
   2061				      &pci_config_dword);
   2062		pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH;
   2063		pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
   2064				       pci_config_dword);
   2065		break;
   2066	default:
   2067		return;
   2068	}
   2069	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
   2070		 dev->vendor, dev->device);
   2071}
   2072/*
   2073 * Device 29 Func 5 Device IDs of IO-APIC
   2074 * containing ABAR—APIC1 Alternate Base Address Register
   2075 */
   2076DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ESB_10,
   2077		quirk_disable_intel_boot_interrupt);
   2078DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ESB_10,
   2079		quirk_disable_intel_boot_interrupt);
   2080
   2081/*
   2082 * Device 5 Func 0 Device IDs of Core IO modules/hubs
   2083 * containing Coherent Interface Protocol Interrupt Control
   2084 *
   2085 * Device IDs obtained from volume 2 datasheets of commented
   2086 * families above.
   2087 */
   2088DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x3c28,
   2089		quirk_disable_intel_boot_interrupt);
   2090DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x0e28,
   2091		quirk_disable_intel_boot_interrupt);
   2092DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2f28,
   2093		quirk_disable_intel_boot_interrupt);
   2094DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x6f28,
   2095		quirk_disable_intel_boot_interrupt);
   2096DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2034,
   2097		quirk_disable_intel_boot_interrupt);
   2098DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	0x3c28,
   2099		quirk_disable_intel_boot_interrupt);
   2100DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	0x0e28,
   2101		quirk_disable_intel_boot_interrupt);
   2102DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	0x2f28,
   2103		quirk_disable_intel_boot_interrupt);
   2104DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	0x6f28,
   2105		quirk_disable_intel_boot_interrupt);
   2106DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	0x2034,
   2107		quirk_disable_intel_boot_interrupt);
   2108
   2109/* Disable boot interrupts on HT-1000 */
   2110#define BC_HT1000_FEATURE_REG		0x64
   2111#define BC_HT1000_PIC_REGS_ENABLE	(1<<0)
   2112#define BC_HT1000_MAP_IDX		0xC00
   2113#define BC_HT1000_MAP_DATA		0xC01
   2114
   2115static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
   2116{
   2117	u32 pci_config_dword;
   2118	u8 irq;
   2119
   2120	if (noioapicquirk)
   2121		return;
   2122
   2123	pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
   2124	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
   2125			BC_HT1000_PIC_REGS_ENABLE);
   2126
   2127	for (irq = 0x10; irq < 0x10 + 32; irq++) {
   2128		outb(irq, BC_HT1000_MAP_IDX);
   2129		outb(0x00, BC_HT1000_MAP_DATA);
   2130	}
   2131
   2132	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
   2133
   2134	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
   2135		 dev->vendor, dev->device);
   2136}
   2137DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS,   PCI_DEVICE_ID_SERVERWORKS_HT1000SB,	quirk_disable_broadcom_boot_interrupt);
   2138DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS,   PCI_DEVICE_ID_SERVERWORKS_HT1000SB,	quirk_disable_broadcom_boot_interrupt);
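        /*
         * BC_HT1000_MAP_IDX/BC_HT1000_MAP_DATA (I/O ports 0xC00/0xC01) form
         * an index/data pair into the southbridge's PIC IRQ map.  The fixup
         * above temporarily enables access to those registers through the
         * feature register, writes 0 to map entries 0x10-0x2F so the bridge
         * stops generating boot interrupts for them, and then restores the
         * original feature register value.
         */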
   2139
   2140/* Disable boot interrupts on AMD and ATI chipsets */
   2141
   2142/*
   2143 * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131
   2144 * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode
   2145 * (due to an erratum).
   2146 */
   2147#define AMD_813X_MISC			0x40
   2148#define AMD_813X_NOIOAMODE		(1<<0)
   2149#define AMD_813X_REV_B1			0x12
   2150#define AMD_813X_REV_B2			0x13
   2151
   2152static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
   2153{
   2154	u32 pci_config_dword;
   2155
   2156	if (noioapicquirk)
   2157		return;
   2158	if ((dev->revision == AMD_813X_REV_B1) ||
   2159	    (dev->revision == AMD_813X_REV_B2))
   2160		return;
   2161
   2162	pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
   2163	pci_config_dword &= ~AMD_813X_NOIOAMODE;
   2164	pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
   2165
   2166	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
   2167		 dev->vendor, dev->device);
   2168}
   2169DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8131_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
   2170DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8131_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
   2171DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8132_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
   2172DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8132_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
   2173
   2174#define AMD_8111_PCI_IRQ_ROUTING	0x56
   2175
   2176static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
   2177{
   2178	u16 pci_config_word;
   2179
   2180	if (noioapicquirk)
   2181		return;
   2182
   2183	pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
   2184	if (!pci_config_word) {
   2185		pci_info(dev, "boot interrupts on device [%04x:%04x] already disabled\n",
   2186			 dev->vendor, dev->device);
   2187		return;
   2188	}
   2189	pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
   2190	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
   2191		 dev->vendor, dev->device);
   2192}
   2193DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,   PCI_DEVICE_ID_AMD_8111_SMBUS,	quirk_disable_amd_8111_boot_interrupt);
   2194DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,   PCI_DEVICE_ID_AMD_8111_SMBUS,	quirk_disable_amd_8111_boot_interrupt);
   2195#endif /* CONFIG_X86_IO_APIC */
   2196
   2197/*
   2198 * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size
    2199 * but PIO transfers won't work if BAR0 starts at an odd multiple of 8 bytes.
   2200 * Re-allocate the region if needed...
   2201 */
   2202static void quirk_tc86c001_ide(struct pci_dev *dev)
   2203{
   2204	struct resource *r = &dev->resource[0];
   2205
   2206	if (r->start & 0x8) {
   2207		r->flags |= IORESOURCE_UNSET;
   2208		r->start = 0;
   2209		r->end = 0xf;
   2210	}
   2211}
   2212DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
   2213			 PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
   2214			 quirk_tc86c001_ide);
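        /*
         * Marking the resource IORESOURCE_UNSET with a 16-byte size (end =
         * 0xf) makes the PCI core reassign BAR0 during resource allocation;
         * since BARs are allocated on a boundary of at least their size, the
         * region then starts on a 16-byte boundary and can no longer fall at
         * an odd multiple of 8.
         */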
   2215
   2216/*
   2217 * PLX PCI 9050 PCI Target bridge controller has an erratum that prevents the
   2218 * local configuration registers accessible via BAR0 (memory) or BAR1 (i/o)
   2219 * being read correctly if bit 7 of the base address is set.
   2220 * The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128).
   2221 * Re-allocate the regions to a 256-byte boundary if necessary.
   2222 */
   2223static void quirk_plx_pci9050(struct pci_dev *dev)
   2224{
   2225	unsigned int bar;
   2226
   2227	/* Fixed in revision 2 (PCI 9052). */
   2228	if (dev->revision >= 2)
   2229		return;
   2230	for (bar = 0; bar <= 1; bar++)
   2231		if (pci_resource_len(dev, bar) == 0x80 &&
   2232		    (pci_resource_start(dev, bar) & 0x80)) {
   2233			struct resource *r = &dev->resource[bar];
   2234			pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
   2235				 bar);
   2236			r->flags |= IORESOURCE_UNSET;
   2237			r->start = 0;
   2238			r->end = 0xff;
   2239		}
   2240}
   2241DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
   2242			 quirk_plx_pci9050);
   2243/*
   2244 * The following Meilhaus (vendor ID 0x1402) device IDs (amongst others)
   2245 * may be using the PLX PCI 9050: 0x0630, 0x0940, 0x0950, 0x0960, 0x100b,
   2246 * 0x1400, 0x140a, 0x140b, 0x14e0, 0x14ea, 0x14eb, 0x1604, 0x1608, 0x160c,
   2247 * 0x168f, 0x2000, 0x2600, 0x3000, 0x810a, 0x810b.
   2248 *
   2249 * Currently, device IDs 0x2000 and 0x2600 are used by the Comedi "me_daq"
   2250 * driver.
   2251 */
   2252DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
   2253DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
   2254
   2255static void quirk_netmos(struct pci_dev *dev)
   2256{
   2257	unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
   2258	unsigned int num_serial = dev->subsystem_device & 0xf;
   2259
   2260	/*
   2261	 * These Netmos parts are multiport serial devices with optional
   2262	 * parallel ports.  Even when parallel ports are present, they
   2263	 * are identified as class SERIAL, which means the serial driver
   2264	 * will claim them.  To prevent this, mark them as class OTHER.
   2265	 * These combo devices should be claimed by parport_serial.
   2266	 *
   2267	 * The subdevice ID is of the form 0x00PS, where <P> is the number
   2268	 * of parallel ports and <S> is the number of serial ports.
   2269	 */
   2270	switch (dev->device) {
   2271	case PCI_DEVICE_ID_NETMOS_9835:
   2272		/* Well, this rule doesn't hold for the following 9835 device */
   2273		if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
   2274				dev->subsystem_device == 0x0299)
   2275			return;
   2276		fallthrough;
   2277	case PCI_DEVICE_ID_NETMOS_9735:
   2278	case PCI_DEVICE_ID_NETMOS_9745:
   2279	case PCI_DEVICE_ID_NETMOS_9845:
   2280	case PCI_DEVICE_ID_NETMOS_9855:
   2281		if (num_parallel) {
   2282			pci_info(dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
   2283				dev->device, num_parallel, num_serial);
   2284			dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
   2285			    (dev->class & 0xff);
   2286		}
   2287	}
   2288}
   2289DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
   2290			 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
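        /*
         * Example decode of the 0x00PS scheme above (hypothetical ID, for
         * illustration only): a 9845 with subsystem device 0x0014 would have
         * num_parallel = (0x0014 & 0xf0) >> 4 = 1 and num_serial =
         * 0x0014 & 0xf = 4, i.e. one parallel and four serial ports.
         */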
   2291
   2292static void quirk_e100_interrupt(struct pci_dev *dev)
   2293{
   2294	u16 command, pmcsr;
   2295	u8 __iomem *csr;
   2296	u8 cmd_hi;
   2297
   2298	switch (dev->device) {
   2299	/* PCI IDs taken from drivers/net/e100.c */
   2300	case 0x1029:
   2301	case 0x1030 ... 0x1034:
   2302	case 0x1038 ... 0x103E:
   2303	case 0x1050 ... 0x1057:
   2304	case 0x1059:
   2305	case 0x1064 ... 0x106B:
   2306	case 0x1091 ... 0x1095:
   2307	case 0x1209:
   2308	case 0x1229:
   2309	case 0x2449:
   2310	case 0x2459:
   2311	case 0x245D:
   2312	case 0x27DC:
   2313		break;
   2314	default:
   2315		return;
   2316	}
   2317
   2318	/*
   2319	 * Some firmware hands off the e100 with interrupts enabled,
   2320	 * which can cause a flood of interrupts if packets are
   2321	 * received before the driver attaches to the device.  So
   2322	 * disable all e100 interrupts here.  The driver will
   2323	 * re-enable them when it's ready.
   2324	 */
   2325	pci_read_config_word(dev, PCI_COMMAND, &command);
   2326
   2327	if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
   2328		return;
   2329
   2330	/*
   2331	 * Check that the device is in the D0 power state. If it's not,
   2332	 * there is no point to look any further.
   2333	 */
   2334	if (dev->pm_cap) {
   2335		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
   2336		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
   2337			return;
   2338	}
   2339
   2340	/* Convert from PCI bus to resource space.  */
   2341	csr = ioremap(pci_resource_start(dev, 0), 8);
   2342	if (!csr) {
   2343		pci_warn(dev, "Can't map e100 registers\n");
   2344		return;
   2345	}
   2346
   2347	cmd_hi = readb(csr + 3);
   2348	if (cmd_hi == 0) {
   2349		pci_warn(dev, "Firmware left e100 interrupts enabled; disabling\n");
   2350		writeb(1, csr + 3);
   2351	}
   2352
   2353	iounmap(csr);
   2354}
   2355DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
   2356			PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
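        /*
         * The byte at offset 3 of the e100 CSR BAR is the high byte of the
         * SCB command word, which acts as the interrupt mask on these parts:
         * 0 leaves all interrupts enabled, 1 masks them all.  Reading 0 above
         * therefore means firmware left interrupts on, and writeb(1, csr + 3)
         * masks them until the e100 driver takes over.
         */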
   2357
   2358/*
   2359 * The 82575 and 82598 may experience data corruption issues when transitioning
   2360 * out of L0S.  To prevent this we need to disable L0S on the PCIe link.
   2361 */
   2362static void quirk_disable_aspm_l0s(struct pci_dev *dev)
   2363{
   2364	pci_info(dev, "Disabling L0s\n");
   2365	pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
   2366}
   2367DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
   2368DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
   2369DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
   2370DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
   2371DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
   2372DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
   2373DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
   2374DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
   2375DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
   2376DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
   2377DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
   2378DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
   2379DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
   2380DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
   2381
   2382static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
   2383{
   2384	pci_info(dev, "Disabling ASPM L0s/L1\n");
   2385	pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
   2386}
   2387
   2388/*
   2389 * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
   2390 * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
   2391 * disable both L0s and L1 for now to be safe.
   2392 */
   2393DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
   2394
   2395/*
   2396 * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
   2397 * Link bit cleared after starting the link retrain process to allow this
   2398 * process to finish.
   2399 *
   2400 * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130.  See also the
   2401 * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf.
   2402 */
   2403static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
   2404{
   2405	dev->clear_retrain_link = 1;
   2406	pci_info(dev, "Enable PCIe Retrain Link quirk\n");
   2407}
   2408DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe110, quirk_enable_clear_retrain_link);
   2409DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe111, quirk_enable_clear_retrain_link);
   2410DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe130, quirk_enable_clear_retrain_link);
   2411
   2412static void fixup_rev1_53c810(struct pci_dev *dev)
   2413{
   2414	u32 class = dev->class;
   2415
   2416	/*
   2417	 * rev 1 ncr53c810 chips don't set the class at all which means
   2418	 * they don't get their resources remapped. Fix that here.
   2419	 */
   2420	if (class)
   2421		return;
   2422
   2423	dev->class = PCI_CLASS_STORAGE_SCSI << 8;
   2424	pci_info(dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
   2425		 class, dev->class);
   2426}
   2427DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
   2428
   2429/* Enable 1k I/O space granularity on the Intel P64H2 */
   2430static void quirk_p64h2_1k_io(struct pci_dev *dev)
   2431{
   2432	u16 en1k;
   2433
   2434	pci_read_config_word(dev, 0x40, &en1k);
   2435
   2436	if (en1k & 0x200) {
   2437		pci_info(dev, "Enable I/O Space to 1KB granularity\n");
   2438		dev->io_window_1k = 1;
   2439	}
   2440}
   2441DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
   2442
   2443/*
   2444 * Under some circumstances, AER is not linked with extended capabilities.
   2445 * Force it to be linked by setting the corresponding control bit in the
   2446 * config space.
   2447 */
   2448static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
   2449{
   2450	uint8_t b;
   2451
   2452	if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
   2453		if (!(b & 0x20)) {
   2454			pci_write_config_byte(dev, 0xf41, b | 0x20);
   2455			pci_info(dev, "Linking AER extended capability\n");
   2456		}
   2457	}
   2458}
   2459DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA,  PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
   2460			quirk_nvidia_ck804_pcie_aer_ext_cap);
   2461DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA,  PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
   2462			quirk_nvidia_ck804_pcie_aer_ext_cap);
   2463
   2464static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
   2465{
   2466	/*
   2467	 * Disable PCI Bus Parking and PCI Master read caching on CX700
   2468	 * which causes unspecified timing errors with a VT6212L on the PCI
   2469	 * bus leading to USB2.0 packet loss.
   2470	 *
   2471	 * This quirk is only enabled if a second (on the external PCI bus)
   2472	 * VT6212L is found -- the CX700 core itself also contains a USB
   2473	 * host controller with the same PCI ID as the VT6212L.
   2474	 */
   2475
   2476	/* Count VT6212L instances */
   2477	struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
   2478		PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
   2479	uint8_t b;
   2480
   2481	/*
   2482	 * p should contain the first (internal) VT6212L -- see if we have
   2483	 * an external one by searching again.
   2484	 */
   2485	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
   2486	if (!p)
   2487		return;
   2488	pci_dev_put(p);
   2489
   2490	if (pci_read_config_byte(dev, 0x76, &b) == 0) {
   2491		if (b & 0x40) {
   2492			/* Turn off PCI Bus Parking */
   2493			pci_write_config_byte(dev, 0x76, b ^ 0x40);
   2494
   2495			pci_info(dev, "Disabling VIA CX700 PCI parking\n");
   2496		}
   2497	}
   2498
   2499	if (pci_read_config_byte(dev, 0x72, &b) == 0) {
   2500		if (b != 0) {
   2501			/* Turn off PCI Master read caching */
   2502			pci_write_config_byte(dev, 0x72, 0x0);
   2503
   2504			/* Set PCI Master Bus time-out to "1x16 PCLK" */
   2505			pci_write_config_byte(dev, 0x75, 0x1);
   2506
   2507			/* Disable "Read FIFO Timer" */
   2508			pci_write_config_byte(dev, 0x77, 0x0);
   2509
   2510			pci_info(dev, "Disabling VIA CX700 PCI caching\n");
   2511		}
   2512	}
   2513}
   2514DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
   2515
   2516static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
   2517{
   2518	u32 rev;
   2519
   2520	pci_read_config_dword(dev, 0xf4, &rev);
   2521
   2522	/* Only CAP the MRRS if the device is a 5719 A0 */
   2523	if (rev == 0x05719000) {
   2524		int readrq = pcie_get_readrq(dev);
   2525		if (readrq > 2048)
   2526			pcie_set_readrq(dev, 2048);
   2527	}
   2528}
   2529DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
   2530			 PCI_DEVICE_ID_TIGON3_5719,
   2531			 quirk_brcm_5719_limit_mrrs);
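        /*
         * The value read from config offset 0xf4 identifies the chip
         * stepping; 0x05719000 is the 5719 A0, the only stepping that needs
         * its PCIe Max Read Request Size capped at 2048 bytes here.
         */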
   2532
   2533/*
   2534 * Originally in EDAC sources for i82875P: Intel tells BIOS developers to
   2535 * hide device 6 which configures the overflow device access containing the
   2536 * DRBs - this is where we expose device 6.
   2537 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
   2538 */
   2539static void quirk_unhide_mch_dev6(struct pci_dev *dev)
   2540{
   2541	u8 reg;
   2542
   2543	if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
   2544		pci_info(dev, "Enabling MCH 'Overflow' Device\n");
   2545		pci_write_config_byte(dev, 0xF4, reg | 0x02);
   2546	}
   2547}
   2548DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
   2549			quirk_unhide_mch_dev6);
   2550DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
   2551			quirk_unhide_mch_dev6);
   2552
   2553#ifdef CONFIG_PCI_MSI
   2554/*
   2555 * Some chipsets do not support MSI. We cannot easily rely on setting
   2556 * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually some
   2557 * other buses controlled by the chipset even if Linux is not aware of it.
   2558 * Instead of setting the flag on all buses in the machine, simply disable
   2559 * MSI globally.
   2560 */
   2561static void quirk_disable_all_msi(struct pci_dev *dev)
   2562{
   2563	pci_no_msi();
   2564	pci_warn(dev, "MSI quirk detected; MSI disabled\n");
   2565}
   2566DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
   2567DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
   2568DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
   2569DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
   2570DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
   2571DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
   2572DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
   2573DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
   2574DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SAMSUNG, 0xa5e3, quirk_disable_all_msi);
   2575
   2576/* Disable MSI on chipsets that are known to not support it */
   2577static void quirk_disable_msi(struct pci_dev *dev)
   2578{
   2579	if (dev->subordinate) {
   2580		pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
   2581		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
   2582	}
   2583}
   2584DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
   2585DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
   2586DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
   2587
   2588/*
   2589 * The APC bridge device in AMD 780 family northbridges has some random
   2590 * OEM subsystem ID in its vendor ID register (erratum 18), so instead
   2591 * we use the possible vendor/device IDs of the host bridge for the
   2592 * declared quirk, and search for the APC bridge by slot number.
   2593 */
   2594static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
   2595{
   2596	struct pci_dev *apc_bridge;
   2597
   2598	apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
   2599	if (apc_bridge) {
   2600		if (apc_bridge->device == 0x9602)
   2601			quirk_disable_msi(apc_bridge);
   2602		pci_dev_put(apc_bridge);
   2603	}
   2604}
   2605DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
   2606DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
   2607
   2608/*
   2609 * Go through the list of HyperTransport capabilities and return 1 if a HT
   2610 * MSI capability is found and enabled.
   2611 */
   2612static int msi_ht_cap_enabled(struct pci_dev *dev)
   2613{
   2614	int pos, ttl = PCI_FIND_CAP_TTL;
   2615
   2616	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
   2617	while (pos && ttl--) {
   2618		u8 flags;
   2619
   2620		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
   2621					 &flags) == 0) {
   2622			pci_info(dev, "Found %s HT MSI Mapping\n",
   2623				flags & HT_MSI_FLAGS_ENABLE ?
   2624				"enabled" : "disabled");
   2625			return (flags & HT_MSI_FLAGS_ENABLE) != 0;
   2626		}
   2627
   2628		pos = pci_find_next_ht_capability(dev, pos,
   2629						  HT_CAPTYPE_MSI_MAPPING);
   2630	}
   2631	return 0;
   2632}
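
/*
 * The ttl counter above follows the usual capability-walk convention:
 * PCI_FIND_CAP_TTL bounds how many list entries are visited, so a malformed
 * or looping capability list in config space cannot stall the kernel in an
 * endless loop.
 */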
   2633
   2634/* Check the HyperTransport MSI mapping to know whether MSI is enabled or not */
   2635static void quirk_msi_ht_cap(struct pci_dev *dev)
   2636{
   2637	if (!msi_ht_cap_enabled(dev))
   2638		quirk_disable_msi(dev);
   2639}
   2640DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
   2641			quirk_msi_ht_cap);
   2642
   2643/*
   2644 * The nVidia CK804 chipset may have 2 HT MSI mappings.  MSI is supported
   2645 * if the MSI capability is set in any of these mappings.
   2646 */
   2647static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
   2648{
   2649	struct pci_dev *pdev;
   2650
   2651	/*
   2652	 * Check HT MSI cap on this chipset and the root one.  A single one
   2653	 * having MSI is enough to be sure that MSI is supported.
   2654	 */
   2655	pdev = pci_get_slot(dev->bus, 0);
   2656	if (!pdev)
   2657		return;
   2658	if (!msi_ht_cap_enabled(pdev))
   2659		quirk_msi_ht_cap(dev);
   2660	pci_dev_put(pdev);
   2661}
   2662DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
   2663			quirk_nvidia_ck804_msi_ht_cap);
   2664
   2665/* Force enable MSI mapping capability on HT bridges */
   2666static void ht_enable_msi_mapping(struct pci_dev *dev)
   2667{
   2668	int pos, ttl = PCI_FIND_CAP_TTL;
   2669
   2670	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
   2671	while (pos && ttl--) {
   2672		u8 flags;
   2673
   2674		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
   2675					 &flags) == 0) {
   2676			pci_info(dev, "Enabling HT MSI Mapping\n");
   2677
   2678			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
   2679					      flags | HT_MSI_FLAGS_ENABLE);
   2680		}
   2681		pos = pci_find_next_ht_capability(dev, pos,
   2682						  HT_CAPTYPE_MSI_MAPPING);
   2683	}
   2684}
   2685DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
   2686			 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
   2687			 ht_enable_msi_mapping);
   2688DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
   2689			 ht_enable_msi_mapping);
   2690
   2691/*
   2692 * The P5N32-SLI motherboards from Asus have a problem with MSI
   2693 * for the MCP55 NIC. It is not yet determined whether the MSI problem
    2694 * also affects other devices. For now, turn off MSI for this device.
   2695 */
   2696static void nvenet_msi_disable(struct pci_dev *dev)
   2697{
   2698	const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
   2699
   2700	if (board_name &&
   2701	    (strstr(board_name, "P5N32-SLI PREMIUM") ||
   2702	     strstr(board_name, "P5N32-E SLI"))) {
   2703		pci_info(dev, "Disabling MSI for MCP55 NIC on P5N32-SLI\n");
   2704		dev->no_msi = 1;
   2705	}
   2706}
   2707DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
   2708			PCI_DEVICE_ID_NVIDIA_NVENET_15,
   2709			nvenet_msi_disable);
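
/*
 * For reference, the same board match could also be written as a
 * dmi_system_id table checked with dmi_check_system(); the table below is
 * only an illustrative sketch (the names are made up) and is not used by
 * the quirk above.
 */
static const struct dmi_system_id nvenet_msi_dmi_board_example[] __maybe_unused = {
	{
		.ident = "ASUS P5N32-SLI PREMIUM",
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "P5N32-SLI PREMIUM"),
		},
	},
	{ }	/* terminating entry */
};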
   2710
   2711/*
   2712 * PCIe spec r4.0 sec 7.7.1.2 and sec 7.7.2.2 say that if MSI/MSI-X is enabled,
   2713 * then the device can't use INTx interrupts. Tegra's PCIe root ports don't
    2714 * generate MSI interrupts for PME and AER events; instead, only INTx
    2715 * interrupts are generated. Although the root ports can generate MSIs for
    2716 * other events, the PCIe specification doesn't allow mixing INTx and
    2717 * MSI/MSI-X, so MSI must be disabled to keep the port service drivers from
    2718 * registering their ISRs for MSIs.
   2719 */
   2720static void pci_quirk_nvidia_tegra_disable_rp_msi(struct pci_dev *dev)
   2721{
   2722	dev->no_msi = 1;
   2723}
   2724DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad0,
   2725			      PCI_CLASS_BRIDGE_PCI, 8,
   2726			      pci_quirk_nvidia_tegra_disable_rp_msi);
   2727DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad1,
   2728			      PCI_CLASS_BRIDGE_PCI, 8,
   2729			      pci_quirk_nvidia_tegra_disable_rp_msi);
   2730DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad2,
   2731			      PCI_CLASS_BRIDGE_PCI, 8,
   2732			      pci_quirk_nvidia_tegra_disable_rp_msi);
   2733DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0,
   2734			      PCI_CLASS_BRIDGE_PCI, 8,
   2735			      pci_quirk_nvidia_tegra_disable_rp_msi);
   2736DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1,
   2737			      PCI_CLASS_BRIDGE_PCI, 8,
   2738			      pci_quirk_nvidia_tegra_disable_rp_msi);
   2739DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c,
   2740			      PCI_CLASS_BRIDGE_PCI, 8,
   2741			      pci_quirk_nvidia_tegra_disable_rp_msi);
   2742DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d,
   2743			      PCI_CLASS_BRIDGE_PCI, 8,
   2744			      pci_quirk_nvidia_tegra_disable_rp_msi);
   2745DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e12,
   2746			      PCI_CLASS_BRIDGE_PCI, 8,
   2747			      pci_quirk_nvidia_tegra_disable_rp_msi);
   2748DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e13,
   2749			      PCI_CLASS_BRIDGE_PCI, 8,
   2750			      pci_quirk_nvidia_tegra_disable_rp_msi);
   2751DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0fae,
   2752			      PCI_CLASS_BRIDGE_PCI, 8,
   2753			      pci_quirk_nvidia_tegra_disable_rp_msi);
   2754DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0faf,
   2755			      PCI_CLASS_BRIDGE_PCI, 8,
   2756			      pci_quirk_nvidia_tegra_disable_rp_msi);
   2757DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5,
   2758			      PCI_CLASS_BRIDGE_PCI, 8,
   2759			      pci_quirk_nvidia_tegra_disable_rp_msi);
   2760DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6,
   2761			      PCI_CLASS_BRIDGE_PCI, 8,
   2762			      pci_quirk_nvidia_tegra_disable_rp_msi);
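
/*
 * In the DECLARE_PCI_FIXUP_CLASS_EARLY() entries above, the trailing "8" is
 * the class shift: the fixup matches when (dev->class >> 8) equals
 * PCI_CLASS_BRIDGE_PCI, i.e. the programming-interface byte is ignored and
 * only base class and subclass must match.  A hypothetical quirk matching
 * the full 24-bit class value would instead pass a shift of 0, e.g.
 * DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, PCI_CLASS_BRIDGE_PCI << 8,
 * 0, hook).
 */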
   2763
   2764/*
   2765 * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing
   2766 * config register.  This register controls the routing of legacy
   2767 * interrupts from devices that route through the MCP55.  If this register
   2768 * is misprogrammed, interrupts are only sent to the BSP, unlike
    2769 * conventional systems where the IRQ is broadcast to all online CPUs.  If
    2770 * the register is left misprogrammed, kdump fails to boot, so make sure
    2771 * it is set correctly.
   2772 * Note that this is an undocumented register.
   2773 */
   2774static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
   2775{
   2776	u32 cfg;
   2777
   2778	if (!pci_find_capability(dev, PCI_CAP_ID_HT))
   2779		return;
   2780
   2781	pci_read_config_dword(dev, 0x74, &cfg);
   2782
   2783	if (cfg & ((1 << 2) | (1 << 15))) {
   2784		pr_info("Rewriting IRQ routing register on MCP55\n");
   2785		cfg &= ~((1 << 2) | (1 << 15));
   2786		pci_write_config_dword(dev, 0x74, cfg);
   2787	}
   2788}
   2789DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
   2790			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
   2791			nvbridge_check_legacy_irq_routing);
   2792DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
   2793			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
   2794			nvbridge_check_legacy_irq_routing);
   2795
   2796static int ht_check_msi_mapping(struct pci_dev *dev)
   2797{
   2798	int pos, ttl = PCI_FIND_CAP_TTL;
   2799	int found = 0;
   2800
    2801	/* Check whether this device has an HT MSI cap and whether it is enabled */
   2802	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
   2803	while (pos && ttl--) {
   2804		u8 flags;
   2805
   2806		if (found < 1)
   2807			found = 1;
   2808		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
   2809					 &flags) == 0) {
   2810			if (flags & HT_MSI_FLAGS_ENABLE) {
   2811				if (found < 2) {
   2812					found = 2;
   2813					break;
   2814				}
   2815			}
   2816		}
   2817		pos = pci_find_next_ht_capability(dev, pos,
   2818						  HT_CAPTYPE_MSI_MAPPING);
   2819	}
   2820
   2821	return found;
   2822}
   2823
   2824static int host_bridge_with_leaf(struct pci_dev *host_bridge)
   2825{
   2826	struct pci_dev *dev;
   2827	int pos;
   2828	int i, dev_no;
   2829	int found = 0;
   2830
   2831	dev_no = host_bridge->devfn >> 3;
   2832	for (i = dev_no + 1; i < 0x20; i++) {
   2833		dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
   2834		if (!dev)
   2835			continue;
   2836
   2837		/* found next host bridge? */
   2838		pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
   2839		if (pos != 0) {
   2840			pci_dev_put(dev);
   2841			break;
   2842		}
   2843
   2844		if (ht_check_msi_mapping(dev)) {
   2845			found = 1;
   2846			pci_dev_put(dev);
   2847			break;
   2848		}
   2849		pci_dev_put(dev);
   2850	}
   2851
   2852	return found;
   2853}
   2854
   2855#define PCI_HT_CAP_SLAVE_CTRL0     4    /* link control */
    2856#define PCI_HT_CAP_SLAVE_CTRL1     8    /* link control, second link */
   2857
   2858static int is_end_of_ht_chain(struct pci_dev *dev)
   2859{
   2860	int pos, ctrl_off;
   2861	int end = 0;
   2862	u16 flags, ctrl;
   2863
   2864	pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
   2865
   2866	if (!pos)
   2867		goto out;
   2868
   2869	pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
   2870
   2871	ctrl_off = ((flags >> 10) & 1) ?
   2872			PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
   2873	pci_read_config_word(dev, pos + ctrl_off, &ctrl);
   2874
   2875	if (ctrl & (1 << 6))
   2876		end = 1;
   2877
   2878out:
   2879	return end;
   2880}
   2881
   2882static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
   2883{
   2884	struct pci_dev *host_bridge;
   2885	int pos;
   2886	int i, dev_no;
   2887	int found = 0;
   2888
   2889	dev_no = dev->devfn >> 3;
   2890	for (i = dev_no; i >= 0; i--) {
   2891		host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
   2892		if (!host_bridge)
   2893			continue;
   2894
   2895		pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
   2896		if (pos != 0) {
   2897			found = 1;
   2898			break;
   2899		}
   2900		pci_dev_put(host_bridge);
   2901	}
   2902
   2903	if (!found)
   2904		return;
   2905
    2906	/* skip an end-of-chain host bridge that itself has a leaf device */
   2907	if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
   2908	    host_bridge_with_leaf(host_bridge))
   2909		goto out;
   2910
    2911	/* the host bridge already has the HT MSI mapping enabled */
   2912	if (msi_ht_cap_enabled(host_bridge))
   2913		goto out;
   2914
   2915	ht_enable_msi_mapping(dev);
   2916
   2917out:
   2918	pci_dev_put(host_bridge);
   2919}
   2920
   2921static void ht_disable_msi_mapping(struct pci_dev *dev)
   2922{
   2923	int pos, ttl = PCI_FIND_CAP_TTL;
   2924
   2925	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
   2926	while (pos && ttl--) {
   2927		u8 flags;
   2928
   2929		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
   2930					 &flags) == 0) {
   2931			pci_info(dev, "Disabling HT MSI Mapping\n");
   2932
   2933			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
   2934					      flags & ~HT_MSI_FLAGS_ENABLE);
   2935		}
   2936		pos = pci_find_next_ht_capability(dev, pos,
   2937						  HT_CAPTYPE_MSI_MAPPING);
   2938	}
   2939}
   2940
   2941static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
   2942{
   2943	struct pci_dev *host_bridge;
   2944	int pos;
   2945	int found;
   2946
   2947	if (!pci_msi_enabled())
   2948		return;
   2949
    2950	/* check whether this device has an HT MSI cap and whether it is enabled */
   2951	found = ht_check_msi_mapping(dev);
   2952
   2953	/* no HT MSI CAP */
   2954	if (found == 0)
   2955		return;
   2956
   2957	/*
   2958	 * HT MSI mapping should be disabled on devices that are below
   2959	 * a non-Hypertransport host bridge. Locate the host bridge...
   2960	 */
   2961	host_bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), 0,
   2962						  PCI_DEVFN(0, 0));
   2963	if (host_bridge == NULL) {
   2964		pci_warn(dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
   2965		return;
   2966	}
   2967
   2968	pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
   2969	if (pos != 0) {
    2970		/* Host bridge is an HT device */
   2971		if (found == 1) {
   2972			/* it is not enabled, try to enable it */
   2973			if (all)
   2974				ht_enable_msi_mapping(dev);
   2975			else
   2976				nv_ht_enable_msi_mapping(dev);
   2977		}
   2978		goto out;
   2979	}
   2980
   2981	/* HT MSI is not enabled */
   2982	if (found == 1)
   2983		goto out;
   2984
    2985	/* Host bridge is not an HT device; disable HT MSI mapping on this device */
   2986	ht_disable_msi_mapping(dev);
   2987
   2988out:
   2989	pci_dev_put(host_bridge);
   2990}
   2991
   2992static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
   2993{
   2994	return __nv_msi_ht_cap_quirk(dev, 1);
   2995}
   2996DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
   2997DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
   2998
   2999static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
   3000{
   3001	return __nv_msi_ht_cap_quirk(dev, 0);
   3002}
   3003DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
   3004DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
   3005
   3006static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
   3007{
   3008	dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
   3009}
   3010
   3011static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
   3012{
   3013	struct pci_dev *p;
   3014
   3015	/*
    3016	 * The SB700 MSI issue is fixed in hardware from revision A21 onward;
    3017	 * check the PCI revision ID of the SMBus controller to determine the
    3018	 * SB700 revision.
   3019	 */
   3020	p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
   3021			   NULL);
   3022	if (!p)
   3023		return;
   3024
   3025	if ((p->revision < 0x3B) && (p->revision >= 0x30))
   3026		dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
   3027	pci_dev_put(p);
   3028}
   3029
   3030static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
   3031{
   3032	/* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */
   3033	if (dev->revision < 0x18) {
   3034		pci_info(dev, "set MSI_INTX_DISABLE_BUG flag\n");
   3035		dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
   3036	}
   3037}
   3038DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
   3039			PCI_DEVICE_ID_TIGON3_5780,
   3040			quirk_msi_intx_disable_bug);
   3041DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
   3042			PCI_DEVICE_ID_TIGON3_5780S,
   3043			quirk_msi_intx_disable_bug);
   3044DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
   3045			PCI_DEVICE_ID_TIGON3_5714,
   3046			quirk_msi_intx_disable_bug);
   3047DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
   3048			PCI_DEVICE_ID_TIGON3_5714S,
   3049			quirk_msi_intx_disable_bug);
   3050DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
   3051			PCI_DEVICE_ID_TIGON3_5715,
   3052			quirk_msi_intx_disable_bug);
   3053DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
   3054			PCI_DEVICE_ID_TIGON3_5715S,
   3055			quirk_msi_intx_disable_bug);
   3056
   3057DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
   3058			quirk_msi_intx_disable_ati_bug);
   3059DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
   3060			quirk_msi_intx_disable_ati_bug);
   3061DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
   3062			quirk_msi_intx_disable_ati_bug);
   3063DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
   3064			quirk_msi_intx_disable_ati_bug);
   3065DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
   3066			quirk_msi_intx_disable_ati_bug);
   3067
   3068DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
   3069			quirk_msi_intx_disable_bug);
   3070DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
   3071			quirk_msi_intx_disable_bug);
   3072DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
   3073			quirk_msi_intx_disable_bug);
   3074
   3075DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
   3076			quirk_msi_intx_disable_bug);
   3077DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
   3078			quirk_msi_intx_disable_bug);
   3079DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
   3080			quirk_msi_intx_disable_bug);
   3081DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
   3082			quirk_msi_intx_disable_bug);
   3083DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
   3084			quirk_msi_intx_disable_bug);
   3085DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
   3086			quirk_msi_intx_disable_bug);
   3087DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
   3088			quirk_msi_intx_disable_qca_bug);
   3089DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
   3090			quirk_msi_intx_disable_qca_bug);
   3091DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
   3092			quirk_msi_intx_disable_qca_bug);
   3093DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
   3094			quirk_msi_intx_disable_qca_bug);
   3095DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
   3096			quirk_msi_intx_disable_qca_bug);
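
/*
 * PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG marks devices on which MSI delivery
 * stops working once the INTx Disable bit in the Command register is set.
 * The MSI core checks this flag and leaves INTx enabled while MSI is in
 * use instead of setting PCI_COMMAND_INTX_DISABLE as it normally would.
 */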
   3097
   3098/*
   3099 * Amazon's Annapurna Labs 1c36:0031 Root Ports don't support MSI-X, so it
   3100 * should be disabled on platforms where the device (mistakenly) advertises it.
   3101 *
   3102 * Notice that this quirk also disables MSI (which may work, but hasn't been
   3103 * tested), since currently there is no standard way to disable only MSI-X.
   3104 *
    3105 * The 0031 device ID is reused for other non-Root Port device types,
   3106 * therefore the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
   3107 */
   3108static void quirk_al_msi_disable(struct pci_dev *dev)
   3109{
   3110	dev->no_msi = 1;
   3111	pci_warn(dev, "Disabling MSI/MSI-X\n");
   3112}
   3113DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
   3114			      PCI_CLASS_BRIDGE_PCI, 8, quirk_al_msi_disable);
   3115#endif /* CONFIG_PCI_MSI */
   3116
   3117/*
   3118 * Allow manual resource allocation for PCI hotplug bridges via
   3119 * pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For some PCI-PCI
   3120 * hotplug bridges, like PLX 6254 (former HINT HB6), kernel fails to
   3121 * allocate resources when hotplug device is inserted and PCI bus is
   3122 * rescanned.
   3123 */
   3124static void quirk_hotplug_bridge(struct pci_dev *dev)
   3125{
   3126	dev->is_hotplug_bridge = 1;
   3127}
   3128DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
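
/*
 * On the kernel command line this takes the form, for example,
 * "pci=hpmemsize=128M,hpiosize=4K"; the values shown are arbitrary
 * examples, not recommendations.
 */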
   3129
   3130/*
   3131 * This is a quirk for the Ricoh MMC controller found as a part of some
   3132 * multifunction chips.
   3133 *
   3134 * This is very similar and based on the ricoh_mmc driver written by
   3135 * Philip Langdale. Thank you for these magic sequences.
   3136 *
   3137 * These chips implement the four main memory card controllers (SD, MMC,
   3138 * MS, xD) and one or both of CardBus or FireWire.
   3139 *
   3140 * It happens that they implement SD and MMC support as separate
   3141 * controllers (and PCI functions). The Linux SDHCI driver supports MMC
   3142 * cards but the chip detects MMC cards in hardware and directs them to the
   3143 * MMC controller - so the SDHCI driver never sees them.
   3144 *
   3145 * To get around this, we must disable the useless MMC controller.  At that
   3146 * point, the SDHCI controller will start seeing them.  It seems to be the
   3147 * case that the relevant PCI registers to deactivate the MMC controller
   3148 * live on PCI function 0, which might be the CardBus controller or the
    3149 * FireWire controller, depending on the particular chip in question.
   3150 *
   3151 * This has to be done early, because as soon as we disable the MMC controller
   3152 * other PCI functions shift up one level, e.g. function #2 becomes function
   3153 * #1, and this will confuse the PCI core.
   3154 */
   3155#ifdef CONFIG_MMC_RICOH_MMC
   3156static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
   3157{
   3158	u8 write_enable;
   3159	u8 write_target;
   3160	u8 disable;
   3161
   3162	/*
   3163	 * Disable via CardBus interface
   3164	 *
   3165	 * This must be done via function #0
   3166	 */
   3167	if (PCI_FUNC(dev->devfn))
   3168		return;
   3169
   3170	pci_read_config_byte(dev, 0xB7, &disable);
   3171	if (disable & 0x02)
   3172		return;
   3173
   3174	pci_read_config_byte(dev, 0x8E, &write_enable);
   3175	pci_write_config_byte(dev, 0x8E, 0xAA);
   3176	pci_read_config_byte(dev, 0x8D, &write_target);
   3177	pci_write_config_byte(dev, 0x8D, 0xB7);
   3178	pci_write_config_byte(dev, 0xB7, disable | 0x02);
   3179	pci_write_config_byte(dev, 0x8E, write_enable);
   3180	pci_write_config_byte(dev, 0x8D, write_target);
   3181
   3182	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n");
   3183	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
   3184}
   3185DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
   3186DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
   3187
   3188static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
   3189{
   3190	u8 write_enable;
   3191	u8 disable;
   3192
   3193	/*
   3194	 * Disable via FireWire interface
   3195	 *
   3196	 * This must be done via function #0
   3197	 */
   3198	if (PCI_FUNC(dev->devfn))
   3199		return;
   3200	/*
   3201	 * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
   3202	 * certain types of SD/MMC cards. Lowering the SD base clock
   3203	 * frequency from 200Mhz to 50Mhz fixes this issue.
    3204	 * frequency from 200MHz to 50MHz fixes this issue.
   3205	 * 0x150 - SD2.0 mode enable for changing base clock
   3206	 *	   frequency to 50Mhz
    3207	 *	   frequency to 50MHz
   3208	 * 0x32  - 50Mhz new clock frequency
    3209	 * 0x32  - 50MHz new clock frequency
   3210	 * 0xfc  - key register for 0xe1
   3211	 */
   3212	if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
   3213	    dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
   3214		pci_write_config_byte(dev, 0xf9, 0xfc);
   3215		pci_write_config_byte(dev, 0x150, 0x10);
   3216		pci_write_config_byte(dev, 0xf9, 0x00);
   3217		pci_write_config_byte(dev, 0xfc, 0x01);
   3218		pci_write_config_byte(dev, 0xe1, 0x32);
   3219		pci_write_config_byte(dev, 0xfc, 0x00);
   3220
    3221		pci_notice(dev, "MMC controller base frequency changed to 50MHz.\n");
   3222	}
   3223
   3224	pci_read_config_byte(dev, 0xCB, &disable);
   3225
   3226	if (disable & 0x02)
   3227		return;
   3228
   3229	pci_read_config_byte(dev, 0xCA, &write_enable);
   3230	pci_write_config_byte(dev, 0xCA, 0x57);
   3231	pci_write_config_byte(dev, 0xCB, disable | 0x02);
   3232	pci_write_config_byte(dev, 0xCA, write_enable);
   3233
   3234	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n");
   3235	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
   3236
   3237}
   3238DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
   3239DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
   3240DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
   3241DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
   3242DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
   3243DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
   3244#endif /*CONFIG_MMC_RICOH_MMC*/
   3245
   3246#ifdef CONFIG_DMAR_TABLE
   3247#define VTUNCERRMSK_REG	0x1ac
   3248#define VTD_MSK_SPEC_ERRORS	(1 << 31)
   3249/*
   3250 * This is a quirk for masking VT-d spec-defined errors to platform error
   3251 * handling logic. Without this, platforms using Intel 7500, 5500 chipsets
   3252 * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based
   3253 * on the RAS config settings of the platform) when a VT-d fault happens.
   3254 * The resulting SMI caused the system to hang.
   3255 *
   3256 * VT-d spec-related errors are already handled by the VT-d OS code, so no
   3257 * need to report the same error through other channels.
   3258 */
   3259static void vtd_mask_spec_errors(struct pci_dev *dev)
   3260{
   3261	u32 word;
   3262
   3263	pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
   3264	pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
   3265}
   3266DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
   3267DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
   3268#endif
   3269
   3270static void fixup_ti816x_class(struct pci_dev *dev)
   3271{
   3272	u32 class = dev->class;
   3273
   3274	/* TI 816x devices do not have class code set when in PCIe boot mode */
   3275	dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
   3276	pci_info(dev, "PCI class overridden (%#08x -> %#08x)\n",
   3277		 class, dev->class);
   3278}
   3279DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
   3280			      PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
   3281
   3282/*
    3283 * Some PCIe devices do not work reliably at the maximum payload size
    3284 * they claim to support.
   3285 */
   3286static void fixup_mpss_256(struct pci_dev *dev)
   3287{
   3288	dev->pcie_mpss = 1; /* 256 bytes */
   3289}
   3290DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
   3291			PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
   3292DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
   3293			PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
   3294DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE,
   3295			PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
   3296DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256);
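
/*
 * pcie_mpss holds the Max_Payload_Size_Supported encoding from the PCIe
 * Device Capabilities register; the payload size in bytes is
 * 128 << encoding, so forcing the value 1 above caps these devices at
 * 128 << 1 = 256 bytes regardless of what they advertise.
 */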
   3297
   3298/*
   3299 * Intel 5000 and 5100 Memory controllers have an erratum with read completion
   3300 * coalescing (which is enabled by default on some BIOSes) and MPS of 256B.
   3301 * Since there is no way of knowing what the PCIe MPS on each fabric will be
   3302 * until all of the devices are discovered and buses walked, read completion
   3303 * coalescing must be disabled.  Unfortunately, it cannot be re-enabled because
   3304 * it is possible to hotplug a device with MPS of 256B.
   3305 */
   3306static void quirk_intel_mc_errata(struct pci_dev *dev)
   3307{
   3308	int err;
   3309	u16 rcc;
   3310
   3311	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
   3312	    pcie_bus_config == PCIE_BUS_DEFAULT)
   3313		return;
   3314
   3315	/*
   3316	 * Intel erratum specifies bits to change but does not say what
   3317	 * they are.  Keeping them magical until such time as the registers
   3318	 * and values can be explained.
   3319	 */
   3320	err = pci_read_config_word(dev, 0x48, &rcc);
   3321	if (err) {
   3322		pci_err(dev, "Error attempting to read the read completion coalescing register\n");
   3323		return;
   3324	}
   3325
   3326	if (!(rcc & (1 << 10)))
   3327		return;
   3328
   3329	rcc &= ~(1 << 10);
   3330
   3331	err = pci_write_config_word(dev, 0x48, rcc);
   3332	if (err) {
   3333		pci_err(dev, "Error attempting to write the read completion coalescing register\n");
   3334		return;
   3335	}
   3336
   3337	pr_info_once("Read completion coalescing disabled due to hardware erratum relating to 256B MPS\n");
   3338}
   3339/* Intel 5000 series memory controllers and ports 2-7 */
   3340DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
   3341DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
   3342DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
   3343DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
   3344DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
   3345DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
   3346DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
   3347DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
   3348DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
   3349DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
   3350DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
   3351DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
   3352DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
   3353DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);
   3354/* Intel 5100 series memory controllers and ports 2-7 */
   3355DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
   3356DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
   3357DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
   3358DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
   3359DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
   3360DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
   3361DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
   3362DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
   3363DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
   3364DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
   3365DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
   3366
   3367/*
   3368 * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum.
   3369 * To work around this, query the size it should be configured to by the
   3370 * device and modify the resource end to correspond to this new size.
   3371 */
   3372static void quirk_intel_ntb(struct pci_dev *dev)
   3373{
   3374	int rc;
   3375	u8 val;
   3376
   3377	rc = pci_read_config_byte(dev, 0x00D0, &val);
   3378	if (rc)
   3379		return;
   3380
   3381	dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;
   3382
   3383	rc = pci_read_config_byte(dev, 0x00D1, &val);
   3384	if (rc)
   3385		return;
   3386
   3387	dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
   3388}
   3389DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
   3390DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
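
/*
 * The byte read from config space is the log2 of the required BAR size, so
 * the resource is extended to 1 << val bytes; e.g. a value of 17 would
 * describe a 128 KiB aperture (1 << 17 = 131072 bytes).
 */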
   3391
   3392/*
   3393 * Some BIOS implementations leave the Intel GPU interrupts enabled, even
   3394 * though no one is handling them (e.g., if the i915 driver is never
   3395 * loaded).  Additionally the interrupt destination is not set up properly
   3396 * and the interrupt ends up -somewhere-.
   3397 *
   3398 * These spurious interrupts are "sticky" and the kernel disables the
   3399 * (shared) interrupt line after 100,000+ generated interrupts.
   3400 *
   3401 * Fix it by disabling the still enabled interrupts.  This resolves crashes
   3402 * often seen on monitor unplug.
   3403 */
   3404#define I915_DEIER_REG 0x4400c
   3405static void disable_igfx_irq(struct pci_dev *dev)
   3406{
   3407	void __iomem *regs = pci_iomap(dev, 0, 0);
   3408	if (regs == NULL) {
   3409		pci_warn(dev, "igfx quirk: Can't iomap PCI device\n");
   3410		return;
   3411	}
   3412
   3413	/* Check if any interrupt line is still enabled */
   3414	if (readl(regs + I915_DEIER_REG) != 0) {
   3415		pci_warn(dev, "BIOS left Intel GPU interrupts enabled; disabling\n");
   3416
   3417		writel(0, regs + I915_DEIER_REG);
   3418	}
   3419
   3420	pci_iounmap(dev, regs);
   3421}
   3422DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
   3423DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
   3424DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
   3425DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
   3426DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
   3427DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
   3428DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
   3429
   3430/*
    3431 * Certain PCI devices on Intel chips do not need the 10 ms delay before
    3432 * entering D3hot.
   3433 */
   3434static void quirk_remove_d3hot_delay(struct pci_dev *dev)
   3435{
   3436	dev->d3hot_delay = 0;
   3437}
   3438/* C600 Series devices do not need 10ms d3hot_delay */
   3439DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3hot_delay);
   3440DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3hot_delay);
   3441DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3hot_delay);
   3442/* Lynxpoint-H PCH devices do not need 10ms d3hot_delay */
   3443DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3hot_delay);
   3444DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3hot_delay);
   3445DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3hot_delay);
   3446DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3hot_delay);
   3447DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3hot_delay);
   3448DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3hot_delay);
   3449DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3hot_delay);
   3450DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3hot_delay);
   3451DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3hot_delay);
   3452DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3hot_delay);
   3453DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3hot_delay);
   3454/* Intel Cherrytrail devices do not need 10ms d3hot_delay */
   3455DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3hot_delay);
   3456DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3hot_delay);
   3457DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3hot_delay);
   3458DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3hot_delay);
   3459DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3hot_delay);
   3460DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3hot_delay);
   3461DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3hot_delay);
   3462DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3hot_delay);
   3463DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3hot_delay);
   3464
   3465/*
   3466 * Some devices may pass our check in pci_intx_mask_supported() if
    3467 * PCI_COMMAND_INTX_DISABLE appears to work, even though they do not
    3468 * properly support this feature.
   3469 */
   3470static void quirk_broken_intx_masking(struct pci_dev *dev)
   3471{
   3472	dev->broken_intx_masking = 1;
   3473}
   3474DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
   3475			quirk_broken_intx_masking);
   3476DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
   3477			quirk_broken_intx_masking);
   3478DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004, /* Ceton InfiniTV4 */
   3479			quirk_broken_intx_masking);
   3480
   3481/*
   3482 * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
   3483 * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC
   3484 *
   3485 * RTL8110SC - Fails under PCI device assignment using DisINTx masking.
   3486 */
   3487DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
   3488			quirk_broken_intx_masking);
   3489
   3490/*
    3491 * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking;
    3492 * DisINTx can be set, but the interrupt status bit is non-functional.
   3493 */
   3494DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking);
   3495DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking);
   3496DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking);
   3497DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking);
   3498DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking);
   3499DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking);
   3500DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking);
   3501DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking);
   3502DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking);
   3503DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking);
   3504DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking);
   3505DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking);
   3506DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking);
   3507DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking);
   3508DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking);
   3509DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking);
   3510
   3511static u16 mellanox_broken_intx_devs[] = {
   3512	PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
   3513	PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
   3514	PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
   3515	PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
   3516	PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
   3517	PCI_DEVICE_ID_MELLANOX_HERMON_EN,
   3518	PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
   3519	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
   3520	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
   3521	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
   3522	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
   3523	PCI_DEVICE_ID_MELLANOX_CONNECTX2,
   3524	PCI_DEVICE_ID_MELLANOX_CONNECTX3,
   3525	PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
   3526};
   3527
   3528#define CONNECTX_4_CURR_MAX_MINOR 99
   3529#define CONNECTX_4_INTX_SUPPORT_MINOR 14
   3530
   3531/*
   3532 * Check ConnectX-4/LX FW version to see if it supports legacy interrupts.
   3533 * If so, don't mark it as broken.
   3534 * FW minor > 99 means older FW version format and no INTx masking support.
   3535 * FW minor < 14 means new FW version format and no INTx masking support.
   3536 */
   3537static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
   3538{
   3539	__be32 __iomem *fw_ver;
   3540	u16 fw_major;
   3541	u16 fw_minor;
   3542	u16 fw_subminor;
   3543	u32 fw_maj_min;
   3544	u32 fw_sub_min;
   3545	int i;
   3546
   3547	for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
   3548		if (pdev->device == mellanox_broken_intx_devs[i]) {
   3549			pdev->broken_intx_masking = 1;
   3550			return;
   3551		}
   3552	}
   3553
   3554	/*
   3555	 * Getting here means Connect-IB cards and up. Connect-IB has no INTx
    3556	 * support, so it needn't be checked further.
   3557	 */
   3558	if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
   3559		return;
   3560
   3561	if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
   3562	    pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
   3563		return;
   3564
   3565	/* For ConnectX-4 and ConnectX-4LX, need to check FW support */
   3566	if (pci_enable_device_mem(pdev)) {
   3567		pci_warn(pdev, "Can't enable device memory\n");
   3568		return;
   3569	}
   3570
   3571	fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
   3572	if (!fw_ver) {
   3573		pci_warn(pdev, "Can't map ConnectX-4 initialization segment\n");
   3574		goto out;
   3575	}
   3576
   3577	/* Reading from resource space should be 32b aligned */
   3578	fw_maj_min = ioread32be(fw_ver);
   3579	fw_sub_min = ioread32be(fw_ver + 1);
   3580	fw_major = fw_maj_min & 0xffff;
   3581	fw_minor = fw_maj_min >> 16;
   3582	fw_subminor = fw_sub_min & 0xffff;
   3583	if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
   3584	    fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
   3585		pci_warn(pdev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
   3586			 fw_major, fw_minor, fw_subminor, pdev->device ==
   3587			 PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
   3588		pdev->broken_intx_masking = 1;
   3589	}
   3590
   3591	iounmap(fw_ver);
   3592
   3593out:
   3594	pci_disable_device(pdev);
   3595}
   3596DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
   3597			mellanox_check_broken_intx_masking);
   3598
   3599static void quirk_no_bus_reset(struct pci_dev *dev)
   3600{
   3601	dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
   3602}
   3603
   3604/*
   3605 * Some NVIDIA GPU devices do not work with bus reset, SBR needs to be
   3606 * prevented for those affected devices.
   3607 */
   3608static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
   3609{
   3610	if ((dev->device & 0xffc0) == 0x2340)
   3611		quirk_no_bus_reset(dev);
   3612}
   3613DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
   3614			 quirk_nvidia_no_bus_reset);
   3615
   3616/*
    3617 * Some Atheros AR9xxx and QCA988x chips do not recover after a bus reset.
   3618 * The device will throw a Link Down error on AER-capable systems and
   3619 * regardless of AER, config space of the device is never accessible again
   3620 * and typically causes the system to hang or reset when access is attempted.
   3621 * https://lore.kernel.org/r/20140923210318.498dacbd@dualc.maya.org/
   3622 */
   3623DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
   3624DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
   3625DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
   3626DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
   3627DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
   3628DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003e, quirk_no_bus_reset);
   3629
   3630/*
    3631 * Root ports on some Cavium CN8xxx chips do not successfully complete a bus
   3632 * reset when used with certain child devices.  After the reset, config
   3633 * accesses to the child may fail.
   3634 */
   3635DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
   3636
   3637/*
   3638 * Some TI KeyStone C667X devices do not support bus/hot reset.  The PCIESS
   3639 * automatically disables LTSSM when Secondary Bus Reset is received and
   3640 * the device stops working.  Prevent bus reset for these devices.  With
   3641 * this change, the device can be assigned to VMs with VFIO, but it will
   3642 * leak state between VMs.  Reference
   3643 * https://e2e.ti.com/support/processors/f/791/t/954382
   3644 */
   3645DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
   3646
   3647static void quirk_no_pm_reset(struct pci_dev *dev)
   3648{
   3649	/*
   3650	 * We can't do a bus reset on root bus devices, but an ineffective
   3651	 * PM reset may be better than nothing.
   3652	 */
   3653	if (!pci_is_root_bus(dev->bus))
   3654		dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
   3655}
   3656
   3657/*
   3658 * Some AMD/ATI GPUS (HD8570 - Oland) report that a D3hot->D0 transition
   3659 * causes a reset (i.e., they advertise NoSoftRst-).  This transition seems
   3660 * to have no effect on the device: it retains the framebuffer contents and
   3661 * monitor sync.  Advertising this support makes other layers, like VFIO,
   3662 * assume pci_reset_function() is viable for this device.  Mark it as
   3663 * unavailable to skip it when testing reset methods.
   3664 */
   3665DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
   3666			       PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
   3667
   3668/*
   3669 * Thunderbolt controllers with broken MSI hotplug signaling:
   3670 * Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part
   3671 * of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge).
   3672 */
   3673static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
   3674{
   3675	if (pdev->is_hotplug_bridge &&
   3676	    (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
   3677	     pdev->revision <= 1))
   3678		pdev->no_msi = 1;
   3679}
   3680DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
   3681			quirk_thunderbolt_hotplug_msi);
   3682DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
   3683			quirk_thunderbolt_hotplug_msi);
   3684DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
   3685			quirk_thunderbolt_hotplug_msi);
   3686DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
   3687			quirk_thunderbolt_hotplug_msi);
   3688DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
   3689			quirk_thunderbolt_hotplug_msi);
   3690
   3691#ifdef CONFIG_ACPI
   3692/*
   3693 * Apple: Shutdown Cactus Ridge Thunderbolt controller.
   3694 *
   3695 * On Apple hardware the Cactus Ridge Thunderbolt controller needs to be
   3696 * shutdown before suspend. Otherwise the native host interface (NHI) will not
   3697 * be present after resume if a device was plugged in before suspend.
   3698 *
   3699 * The Thunderbolt controller consists of a PCIe switch with downstream
   3700 * bridges leading to the NHI and to the tunnel PCI bridges.
   3701 *
   3702 * This quirk cuts power to the whole chip. Therefore we have to apply it
   3703 * during suspend_noirq of the upstream bridge.
   3704 *
   3705 * Power is automagically restored before resume. No action is needed.
   3706 */
   3707static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
   3708{
   3709	acpi_handle bridge, SXIO, SXFP, SXLV;
   3710
   3711	if (!x86_apple_machine)
   3712		return;
   3713	if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
   3714		return;
   3715
   3716	/*
    3717	 * SXIO/SXFP/SXLV turns off power to the Thunderbolt controller.
   3718	 * We don't know how to turn it back on again, but firmware does,
    3719	 * so we can only use SXIO/SXFP/SXLV if we're suspending via
   3720	 * firmware.
   3721	 */
   3722	if (!pm_suspend_via_firmware())
   3723		return;
   3724
   3725	bridge = ACPI_HANDLE(&dev->dev);
   3726	if (!bridge)
   3727		return;
   3728
   3729	/*
   3730	 * SXIO and SXLV are present only on machines requiring this quirk.
   3731	 * Thunderbolt bridges in external devices might have the same
   3732	 * device ID as those on the host, but they will not have the
   3733	 * associated ACPI methods. This implicitly checks that we are at
   3734	 * the right bridge.
   3735	 */
   3736	if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
   3737	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
   3738	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
   3739		return;
   3740	pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n");
   3741
   3742	/* magic sequence */
   3743	acpi_execute_simple_method(SXIO, NULL, 1);
   3744	acpi_execute_simple_method(SXFP, NULL, 0);
   3745	msleep(300);
   3746	acpi_execute_simple_method(SXLV, NULL, 0);
   3747	acpi_execute_simple_method(SXIO, NULL, 0);
   3748	acpi_execute_simple_method(SXLV, NULL, 0);
   3749}
   3750DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
   3751			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
   3752			       quirk_apple_poweroff_thunderbolt);
   3753#endif
   3754
   3755/*
   3756 * Following are device-specific reset methods which can be used to
   3757 * reset a single function if other methods (e.g. FLR, PM D0->D3) are
   3758 * not available.
   3759 */
   3760static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, bool probe)
   3761{
   3762	/*
   3763	 * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
   3764	 *
   3765	 * The 82599 supports FLR on VFs, but FLR support is reported only
   3766	 * in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5).
   3767	 * Thus we must call pcie_flr() directly without first checking if it is
   3768	 * supported.
   3769	 */
   3770	if (!probe)
   3771		pcie_flr(dev);
   3772	return 0;
   3773}
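
/*
 * All of these methods share one calling convention: with "probe" true they
 * only report whether the reset applies to the device (0 if it does, -ENOTTY
 * if it does not) without touching any state; with "probe" false they
 * perform the reset.  A minimal sketch for a hypothetical device (not an
 * in-tree quirk) looks like this:
 */
static int __maybe_unused reset_example_dev(struct pci_dev *dev, bool probe)
{
	if (probe)
		return 0;	/* a reset is possible; nothing done yet */

	pcie_flr(dev);		/* the actual Function Level Reset */
	return 0;
}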
   3774
   3775#define SOUTH_CHICKEN2		0xc2004
   3776#define PCH_PP_STATUS		0xc7200
   3777#define PCH_PP_CONTROL		0xc7204
   3778#define MSG_CTL			0x45010
   3779#define NSDE_PWR_STATE		0xd0100
   3780#define IGD_OPERATION_TIMEOUT	10000     /* set timeout 10 seconds */
   3781
   3782static int reset_ivb_igd(struct pci_dev *dev, bool probe)
   3783{
   3784	void __iomem *mmio_base;
   3785	unsigned long timeout;
   3786	u32 val;
   3787
   3788	if (probe)
   3789		return 0;
   3790
   3791	mmio_base = pci_iomap(dev, 0, 0);
   3792	if (!mmio_base)
   3793		return -ENOMEM;
   3794
   3795	iowrite32(0x00000002, mmio_base + MSG_CTL);
   3796
   3797	/*
   3798	 * Clobbering SOUTH_CHICKEN2 register is fine only if the next
    3799	 * driver loaded sets the right bits. However, this is a reset and
   3800	 * the bits have been set by i915 previously, so we clobber
   3801	 * SOUTH_CHICKEN2 register directly here.
   3802	 */
   3803	iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);
   3804
   3805	val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
   3806	iowrite32(val, mmio_base + PCH_PP_CONTROL);
   3807
   3808	timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
   3809	do {
   3810		val = ioread32(mmio_base + PCH_PP_STATUS);
   3811		if ((val & 0xb0000000) == 0)
   3812			goto reset_complete;
   3813		msleep(10);
   3814	} while (time_before(jiffies, timeout));
   3815	pci_warn(dev, "timeout during reset\n");
   3816
   3817reset_complete:
   3818	iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);
   3819
   3820	pci_iounmap(dev, mmio_base);
   3821	return 0;
   3822}
   3823
   3824/* Device-specific reset method for Chelsio T4-based adapters */
   3825static int reset_chelsio_generic_dev(struct pci_dev *dev, bool probe)
   3826{
   3827	u16 old_command;
   3828	u16 msix_flags;
   3829
   3830	/*
   3831	 * If this isn't a Chelsio T4-based device, return -ENOTTY indicating
   3832	 * that we have no device-specific reset method.
   3833	 */
   3834	if ((dev->device & 0xf000) != 0x4000)
   3835		return -ENOTTY;
   3836
   3837	/*
   3838	 * If this is the "probe" phase, return 0 indicating that we can
   3839	 * reset this device.
   3840	 */
   3841	if (probe)
   3842		return 0;
   3843
   3844	/*
   3845	 * T4 can wedge if there are DMAs in flight within the chip and Bus
    3846	 * Master has been disabled.  We need to keep it enabled until the Function
   3847	 * Level Reset completes.  (BUS_MASTER is disabled in
   3848	 * pci_reset_function()).
   3849	 */
   3850	pci_read_config_word(dev, PCI_COMMAND, &old_command);
   3851	pci_write_config_word(dev, PCI_COMMAND,
   3852			      old_command | PCI_COMMAND_MASTER);
   3853
   3854	/*
   3855	 * Perform the actual device function reset, saving and restoring
   3856	 * configuration information around the reset.
   3857	 */
   3858	pci_save_state(dev);
   3859
   3860	/*
   3861	 * T4 also suffers a Head-Of-Line blocking problem if MSI-X interrupts
   3862	 * are disabled when an MSI-X interrupt message needs to be delivered.
   3863	 * So we briefly re-enable MSI-X interrupts for the duration of the
   3864	 * FLR.  The pci_restore_state() below will restore the original
   3865	 * MSI-X state.
   3866	 */
   3867	pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
   3868	if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
   3869		pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
   3870				      msix_flags |
   3871				      PCI_MSIX_FLAGS_ENABLE |
   3872				      PCI_MSIX_FLAGS_MASKALL);
   3873
   3874	pcie_flr(dev);
   3875
   3876	/*
   3877	 * Restore the configuration information (BAR values, etc.) including
   3878	 * the original PCI Configuration Space Command word, and return
   3879	 * success.
   3880	 */
   3881	pci_restore_state(dev);
   3882	pci_write_config_word(dev, PCI_COMMAND, old_command);
   3883	return 0;
   3884}
   3885
   3886#define PCI_DEVICE_ID_INTEL_82599_SFP_VF   0x10ed
   3887#define PCI_DEVICE_ID_INTEL_IVB_M_VGA      0x0156
   3888#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA     0x0166
   3889
   3890/*
   3891 * The Samsung SM961/PM961 controller can sometimes enter a fatal state after
   3892 * FLR where config space reads from the device return -1.  We seem to be
   3893 * able to avoid this condition if we disable the NVMe controller prior to
   3894 * FLR.  This quirk is generic for any NVMe class device requiring similar
   3895 * assistance to quiesce the device prior to FLR.
   3896 *
   3897 * NVMe specification: https://nvmexpress.org/resources/specifications/
   3898 * Revision 1.0e:
   3899 *    Chapter 2: Required and optional PCI config registers
   3900 *    Chapter 3: NVMe control registers
   3901 *    Chapter 7.3: Reset behavior
   3902 */
   3903static int nvme_disable_and_flr(struct pci_dev *dev, bool probe)
   3904{
   3905	void __iomem *bar;
   3906	u16 cmd;
   3907	u32 cfg;
   3908
   3909	if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
   3910	    pcie_reset_flr(dev, PCI_RESET_PROBE) || !pci_resource_start(dev, 0))
   3911		return -ENOTTY;
   3912
   3913	if (probe)
   3914		return 0;
   3915
   3916	bar = pci_iomap(dev, 0, NVME_REG_CC + sizeof(cfg));
   3917	if (!bar)
   3918		return -ENOTTY;
   3919
   3920	pci_read_config_word(dev, PCI_COMMAND, &cmd);
   3921	pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);
   3922
   3923	cfg = readl(bar + NVME_REG_CC);
   3924
   3925	/* Disable controller if enabled */
   3926	if (cfg & NVME_CC_ENABLE) {
   3927		u32 cap = readl(bar + NVME_REG_CAP);
   3928		unsigned long timeout;
   3929
   3930		/*
   3931		 * Per nvme_disable_ctrl() skip shutdown notification as it
   3932		 * could complete commands to the admin queue.  We only intend
   3933		 * to quiesce the device before reset.
   3934		 */
   3935		cfg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
   3936
   3937		writel(cfg, bar + NVME_REG_CC);
   3938
   3939		/*
   3940		 * Some controllers require an additional delay here, see
   3941		 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY.  None of those are yet
   3942		 * supported by this quirk.
   3943		 */
   3944
   3945		/* Cap register provides max timeout in 500ms increments */
   3946		timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
   3947
   3948		for (;;) {
   3949			u32 status = readl(bar + NVME_REG_CSTS);
   3950
   3951			/* Ready status becomes zero on disable complete */
   3952			if (!(status & NVME_CSTS_RDY))
   3953				break;
   3954
   3955			msleep(100);
   3956
   3957			if (time_after(jiffies, timeout)) {
   3958				pci_warn(dev, "Timeout waiting for NVMe ready status to clear after disable\n");
   3959				break;
   3960			}
   3961		}
   3962	}
   3963
   3964	pci_iounmap(dev, bar);
   3965
   3966	pcie_flr(dev);
   3967
   3968	return 0;
   3969}
   3970
   3971/*
   3972 * Intel DC P3700 NVMe controller will timeout waiting for ready status
   3973 * to change after NVMe enable if the driver starts interacting with the
   3974 * device too soon after FLR.  A 250ms delay after FLR has heuristically
   3975 * proven to produce reliably working results for device assignment cases.
   3976 */
   3977static int delay_250ms_after_flr(struct pci_dev *dev, bool probe)
   3978{
   3979	if (probe)
   3980		return pcie_reset_flr(dev, PCI_RESET_PROBE);
   3981
   3982	pcie_reset_flr(dev, PCI_RESET_DO_RESET);
   3983
   3984	msleep(250);
   3985
   3986	return 0;
   3987}
   3988
   3989#define PCI_DEVICE_ID_HINIC_VF      0x375E
   3990#define HINIC_VF_FLR_TYPE           0x1000
   3991#define HINIC_VF_FLR_CAP_BIT        (1UL << 30)
   3992#define HINIC_VF_OP                 0xE80
   3993#define HINIC_VF_FLR_PROC_BIT       (1UL << 18)
   3994#define HINIC_OPERATION_TIMEOUT     15000	/* 15 seconds */
   3995
   3996/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
   3997static int reset_hinic_vf_dev(struct pci_dev *pdev, bool probe)
   3998{
   3999	unsigned long timeout;
   4000	void __iomem *bar;
   4001	u32 val;
   4002
   4003	if (probe)
   4004		return 0;
   4005
   4006	bar = pci_iomap(pdev, 0, 0);
   4007	if (!bar)
   4008		return -ENOTTY;
   4009
   4010	/* Get and check firmware capabilities */
   4011	val = ioread32be(bar + HINIC_VF_FLR_TYPE);
   4012	if (!(val & HINIC_VF_FLR_CAP_BIT)) {
   4013		pci_iounmap(pdev, bar);
   4014		return -ENOTTY;
   4015	}
   4016
   4017	/* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
   4018	val = ioread32be(bar + HINIC_VF_OP);
   4019	val = val | HINIC_VF_FLR_PROC_BIT;
   4020	iowrite32be(val, bar + HINIC_VF_OP);
   4021
   4022	pcie_flr(pdev);
   4023
   4024	/*
   4025	 * The device must recapture its Bus and Device Numbers after FLR
    4026	 * in order to generate Completions.  Issue a config write to let the
   4027	 * device capture this information.
   4028	 */
   4029	pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
   4030
   4031	/* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
   4032	timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
   4033	do {
   4034		val = ioread32be(bar + HINIC_VF_OP);
   4035		if (!(val & HINIC_VF_FLR_PROC_BIT))
   4036			goto reset_complete;
   4037		msleep(20);
   4038	} while (time_before(jiffies, timeout));
   4039
   4040	val = ioread32be(bar + HINIC_VF_OP);
   4041	if (!(val & HINIC_VF_FLR_PROC_BIT))
   4042		goto reset_complete;
   4043
   4044	pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
   4045
   4046reset_complete:
   4047	pci_iounmap(pdev, bar);
   4048
   4049	return 0;
   4050}
   4051
   4052static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
   4053	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
   4054		 reset_intel_82599_sfp_virtfn },
   4055	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
   4056		reset_ivb_igd },
   4057	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
   4058		reset_ivb_igd },
   4059	{ PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
   4060	{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
   4061	{ PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr },
   4062	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
   4063		reset_chelsio_generic_dev },
   4064	{ PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
   4065		reset_hinic_vf_dev },
   4066	{ 0 }
   4067};
   4068
   4069/*
   4070 * These device-specific reset methods are here rather than in a driver
   4071 * because when a host assigns a device to a guest VM, the host may need
   4072 * to reset the device but probably doesn't have a driver for it.
   4073 */
   4074int pci_dev_specific_reset(struct pci_dev *dev, bool probe)
   4075{
   4076	const struct pci_dev_reset_methods *i;
   4077
   4078	for (i = pci_dev_reset_methods; i->reset; i++) {
   4079		if ((i->vendor == dev->vendor ||
   4080		     i->vendor == (u16)PCI_ANY_ID) &&
   4081		    (i->device == dev->device ||
   4082		     i->device == (u16)PCI_ANY_ID))
   4083			return i->reset(dev, probe);
   4084	}
   4085
   4086	return -ENOTTY;
   4087}
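
        /*
         * Rough usage sketch (illustration only): a caller probes for a
         * device-specific method first and only then performs the reset:
         *
         *	if (pci_dev_specific_reset(dev, true) == 0)
         *		rc = pci_dev_specific_reset(dev, false);
         *
         * In the PCI core this is just one of several reset methods tried in
         * turn; -ENOTTY means "no quirk for this device, try the next
         * method" (e.g. FLR, PM reset, secondary bus reset).
         */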
   4088
   4089static void quirk_dma_func0_alias(struct pci_dev *dev)
   4090{
   4091	if (PCI_FUNC(dev->devfn) != 0)
   4092		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
   4093}
   4094
   4095/*
   4096 * https://bugzilla.redhat.com/show_bug.cgi?id=605888
   4097 *
   4098 * Some Ricoh devices use function 0 as the PCIe requester ID for DMA.
   4099 */
   4100DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
   4101DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
   4102
   4103static void quirk_dma_func1_alias(struct pci_dev *dev)
   4104{
   4105	if (PCI_FUNC(dev->devfn) != 1)
   4106		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
   4107}
   4108
   4109/*
   4110 * Marvell 88SE9123 uses function 1 as the requester ID for DMA.  In some
   4111 * SKUs function 1 is present and is a legacy IDE controller, in other
   4112 * SKUs this function is not present, making this a ghost requester.
   4113 * https://bugzilla.kernel.org/show_bug.cgi?id=42679
   4114 */
   4115DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
   4116			 quirk_dma_func1_alias);
   4117DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
   4118			 quirk_dma_func1_alias);
   4119/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c136 */
   4120DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9125,
   4121			 quirk_dma_func1_alias);
   4122DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
   4123			 quirk_dma_func1_alias);
   4124/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
   4125DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
   4126			 quirk_dma_func1_alias);
   4127DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
   4128			 quirk_dma_func1_alias);
   4129/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
   4130DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
   4131			 quirk_dma_func1_alias);
   4132/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c59 */
   4133DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
   4134			 quirk_dma_func1_alias);
   4135/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c78 */
   4136DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
   4137			 quirk_dma_func1_alias);
   4138/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c134 */
   4139DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
   4140			 quirk_dma_func1_alias);
   4141/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
   4142DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
   4143			 quirk_dma_func1_alias);
   4144/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c135 */
   4145DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215,
   4146			 quirk_dma_func1_alias);
   4147/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
   4148DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
   4149			 quirk_dma_func1_alias);
   4150/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
   4151DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
   4152			 quirk_dma_func1_alias);
   4153DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
   4154			 quirk_dma_func1_alias);
   4155DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
   4156			 quirk_dma_func1_alias);
   4157/* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
   4158DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
   4159			 PCI_DEVICE_ID_JMICRON_JMB388_ESD,
   4160			 quirk_dma_func1_alias);
   4161/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c117 */
   4162DECLARE_PCI_FIXUP_HEADER(0x1c28, /* Lite-On */
    4163			 0x0122, /* Plextor M6E (Marvell 88SS9183) */
   4164			 quirk_dma_func1_alias);
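
        /*
         * Rough sketch of how these aliases are consumed: the IOMMU layer
         * walks every requester ID a device may use (its own devfn, quirked
         * aliases such as the ones above, and any intervening bridge
         * aliases) via pci_for_each_dma_alias().  For illustration only --
         * add_rid(), program_context() and "domain" are made-up names; each
         * alias is a requester ID whose bus is PCI_BUS_NUM(alias) and whose
         * devfn is alias & 0xff:
         *
         *	static int add_rid(struct pci_dev *pdev, u16 alias, void *data)
         *	{
         *		return program_context(data, PCI_BUS_NUM(alias),
         *				       alias & 0xff);
         *	}
         *	...
         *	pci_for_each_dma_alias(pdev, add_rid, domain);
         */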
   4165
   4166/*
   4167 * Some devices DMA with the wrong devfn, not just the wrong function.
   4168 * quirk_fixed_dma_alias() uses this table to create fixed aliases, where
   4169 * the alias is "fixed" and independent of the device devfn.
   4170 *
   4171 * For example, the Adaptec 3405 is a PCIe card with an Intel 80333 I/O
   4172 * processor.  To software, this appears as a PCIe-to-PCI/X bridge with a
   4173 * single device on the secondary bus.  In reality, the single exposed
   4174 * device at 0e.0 is the Address Translation Unit (ATU) of the controller
   4175 * that provides a bridge to the internal bus of the I/O processor.  The
   4176 * controller supports private devices, which can be hidden from PCI config
   4177 * space.  In the case of the Adaptec 3405, a private device at 01.0
   4178 * appears to be the DMA engine, which therefore needs to become a DMA
   4179 * alias for the device.
   4180 */
   4181static const struct pci_device_id fixed_dma_alias_tbl[] = {
   4182	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
   4183			 PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */
   4184	  .driver_data = PCI_DEVFN(1, 0) },
   4185	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
   4186			 PCI_VENDOR_ID_ADAPTEC2, 0x02bc), /* Adaptec 3805 */
   4187	  .driver_data = PCI_DEVFN(1, 0) },
   4188	{ 0 }
   4189};
   4190
   4191static void quirk_fixed_dma_alias(struct pci_dev *dev)
   4192{
   4193	const struct pci_device_id *id;
   4194
   4195	id = pci_match_id(fixed_dma_alias_tbl, dev);
   4196	if (id)
   4197		pci_add_dma_alias(dev, id->driver_data, 1);
   4198}
   4199DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
   4200
   4201/*
   4202 * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in
   4203 * using the wrong DMA alias for the device.  Some of these devices can be
   4204 * used as either forward or reverse bridges, so we need to test whether the
   4205 * device is operating in the correct mode.  We could probably apply this
   4206 * quirk to PCI_ANY_ID, but for now we'll just use known offenders.  The test
    4207 * checks for a non-root, non-PCIe bridge whose upstream device is PCIe but
    4208 * not a PCIe-to-PCI bridge; such a @pdev is actually a PCIe-to-PCI bridge.
   4209 */
   4210static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
   4211{
   4212	if (!pci_is_root_bus(pdev->bus) &&
   4213	    pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
   4214	    !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
   4215	    pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
   4216		pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
   4217}
   4218/* ASM1083/1085, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c46 */
   4219DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
   4220			 quirk_use_pcie_bridge_dma_alias);
   4221/* Tundra 8113, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c43 */
   4222DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
   4223/* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */
   4224DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
   4225/* ITE 8893 has the same problem as the 8892 */
   4226DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias);
   4227/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
   4228DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
   4229
   4230/*
   4231 * MIC x200 NTB forwards PCIe traffic using multiple alien RIDs. They have to
   4232 * be added as aliases to the DMA device in order to allow buffer access
    4233 * when the IOMMU is enabled.  The following devfns have to match the RIT-LUT
    4234 * table programmed in the EEPROM.
   4235 */
   4236static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
   4237{
   4238	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1);
   4239	pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1);
   4240	pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1);
   4241}
   4242DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
   4243DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
   4244
   4245/*
   4246 * Intel Visual Compute Accelerator (VCA) is a family of PCIe add-in devices
   4247 * exposing computational units via Non Transparent Bridges (NTB, PEX 87xx).
   4248 *
   4249 * Similarly to MIC x200, we need to add DMA aliases to allow buffer access
   4250 * when IOMMU is enabled.  These aliases allow computational unit access to
   4251 * host memory.  These aliases mark the whole VCA device as one IOMMU
   4252 * group.
   4253 *
   4254 * All possible slot numbers (0x20) are used, since we are unable to tell
    4255 * what slot is used on the other side.  This quirk is intended for both host
   4256 * and computational unit sides.  The VCA devices have up to five functions
   4257 * (four for DMA channels and one additional).
   4258 */
   4259static void quirk_pex_vca_alias(struct pci_dev *pdev)
   4260{
   4261	const unsigned int num_pci_slots = 0x20;
   4262	unsigned int slot;
   4263
   4264	for (slot = 0; slot < num_pci_slots; slot++)
   4265		pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5);
   4266}
   4267DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
   4268DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
   4269DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias);
   4270DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias);
   4271DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias);
   4272DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias);
   4273
   4274/*
   4275 * The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are
   4276 * associated not at the root bus, but at a bridge below. This quirk avoids
   4277 * generating invalid DMA aliases.
   4278 */
   4279static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev)
   4280{
   4281	pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT;
   4282}
   4283DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
   4284				quirk_bridge_cavm_thrx2_pcie_root);
   4285DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
   4286				quirk_bridge_cavm_thrx2_pcie_root);
   4287
   4288/*
   4289 * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
   4290 * class code.  Fix it.
   4291 */
   4292static void quirk_tw686x_class(struct pci_dev *pdev)
   4293{
   4294	u32 class = pdev->class;
   4295
   4296	/* Use "Multimedia controller" class */
   4297	pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
   4298	pci_info(pdev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
   4299		 class, pdev->class);
   4300}
   4301DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
   4302			      quirk_tw686x_class);
   4303DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8,
   4304			      quirk_tw686x_class);
   4305DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
   4306			      quirk_tw686x_class);
   4307DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
   4308			      quirk_tw686x_class);
   4309
   4310/*
   4311 * Some devices have problems with Transaction Layer Packets with the Relaxed
    4312 * Ordering Attribute set.  Such devices should mark themselves, and other
    4313 * device drivers should check for that mark before sending TLPs with RO set.
   4314 */
   4315static void quirk_relaxedordering_disable(struct pci_dev *dev)
   4316{
   4317	dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
   4318	pci_info(dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
   4319}
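
        /*
         * Once the quirk above has marked a Root Port, the PCI core
         * (pci_configure_relaxed_ordering()) clears PCI_EXP_DEVCTL_RELAX_EN
         * on endpoints below that Root Port during enumeration.  A driver
         * that wants to generate RO TLPs can therefore gate that on its own
         * Device Control setting; rough sketch only (enable_ro_in_hw() is a
         * made-up driver hook):
         *
         *	if (pcie_relaxed_ordering_enabled(pdev))
         *		enable_ro_in_hw(adapter);
         */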
   4320
   4321/*
   4322 * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root
   4323 * Complex have a Flow Control Credit issue which can cause performance
   4324 * problems with Upstream Transaction Layer Packets with Relaxed Ordering set.
   4325 */
   4326DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
   4327			      quirk_relaxedordering_disable);
   4328DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
   4329			      quirk_relaxedordering_disable);
   4330DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
   4331			      quirk_relaxedordering_disable);
   4332DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
   4333			      quirk_relaxedordering_disable);
   4334DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
   4335			      quirk_relaxedordering_disable);
   4336DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
   4337			      quirk_relaxedordering_disable);
   4338DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
   4339			      quirk_relaxedordering_disable);
   4340DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
   4341			      quirk_relaxedordering_disable);
   4342DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
   4343			      quirk_relaxedordering_disable);
   4344DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
   4345			      quirk_relaxedordering_disable);
   4346DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
   4347			      quirk_relaxedordering_disable);
   4348DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
   4349			      quirk_relaxedordering_disable);
   4350DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
   4351			      quirk_relaxedordering_disable);
   4352DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
   4353			      quirk_relaxedordering_disable);
   4354DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
   4355			      quirk_relaxedordering_disable);
   4356DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
   4357			      quirk_relaxedordering_disable);
   4358DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
   4359			      quirk_relaxedordering_disable);
   4360DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
   4361			      quirk_relaxedordering_disable);
   4362DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
   4363			      quirk_relaxedordering_disable);
   4364DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
   4365			      quirk_relaxedordering_disable);
   4366DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
   4367			      quirk_relaxedordering_disable);
   4368DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
   4369			      quirk_relaxedordering_disable);
   4370DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
   4371			      quirk_relaxedordering_disable);
   4372DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
   4373			      quirk_relaxedordering_disable);
   4374DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
   4375			      quirk_relaxedordering_disable);
   4376DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
   4377			      quirk_relaxedordering_disable);
   4378DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
   4379			      quirk_relaxedordering_disable);
   4380DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
   4381			      quirk_relaxedordering_disable);
   4382
   4383/*
   4384 * The AMD ARM A1100 (aka "SEATTLE") SoC has a bug in its PCIe Root Complex
   4385 * where Upstream Transaction Layer Packets with the Relaxed Ordering
   4386 * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering
   4387 * set.  This is a violation of the PCIe 3.0 Transaction Ordering Rules
   4388 * outlined in Section 2.4.1 (PCI Express(r) Base Specification Revision 3.0
   4389 * November 10, 2010).  As a result, on this platform we can't use Relaxed
   4390 * Ordering for Upstream TLPs.
   4391 */
   4392DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
   4393			      quirk_relaxedordering_disable);
   4394DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
   4395			      quirk_relaxedordering_disable);
   4396DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
   4397			      quirk_relaxedordering_disable);
   4398
   4399/*
   4400 * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
   4401 * values for the Attribute as were supplied in the header of the
   4402 * corresponding Request, except as explicitly allowed when IDO is used."
   4403 *
   4404 * If a non-compliant device generates a completion with a different
   4405 * attribute than the request, the receiver may accept it (which itself
   4406 * seems non-compliant based on sec 2.3.2), or it may handle it as a
   4407 * Malformed TLP or an Unexpected Completion, which will probably lead to a
   4408 * device access timeout.
   4409 *
   4410 * If the non-compliant device generates completions with zero attributes
   4411 * (instead of copying the attributes from the request), we can work around
   4412 * this by disabling the "Relaxed Ordering" and "No Snoop" attributes in
   4413 * upstream devices so they always generate requests with zero attributes.
   4414 *
   4415 * This affects other devices under the same Root Port, but since these
   4416 * attributes are performance hints, there should be no functional problem.
   4417 *
   4418 * Note that Configuration Space accesses are never supposed to have TLP
   4419 * Attributes, so we're safe waiting till after any Configuration Space
   4420 * accesses to do the Root Port fixup.
   4421 */
   4422static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
   4423{
   4424	struct pci_dev *root_port = pcie_find_root_port(pdev);
   4425
   4426	if (!root_port) {
   4427		pci_warn(pdev, "PCIe Completion erratum may cause device errors\n");
   4428		return;
   4429	}
   4430
   4431	pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
   4432		 dev_name(&pdev->dev));
   4433	pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
   4434					   PCI_EXP_DEVCTL_RELAX_EN |
   4435					   PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
   4436}
   4437
   4438/*
   4439 * The Chelsio T5 chip fails to copy TLP Attributes from a Request to the
   4440 * Completion it generates.
   4441 */
   4442static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
   4443{
   4444	/*
   4445	 * This mask/compare operation selects for Physical Function 4 on a
   4446	 * T5.  We only need to fix up the Root Port once for any of the
   4447	 * PFs.  PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely
   4448	 * 0x54xx so we use that one.
   4449	 */
   4450	if ((pdev->device & 0xff00) == 0x5400)
   4451		quirk_disable_root_port_attributes(pdev);
   4452}
   4453DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
   4454			 quirk_chelsio_T5_disable_root_port_attributes);
   4455
   4456/*
   4457 * pci_acs_ctrl_enabled - compare desired ACS controls with those provided
   4458 *			  by a device
   4459 * @acs_ctrl_req: Bitmask of desired ACS controls
   4460 * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by
   4461 *		  the hardware design
   4462 *
   4463 * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included
   4464 * in @acs_ctrl_ena, i.e., the device provides all the access controls the
   4465 * caller desires.  Return 0 otherwise.
   4466 */
   4467static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
   4468{
   4469	if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
   4470		return 1;
   4471	return 0;
   4472}
   4473
   4474/*
   4475 * AMD has indicated that the devices below do not support peer-to-peer
    4476 * in any system where they are found in the southbridge alongside an AMD
    4477 * IOMMU.  Multifunction devices that do not support
   4478 * peer-to-peer between functions can claim to support a subset of ACS.
   4479 * Such devices effectively enable request redirect (RR) and completion
   4480 * redirect (CR) since all transactions are redirected to the upstream
   4481 * root complex.
   4482 *
   4483 * https://lore.kernel.org/r/201207111426.q6BEQTbh002928@mail.maya.org/
   4484 * https://lore.kernel.org/r/20120711165854.GM25282@amd.com/
   4485 * https://lore.kernel.org/r/20121005130857.GX4009@amd.com/
   4486 *
   4487 * 1002:4385 SBx00 SMBus Controller
   4488 * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller
   4489 * 1002:4383 SBx00 Azalia (Intel HDA)
   4490 * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller
   4491 * 1002:4384 SBx00 PCI to PCI Bridge
   4492 * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller
   4493 *
   4494 * https://bugzilla.kernel.org/show_bug.cgi?id=81841#c15
   4495 *
   4496 * 1022:780f [AMD] FCH PCI Bridge
   4497 * 1022:7809 [AMD] FCH USB OHCI Controller
   4498 */
   4499static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
   4500{
   4501#ifdef CONFIG_ACPI
   4502	struct acpi_table_header *header = NULL;
   4503	acpi_status status;
   4504
   4505	/* Targeting multifunction devices on the SB (appears on root bus) */
   4506	if (!dev->multifunction || !pci_is_root_bus(dev->bus))
   4507		return -ENODEV;
   4508
   4509	/* The IVRS table describes the AMD IOMMU */
   4510	status = acpi_get_table("IVRS", 0, &header);
   4511	if (ACPI_FAILURE(status))
   4512		return -ENODEV;
   4513
   4514	acpi_put_table(header);
   4515
   4516	/* Filter out flags not applicable to multifunction */
   4517	acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
   4518
   4519	return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
   4520#else
   4521	return -ENODEV;
   4522#endif
   4523}
   4524
   4525static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
   4526{
   4527	if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
   4528		return false;
   4529
   4530	switch (dev->device) {
   4531	/*
    4532	 * Effectively selects all downstream ports for the whole ThunderX1
   4533	 * (which represents 8 SoCs).
   4534	 */
   4535	case 0xa000 ... 0xa7ff: /* ThunderX1 */
   4536	case 0xaf84:  /* ThunderX2 */
   4537	case 0xb884:  /* ThunderX3 */
   4538		return true;
   4539	default:
   4540		return false;
   4541	}
   4542}
   4543
   4544static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
   4545{
   4546	if (!pci_quirk_cavium_acs_match(dev))
   4547		return -ENOTTY;
   4548
   4549	/*
   4550	 * Cavium Root Ports don't advertise an ACS capability.  However,
   4551	 * the RTL internally implements similar protection as if ACS had
   4552	 * Source Validation, Request Redirection, Completion Redirection,
   4553	 * and Upstream Forwarding features enabled.  Assert that the
   4554	 * hardware implements and enables equivalent ACS functionality for
   4555	 * these flags.
   4556	 */
   4557	return pci_acs_ctrl_enabled(acs_flags,
   4558		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
   4559}
   4560
   4561static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
   4562{
   4563	/*
   4564	 * X-Gene Root Ports matching this quirk do not allow peer-to-peer
   4565	 * transactions with others, allowing masking out these bits as if they
   4566	 * were unimplemented in the ACS capability.
   4567	 */
   4568	return pci_acs_ctrl_enabled(acs_flags,
   4569		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
   4570}
   4571
   4572/*
   4573 * Many Zhaoxin Root Ports and Switch Downstream Ports have no ACS capability.
   4574 * But the implementation could block peer-to-peer transactions between them
   4575 * and provide ACS-like functionality.
   4576 */
    4577static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
   4578{
   4579	if (!pci_is_pcie(dev) ||
   4580	    ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
   4581	     (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
   4582		return -ENOTTY;
   4583
   4584	switch (dev->device) {
   4585	case 0x0710 ... 0x071e:
   4586	case 0x0721:
   4587	case 0x0723 ... 0x0732:
   4588		return pci_acs_ctrl_enabled(acs_flags,
   4589			PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
   4590	}
   4591
   4592	return false;
   4593}
   4594
   4595/*
   4596 * Many Intel PCH Root Ports do provide ACS-like features to disable peer
   4597 * transactions and validate bus numbers in requests, but do not provide an
   4598 * actual PCIe ACS capability.  This is the list of device IDs known to fall
   4599 * into that category as provided by Intel in Red Hat bugzilla 1037684.
   4600 */
   4601static const u16 pci_quirk_intel_pch_acs_ids[] = {
   4602	/* Ibexpeak PCH */
   4603	0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
   4604	0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
   4605	/* Cougarpoint PCH */
   4606	0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
   4607	0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
   4608	/* Pantherpoint PCH */
   4609	0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
   4610	0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
   4611	/* Lynxpoint-H PCH */
   4612	0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
   4613	0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
   4614	/* Lynxpoint-LP PCH */
   4615	0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
   4616	0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
   4617	/* Wildcat PCH */
   4618	0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
   4619	0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
   4620	/* Patsburg (X79) PCH */
   4621	0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
   4622	/* Wellsburg (X99) PCH */
   4623	0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
   4624	0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
   4625	/* Lynx Point (9 series) PCH */
   4626	0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e,
   4627};
   4628
   4629static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
   4630{
   4631	int i;
   4632
   4633	/* Filter out a few obvious non-matches first */
   4634	if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
   4635		return false;
   4636
   4637	for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
   4638		if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
   4639			return true;
   4640
   4641	return false;
   4642}
   4643
   4644static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
   4645{
   4646	if (!pci_quirk_intel_pch_acs_match(dev))
   4647		return -ENOTTY;
   4648
   4649	if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
   4650		return pci_acs_ctrl_enabled(acs_flags,
   4651			PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
   4652
   4653	return pci_acs_ctrl_enabled(acs_flags, 0);
   4654}
   4655
   4656/*
   4657 * These QCOM Root Ports do provide ACS-like features to disable peer
   4658 * transactions and validate bus numbers in requests, but do not provide an
   4659 * actual PCIe ACS capability.  Hardware supports source validation but it
   4660 * will report the issue as Completer Abort instead of ACS Violation.
   4661 * Hardware doesn't support peer-to-peer and each Root Port is a Root
   4662 * Complex with unique segment numbers.  It is not possible for one Root
   4663 * Port to pass traffic to another Root Port.  All PCIe transactions are
   4664 * terminated inside the Root Port.
   4665 */
   4666static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
   4667{
   4668	return pci_acs_ctrl_enabled(acs_flags,
   4669		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
   4670}
   4671
   4672/*
   4673 * Each of these NXP Root Ports is in a Root Complex with a unique segment
   4674 * number and does provide isolation features to disable peer transactions
   4675 * and validate bus numbers in requests, but does not provide an ACS
   4676 * capability.
   4677 */
   4678static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags)
   4679{
   4680	return pci_acs_ctrl_enabled(acs_flags,
   4681		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
   4682}
   4683
   4684static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
   4685{
   4686	if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
   4687		return -ENOTTY;
   4688
   4689	/*
   4690	 * Amazon's Annapurna Labs root ports don't include an ACS capability,
   4691	 * but do include ACS-like functionality. The hardware doesn't support
   4692	 * peer-to-peer transactions via the root port and each has a unique
   4693	 * segment number.
   4694	 *
   4695	 * Additionally, the root ports cannot send traffic to each other.
   4696	 */
   4697	acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
   4698
   4699	return acs_flags ? 0 : 1;
   4700}
   4701
   4702/*
   4703 * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in
   4704 * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2,
   4705 * 12.1.46, 12.1.47)[1] this chipset uses dwords for the ACS capability and
   4706 * control registers whereas the PCIe spec packs them into words (Rev 3.0,
   4707 * 7.16 ACS Extended Capability).  The bit definitions are correct, but the
   4708 * control register is at offset 8 instead of 6 and we should probably use
   4709 * dword accesses to them.  This applies to the following PCI Device IDs, as
   4710 * found in volume 1 of the datasheet[2]:
   4711 *
   4712 * 0xa110-0xa11f Sunrise Point-H PCI Express Root Port #{0-16}
   4713 * 0xa167-0xa16a Sunrise Point-H PCI Express Root Port #{17-20}
   4714 *
   4715 * N.B. This doesn't fix what lspci shows.
   4716 *
   4717 * The 100 series chipset specification update includes this as errata #23[3].
   4718 *
   4719 * The 200 series chipset (Union Point) has the same bug according to the
   4720 * specification update (Intel 200 Series Chipset Family Platform Controller
   4721 * Hub, Specification Update, January 2017, Revision 001, Document# 335194-001,
   4722 * Errata 22)[4].  Per the datasheet[5], root port PCI Device IDs for this
   4723 * chipset include:
   4724 *
   4725 * 0xa290-0xa29f PCI Express Root port #{0-16}
   4726 * 0xa2e7-0xa2ee PCI Express Root port #{17-24}
   4727 *
    4728 * Mobile chipsets are also affected; the 7th & 8th Generation
    4729 * Specification Update confirms ACS errata 22, status no fix: (7th Generation
   4730 * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel
   4731 * Processor Family I/O for U Quad Core Platforms Specification Update,
   4732 * August 2017, Revision 002, Document#: 334660-002)[6]
   4733 * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O
    4734 * for U/Y Platforms and 8th Generation Intel Processor Family I/O for U
   4735 * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7]
   4736 *
   4737 * 0x9d10-0x9d1b PCI Express Root port #{1-12}
   4738 *
   4739 * [1] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
   4740 * [2] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
   4741 * [3] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
   4742 * [4] https://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
   4743 * [5] https://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
   4744 * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html
   4745 * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html
   4746 */
   4747static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
   4748{
   4749	if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
   4750		return false;
   4751
   4752	switch (dev->device) {
   4753	case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
   4754	case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
   4755	case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
   4756		return true;
   4757	}
   4758
   4759	return false;
   4760}
   4761
   4762#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)
   4763
   4764static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
   4765{
   4766	int pos;
   4767	u32 cap, ctrl;
   4768
   4769	if (!pci_quirk_intel_spt_pch_acs_match(dev))
   4770		return -ENOTTY;
   4771
   4772	pos = dev->acs_cap;
   4773	if (!pos)
   4774		return -ENOTTY;
   4775
   4776	/* see pci_acs_flags_enabled() */
   4777	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
   4778	acs_flags &= (cap | PCI_ACS_EC);
   4779
   4780	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
   4781
   4782	return pci_acs_ctrl_enabled(acs_flags, ctrl);
   4783}
   4784
   4785static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
   4786{
   4787	/*
   4788	 * SV, TB, and UF are not relevant to multifunction endpoints.
   4789	 *
   4790	 * Multifunction devices are only required to implement RR, CR, and DT
   4791	 * in their ACS capability if they support peer-to-peer transactions.
   4792	 * Devices matching this quirk have been verified by the vendor to not
   4793	 * perform peer-to-peer with other functions, allowing us to mask out
   4794	 * these bits as if they were unimplemented in the ACS capability.
   4795	 */
   4796	return pci_acs_ctrl_enabled(acs_flags,
   4797		PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
   4798		PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
   4799}
   4800
   4801static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags)
   4802{
   4803	/*
    4804	 * Intel RCiEPs are required to allow p2p only on translated
   4805	 * addresses.  Refer to Intel VT-d specification, r3.1, sec 3.16,
   4806	 * "Root-Complex Peer to Peer Considerations".
   4807	 */
   4808	if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END)
   4809		return -ENOTTY;
   4810
   4811	return pci_acs_ctrl_enabled(acs_flags,
   4812		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
   4813}
   4814
   4815static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
   4816{
   4817	/*
   4818	 * iProc PAXB Root Ports don't advertise an ACS capability, but
   4819	 * they do not allow peer-to-peer transactions between Root Ports.
   4820	 * Allow each Root Port to be in a separate IOMMU group by masking
   4821	 * SV/RR/CR/UF bits.
   4822	 */
   4823	return pci_acs_ctrl_enabled(acs_flags,
   4824		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
   4825}
   4826
   4827static const struct pci_dev_acs_enabled {
   4828	u16 vendor;
   4829	u16 device;
   4830	int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
   4831} pci_dev_acs_enabled[] = {
   4832	{ PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
   4833	{ PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
   4834	{ PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
   4835	{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
   4836	{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
   4837	{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
   4838	{ PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
   4839	{ PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
   4840	{ PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
   4841	{ PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
   4842	{ PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs },
   4843	{ PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
   4844	{ PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
   4845	{ PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
   4846	{ PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
   4847	{ PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
   4848	{ PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
   4849	{ PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
   4850	{ PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
   4851	{ PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
   4852	{ PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
   4853	{ PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
   4854	{ PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
   4855	{ PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
   4856	{ PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
   4857	{ PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
   4858	{ PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
   4859	{ PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
   4860	{ PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
   4861	{ PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
   4862	{ PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
   4863	/* 82580 */
   4864	{ PCI_VENDOR_ID_INTEL, 0x1509, pci_quirk_mf_endpoint_acs },
   4865	{ PCI_VENDOR_ID_INTEL, 0x150E, pci_quirk_mf_endpoint_acs },
   4866	{ PCI_VENDOR_ID_INTEL, 0x150F, pci_quirk_mf_endpoint_acs },
   4867	{ PCI_VENDOR_ID_INTEL, 0x1510, pci_quirk_mf_endpoint_acs },
   4868	{ PCI_VENDOR_ID_INTEL, 0x1511, pci_quirk_mf_endpoint_acs },
   4869	{ PCI_VENDOR_ID_INTEL, 0x1516, pci_quirk_mf_endpoint_acs },
   4870	{ PCI_VENDOR_ID_INTEL, 0x1527, pci_quirk_mf_endpoint_acs },
   4871	/* 82576 */
   4872	{ PCI_VENDOR_ID_INTEL, 0x10C9, pci_quirk_mf_endpoint_acs },
   4873	{ PCI_VENDOR_ID_INTEL, 0x10E6, pci_quirk_mf_endpoint_acs },
   4874	{ PCI_VENDOR_ID_INTEL, 0x10E7, pci_quirk_mf_endpoint_acs },
   4875	{ PCI_VENDOR_ID_INTEL, 0x10E8, pci_quirk_mf_endpoint_acs },
   4876	{ PCI_VENDOR_ID_INTEL, 0x150A, pci_quirk_mf_endpoint_acs },
   4877	{ PCI_VENDOR_ID_INTEL, 0x150D, pci_quirk_mf_endpoint_acs },
   4878	{ PCI_VENDOR_ID_INTEL, 0x1518, pci_quirk_mf_endpoint_acs },
   4879	{ PCI_VENDOR_ID_INTEL, 0x1526, pci_quirk_mf_endpoint_acs },
   4880	/* 82575 */
   4881	{ PCI_VENDOR_ID_INTEL, 0x10A7, pci_quirk_mf_endpoint_acs },
   4882	{ PCI_VENDOR_ID_INTEL, 0x10A9, pci_quirk_mf_endpoint_acs },
   4883	{ PCI_VENDOR_ID_INTEL, 0x10D6, pci_quirk_mf_endpoint_acs },
   4884	/* I350 */
   4885	{ PCI_VENDOR_ID_INTEL, 0x1521, pci_quirk_mf_endpoint_acs },
   4886	{ PCI_VENDOR_ID_INTEL, 0x1522, pci_quirk_mf_endpoint_acs },
   4887	{ PCI_VENDOR_ID_INTEL, 0x1523, pci_quirk_mf_endpoint_acs },
   4888	{ PCI_VENDOR_ID_INTEL, 0x1524, pci_quirk_mf_endpoint_acs },
   4889	/* 82571 (Quads omitted due to non-ACS switch) */
   4890	{ PCI_VENDOR_ID_INTEL, 0x105E, pci_quirk_mf_endpoint_acs },
   4891	{ PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
   4892	{ PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
   4893	{ PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },
   4894	/* I219 */
   4895	{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
   4896	{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
   4897	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs },
   4898	/* QCOM QDF2xxx root ports */
   4899	{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
   4900	{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
    4901	/* HXT SD4800 root ports. The ACS design is the same as QCOM QDF2xxx */
   4902	{ PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
   4903	/* Intel PCH root ports */
   4904	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
   4905	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
   4906	{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
   4907	{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
   4908	/* Cavium ThunderX */
   4909	{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
   4910	/* Cavium multi-function devices */
   4911	{ PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs },
   4912	{ PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs },
   4913	{ PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs },
   4914	/* APM X-Gene */
   4915	{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
   4916	/* Ampere Computing */
   4917	{ PCI_VENDOR_ID_AMPERE, 0xE005, pci_quirk_xgene_acs },
   4918	{ PCI_VENDOR_ID_AMPERE, 0xE006, pci_quirk_xgene_acs },
   4919	{ PCI_VENDOR_ID_AMPERE, 0xE007, pci_quirk_xgene_acs },
   4920	{ PCI_VENDOR_ID_AMPERE, 0xE008, pci_quirk_xgene_acs },
   4921	{ PCI_VENDOR_ID_AMPERE, 0xE009, pci_quirk_xgene_acs },
   4922	{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
   4923	{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
   4924	{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
   4925	/* Broadcom multi-function device */
   4926	{ PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
   4927	{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
   4928	/* Amazon Annapurna Labs */
   4929	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
   4930	/* Zhaoxin multi-function devices */
   4931	{ PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
   4932	{ PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
   4933	{ PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
   4934	/* NXP root ports, xx=16, 12, or 08 cores */
   4935	/* LX2xx0A : without security features + CAN-FD */
   4936	{ PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs },
   4937	{ PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs },
   4938	{ PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs },
   4939	/* LX2xx0C : security features + CAN-FD */
   4940	{ PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs },
   4941	{ PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs },
   4942	{ PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs },
   4943	/* LX2xx0E : security features + CAN */
   4944	{ PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs },
   4945	{ PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs },
   4946	{ PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs },
   4947	/* LX2xx0N : without security features + CAN */
   4948	{ PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs },
   4949	{ PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs },
   4950	{ PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs },
   4951	/* LX2xx2A : without security features + CAN-FD */
   4952	{ PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs },
   4953	{ PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs },
   4954	{ PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs },
   4955	/* LX2xx2C : security features + CAN-FD */
   4956	{ PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs },
   4957	{ PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs },
   4958	{ PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs },
   4959	/* LX2xx2E : security features + CAN */
   4960	{ PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs },
   4961	{ PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs },
   4962	{ PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs },
   4963	/* LX2xx2N : without security features + CAN */
   4964	{ PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs },
   4965	{ PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs },
   4966	{ PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
   4967	/* Zhaoxin Root/Downstream Ports */
   4968	{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
   4969	{ 0 }
   4970};
   4971
   4972/*
   4973 * pci_dev_specific_acs_enabled - check whether device provides ACS controls
   4974 * @dev:	PCI device
   4975 * @acs_flags:	Bitmask of desired ACS controls
   4976 *
   4977 * Returns:
   4978 *   -ENOTTY:	No quirk applies to this device; we can't tell whether the
   4979 *		device provides the desired controls
   4980 *   0:		Device does not provide all the desired controls
   4981 *   >0:	Device provides all the controls in @acs_flags
   4982 */
   4983int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
   4984{
   4985	const struct pci_dev_acs_enabled *i;
   4986	int ret;
   4987
   4988	/*
   4989	 * Allow devices that do not expose standard PCIe ACS capabilities
   4990	 * or control to indicate their support here.  Multi-function express
   4991	 * devices which do not allow internal peer-to-peer between functions,
    4992	 * devices that do not allow internal peer-to-peer between functions
   4993	 */
   4994	for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
   4995		if ((i->vendor == dev->vendor ||
   4996		     i->vendor == (u16)PCI_ANY_ID) &&
   4997		    (i->device == dev->device ||
   4998		     i->device == (u16)PCI_ANY_ID)) {
   4999			ret = i->acs_enabled(dev, acs_flags);
   5000			if (ret >= 0)
   5001				return ret;
   5002		}
   5003	}
   5004
   5005	return -ENOTTY;
   5006}
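
        /*
         * Rough sketch of how the return value is consumed (see
         * pci_acs_enabled()): a device-specific answer, when one exists,
         * takes precedence over the standard ACS capability registers:
         *
         *	ret = pci_dev_specific_acs_enabled(dev, acs_flags);
         *	if (ret >= 0)
         *		return ret > 0;
         *	... otherwise fall back to PCI_ACS_CAP/PCI_ACS_CTRL ...
         */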
   5007
   5008/* Config space offset of Root Complex Base Address register */
   5009#define INTEL_LPC_RCBA_REG 0xf0
   5010/* 31:14 RCBA address */
   5011#define INTEL_LPC_RCBA_MASK 0xffffc000
   5012/* RCBA Enable */
   5013#define INTEL_LPC_RCBA_ENABLE (1 << 0)
   5014
   5015/* Backbone Scratch Pad Register */
   5016#define INTEL_BSPR_REG 0x1104
   5017/* Backbone Peer Non-Posted Disable */
   5018#define INTEL_BSPR_REG_BPNPD (1 << 8)
   5019/* Backbone Peer Posted Disable */
   5020#define INTEL_BSPR_REG_BPPD  (1 << 9)
   5021
   5022/* Upstream Peer Decode Configuration Register */
   5023#define INTEL_UPDCR_REG 0x1014
   5024/* 5:0 Peer Decode Enable bits */
   5025#define INTEL_UPDCR_REG_MASK 0x3f
   5026
   5027static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
   5028{
   5029	u32 rcba, bspr, updcr;
   5030	void __iomem *rcba_mem;
   5031
   5032	/*
   5033	 * Read the RCBA register from the LPC (D31:F0).  PCH root ports
   5034	 * are D28:F* and therefore get probed before LPC, thus we can't
    5035	 * are D28:F* and therefore get probed before LPC; thus we can't
   5036	 */
   5037	pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
   5038				  INTEL_LPC_RCBA_REG, &rcba);
   5039	if (!(rcba & INTEL_LPC_RCBA_ENABLE))
   5040		return -EINVAL;
   5041
   5042	rcba_mem = ioremap(rcba & INTEL_LPC_RCBA_MASK,
   5043				   PAGE_ALIGN(INTEL_UPDCR_REG));
   5044	if (!rcba_mem)
   5045		return -ENOMEM;
   5046
   5047	/*
   5048	 * The BSPR can disallow peer cycles, but it's set by soft strap and
   5049	 * therefore read-only.  If both posted and non-posted peer cycles are
    5050	 * disallowed, we're ok.  If either is allowed, then we need to use
   5051	 * the UPDCR to disable peer decodes for each port.  This provides the
   5052	 * PCIe ACS equivalent of PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF
   5053	 */
   5054	bspr = readl(rcba_mem + INTEL_BSPR_REG);
   5055	bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
   5056	if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
   5057		updcr = readl(rcba_mem + INTEL_UPDCR_REG);
   5058		if (updcr & INTEL_UPDCR_REG_MASK) {
   5059			pci_info(dev, "Disabling UPDCR peer decodes\n");
   5060			updcr &= ~INTEL_UPDCR_REG_MASK;
   5061			writel(updcr, rcba_mem + INTEL_UPDCR_REG);
   5062		}
   5063	}
   5064
   5065	iounmap(rcba_mem);
   5066	return 0;
   5067}
   5068
   5069/* Miscellaneous Port Configuration register */
   5070#define INTEL_MPC_REG 0xd8
   5071/* MPC: Invalid Receive Bus Number Check Enable */
   5072#define INTEL_MPC_REG_IRBNCE (1 << 26)
   5073
   5074static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
   5075{
   5076	u32 mpc;
   5077
   5078	/*
   5079	 * When enabled, the IRBNCE bit of the MPC register enables the
   5080	 * equivalent of PCI ACS Source Validation (PCI_ACS_SV), which
   5081	 * ensures that requester IDs fall within the bus number range
   5082	 * of the bridge.  Enable if not already.
   5083	 */
   5084	pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
   5085	if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
   5086		pci_info(dev, "Enabling MPC IRBNCE\n");
   5087		mpc |= INTEL_MPC_REG_IRBNCE;
    5088		pci_write_config_dword(dev, INTEL_MPC_REG, mpc);
   5089	}
   5090}
   5091
   5092/*
   5093 * Currently this quirk does the equivalent of
   5094 * PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF
   5095 *
    5096 * TODO: This quirk also needs to do the equivalent of PCI_ACS_TB,
   5097 * if dev->external_facing || dev->untrusted
   5098 */
   5099static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
   5100{
   5101	if (!pci_quirk_intel_pch_acs_match(dev))
   5102		return -ENOTTY;
   5103
   5104	if (pci_quirk_enable_intel_lpc_acs(dev)) {
   5105		pci_warn(dev, "Failed to enable Intel PCH ACS quirk\n");
   5106		return 0;
   5107	}
   5108
   5109	pci_quirk_enable_intel_rp_mpc_acs(dev);
   5110
   5111	dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
   5112
   5113	pci_info(dev, "Intel PCH root port ACS workaround enabled\n");
   5114
   5115	return 0;
   5116}
   5117
   5118static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
   5119{
   5120	int pos;
   5121	u32 cap, ctrl;
   5122
   5123	if (!pci_quirk_intel_spt_pch_acs_match(dev))
   5124		return -ENOTTY;
   5125
   5126	pos = dev->acs_cap;
   5127	if (!pos)
   5128		return -ENOTTY;
   5129
   5130	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
   5131	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
   5132
   5133	ctrl |= (cap & PCI_ACS_SV);
   5134	ctrl |= (cap & PCI_ACS_RR);
   5135	ctrl |= (cap & PCI_ACS_CR);
   5136	ctrl |= (cap & PCI_ACS_UF);
   5137
   5138	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
   5139		ctrl |= (cap & PCI_ACS_TB);
   5140
   5141	pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
   5142
   5143	pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n");
   5144
   5145	return 0;
   5146}
   5147
   5148static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev)
   5149{
   5150	int pos;
   5151	u32 cap, ctrl;
   5152
   5153	if (!pci_quirk_intel_spt_pch_acs_match(dev))
   5154		return -ENOTTY;
   5155
   5156	pos = dev->acs_cap;
   5157	if (!pos)
   5158		return -ENOTTY;
   5159
   5160	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
   5161	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
   5162
   5163	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
   5164
   5165	pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
   5166
   5167	pci_info(dev, "Intel SPT PCH root port workaround: disabled ACS redirect\n");
   5168
   5169	return 0;
   5170}
   5171
   5172static const struct pci_dev_acs_ops {
   5173	u16 vendor;
   5174	u16 device;
   5175	int (*enable_acs)(struct pci_dev *dev);
   5176	int (*disable_acs_redir)(struct pci_dev *dev);
   5177} pci_dev_acs_ops[] = {
   5178	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
   5179	    .enable_acs = pci_quirk_enable_intel_pch_acs,
   5180	},
   5181	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
   5182	    .enable_acs = pci_quirk_enable_intel_spt_pch_acs,
   5183	    .disable_acs_redir = pci_quirk_disable_intel_spt_pch_acs_redir,
   5184	},
   5185};
   5186
   5187int pci_dev_specific_enable_acs(struct pci_dev *dev)
   5188{
   5189	const struct pci_dev_acs_ops *p;
   5190	int i, ret;
   5191
   5192	for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
   5193		p = &pci_dev_acs_ops[i];
   5194		if ((p->vendor == dev->vendor ||
   5195		     p->vendor == (u16)PCI_ANY_ID) &&
   5196		    (p->device == dev->device ||
   5197		     p->device == (u16)PCI_ANY_ID) &&
   5198		    p->enable_acs) {
   5199			ret = p->enable_acs(dev);
   5200			if (ret >= 0)
   5201				return ret;
   5202		}
   5203	}
   5204
   5205	return -ENOTTY;
   5206}
   5207
   5208int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
   5209{
   5210	const struct pci_dev_acs_ops *p;
   5211	int i, ret;
   5212
   5213	for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
   5214		p = &pci_dev_acs_ops[i];
   5215		if ((p->vendor == dev->vendor ||
   5216		     p->vendor == (u16)PCI_ANY_ID) &&
   5217		    (p->device == dev->device ||
   5218		     p->device == (u16)PCI_ANY_ID) &&
   5219		    p->disable_acs_redir) {
   5220			ret = p->disable_acs_redir(dev);
   5221			if (ret >= 0)
   5222				return ret;
   5223		}
   5224	}
   5225
   5226	return -ENOTTY;
   5227}
   5228
   5229/*
   5230 * The PCI capabilities list for Intel DH895xCC VFs (device ID 0x0443) with
   5231 * QuickAssist Technology (QAT) is prematurely terminated in hardware.  The
   5232 * Next Capability pointer in the MSI Capability Structure should point to
   5233 * the PCIe Capability Structure but is incorrectly hardwired as 0 terminating
   5234 * the list.
   5235 */
   5236static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
   5237{
   5238	int pos, i = 0;
   5239	u8 next_cap;
   5240	u16 reg16, *cap;
   5241	struct pci_cap_saved_state *state;
   5242
   5243	/* Bail if the hardware bug is fixed */
   5244	if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
   5245		return;
   5246
   5247	/* Bail if MSI Capability Structure is not found for some reason */
   5248	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
   5249	if (!pos)
   5250		return;
   5251
   5252	/*
   5253	 * Bail if Next Capability pointer in the MSI Capability Structure
   5254	 * is not the expected incorrect 0x00.
   5255	 */
   5256	pci_read_config_byte(pdev, pos + 1, &next_cap);
   5257	if (next_cap)
   5258		return;
   5259
   5260	/*
   5261	 * PCIe Capability Structure is expected to be at 0x50 and should
   5262	 * terminate the list (Next Capability pointer is 0x00).  Verify
    5263 * that the Capability ID and Next Capability pointer are as expected.
   5264	 * Open-code some of set_pcie_port_type() and pci_cfg_space_size_ext()
   5265	 * to correctly set kernel data structures which have already been
   5266	 * set incorrectly due to the hardware bug.
   5267	 */
   5268	pos = 0x50;
   5269	pci_read_config_word(pdev, pos, &reg16);
   5270	if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
   5271		u32 status;
   5272#ifndef PCI_EXP_SAVE_REGS
   5273#define PCI_EXP_SAVE_REGS     7
   5274#endif
   5275		int size = PCI_EXP_SAVE_REGS * sizeof(u16);
   5276
   5277		pdev->pcie_cap = pos;
   5278		pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
   5279		pdev->pcie_flags_reg = reg16;
   5280		pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
   5281		pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
   5282
   5283		pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
   5284		if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
   5285		    PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
   5286			pdev->cfg_size = PCI_CFG_SPACE_SIZE;
   5287
   5288		if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
   5289			return;
   5290
   5291		/* Save PCIe cap */
   5292		state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
   5293		if (!state)
   5294			return;
   5295
   5296		state->cap.cap_nr = PCI_CAP_ID_EXP;
   5297		state->cap.cap_extended = 0;
   5298		state->cap.size = size;
   5299		cap = (u16 *)&state->cap.data[0];
   5300		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
   5301		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
   5302		pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
   5303		pcie_capability_read_word(pdev, PCI_EXP_RTCTL,  &cap[i++]);
   5304		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
   5305		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
   5306		pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
   5307		hlist_add_head(&state->next, &pdev->saved_cap_space);
   5308	}
   5309}
   5310DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
   5311
   5312/*
    5313 * FLR may cause the following devices to hang:
   5314 *
   5315 * AMD Starship/Matisse HD Audio Controller 0x1487
   5316 * AMD Starship USB 3.0 Host Controller 0x148c
   5317 * AMD Matisse USB 3.0 Host Controller 0x149c
   5318 * Intel 82579LM Gigabit Ethernet Controller 0x1502
   5319 * Intel 82579V Gigabit Ethernet Controller 0x1503
   5320 *
   5321 */
   5322static void quirk_no_flr(struct pci_dev *dev)
   5323{
   5324	dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
   5325}
   5326DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
   5327DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
   5328DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
   5329DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
   5330DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
   5331
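        /*
         * The ServerWorks/Broadcom devices below reportedly can't handle 8-bit
         * Extended Tags.  Mark the host bridge and walk the whole hierarchy so
         * every device under it falls back to 5-bit tags.
         */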
   5332static void quirk_no_ext_tags(struct pci_dev *pdev)
   5333{
   5334	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
   5335
   5336	if (!bridge)
   5337		return;
   5338
   5339	bridge->no_ext_tags = 1;
   5340	pci_info(pdev, "disabling Extended Tags (this device can't handle them)\n");
   5341
   5342	pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
   5343}
   5344DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
   5345DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
   5346DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
   5347DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
   5348DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
   5349DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
   5350DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
   5351
   5352#ifdef CONFIG_PCI_ATS
   5353/*
   5354 * Some devices require additional driver setup to enable ATS.  Don't use
   5355 * ATS for those devices as ATS will be enabled before the driver has had a
   5356 * chance to load and configure the device.
   5357 */
   5358static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
   5359{
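        	/*
        	 * Every device matched below except the Raven iGPU (0x15d8) falls
        	 * through to no_ats unconditionally; for 0x15d8, ATS is disabled
        	 * only on the listed rev 0xcf / subsystem 0xea50 SKUs.
        	 */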
   5360	if (pdev->device == 0x15d8) {
   5361		if (pdev->revision == 0xcf &&
   5362		    pdev->subsystem_vendor == 0xea50 &&
   5363		    (pdev->subsystem_device == 0xce19 ||
   5364		     pdev->subsystem_device == 0xcc10 ||
   5365		     pdev->subsystem_device == 0xcc08))
   5366			goto no_ats;
   5367		else
   5368			return;
   5369	}
   5370
   5371no_ats:
   5372	pci_info(pdev, "disabling ATS\n");
   5373	pdev->ats_cap = 0;
   5374}
   5375
   5376/* AMD Stoney platform GPU */
   5377DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
   5378/* AMD Iceland dGPU */
   5379DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
   5380/* AMD Navi10 dGPU */
   5381DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7310, quirk_amd_harvest_no_ats);
   5382DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
   5383DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7318, quirk_amd_harvest_no_ats);
   5384DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7319, quirk_amd_harvest_no_ats);
   5385DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731a, quirk_amd_harvest_no_ats);
   5386DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731b, quirk_amd_harvest_no_ats);
   5387DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731e, quirk_amd_harvest_no_ats);
   5388DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731f, quirk_amd_harvest_no_ats);
   5389/* AMD Navi14 dGPU */
   5390DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
   5391DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
   5392DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats);
   5393DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats);
   5394/* AMD Raven platform iGPU */
   5395DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
   5396#endif /* CONFIG_PCI_ATS */
   5397
   5398/* Freescale PCIe doesn't support MSI in RC mode */
   5399static void quirk_fsl_no_msi(struct pci_dev *pdev)
   5400{
   5401	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
   5402		pdev->no_msi = 1;
   5403}
   5404DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
   5405
   5406/*
   5407 * Although not allowed by the spec, some multi-function devices have
   5408 * dependencies of one function (consumer) on another (supplier).  For the
   5409 * consumer to work in D0, the supplier must also be in D0.  Create a
   5410 * device link from the consumer to the supplier to enforce this
   5411 * dependency.  Runtime PM is allowed by default on the consumer to prevent
   5412 * it from permanently keeping the supplier awake.
   5413 */
   5414static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer,
   5415				   unsigned int supplier, unsigned int class,
   5416				   unsigned int class_shift)
   5417{
   5418	struct pci_dev *supplier_pdev;
   5419
   5420	if (PCI_FUNC(pdev->devfn) != consumer)
   5421		return;
   5422
   5423	supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
   5424				pdev->bus->number,
   5425				PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier));
   5426	if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) {
   5427		pci_dev_put(supplier_pdev);
   5428		return;
   5429	}
   5430
   5431	if (device_link_add(&pdev->dev, &supplier_pdev->dev,
   5432			    DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
   5433		pci_info(pdev, "D0 power state depends on %s\n",
   5434			 pci_name(supplier_pdev));
   5435	else
   5436		pci_err(pdev, "Cannot enforce power dependency on %s\n",
   5437			pci_name(supplier_pdev));
   5438
   5439	pm_runtime_allow(&pdev->dev);
   5440	pci_dev_put(supplier_pdev);
   5441}
   5442
   5443/*
   5444 * Create device link for GPUs with integrated HDA controller for streaming
   5445 * audio to attached displays.
   5446 */
   5447static void quirk_gpu_hda(struct pci_dev *hda)
   5448{
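        	/*
        	 * Function 1 (the HDA controller) is the consumer, function 0 (the
        	 * GPU) the supplier; class_shift 16 compares only the base class
        	 * (PCI_BASE_CLASS_DISPLAY).
        	 */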
   5449	pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16);
   5450}
   5451DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
   5452			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
   5453DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID,
   5454			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
   5455DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
   5456			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
   5457
   5458/*
   5459 * Create device link for GPUs with integrated USB xHCI Host
   5460 * controller to VGA.
   5461 */
   5462static void quirk_gpu_usb(struct pci_dev *usb)
   5463{
   5464	pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16);
   5465}
   5466DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
   5467			      PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
   5468DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
   5469			      PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
   5470
   5471/*
   5472 * Create device link for GPUs with integrated Type-C UCSI controller
    5473 * to VGA.  There is currently no class code defined for a UCSI device
    5474 * over PCI, so use the UNKNOWN class for now; update this when UCSI
    5475 * over PCI gets a class code.
   5476 */
   5477#define PCI_CLASS_SERIAL_UNKNOWN	0x0c80
   5478static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
   5479{
   5480	pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16);
   5481}
   5482DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
   5483			      PCI_CLASS_SERIAL_UNKNOWN, 8,
   5484			      quirk_gpu_usb_typec_ucsi);
   5485DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
   5486			      PCI_CLASS_SERIAL_UNKNOWN, 8,
   5487			      quirk_gpu_usb_typec_ucsi);
   5488
   5489/*
   5490 * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it
   5491 * disabled.  https://devtalk.nvidia.com/default/topic/1024022
   5492 */
   5493static void quirk_nvidia_hda(struct pci_dev *gpu)
   5494{
   5495	u8 hdr_type;
   5496	u32 val;
   5497
   5498	/* There was no integrated HDA controller before MCP89 */
   5499	if (gpu->device < PCI_DEVICE_ID_NVIDIA_GEFORCE_320M)
   5500		return;
   5501
   5502	/* Bit 25 at offset 0x488 enables the HDA controller */
   5503	pci_read_config_dword(gpu, 0x488, &val);
   5504	if (val & BIT(25))
   5505		return;
   5506
   5507	pci_info(gpu, "Enabling HDA controller\n");
   5508	pci_write_config_dword(gpu, 0x488, val | BIT(25));
   5509
   5510	/* The GPU becomes a multi-function device when the HDA is enabled */
   5511	pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type);
   5512	gpu->multifunction = !!(hdr_type & 0x80);
   5513}
   5514DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
   5515			       PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
   5516DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
   5517			       PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
   5518
   5519/*
   5520 * Some IDT switches incorrectly flag an ACS Source Validation error on
   5521 * completions for config read requests even though PCIe r4.0, sec
   5522 * 6.12.1.1, says that completions are never affected by ACS Source
   5523 * Validation.  Here's the text of IDT 89H32H8G3-YC, erratum #36:
   5524 *
   5525 *   Item #36 - Downstream port applies ACS Source Validation to Completions
   5526 *   Section 6.12.1.1 of the PCI Express Base Specification 3.1 states that
   5527 *   completions are never affected by ACS Source Validation.  However,
   5528 *   completions received by a downstream port of the PCIe switch from a
   5529 *   device that has not yet captured a PCIe bus number are incorrectly
   5530 *   dropped by ACS Source Validation by the switch downstream port.
   5531 *
   5532 * The workaround suggested by IDT is to issue a config write to the
   5533 * downstream device before issuing the first config read.  This allows the
   5534 * downstream device to capture its bus and device numbers (see PCIe r4.0,
   5535 * sec 2.2.9), thus avoiding the ACS error on the completion.
   5536 *
   5537 * However, we don't know when the device is ready to accept the config
   5538 * write, so we do config reads until we receive a non-Config Request Retry
   5539 * Status, then do the config write.
   5540 *
   5541 * To avoid hitting the erratum when doing the config reads, we disable ACS
   5542 * SV around this process.
   5543 */
   5544int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout)
   5545{
   5546	int pos;
   5547	u16 ctrl = 0;
   5548	bool found;
   5549	struct pci_dev *bridge = bus->self;
   5550
   5551	pos = bridge->acs_cap;
   5552
   5553	/* Disable ACS SV before initial config reads */
   5554	if (pos) {
   5555		pci_read_config_word(bridge, pos + PCI_ACS_CTRL, &ctrl);
   5556		if (ctrl & PCI_ACS_SV)
   5557			pci_write_config_word(bridge, pos + PCI_ACS_CTRL,
   5558					      ctrl & ~PCI_ACS_SV);
   5559	}
   5560
   5561	found = pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);
   5562
   5563	/* Write Vendor ID (read-only) so the endpoint latches its bus/dev */
   5564	if (found)
   5565		pci_bus_write_config_word(bus, devfn, PCI_VENDOR_ID, 0);
   5566
   5567	/* Re-enable ACS_SV if it was previously enabled */
   5568	if (ctrl & PCI_ACS_SV)
   5569		pci_write_config_word(bridge, pos + PCI_ACS_CTRL, ctrl);
   5570
   5571	return found;
   5572}
   5573
   5574/*
   5575 * Microsemi Switchtec NTB uses devfn proxy IDs to move TLPs between
   5576 * NT endpoints via the internal switch fabric. These IDs replace the
    5577 * originating requester ID in TLPs that access host memory on peer NTB
    5578 * ports.  Therefore, all proxy IDs must be aliased to the NTB device
   5579 * to permit access when the IOMMU is turned on.
   5580 */
   5581static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
   5582{
   5583	void __iomem *mmio;
   5584	struct ntb_info_regs __iomem *mmio_ntb;
   5585	struct ntb_ctrl_regs __iomem *mmio_ctrl;
   5586	u64 partition_map;
   5587	u8 partition;
   5588	int pp;
   5589
   5590	if (pci_enable_device(pdev)) {
   5591		pci_err(pdev, "Cannot enable Switchtec device\n");
   5592		return;
   5593	}
   5594
   5595	mmio = pci_iomap(pdev, 0, 0);
   5596	if (mmio == NULL) {
   5597		pci_disable_device(pdev);
   5598		pci_err(pdev, "Cannot iomap Switchtec device\n");
   5599		return;
   5600	}
   5601
   5602	pci_info(pdev, "Setting Switchtec proxy ID aliases\n");
   5603
   5604	mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
   5605	mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;
   5606
   5607	partition = ioread8(&mmio_ntb->partition_id);
   5608
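        	/*
        	 * Build the 64-bit map of NT partitions from the two 32-bit ep_map
        	 * words and clear our own partition bit so only peers are walked.
        	 */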
   5609	partition_map = ioread32(&mmio_ntb->ep_map);
   5610	partition_map |= ((u64) ioread32(&mmio_ntb->ep_map + 4)) << 32;
   5611	partition_map &= ~(1ULL << partition);
   5612
   5613	for (pp = 0; pp < (sizeof(partition_map) * 8); pp++) {
   5614		struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
   5615		u32 table_sz = 0;
   5616		int te;
   5617
   5618		if (!(partition_map & (1ULL << pp)))
   5619			continue;
   5620
   5621		pci_dbg(pdev, "Processing partition %d\n", pp);
   5622
   5623		mmio_peer_ctrl = &mmio_ctrl[pp];
   5624
   5625		table_sz = ioread16(&mmio_peer_ctrl->req_id_table_size);
   5626		if (!table_sz) {
   5627			pci_warn(pdev, "Partition %d table_sz 0\n", pp);
   5628			continue;
   5629		}
   5630
   5631		if (table_sz > 512) {
   5632			pci_warn(pdev,
   5633				 "Invalid Switchtec partition %d table_sz %d\n",
   5634				 pp, table_sz);
   5635			continue;
   5636		}
   5637
   5638		for (te = 0; te < table_sz; te++) {
   5639			u32 rid_entry;
   5640			u8 devfn;
   5641
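        			/* Bits 8:1 of a requester-ID table entry hold the proxy devfn */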
   5642			rid_entry = ioread32(&mmio_peer_ctrl->req_id_table[te]);
   5643			devfn = (rid_entry >> 1) & 0xFF;
   5644			pci_dbg(pdev,
   5645				"Aliasing Partition %d Proxy ID %02x.%d\n",
   5646				pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
   5647			pci_add_dma_alias(pdev, devfn, 1);
   5648		}
   5649	}
   5650
   5651	pci_iounmap(pdev, mmio);
   5652	pci_disable_device(pdev);
   5653}
   5654#define SWITCHTEC_QUIRK(vid) \
   5655	DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
   5656		PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
   5657
   5658SWITCHTEC_QUIRK(0x8531);  /* PFX 24xG3 */
   5659SWITCHTEC_QUIRK(0x8532);  /* PFX 32xG3 */
   5660SWITCHTEC_QUIRK(0x8533);  /* PFX 48xG3 */
   5661SWITCHTEC_QUIRK(0x8534);  /* PFX 64xG3 */
   5662SWITCHTEC_QUIRK(0x8535);  /* PFX 80xG3 */
   5663SWITCHTEC_QUIRK(0x8536);  /* PFX 96xG3 */
   5664SWITCHTEC_QUIRK(0x8541);  /* PSX 24xG3 */
   5665SWITCHTEC_QUIRK(0x8542);  /* PSX 32xG3 */
   5666SWITCHTEC_QUIRK(0x8543);  /* PSX 48xG3 */
   5667SWITCHTEC_QUIRK(0x8544);  /* PSX 64xG3 */
   5668SWITCHTEC_QUIRK(0x8545);  /* PSX 80xG3 */
   5669SWITCHTEC_QUIRK(0x8546);  /* PSX 96xG3 */
   5670SWITCHTEC_QUIRK(0x8551);  /* PAX 24XG3 */
   5671SWITCHTEC_QUIRK(0x8552);  /* PAX 32XG3 */
   5672SWITCHTEC_QUIRK(0x8553);  /* PAX 48XG3 */
   5673SWITCHTEC_QUIRK(0x8554);  /* PAX 64XG3 */
   5674SWITCHTEC_QUIRK(0x8555);  /* PAX 80XG3 */
   5675SWITCHTEC_QUIRK(0x8556);  /* PAX 96XG3 */
   5676SWITCHTEC_QUIRK(0x8561);  /* PFXL 24XG3 */
   5677SWITCHTEC_QUIRK(0x8562);  /* PFXL 32XG3 */
   5678SWITCHTEC_QUIRK(0x8563);  /* PFXL 48XG3 */
   5679SWITCHTEC_QUIRK(0x8564);  /* PFXL 64XG3 */
   5680SWITCHTEC_QUIRK(0x8565);  /* PFXL 80XG3 */
   5681SWITCHTEC_QUIRK(0x8566);  /* PFXL 96XG3 */
   5682SWITCHTEC_QUIRK(0x8571);  /* PFXI 24XG3 */
   5683SWITCHTEC_QUIRK(0x8572);  /* PFXI 32XG3 */
   5684SWITCHTEC_QUIRK(0x8573);  /* PFXI 48XG3 */
   5685SWITCHTEC_QUIRK(0x8574);  /* PFXI 64XG3 */
   5686SWITCHTEC_QUIRK(0x8575);  /* PFXI 80XG3 */
   5687SWITCHTEC_QUIRK(0x8576);  /* PFXI 96XG3 */
   5688SWITCHTEC_QUIRK(0x4000);  /* PFX 100XG4 */
   5689SWITCHTEC_QUIRK(0x4084);  /* PFX 84XG4  */
   5690SWITCHTEC_QUIRK(0x4068);  /* PFX 68XG4  */
   5691SWITCHTEC_QUIRK(0x4052);  /* PFX 52XG4  */
   5692SWITCHTEC_QUIRK(0x4036);  /* PFX 36XG4  */
   5693SWITCHTEC_QUIRK(0x4028);  /* PFX 28XG4  */
   5694SWITCHTEC_QUIRK(0x4100);  /* PSX 100XG4 */
   5695SWITCHTEC_QUIRK(0x4184);  /* PSX 84XG4  */
   5696SWITCHTEC_QUIRK(0x4168);  /* PSX 68XG4  */
   5697SWITCHTEC_QUIRK(0x4152);  /* PSX 52XG4  */
   5698SWITCHTEC_QUIRK(0x4136);  /* PSX 36XG4  */
   5699SWITCHTEC_QUIRK(0x4128);  /* PSX 28XG4  */
   5700SWITCHTEC_QUIRK(0x4200);  /* PAX 100XG4 */
   5701SWITCHTEC_QUIRK(0x4284);  /* PAX 84XG4  */
   5702SWITCHTEC_QUIRK(0x4268);  /* PAX 68XG4  */
   5703SWITCHTEC_QUIRK(0x4252);  /* PAX 52XG4  */
   5704SWITCHTEC_QUIRK(0x4236);  /* PAX 36XG4  */
   5705SWITCHTEC_QUIRK(0x4228);  /* PAX 28XG4  */
   5706SWITCHTEC_QUIRK(0x4352);  /* PFXA 52XG4 */
   5707SWITCHTEC_QUIRK(0x4336);  /* PFXA 36XG4 */
   5708SWITCHTEC_QUIRK(0x4328);  /* PFXA 28XG4 */
   5709SWITCHTEC_QUIRK(0x4452);  /* PSXA 52XG4 */
   5710SWITCHTEC_QUIRK(0x4436);  /* PSXA 36XG4 */
   5711SWITCHTEC_QUIRK(0x4428);  /* PSXA 28XG4 */
   5712SWITCHTEC_QUIRK(0x4552);  /* PAXA 52XG4 */
   5713SWITCHTEC_QUIRK(0x4536);  /* PAXA 36XG4 */
   5714SWITCHTEC_QUIRK(0x4528);  /* PAXA 28XG4 */
   5715
   5716/*
   5717 * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
   5718 * These IDs are used to forward responses to the originator on the other
   5719 * side of the NTB.  Alias all possible IDs to the NTB to permit access when
   5720 * the IOMMU is turned on.
   5721 */
   5722static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev)
   5723{
   5724	pci_info(pdev, "Setting PLX NTB proxy ID aliases\n");
   5725	/* PLX NTB may use all 256 devfns */
   5726	pci_add_dma_alias(pdev, 0, 256);
   5727}
   5728DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias);
   5729DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias);
   5730
   5731/*
    5732 * On Lenovo Thinkpad P50 SKUs with an Nvidia Quadro M1000M, the BIOS does
   5733 * not always reset the secondary Nvidia GPU between reboots if the system
   5734 * is configured to use Hybrid Graphics mode.  This results in the GPU
   5735 * being left in whatever state it was in during the *previous* boot, which
   5736 * causes spurious interrupts from the GPU, which in turn causes us to
   5737 * disable the wrong IRQ and end up breaking the touchpad.  Unsurprisingly,
   5738 * this also completely breaks nouveau.
   5739 *
   5740 * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a
   5741 * clean state and fixes all these issues.
   5742 *
   5743 * When the machine is configured in Dedicated display mode, the issue
   5744 * doesn't occur.  Fortunately the GPU advertises NoReset+ when in this
   5745 * mode, so we can detect that and avoid resetting it.
   5746 */
   5747static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
   5748{
   5749	void __iomem *map;
   5750	int ret;
   5751
   5752	if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
   5753	    pdev->subsystem_device != 0x222e ||
   5754	    !pci_reset_supported(pdev))
   5755		return;
   5756
   5757	if (pci_enable_device_mem(pdev))
   5758		return;
   5759
   5760	/*
   5761	 * Based on nvkm_device_ctor() in
   5762	 * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
   5763	 */
   5764	map = pci_iomap(pdev, 0, 0x23000);
   5765	if (!map) {
   5766		pci_err(pdev, "Can't map MMIO space\n");
   5767		goto out_disable;
   5768	}
   5769
   5770	/*
   5771	 * Make sure the GPU looks like it's been POSTed before resetting
   5772	 * it.
   5773	 */
   5774	if (ioread32(map + 0x2240c) & 0x2) {
   5775		pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
   5776		ret = pci_reset_bus(pdev);
   5777		if (ret < 0)
   5778			pci_err(pdev, "Failed to reset GPU: %d\n", ret);
   5779	}
   5780
   5781	iounmap(map);
   5782out_disable:
   5783	pci_disable_device(pdev);
   5784}
   5785DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
   5786			      PCI_CLASS_DISPLAY_VGA, 8,
   5787			      quirk_reset_lenovo_thinkpad_p50_nvgpu);
   5788
   5789/*
   5790 * Device [1b21:2142]
   5791 * When in D0, PME# doesn't get asserted when plugging USB 3.0 device.
   5792 */
   5793static void pci_fixup_no_d0_pme(struct pci_dev *dev)
   5794{
   5795	pci_info(dev, "PME# does not work under D0, disabling it\n");
   5796	dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
   5797}
   5798DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
   5799
   5800/*
    5801 * Devices [12d8:400e] (OHCI) and [12d8:400f] (EHCI)
   5802 *
   5803 * These devices advertise PME# support in all power states but don't
   5804 * reliably assert it.
   5805 *
   5806 * These devices also advertise MSI, but documentation (PI7C9X440SL.pdf)
   5807 * says "The MSI Function is not implemented on this device" in chapters
   5808 * 7.3.27, 7.3.29-7.3.31.
   5809 */
   5810static void pci_fixup_no_msi_no_pme(struct pci_dev *dev)
   5811{
   5812#ifdef CONFIG_PCI_MSI
   5813	pci_info(dev, "MSI is not implemented on this device, disabling it\n");
   5814	dev->no_msi = 1;
   5815#endif
   5816	pci_info(dev, "PME# is unreliable, disabling it\n");
   5817	dev->pme_support = 0;
   5818}
   5819DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_msi_no_pme);
   5820DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_msi_no_pme);
   5821
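        /*
         * The Apex accelerator (1ac1:089a) reports PCI_CLASS_NOT_DEFINED; fold
         * in PCI_CLASS_SYSTEM_OTHER as the base/sub-class while preserving the
         * programming-interface byte.
         */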
   5822static void apex_pci_fixup_class(struct pci_dev *pdev)
   5823{
   5824	pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
   5825}
   5826DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
   5827			       PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
   5828
   5829/*
   5830 * Pericom PI7C9X2G404/PI7C9X2G304/PI7C9X2G303 switch erratum E5 -
   5831 * ACS P2P Request Redirect is not functional
   5832 *
   5833 * When ACS P2P Request Redirect is enabled and bandwidth is not balanced
   5834 * between upstream and downstream ports, packets are queued in an internal
    5835 * buffer until a CPLD packet is received.  The workaround is to use the
    5836 * switch in store-and-forward mode.
   5837 */
   5838#define PI7C9X2Gxxx_MODE_REG		0x74
   5839#define PI7C9X2Gxxx_STORE_FORWARD_MODE	BIT(0)
   5840static void pci_fixup_pericom_acs_store_forward(struct pci_dev *pdev)
   5841{
   5842	struct pci_dev *upstream;
   5843	u16 val;
   5844
   5845	/* Downstream ports only */
   5846	if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
   5847		return;
   5848
   5849	/* Check for ACS P2P Request Redirect use */
   5850	if (!pdev->acs_cap)
   5851		return;
   5852	pci_read_config_word(pdev, pdev->acs_cap + PCI_ACS_CTRL, &val);
   5853	if (!(val & PCI_ACS_RR))
   5854		return;
   5855
   5856	upstream = pci_upstream_bridge(pdev);
   5857	if (!upstream)
   5858		return;
   5859
   5860	pci_read_config_word(upstream, PI7C9X2Gxxx_MODE_REG, &val);
   5861	if (!(val & PI7C9X2Gxxx_STORE_FORWARD_MODE)) {
   5862		pci_info(upstream, "Setting PI7C9X2Gxxx store-forward mode to avoid ACS erratum\n");
   5863		pci_write_config_word(upstream, PI7C9X2Gxxx_MODE_REG, val |
   5864				      PI7C9X2Gxxx_STORE_FORWARD_MODE);
   5865	}
   5866}
   5867/*
    5868 * Apply the fixup on enable and on resume so it is reapplied whenever the
    5869 * ACS configuration changes or the switch mode is reset.
   5870 */
   5871DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2404,
   5872			 pci_fixup_pericom_acs_store_forward);
   5873DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2404,
   5874			 pci_fixup_pericom_acs_store_forward);
   5875DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2304,
   5876			 pci_fixup_pericom_acs_store_forward);
   5877DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2304,
   5878			 pci_fixup_pericom_acs_store_forward);
   5879DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2303,
   5880			 pci_fixup_pericom_acs_store_forward);
   5881DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2303,
   5882			 pci_fixup_pericom_acs_store_forward);
   5883
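        /*
         * The Nvidia ION AHCI controller reportedly implements MSI per-vector
         * masking without advertising it; flag the device so the MSI core
         * still makes use of the mask bits.
         */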
   5884static void nvidia_ion_ahci_fixup(struct pci_dev *pdev)
   5885{
   5886	pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING;
   5887}
   5888DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0ab8, nvidia_ion_ahci_fixup);
   5889
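        /*
         * Intel I210-family NICs (the devices below) reportedly misbehave when
         * the Expansion ROM BAR overlaps another BAR even while the ROM is
         * disabled; rom_bar_overlap lets the core update a disabled ROM BAR so
         * it can be moved out of the way.
         */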
   5890static void rom_bar_overlap_defect(struct pci_dev *dev)
   5891{
   5892	pci_info(dev, "working around ROM BAR overlap defect\n");
   5893	dev->rom_bar_overlap = 1;
   5894}
   5895DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1533, rom_bar_overlap_defect);
   5896DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1536, rom_bar_overlap_defect);
   5897DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1537, rom_bar_overlap_defect);
   5898DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1538, rom_bar_overlap_defect);
   5899
   5900#ifdef CONFIG_PCIEASPM
   5901/*
   5902 * Several Intel DG2 graphics devices advertise that they can only tolerate
   5903 * 1us latency when transitioning from L1 to L0, which may prevent ASPM L1
   5904 * from being enabled.  But in fact these devices can tolerate unlimited
   5905 * latency.  Override their Device Capabilities value to allow ASPM L1 to
   5906 * be enabled.
   5907 */
   5908static void aspm_l1_acceptable_latency(struct pci_dev *dev)
   5909{
   5910	u32 l1_lat = FIELD_GET(PCI_EXP_DEVCAP_L1, dev->devcap);
   5911
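        	/*
        	 * 0x7 is the maximum DEVCAP L1 Acceptable Latency encoding
        	 * ("more than 64 us"), i.e. effectively no limit.
        	 */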
   5912	if (l1_lat < 7) {
   5913		dev->devcap |= FIELD_PREP(PCI_EXP_DEVCAP_L1, 7);
   5914		pci_info(dev, "ASPM: overriding L1 acceptable latency from %#x to 0x7\n",
   5915			 l1_lat);
   5916	}
   5917}
   5918DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f80, aspm_l1_acceptable_latency);
   5919DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f81, aspm_l1_acceptable_latency);
   5920DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f82, aspm_l1_acceptable_latency);
   5921DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f83, aspm_l1_acceptable_latency);
   5922DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f84, aspm_l1_acceptable_latency);
   5923DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f85, aspm_l1_acceptable_latency);
   5924DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f86, aspm_l1_acceptable_latency);
   5925DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f87, aspm_l1_acceptable_latency);
   5926DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f88, aspm_l1_acceptable_latency);
   5927DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5690, aspm_l1_acceptable_latency);
   5928DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5691, aspm_l1_acceptable_latency);
   5929DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5692, aspm_l1_acceptable_latency);
   5930DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5693, aspm_l1_acceptable_latency);
   5931DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5694, aspm_l1_acceptable_latency);
   5932DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5695, aspm_l1_acceptable_latency);
   5933DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a0, aspm_l1_acceptable_latency);
   5934DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a1, aspm_l1_acceptable_latency);
   5935DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a2, aspm_l1_acceptable_latency);
   5936DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a3, aspm_l1_acceptable_latency);
   5937DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a4, aspm_l1_acceptable_latency);
   5938DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a5, aspm_l1_acceptable_latency);
   5939DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a6, aspm_l1_acceptable_latency);
   5940DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56b0, aspm_l1_acceptable_latency);
   5941DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56b1, aspm_l1_acceptable_latency);
   5942DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c0, aspm_l1_acceptable_latency);
   5943DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c1, aspm_l1_acceptable_latency);
   5944#endif