cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cptpf_main.c (16746B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include "cptpf.h"

#define DRV_NAME	"thunder-cpt"
#define DRV_VERSION	"1.0"

static u32 num_vfs = 4; /* Default 4 VF enabled */
module_param(num_vfs, uint, 0444);
MODULE_PARM_DESC(num_vfs, "Number of VFs to enable (4-16)");

/*
 * Disable cores specified by coremask
 */
static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
			      u8 type, u8 grp)
{
	u64 pf_exe_ctl;
	u32 timeout = 100;
	u64 grpmask = 0;
	struct device *dev = &cpt->pdev->dev;

	if (type == AE_TYPES)
		coremask = (coremask << cpt->max_se_cores);

	/* Disengage the cores from groups */
	grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
	cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
			(grpmask & ~coremask));
	udelay(CSR_DELAY);
	grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
	while (grp & coremask) {
		dev_err(dev, "Cores still busy %llx", coremask);
		grp = cpt_read_csr64(cpt->reg_base,
				     CPTX_PF_EXEC_BUSY(0));
		if (!timeout--)
			break;

		udelay(CSR_DELAY);
	}

	/* Disable the cores */
	pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
	cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
			(pf_exe_ctl & ~coremask));
	udelay(CSR_DELAY);
}

/*
 * Enable cores specified by coremask
 */
static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask,
			     u8 type)
{
	u64 pf_exe_ctl;

	if (type == AE_TYPES)
		coremask = (coremask << cpt->max_se_cores);

	pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
	cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
			(pf_exe_ctl | coremask));
	udelay(CSR_DELAY);
}

static void cpt_configure_group(struct cpt_device *cpt, u8 grp,
				u64 coremask, u8 type)
{
	u64 pf_gx_en = 0;

	if (type == AE_TYPES)
		coremask = (coremask << cpt->max_se_cores);

	pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
	cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
			(pf_gx_en | coremask));
	udelay(CSR_DELAY);
}

static void cpt_disable_mbox_interrupts(struct cpt_device *cpt)
{
	/* Clear mbox(0) interrupts for all VFs */
	cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull);
}

static void cpt_disable_ecc_interrupts(struct cpt_device *cpt)
{
	/* Clear ecc(0) interrupts for all VFs */
	cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull);
}

static void cpt_disable_exec_interrupts(struct cpt_device *cpt)
{
	/* Clear exec interrupts for all VFs */
	cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull);
}

static void cpt_disable_all_interrupts(struct cpt_device *cpt)
{
	cpt_disable_mbox_interrupts(cpt);
	cpt_disable_ecc_interrupts(cpt);
	cpt_disable_exec_interrupts(cpt);
}

static void cpt_enable_mbox_interrupts(struct cpt_device *cpt)
{
	/* Set mbox(0) interrupts for all VFs */
	cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull);
}

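/*
 * Program the UCODE_BASE register of every core selected in
 * mcode->core_mask with the DMA address of the microcode image
 * (SE cores occupy slots 0-9, AE cores follow).
 */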
static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode)
{
	int ret = 0, core = 0, shift = 0;
	u32 total_cores = 0;
	struct device *dev = &cpt->pdev->dev;

	if (!mcode || !mcode->code) {
		dev_err(dev, "mcode is NULL or mcode->code is NULL\n");
		return -EINVAL;
	}

	if (mcode->code_size == 0) {
		dev_err(dev, "microcode size is 0\n");
		return -EINVAL;
	}

	/* Assumes 0-9 are SE cores for UCODE_BASE registers and
	 * AE core bases follow
	 */
	if (mcode->is_ae) {
		core = CPT_MAX_SE_CORES; /* start counting from 10 */
		total_cores = CPT_MAX_TOTAL_CORES; /* up to 15 */
	} else {
		core = 0; /* start counting from 0 */
		total_cores = CPT_MAX_SE_CORES; /* up to 9 */
	}

	/* Point to microcode for each core of the group */
	for (; core < total_cores; core++, shift++) {
		if (mcode->core_mask & (1 << shift)) {
			cpt_write_csr64(cpt->reg_base,
					CPTX_PF_ENGX_UCODE_BASE(0, core),
					(u64)mcode->phys_base);
		}
	}
	return ret;
}

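/*
 * Attach a microcode image to the next free core group: disable the
 * requested cores, program their UCODE_BASE registers, add them to the
 * group and enable them again.
 */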
static int do_cpt_init(struct cpt_device *cpt, struct microcode *mcode)
{
	int ret = 0;
	struct device *dev = &cpt->pdev->dev;

	/* Make device not ready */
	cpt->flags &= ~CPT_FLAG_DEVICE_READY;
	/* Disable all PF interrupts */
	cpt_disable_all_interrupts(cpt);
	/* Calculate mcode group and coremasks */
	if (mcode->is_ae) {
		if (mcode->num_cores > cpt->max_ae_cores) {
			dev_err(dev, "Requested more cores than available AE cores\n");
			ret = -EINVAL;
			goto cpt_init_fail;
		}

		if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
			dev_err(dev, "Can't load, all eight microcode groups in use");
			return -ENFILE;
		}

		mcode->group = cpt->next_group;
		/* Convert requested cores to mask */
		mcode->core_mask = GENMASK(mcode->num_cores, 0);
		cpt_disable_cores(cpt, mcode->core_mask, AE_TYPES,
				  mcode->group);
		/* Load microcode for AE engines */
		ret = cpt_load_microcode(cpt, mcode);
		if (ret) {
			dev_err(dev, "Microcode load failed for %s\n",
				mcode->version);
			goto cpt_init_fail;
		}
		cpt->next_group++;
		/* Configure group mask for the mcode */
		cpt_configure_group(cpt, mcode->group, mcode->core_mask,
				    AE_TYPES);
		/* Enable AE cores for the group mask */
		cpt_enable_cores(cpt, mcode->core_mask, AE_TYPES);
	} else {
		if (mcode->num_cores > cpt->max_se_cores) {
			dev_err(dev, "Requested more cores than available SE cores\n");
			ret = -EINVAL;
			goto cpt_init_fail;
		}
		if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
			dev_err(dev, "Can't load, all eight microcode groups in use");
			return -ENFILE;
		}

		mcode->group = cpt->next_group;
		/* Convert requested cores to mask */
		mcode->core_mask = GENMASK(mcode->num_cores, 0);
		cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES,
				  mcode->group);
		/* Load microcode for SE engines */
		ret = cpt_load_microcode(cpt, mcode);
		if (ret) {
			dev_err(dev, "Microcode load failed for %s\n",
				mcode->version);
			goto cpt_init_fail;
		}
		cpt->next_group++;
		/* Configure group mask for the mcode */
		cpt_configure_group(cpt, mcode->group, mcode->core_mask,
				    SE_TYPES);
		/* Enable SE cores for the group mask */
		cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES);
	}

	/* Enable PF mailbox interrupts */
	cpt_enable_mbox_interrupts(cpt);
	cpt->flags |= CPT_FLAG_DEVICE_READY;

	return ret;

cpt_init_fail:
	/* Enable PF mailbox interrupts */
	cpt_enable_mbox_interrupts(cpt);

	return ret;
}

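/* Header at the start of a microcode firmware image */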
struct ucode_header {
	u8 version[CPT_UCODE_VERSION_SZ];
	__be32 code_length;
	u32 data_length;
	u64 sram_address;
};

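/*
 * Fetch a microcode image via request_firmware(), copy it into
 * DMA-coherent memory in the byte order the engines expect and load it
 * onto a core group through do_cpt_init().
 */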
static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
{
	const struct firmware *fw_entry;
	struct device *dev = &cpt->pdev->dev;
	struct ucode_header *ucode;
	struct microcode *mcode;
	int j, ret = 0;

	ret = request_firmware(&fw_entry, fw, dev);
	if (ret)
		return ret;

	ucode = (struct ucode_header *)fw_entry->data;
	mcode = &cpt->mcode[cpt->next_mc_idx];
	memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
	mcode->code_size = ntohl(ucode->code_length) * 2;
	if (!mcode->code_size) {
		ret = -EINVAL;
		goto fw_release;
	}

	mcode->is_ae = is_ae;
	mcode->core_mask = 0ULL;
	mcode->num_cores = is_ae ? 6 : 10;

	/* Allocate DMAable space */
	mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
					 &mcode->phys_base, GFP_KERNEL);
	if (!mcode->code) {
		dev_err(dev, "Unable to allocate space for microcode");
		ret = -ENOMEM;
		goto fw_release;
	}

	memcpy((void *)mcode->code, (void *)(fw_entry->data + sizeof(*ucode)),
	       mcode->code_size);

	/* Byte swap 64-bit */
	for (j = 0; j < (mcode->code_size / 8); j++)
		((__be64 *)mcode->code)[j] = cpu_to_be64(((u64 *)mcode->code)[j]);
	/* MC needs 16-bit swap */
	for (j = 0; j < (mcode->code_size / 2); j++)
		((__be16 *)mcode->code)[j] = cpu_to_be16(((u16 *)mcode->code)[j]);

	dev_dbg(dev, "mcode->code_size = %u\n", mcode->code_size);
	dev_dbg(dev, "mcode->is_ae = %u\n", mcode->is_ae);
	dev_dbg(dev, "mcode->num_cores = %u\n", mcode->num_cores);
	dev_dbg(dev, "mcode->code = %llx\n", (u64)mcode->code);
	dev_dbg(dev, "mcode->phys_base = %llx\n", mcode->phys_base);

	ret = do_cpt_init(cpt, mcode);
	if (ret) {
		dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
		goto fw_release;
	}

	dev_info(dev, "Microcode Loaded %s\n", mcode->version);
	mcode->is_mc_valid = 1;
	cpt->next_mc_idx++;

fw_release:
	release_firmware(fw_entry);

	return ret;
}

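/* Load the AE and SE microcode images used by the CPT engines */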
static int cpt_ucode_load(struct cpt_device *cpt)
{
	int ret = 0;
	struct device *dev = &cpt->pdev->dev;

	ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true);
	if (ret) {
		dev_err(dev, "ae:cpt_ucode_load failed with ret: %d\n", ret);
		return ret;
	}
	ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false);
	if (ret) {
		dev_err(dev, "se:cpt_ucode_load failed with ret: %d\n", ret);
		return ret;
	}

	return ret;
}

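/* Handler for the mailbox 0 MSI-X vector; dispatches to the common mailbox handler */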
static irqreturn_t cpt_mbx0_intr_handler(int irq, void *cpt_irq)
{
	struct cpt_device *cpt = (struct cpt_device *)cpt_irq;

	cpt_mbox_intr_handler(cpt, 0);

	return IRQ_HANDLED;
}

static void cpt_reset(struct cpt_device *cpt)
{
	cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1);
}

static void cpt_find_max_enabled_cores(struct cpt_device *cpt)
{
	union cptx_pf_constants pf_cnsts = {0};

	pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0));
	cpt->max_se_cores = pf_cnsts.s.se;
	cpt->max_ae_cores = pf_cnsts.s.ae;
}

static u32 cpt_check_bist_status(struct cpt_device *cpt)
{
	union cptx_pf_bist_status bist_sts = {0};

	bist_sts.u = cpt_read_csr64(cpt->reg_base,
				    CPTX_PF_BIST_STATUS(0));

	return bist_sts.u;
}

static u64 cpt_check_exe_bist_status(struct cpt_device *cpt)
{
	union cptx_pf_exe_bist_status bist_sts = {0};

	bist_sts.u = cpt_read_csr64(cpt->reg_base,
				    CPTX_PF_EXE_BIST_STATUS(0));

	return bist_sts.u;
}

static void cpt_disable_all_cores(struct cpt_device *cpt)
{
	u32 grp, timeout = 100;
	struct device *dev = &cpt->pdev->dev;

	/* Disengage the cores from groups */
	for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
		cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0);
		udelay(CSR_DELAY);
	}

	grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
	while (grp) {
		dev_err(dev, "Cores still busy");
		grp = cpt_read_csr64(cpt->reg_base,
				     CPTX_PF_EXEC_BUSY(0));
		if (!timeout--)
			break;

		udelay(CSR_DELAY);
	}
	/* Disable the cores */
	cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0);
}

/*
 * Ensure all cores are disengaged from all groups by
 * calling cpt_disable_all_cores() before calling this
 * function.
 */
static void cpt_unload_microcode(struct cpt_device *cpt)
{
	u32 grp = 0, core;

	/* Free microcode bases and reset group masks */
	for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
		struct microcode *mcode = &cpt->mcode[grp];

		if (cpt->mcode[grp].code)
			dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
					  mcode->code, mcode->phys_base);
		mcode->code = NULL;
	}
	/* Clear UCODE_BASE registers for all engines */
	for (core = 0; core < CPT_MAX_TOTAL_CORES; core++)
		cpt_write_csr64(cpt->reg_base,
				CPTX_PF_ENGX_UCODE_BASE(0, core), 0ull);
}

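/*
 * Bring the PF to a known state: reset it, verify the RAM and engine
 * BIST results, read the SE/AE core counts and leave all cores disabled.
 */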
static int cpt_device_init(struct cpt_device *cpt)
{
	u64 bist;
	struct device *dev = &cpt->pdev->dev;

	/* Reset the PF when probed first */
	cpt_reset(cpt);
	msleep(100);

	/* Check BIST status */
	bist = (u64)cpt_check_bist_status(cpt);
	if (bist) {
		dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
		return -ENODEV;
	}

	bist = cpt_check_exe_bist_status(cpt);
	if (bist) {
		dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
		return -ENODEV;
	}

	/* Get CLK frequency */
	/* Get max enabled cores */
	cpt_find_max_enabled_cores(cpt);
	/* Disable all cores */
	cpt_disable_all_cores(cpt);
	/* Reset device parameters */
	cpt->next_mc_idx = 0;
	cpt->next_group = 0;
	/* PF is ready */
	cpt->flags |= CPT_FLAG_DEVICE_READY;

	return 0;
}

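/* Allocate the PF MSI-X vectors and register the mailbox 0 interrupt handler */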
static int cpt_register_interrupts(struct cpt_device *cpt)
{
	int ret;
	struct device *dev = &cpt->pdev->dev;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
			CPT_PF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&cpt->pdev->dev, "Request for #%d msix vectors failed\n",
			CPT_PF_MSIX_VECTORS);
		return ret;
	}

	/* Register mailbox interrupt handlers */
	ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)),
			  cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
	if (ret)
		goto fail;

	/* Enable mailbox interrupt */
	cpt_enable_mbox_interrupts(cpt);
	return 0;

fail:
	dev_err(dev, "Request irq failed\n");
	pci_disable_msix(cpt->pdev);
	return ret;
}

static void cpt_unregister_interrupts(struct cpt_device *cpt)
{
	free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt);
	pci_disable_msix(cpt->pdev);
}

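/*
 * Enable SR-IOV with the requested number of VFs, capped at the TotalVFs
 * value read from the SR-IOV capability.
 */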
static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs)
{
	int pos = 0;
	int err;
	u16 total_vf_cnt;
	struct pci_dev *pdev = cpt->pdev;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
		return -ENODEV;
	}

	cpt->num_vf_en = num_vfs; /* User requested VFs */
	pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
	if (total_vf_cnt < cpt->num_vf_en)
		cpt->num_vf_en = total_vf_cnt;

	if (!total_vf_cnt)
		return 0;

	/* Enable the available VFs */
	err = pci_enable_sriov(pdev, cpt->num_vf_en);
	if (err) {
		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
			cpt->num_vf_en);
		cpt->num_vf_en = 0;
		return err;
	}

	/* TODO: Optionally enable static VQ priorities feature */

	dev_info(&pdev->dev, "SRIOV enabled, number of VFs available %d\n",
		 cpt->num_vf_en);

	cpt->flags |= CPT_FLAG_SRIOV_ENABLED;

	return 0;
}

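/*
 * PCI probe: map the PF registers, initialize the hardware, register
 * interrupts, load the microcode and enable SR-IOV.
 */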
static int cpt_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct cpt_device *cpt;
	int err;

	if (num_vfs > 16 || num_vfs < 4) {
		dev_warn(dev, "Invalid vf count %d, resetting it to 4 (default)\n",
			 num_vfs);
		num_vfs = 4;
	}

	cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
	if (!cpt)
		return -ENOMEM;

	pci_set_drvdata(pdev, cpt);
	cpt->pdev = pdev;
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto cpt_err_disable_device;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
		goto cpt_err_release_regions;
	}

	/* Map PF's configuration registers */
	cpt->reg_base = pcim_iomap(pdev, 0, 0);
	if (!cpt->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto cpt_err_release_regions;
	}

	/* CPT device HW initialization */
	err = cpt_device_init(cpt);
	if (err)
		goto cpt_err_release_regions;

	/* Register interrupts */
	err = cpt_register_interrupts(cpt);
	if (err)
		goto cpt_err_release_regions;

	err = cpt_ucode_load(cpt);
	if (err)
		goto cpt_err_unregister_interrupts;

	/* Configure SRIOV */
	err = cpt_sriov_init(cpt, num_vfs);
	if (err)
		goto cpt_err_unregister_interrupts;

	return 0;

cpt_err_unregister_interrupts:
	cpt_unregister_interrupts(cpt);
cpt_err_release_regions:
	pci_release_regions(pdev);
cpt_err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

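/*
 * PCI remove: disable all cores, unload the microcode and release every
 * resource acquired in cpt_probe().
 */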
static void cpt_remove(struct pci_dev *pdev)
{
	struct cpt_device *cpt = pci_get_drvdata(pdev);

	/* Disengage SE and AE cores from all groups */
	cpt_disable_all_cores(cpt);
	/* Unload microcode */
	cpt_unload_microcode(cpt);
	cpt_unregister_interrupts(cpt);
	pci_disable_sriov(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

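/*
 * PCI shutdown: release interrupts, regions and the PCI device without
 * touching the cores or unloading the microcode.
 */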
static void cpt_shutdown(struct pci_dev *pdev)
{
	struct cpt_device *cpt = pci_get_drvdata(pdev);

	if (!cpt)
		return;

	dev_info(&pdev->dev, "Shutdown device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	cpt_unregister_interrupts(cpt);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id cpt_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CPT_81XX_PCI_PF_DEVICE_ID) },
	{ 0, }  /* end of table */
};

static struct pci_driver cpt_pci_driver = {
	.name = DRV_NAME,
	.id_table = cpt_id_table,
	.probe = cpt_probe,
	.remove = cpt_remove,
	.shutdown = cpt_shutdown,
};

module_pci_driver(cpt_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cpt_id_table);