cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ufshcd-pci.c (15476B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller PCI glue driver
 *
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <ufs/ufshcd.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
#include <linux/uuid.h>
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>

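/* Variant hook run by ufshcd_pci_probe() after ufshcd_init() succeeds. */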
struct ufs_host {
	void (*late_init)(struct ufs_hba *hba);
};

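/*
 * ACPI _DSM function indices. Function 0 returns the bitmap of supported
 * functions; each index doubles as a bit position in that bitmap.
 */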
enum {
	INTEL_DSM_FNS		=  0,
	INTEL_DSM_RESET		=  1,
};

struct intel_host {
	struct ufs_host ufs_host;
	u32		dsm_fns;
	u32		active_ltr;
	u32		idle_ltr;
	struct dentry	*debugfs_root;
	struct gpio_desc *reset_gpio;
};

static const guid_t intel_dsm_guid =
	GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
		  0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);

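/*
 * Evaluate _DSM function @fn and copy up to the first four bytes of the
 * returned buffer into @result (zero-padded if the buffer is shorter).
 */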
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
		       unsigned int fn, u32 *result)
{
	union acpi_object *obj;
	int err = 0;
	size_t len;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
	if (!obj)
		return -EOPNOTSUPP;

	if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
		err = -EINVAL;
		goto out;
	}

	len = min_t(size_t, obj->buffer.length, 4);

	*result = 0;
	memcpy(result, obj->buffer.pointer, len);
out:
	ACPI_FREE(obj);

	return err;
}

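/* Reject functions the firmware did not advertise in the function bitmap. */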
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
		     unsigned int fn, u32 *result)
{
	if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
		return -EOPNOTSUPP;

	return __intel_dsm(intel_host, dev, fn, result);
}

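/* Cache the supported-function bitmap (function INTEL_DSM_FNS, i.e. 0). */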
static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
{
	int err;

	err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
	dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
}

static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status status)
{
	/* Cannot enable ICE until after HC enable */
	if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
		u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);

		hce |= CRYPTO_GENERAL_ENABLE;
		ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
	}

	return 0;
}

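/*
 * Turn off the host's TX LCC (UniPro Line Control Commands) before link
 * startup if it is enabled; as in other UFS host drivers, the rationale is
 * that some devices do not cope with LCC.
 */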
static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
	u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
	u32 lcc_enable = 0;

	ufshcd_dme_get(hba, attr, &lcc_enable);
	if (lcc_enable)
		ufshcd_disable_host_tx_lcc(hba);

	return 0;
}

static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
					 enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_intel_disable_lcc(hba);
		break;
	case POST_CHANGE:
		break;
	default:
		break;
	}

	return err;
}

static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
{
	struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
	int ret;

	pwr_info.lane_rx = lanes;
	pwr_info.lane_tx = lanes;
	ret = ufshcd_config_pwr_mode(hba, &pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
			__func__, lanes, ret);
	return ret;
}

static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		if (ufshcd_is_hs_mode(dev_max_params) &&
		    (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
			ufs_intel_set_lanes(hba, 2);
		memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
		break;
	case POST_CHANGE:
		if (ufshcd_is_hs_mode(dev_req_params)) {
			u32 peer_granularity;

			usleep_range(1000, 1250);
			err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
						  &peer_granularity);
		}
		break;
	default:
		break;
	}

	return err;
}

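/*
 * If local and peer PA_GRANULARITY match, bump the peer's PA_TACTIVATE two
 * units above the local value, presumably to give the local end extra
 * margin during hibern8 exit.
 */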
static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
{
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
	if (ret)
		goto out;

	if (granularity == peer_granularity) {
		u32 new_peer_pa_tactivate = pa_tactivate + 2;

		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
	}
out:
	return ret;
}

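/*
 * Vendor-specific LTR registers in the UFS host MMIO space: bits 9:0 hold
 * the latency value, bits 11:10 select the scale (1 us or 32 us), and
 * bit 15 marks the LTR requirement as active.
 */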
#define INTEL_ACTIVELTR		0x804
#define INTEL_IDLELTR		0x808

#define INTEL_LTR_REQ		BIT(15)
#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US	(2 << 10)
#define INTEL_LTR_SCALE_32US	(3 << 10)
#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)

static void intel_cache_ltr(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
	host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}

static void intel_ltr_set(struct device *dev, s32 val)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct intel_host *host = ufshcd_get_variant(hba);
	u32 ltr;

	pm_runtime_get_sync(dev);

	/*
	 * Program latency tolerance (LTR) according to what the PM QoS layer
	 * has asked for, or disable it if we were passed a negative value or
	 * PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~INTEL_LTR_REQ;
	} else {
		ltr |= INTEL_LTR_REQ;
		ltr &= ~INTEL_LTR_SCALE_MASK;
		ltr &= ~INTEL_LTR_VALUE_MASK;

		if (val > INTEL_LTR_VALUE_MASK) {
			/* Too big for the 1 us scale: divide by 32, use the 32 us scale */
			val >>= 5;
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}
	}

	if (ltr == host->active_ltr)
		goto out;

	writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
	writel(ltr, hba->mmio_base + INTEL_IDLELTR);

	/* Cache the values into the intel_host structure */
	intel_cache_ltr(hba);
out:
	pm_runtime_put(dev);
}

static void intel_ltr_expose(struct device *dev)
{
	dev->power.set_latency_tolerance = intel_ltr_set;
	dev_pm_qos_expose_latency_tolerance(dev);
}

static void intel_ltr_hide(struct device *dev)
{
	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}

static void intel_add_debugfs(struct ufs_hba *hba)
{
	struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
	struct intel_host *host = ufshcd_get_variant(hba);

	intel_cache_ltr(hba);

	host->debugfs_root = dir;
	debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
	debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}

static void intel_remove_debugfs(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	debugfs_remove_recursive(host->debugfs_root);
}

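/*
 * Reset via the ACPI _DSM when available, otherwise via the reset GPIO.
 * Note the availability test uses INTEL_DSM_RESET (value 1) as a mask,
 * i.e. bit 0 of dsm_fns; intel_dsm() still validates bit INTEL_DSM_RESET
 * before evaluating the function.
 */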
static int ufs_intel_device_reset(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	if (host->dsm_fns & INTEL_DSM_RESET) {
		u32 result = 0;
		int err;

		err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
		if (!err && !result)
			err = -EIO;
		if (err)
			dev_err(hba->dev, "%s: DSM error %d result %u\n",
				__func__, err, result);
		return err;
	}

	if (!host->reset_gpio)
		return -EOPNOTSUPP;

	gpiod_set_value_cansleep(host->reset_gpio, 1);
	usleep_range(10, 15);

	gpiod_set_value_cansleep(host->reset_gpio, 0);
	usleep_range(10, 15);

	return 0;
}

static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
{
	/* GPIO in _DSD has active low setting */
	return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}

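/*
 * Common init: enable runtime autosuspend, probe the _DSM function bitmap,
 * and advertise DeepSleep only when a working device reset path (DSM or
 * GPIO) exists.
 */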
static int ufs_intel_common_init(struct ufs_hba *hba)
{
	struct intel_host *host;

	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;
	ufshcd_set_variant(hba, host);
	intel_dsm_init(host, hba->dev);
	if (host->dsm_fns & INTEL_DSM_RESET) {
		if (hba->vops->device_reset)
			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
	} else {
		if (hba->vops->device_reset)
			host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
		if (IS_ERR(host->reset_gpio)) {
			dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
				__func__, PTR_ERR(host->reset_gpio));
			host->reset_gpio = NULL;
		}
		if (host->reset_gpio) {
			gpiod_set_value_cansleep(host->reset_gpio, 0);
			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
		}
	}
	intel_ltr_expose(hba->dev);
	intel_add_debugfs(hba);
	return 0;
}

static void ufs_intel_common_exit(struct ufs_hba *hba)
{
	intel_remove_debugfs(hba);
	intel_ltr_hide(hba->dev);
}

static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (ufshcd_is_link_hibern8(hba)) {
		int ret = ufshcd_uic_hibern8_exit(hba);

		if (!ret) {
			ufshcd_set_link_active(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
				__func__, ret);
			/*
			 * Force reset and restore. Any other actions can lead
			 * to an unrecoverable state.
			 */
			ufshcd_set_link_off(hba);
		}
	}

	return 0;
}

static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	return ufs_intel_common_init(hba);
}

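/*
 * Per ufshcd.h, UFS_PM_LVL_6 means DeepSleep power mode with the link off,
 * while UFS_PM_LVL_5 means power-down mode with the link off.
 */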
static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
{
	/* LKF always needs a full reset, so set PM accordingly */
	if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
		hba->spm_lvl = UFS_PM_LVL_6;
		hba->rpm_lvl = UFS_PM_LVL_6;
	} else {
		hba->spm_lvl = UFS_PM_LVL_5;
		hba->rpm_lvl = UFS_PM_LVL_5;
	}
}

static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
	struct ufs_host *ufs_host;
	int err;

	hba->nop_out_timeout = 200;
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	hba->caps |= UFSHCD_CAP_CRYPTO;
	err = ufs_intel_common_init(hba);
	ufs_host = ufshcd_get_variant(hba);
	ufs_host->late_init = ufs_intel_lkf_late_init;
	return err;
}

static int ufs_intel_adl_init(struct ufs_hba *hba)
{
	hba->nop_out_timeout = 200;
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	return ufs_intel_common_init(hba);
}

static int ufs_intel_mtl_init(struct ufs_hba *hba)
{
	hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
	return ufs_intel_common_init(hba);
}

static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
	.name                   = "intel-pci",
	.init			= ufs_intel_common_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
	.name                   = "intel-pci",
	.init			= ufs_intel_ehl_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
	.name                   = "intel-pci",
	.init			= ufs_intel_lkf_init,
	.exit			= ufs_intel_common_exit,
	.hce_enable_notify	= ufs_intel_hce_enable_notify,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.pwr_change_notify	= ufs_intel_lkf_pwr_change_notify,
	.apply_dev_quirks	= ufs_intel_lkf_apply_dev_quirks,
	.resume			= ufs_intel_resume,
	.device_reset		= ufs_intel_device_reset,
};

static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
	.name			= "intel-pci",
	.init			= ufs_intel_adl_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
	.device_reset		= ufs_intel_device_reset,
};

static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
	.name                   = "intel-pci",
	.init			= ufs_intel_mtl_init,
	.exit			= ufs_intel_common_exit,
	.hce_enable_notify	= ufs_intel_hce_enable_notify,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
	.device_reset		= ufs_intel_device_reset,
};

#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	/* Force a full reset and restore */
	ufshcd_set_link_off(hba);

	return ufshcd_system_resume(dev);
}
#endif

/**
 * ufshcd_pci_shutdown - main function to put the controller in reset state
 * @pdev: pointer to PCI device handle
 */
static void ufshcd_pci_shutdown(struct pci_dev *pdev)
{
	ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
}

/**
 * ufshcd_pci_remove - de-allocate the PCI/SCSI host and the host memory
 *		space data structures
 * @pdev: pointer to PCI handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
	struct ufs_hba *hba = pci_get_drvdata(pdev);

	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	ufshcd_remove(hba);
	ufshcd_dealloc_host(hba);
}

/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Returns 0 on success, non-zero value on failure
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ufs_host *ufs_host;
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pcim_enable_device failed\n");
		return err;
	}

	pci_set_master(pdev);

	/* Request and map BAR 0, which holds the UFSHCI register space */
	err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
	if (err < 0) {
		dev_err(&pdev->dev, "request and iomap failed\n");
		return err;
	}

	mmio_base = pcim_iomap_table(pdev)[0];

	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err) {
		dev_err(&pdev->dev, "Allocation failed\n");
		return err;
	}

	hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;

	err = ufshcd_init(hba, mmio_base, pdev->irq);
	if (err) {
		dev_err(&pdev->dev, "Initialization failed\n");
		ufshcd_dealloc_host(hba);
		return err;
	}

	ufs_host = ufshcd_get_variant(hba);
	if (ufs_host && ufs_host->late_init)
		ufs_host->late_init(hba);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}

static const struct dev_pm_ops ufshcd_pci_pm_ops = {
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
	.suspend	= ufshcd_system_suspend,
	.resume		= ufshcd_system_resume,
	.freeze		= ufshcd_system_suspend,
	.thaw		= ufshcd_system_resume,
	.poweroff	= ufshcd_system_suspend,
	.restore	= ufshcd_pci_restore,
	.prepare	= ufshcd_suspend_prepare,
	.complete	= ufshcd_resume_complete,
#endif
};

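/*
 * The Intel device IDs below correspond, going by the vops names, to
 * Cannon Lake, Elkhart Lake, Lakefield, Alder Lake and Meteor Lake parts.
 */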
static const struct pci_device_id ufshcd_pci_tbl[] = {
	{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
	{ }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);

static struct pci_driver ufshcd_pci_driver = {
	.name = UFSHCD,
	.id_table = ufshcd_pci_tbl,
	.probe = ufshcd_pci_probe,
	.remove = ufshcd_pci_remove,
	.shutdown = ufshcd_pci_shutdown,
	.driver = {
		.pm = &ufshcd_pci_pm_ops
	},
};

module_pci_driver(ufshcd_pci_driver);

MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");