cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dimmtemp.c (16616B)


// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2018-2021 Intel Corporation

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/devm-helpers.h>
#include <linux/hwmon.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/peci.h>
#include <linux/peci-cpu.h>
#include <linux/units.h>
#include <linux/workqueue.h>

#include "common.h"

#define DIMM_MASK_CHECK_DELAY_JIFFIES	msecs_to_jiffies(5000)
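/*
 * When DIMM detection cannot complete yet (see check_populated_dimms()),
 * detect_work re-runs it after this 5 s delay until it reaches a
 * definitive populated / no-DIMM verdict.
 */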

/* Max number of channel ranks and DIMM index per channel */
#define CHAN_RANK_MAX_ON_HSX	8
#define DIMM_IDX_MAX_ON_HSX	3
#define CHAN_RANK_MAX_ON_BDX	4
#define DIMM_IDX_MAX_ON_BDX	3
#define CHAN_RANK_MAX_ON_BDXD	2
#define DIMM_IDX_MAX_ON_BDXD	2
#define CHAN_RANK_MAX_ON_SKX	6
#define DIMM_IDX_MAX_ON_SKX	2
#define CHAN_RANK_MAX_ON_ICX	8
#define DIMM_IDX_MAX_ON_ICX	2
#define CHAN_RANK_MAX_ON_ICXD	4
#define DIMM_IDX_MAX_ON_ICXD	2

#define CHAN_RANK_MAX		CHAN_RANK_MAX_ON_HSX
#define DIMM_IDX_MAX		DIMM_IDX_MAX_ON_HSX
#define DIMM_NUMS_MAX		(CHAN_RANK_MAX * DIMM_IDX_MAX)
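/*
 * HSX has the largest geometry of the supported platforms (8 channel
 * ranks x 3 DIMMs per channel), so DIMM_NUMS_MAX (24) bounds the
 * per-CPU dimm[] array and dimm_mask bitmap for all of them.
 */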

#define CPU_SEG_MASK		GENMASK(23, 16)
#define GET_CPU_SEG(x)		(((x) & CPU_SEG_MASK) >> 16)
#define CPU_BUS_MASK		GENMASK(7, 0)
#define GET_CPU_BUS(x)		((x) & CPU_BUS_MASK)

#define DIMM_TEMP_MAX		GENMASK(15, 8)
#define DIMM_TEMP_CRIT		GENMASK(23, 16)
#define GET_TEMP_MAX(x)		(((x) & DIMM_TEMP_MAX) >> 8)
#define GET_TEMP_CRIT(x)	(((x) & DIMM_TEMP_CRIT) >> 16)
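/*
 * Both thresholds are packed into one register, in whole degrees C:
 * bits [15:8] carry the "max" limit, bits [23:16] the "crit" limit.
 * E.g. (illustrative value) 0x005f5500 decodes to temp_max = 85 and
 * temp_crit = 95, scaled to millidegrees by the callers below.
 */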

#define NO_DIMM_RETRY_COUNT_MAX	5

struct peci_dimmtemp;

struct dimm_info {
	int chan_rank_max;
	int dimm_idx_max;
	u8 min_peci_revision;
	int (*read_thresholds)(struct peci_dimmtemp *priv, int dimm_order,
			       int chan_rank, u32 *data);
};

struct peci_dimm_thresholds {
	long temp_max;
	long temp_crit;
	struct peci_sensor_state state;
};

enum peci_dimm_threshold_type {
	temp_max_type,
	temp_crit_type,
};

struct peci_dimmtemp {
	struct peci_device *peci_dev;
	struct device *dev;
	const char *name;
	const struct dimm_info *gen_info;
	struct delayed_work detect_work;
	struct {
		struct peci_sensor_data temp;
		struct peci_dimm_thresholds thresholds;
	} dimm[DIMM_NUMS_MAX];
	char **dimmtemp_label;
	DECLARE_BITMAP(dimm_mask, DIMM_NUMS_MAX);
	u8 no_dimm_retry_count;
};
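/*
 * The PCS DDR DIMM temperature response packs one byte per DIMM into
 * a u32: DIMM 0 in bits [7:0], DIMM 1 in [15:8], DIMM 2 in [23:16].
 * E.g. (illustrative value) reg = 0x002d2a28 reads as 40 degrees C
 * for DIMM 0, 42 for DIMM 1 and 45 for DIMM 2.
 */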
static u8 __dimm_temp(u32 reg, int dimm_order)
{
	return (reg >> (dimm_order * 8)) & 0xff;
}

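/*
 * hwmon channel numbers map linearly onto (chan_rank, dimm_order):
 * dimm_no = chan_rank * dimm_idx_max + dimm_order. With
 * dimm_idx_max = 3 (HSX), dimm_no 7 decomposes to chan_rank 2 and
 * dimm_order 1 -- the same encoding check_populated_dimms() uses
 * when building dimm_mask.
 */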
static int get_dimm_temp(struct peci_dimmtemp *priv, int dimm_no, long *val)
{
	int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
	int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
	int ret = 0;
	u32 data;

	mutex_lock(&priv->dimm[dimm_no].temp.state.lock);
	if (!peci_sensor_need_update(&priv->dimm[dimm_no].temp.state))
		goto skip_update;

	ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &data);
	if (ret)
		goto unlock;

	priv->dimm[dimm_no].temp.value = __dimm_temp(data, dimm_order) * MILLIDEGREE_PER_DEGREE;

	peci_sensor_mark_updated(&priv->dimm[dimm_no].temp.state);

skip_update:
	*val = priv->dimm[dimm_no].temp.value;
unlock:
	mutex_unlock(&priv->dimm[dimm_no].temp.state.lock);
	return ret;
}

static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no)
{
	int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
	int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
	u32 data;
	int ret;

	if (!peci_sensor_need_update(&priv->dimm[dimm_no].thresholds.state))
		return 0;

	ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data);
	if (ret == -ENODATA) /* Use default or previous value */
		return 0;
	if (ret)
		return ret;

	priv->dimm[dimm_no].thresholds.temp_max = GET_TEMP_MAX(data) * MILLIDEGREE_PER_DEGREE;
	priv->dimm[dimm_no].thresholds.temp_crit = GET_TEMP_CRIT(data) * MILLIDEGREE_PER_DEGREE;

	peci_sensor_mark_updated(&priv->dimm[dimm_no].thresholds.state);

	return 0;
}

static int get_dimm_thresholds(struct peci_dimmtemp *priv, enum peci_dimm_threshold_type type,
			       int dimm_no, long *val)
{
	int ret;

	mutex_lock(&priv->dimm[dimm_no].thresholds.state.lock);
	ret = update_thresholds(priv, dimm_no);
	if (ret)
		goto unlock;

	switch (type) {
	case temp_max_type:
		*val = priv->dimm[dimm_no].thresholds.temp_max;
		break;
	case temp_crit_type:
		*val = priv->dimm[dimm_no].thresholds.temp_crit;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
unlock:
	mutex_unlock(&priv->dimm[dimm_no].thresholds.state.lock);

	return ret;
}

static int dimmtemp_read_string(struct device *dev,
				enum hwmon_sensor_types type,
				u32 attr, int channel, const char **str)
{
	struct peci_dimmtemp *priv = dev_get_drvdata(dev);

	if (attr != hwmon_temp_label)
		return -EOPNOTSUPP;

	*str = (const char *)priv->dimmtemp_label[channel];

	return 0;
}

static int dimmtemp_read(struct device *dev, enum hwmon_sensor_types type,
			 u32 attr, int channel, long *val)
{
	struct peci_dimmtemp *priv = dev_get_drvdata(dev);

	switch (attr) {
	case hwmon_temp_input:
		return get_dimm_temp(priv, channel, val);
	case hwmon_temp_max:
		return get_dimm_thresholds(priv, temp_max_type, channel, val);
	case hwmon_temp_crit:
		return get_dimm_thresholds(priv, temp_crit_type, channel, val);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static umode_t dimmtemp_is_visible(const void *data, enum hwmon_sensor_types type,
				   u32 attr, int channel)
{
	const struct peci_dimmtemp *priv = data;

	if (test_bit(channel, priv->dimm_mask))
		return 0444;

	return 0;
}

static const struct hwmon_ops peci_dimmtemp_ops = {
	.is_visible = dimmtemp_is_visible,
	.read_string = dimmtemp_read_string,
	.read = dimmtemp_read,
};

static int check_populated_dimms(struct peci_dimmtemp *priv)
{
	int chan_rank_max = priv->gen_info->chan_rank_max;
	int dimm_idx_max = priv->gen_info->dimm_idx_max;
	u32 chan_rank_empty = 0;
	u32 dimm_mask = 0;
	int chan_rank, dimm_idx, ret;
	u32 pcs;

	BUILD_BUG_ON(BITS_PER_TYPE(chan_rank_empty) < CHAN_RANK_MAX);
	BUILD_BUG_ON(BITS_PER_TYPE(dimm_mask) < DIMM_NUMS_MAX);
	if (chan_rank_max * dimm_idx_max > DIMM_NUMS_MAX) {
		WARN_ONCE(1, "Unsupported number of DIMMs - chan_rank_max: %d, dimm_idx_max: %d",
			  chan_rank_max, dimm_idx_max);
		return -EINVAL;
	}

	for (chan_rank = 0; chan_rank < chan_rank_max; chan_rank++) {
		ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &pcs);
		if (ret) {
			/*
			 * Overall, we expect either success or -EINVAL in
			 * order to determine whether DIMM is populated or not.
			 * For anything else we fall back to deferring the
			 * detection to be performed at a later point in time.
			 */
			if (ret == -EINVAL) {
				chan_rank_empty |= BIT(chan_rank);
				continue;
			}

			return -EAGAIN;
		}

		for (dimm_idx = 0; dimm_idx < dimm_idx_max; dimm_idx++)
			if (__dimm_temp(pcs, dimm_idx))
				dimm_mask |= BIT(chan_rank * dimm_idx_max + dimm_idx);
	}

	/*
	 * If we got all -EINVALs, it means that the CPU doesn't have any
	 * DIMMs. Unfortunately, it may also happen at the very start of
	 * host platform boot. Retrying a couple of times lets us make sure
	 * that the state is persistent.
	 */
	if (chan_rank_empty == GENMASK(chan_rank_max - 1, 0)) {
		if (priv->no_dimm_retry_count < NO_DIMM_RETRY_COUNT_MAX) {
			priv->no_dimm_retry_count++;

			return -EAGAIN;
		}

		return -ENODEV;
	}

	/*
	 * It's possible that memory training is not done yet. In this case we
	 * defer the detection to be performed at a later point in time.
	 */
	if (!dimm_mask) {
		priv->no_dimm_retry_count = 0;
		return -EAGAIN;
	}

	dev_dbg(priv->dev, "Scanned populated DIMMs: %#x\n", dimm_mask);

	bitmap_from_arr32(priv->dimm_mask, &dimm_mask, DIMM_NUMS_MAX);

	return 0;
}

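/*
 * Labels follow the usual board silkscreen convention: the channel
 * rank becomes a letter and the DIMM index a 1-based number, so
 * channel 0 is "DIMM A1" and, with dimm_idx_max = 3, hwmon channel 7
 * becomes "DIMM C2".
 */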
static int create_dimm_temp_label(struct peci_dimmtemp *priv, int chan)
{
	int rank = chan / priv->gen_info->dimm_idx_max;
	int idx = chan % priv->gen_info->dimm_idx_max;

	priv->dimmtemp_label[chan] = devm_kasprintf(priv->dev, GFP_KERNEL,
						    "DIMM %c%d", 'A' + rank,
						    idx + 1);
	if (!priv->dimmtemp_label[chan])
		return -ENOMEM;

	return 0;
}

static const struct hwmon_channel_info *peci_dimmtemp_temp_info[] = {
	HWMON_CHANNEL_INFO(temp,
			   [0 ... DIMM_NUMS_MAX - 1] = HWMON_T_LABEL |
				HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT),
	NULL
};
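/*
 * All DIMM_NUMS_MAX channels are declared up front;
 * dimmtemp_is_visible() then exposes (read-only) only the ones whose
 * bit is set in dimm_mask, so unpopulated slots never appear in sysfs.
 */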

static const struct hwmon_chip_info peci_dimmtemp_chip_info = {
	.ops = &peci_dimmtemp_ops,
	.info = peci_dimmtemp_temp_info,
};

static int create_dimm_temp_info(struct peci_dimmtemp *priv)
{
	int ret, i, channels;
	struct device *dev;

	/*
	 * We expect to either find populated DIMMs and carry on with creating
	 * sensors, or find out that there are no DIMMs populated.
	 * All other states mean that the platform never reached the state that
	 * allows to check DIMM state - causing us to retry later on.
	 */
	ret = check_populated_dimms(priv);
	if (ret == -ENODEV) {
		dev_dbg(priv->dev, "No DIMMs found\n");
		return 0;
	} else if (ret) {
		schedule_delayed_work(&priv->detect_work, DIMM_MASK_CHECK_DELAY_JIFFIES);
		dev_dbg(priv->dev, "Deferred populating DIMM temp info\n");
		return ret;
	}

	channels = priv->gen_info->chan_rank_max * priv->gen_info->dimm_idx_max;

	priv->dimmtemp_label = devm_kzalloc(priv->dev, channels * sizeof(char *), GFP_KERNEL);
	if (!priv->dimmtemp_label)
		return -ENOMEM;

	for_each_set_bit(i, priv->dimm_mask, DIMM_NUMS_MAX) {
		ret = create_dimm_temp_label(priv, i);
		if (ret)
			return ret;
		mutex_init(&priv->dimm[i].thresholds.state.lock);
		mutex_init(&priv->dimm[i].temp.state.lock);
	}

	dev = devm_hwmon_device_register_with_info(priv->dev, priv->name, priv,
						   &peci_dimmtemp_chip_info, NULL);
	if (IS_ERR(dev)) {
		dev_err(priv->dev, "Failed to register hwmon device\n");
		return PTR_ERR(dev);
	}

	dev_dbg(priv->dev, "%s: sensor '%s'\n", dev_name(dev), priv->name);

	return 0;
}

static void create_dimm_temp_info_delayed(struct work_struct *work)
{
	struct peci_dimmtemp *priv = container_of(to_delayed_work(work),
						  struct peci_dimmtemp,
						  detect_work);
	int ret;

	ret = create_dimm_temp_info(priv);
	if (ret && ret != -EAGAIN)
		dev_err(priv->dev, "Failed to populate DIMM temp info\n");
}

static int peci_dimmtemp_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
{
	struct device *dev = &adev->dev;
	struct peci_device *peci_dev = to_peci_device(dev->parent);
	struct peci_dimmtemp *priv;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->name = devm_kasprintf(dev, GFP_KERNEL, "peci_dimmtemp.cpu%d",
				    peci_dev->info.socket_id);
	if (!priv->name)
		return -ENOMEM;

	priv->dev = dev;
	priv->peci_dev = peci_dev;
	priv->gen_info = (const struct dimm_info *)id->driver_data;

	/*
	 * This is just a sanity check. Since we're using commands that are
	 * guaranteed to be supported on a given platform, we should never see
	 * revision lower than expected.
	 */
	if (peci_dev->info.peci_revision < priv->gen_info->min_peci_revision)
		dev_warn(priv->dev,
			 "Unexpected PECI revision %#x, some features may be unavailable\n",
			 peci_dev->info.peci_revision);

	ret = devm_delayed_work_autocancel(priv->dev, &priv->detect_work,
					   create_dimm_temp_info_delayed);
	if (ret)
		return ret;

	ret = create_dimm_temp_info(priv);
	if (ret && ret != -EAGAIN) {
		dev_err(dev, "Failed to populate DIMM temp info\n");
		return ret;
	}

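	/*
	 * -EAGAIN is not a probe failure here: create_dimm_temp_info()
	 * has already scheduled detect_work, so detection simply
	 * completes asynchronously after probe returns.
	 */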
	return 0;
}

static int
read_thresholds_hsx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u8 dev, func;
	u16 reg;
	int ret;

	/*
	 * Device 20, Function 0: IMC 0 channel 0 -> rank 0
	 * Device 20, Function 1: IMC 0 channel 1 -> rank 1
	 * Device 21, Function 0: IMC 0 channel 2 -> rank 2
	 * Device 21, Function 1: IMC 0 channel 3 -> rank 3
	 * Device 23, Function 0: IMC 1 channel 0 -> rank 4
	 * Device 23, Function 1: IMC 1 channel 1 -> rank 5
	 * Device 24, Function 0: IMC 1 channel 2 -> rank 6
	 * Device 24, Function 1: IMC 1 channel 3 -> rank 7
	 */
	dev = 20 + chan_rank / 2 + chan_rank / 4;
	func = chan_rank % 2;
	reg = 0x120 + dimm_order * 4;
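	/*
	 * The arithmetic reproduces the table above: chan_rank / 2 steps
	 * the device every two ranks, and chan_rank / 4 skips over
	 * device 22 between the two IMCs. Per-DIMM threshold registers
	 * sit 4 bytes apart starting at 0x120.
	 */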

	ret = peci_pci_local_read(priv->peci_dev, 1, dev, func, reg, data);
	if (ret)
		return ret;

	return 0;
}

static int
read_thresholds_bdxd(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u8 dev, func;
	u16 reg;
	int ret;

	/*
	 * Device 10, Function 2: IMC 0 channel 0 -> rank 0
	 * Device 10, Function 6: IMC 0 channel 1 -> rank 1
	 * Device 12, Function 2: IMC 1 channel 0 -> rank 2
	 * Device 12, Function 6: IMC 1 channel 1 -> rank 3
	 */
	dev = 10 + chan_rank / 2 * 2;
	func = (chan_rank % 2) ? 6 : 2;
	reg = 0x120 + dimm_order * 4;

	ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data);
	if (ret)
		return ret;

	return 0;
}

static int
read_thresholds_skx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u8 dev, func;
	u16 reg;
	int ret;

	/*
	 * Device 10, Function 2: IMC 0 channel 0 -> rank 0
	 * Device 10, Function 6: IMC 0 channel 1 -> rank 1
	 * Device 11, Function 2: IMC 0 channel 2 -> rank 2
	 * Device 12, Function 2: IMC 1 channel 0 -> rank 3
	 * Device 12, Function 6: IMC 1 channel 1 -> rank 4
	 * Device 13, Function 2: IMC 1 channel 2 -> rank 5
	 */
	dev = 10 + chan_rank / 3 * 2 + (chan_rank % 3 == 2 ? 1 : 0);
	func = chan_rank % 3 == 1 ? 6 : 2;
	reg = 0x120 + dimm_order * 4;
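	/*
	 * Worked example against the table above: chan_rank 4 gives
	 * dev = 10 + 1 * 2 + 0 = 12 and func = 6, i.e. IMC 1 channel 1.
	 */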

	ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data);
	if (ret)
		return ret;

	return 0;
}

static int
read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
{
	u32 reg_val;
	u64 offset;
	int ret;
	u8 dev;

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val);
	if (ret || !(reg_val & BIT(31)))
		return -ENODATA; /* Use default or previous value */

	ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val);
	if (ret)
		return -ENODATA; /* Use default or previous value */
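	/*
	 * BIT(31) of register 0xd4 gates the read: until it is set, the
	 * segment/bus information in 0xd0 is not usable and the caller
	 * keeps the default or previous limits. 0xd0 carries the CPU
	 * segment in bits [23:16] and the bus in bits [7:0] (see
	 * GET_CPU_SEG()/GET_CPU_BUS()), which address the MMIO read below.
	 */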

	/*
	 * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0
	 * Device 26, Offset 264e0: IMC 0 channel 1 -> rank 1
	 * Device 27, Offset 224e0: IMC 1 channel 0 -> rank 2
	 * Device 27, Offset 264e0: IMC 1 channel 1 -> rank 3
	 * Device 28, Offset 224e0: IMC 2 channel 0 -> rank 4
	 * Device 28, Offset 264e0: IMC 2 channel 1 -> rank 5
	 * Device 29, Offset 224e0: IMC 3 channel 0 -> rank 6
	 * Device 29, Offset 264e0: IMC 3 channel 1 -> rank 7
	 */
	dev = 26 + chan_rank / 2;
	offset = 0x224e0 + dimm_order * 4 + (chan_rank % 2) * 0x4000;

	ret = peci_mmio_read(priv->peci_dev, 0, GET_CPU_SEG(reg_val), GET_CPU_BUS(reg_val),
			     dev, 0, offset, data);
	if (ret)
		return ret;

	return 0;
}

static const struct dimm_info dimm_hsx = {
	.chan_rank_max	= CHAN_RANK_MAX_ON_HSX,
	.dimm_idx_max	= DIMM_IDX_MAX_ON_HSX,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_hsx,
};

static const struct dimm_info dimm_bdx = {
	.chan_rank_max	= CHAN_RANK_MAX_ON_BDX,
	.dimm_idx_max	= DIMM_IDX_MAX_ON_BDX,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_hsx,
};

static const struct dimm_info dimm_bdxd = {
	.chan_rank_max	= CHAN_RANK_MAX_ON_BDXD,
	.dimm_idx_max	= DIMM_IDX_MAX_ON_BDXD,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_bdxd,
};

static const struct dimm_info dimm_skx = {
	.chan_rank_max	= CHAN_RANK_MAX_ON_SKX,
	.dimm_idx_max	= DIMM_IDX_MAX_ON_SKX,
	.min_peci_revision = 0x33,
	.read_thresholds = &read_thresholds_skx,
};

static const struct dimm_info dimm_icx = {
	.chan_rank_max	= CHAN_RANK_MAX_ON_ICX,
	.dimm_idx_max	= DIMM_IDX_MAX_ON_ICX,
	.min_peci_revision = 0x40,
	.read_thresholds = &read_thresholds_icx,
};

static const struct dimm_info dimm_icxd = {
	.chan_rank_max	= CHAN_RANK_MAX_ON_ICXD,
	.dimm_idx_max	= DIMM_IDX_MAX_ON_ICXD,
	.min_peci_revision = 0x40,
	.read_thresholds = &read_thresholds_icx,
};

static const struct auxiliary_device_id peci_dimmtemp_ids[] = {
	{
		.name = "peci_cpu.dimmtemp.hsx",
		.driver_data = (kernel_ulong_t)&dimm_hsx,
	},
	{
		.name = "peci_cpu.dimmtemp.bdx",
		.driver_data = (kernel_ulong_t)&dimm_bdx,
	},
	{
		.name = "peci_cpu.dimmtemp.bdxd",
		.driver_data = (kernel_ulong_t)&dimm_bdxd,
	},
	{
		.name = "peci_cpu.dimmtemp.skx",
		.driver_data = (kernel_ulong_t)&dimm_skx,
	},
	{
		.name = "peci_cpu.dimmtemp.icx",
		.driver_data = (kernel_ulong_t)&dimm_icx,
	},
	{
		.name = "peci_cpu.dimmtemp.icxd",
		.driver_data = (kernel_ulong_t)&dimm_icxd,
	},
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, peci_dimmtemp_ids);

static struct auxiliary_driver peci_dimmtemp_driver = {
	.probe		= peci_dimmtemp_probe,
	.id_table	= peci_dimmtemp_ids,
};

module_auxiliary_driver(peci_dimmtemp_driver);

MODULE_AUTHOR("Jae Hyun Yoo <jae.hyun.yoo@linux.intel.com>");
MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
MODULE_DESCRIPTION("PECI dimmtemp driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(PECI_CPU);
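
/*
 * Illustrative resulting sysfs layout (hypothetical hwmonX instance,
 * assuming hwmon channel 0, i.e. DIMM A1, is populated; the values
 * shown are examples only):
 *
 *   /sys/class/hwmon/hwmonX/name        -> peci_dimmtemp.cpu0
 *   /sys/class/hwmon/hwmonX/temp1_label -> DIMM A1
 *   /sys/class/hwmon/hwmonX/temp1_input -> 40000  (millidegrees C)
 *   /sys/class/hwmon/hwmonX/temp1_max   -> 85000
 *   /sys/class/hwmon/hwmonX/temp1_crit  -> 95000
 */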