cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

coresight-cpu-debug.c (16701B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Linaro Limited. All rights reserved.
 *
 * Author: Leo Yan <leo.yan@linaro.org>
 */
#include <linux/amba/bus.h>
#include <linux/coresight.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/panic_notifier.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include "coresight-priv.h"

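/*
 * Offsets of the external debug registers used below, relative to each
 * CPU's memory-mapped debug component (see ARM DDI 0487 for the ARMv8
 * definitions).
 */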
#define EDPCSR				0x0A0
#define EDCIDSR				0x0A4
#define EDVIDSR				0x0A8
#define EDPCSR_HI			0x0AC
#define EDOSLAR				0x300
#define EDPRCR				0x310
#define EDPRSR				0x314
#define EDDEVID1			0xFC4
#define EDDEVID				0xFC8

#define EDPCSR_PROHIBITED		0xFFFFFFFF

/* bits definition for EDPCSR */
#define EDPCSR_THUMB			BIT(0)
#define EDPCSR_ARM_INST_MASK		GENMASK(31, 2)
#define EDPCSR_THUMB_INST_MASK		GENMASK(31, 1)

/* bits definition for EDPRCR */
#define EDPRCR_COREPURQ			BIT(3)
#define EDPRCR_CORENPDRQ		BIT(0)

/* bits definition for EDPRSR */
#define EDPRSR_DLK			BIT(6)
#define EDPRSR_PU			BIT(0)

/* bits definition for EDVIDSR */
#define EDVIDSR_NS			BIT(31)
#define EDVIDSR_E2			BIT(30)
#define EDVIDSR_E3			BIT(29)
#define EDVIDSR_HV			BIT(28)
#define EDVIDSR_VMID			GENMASK(7, 0)

/*
 * bits definition for EDDEVID1:PCSROffset
 *
 * NOTE: ARMv8 and ARMv7 define this field differently, so consolidate
 * the bit definitions as below:
 *
 * 0b0000 - Sample offset applies based on the instruction state; we
 *          rely on EDDEVID to check whether EDPCSR is implemented or not
 * 0b0001 - No offset applies.
 * 0b0010 - No offset applies, but do not use in AArch32 mode
 *
 */
#define EDDEVID1_PCSR_OFFSET_MASK	GENMASK(3, 0)
#define EDDEVID1_PCSR_OFFSET_INS_SET	(0x0)
#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32	(0x2)

/* bits definition for EDDEVID */
#define EDDEVID_PCSAMPLE_MODE		GENMASK(3, 0)
#define EDDEVID_IMPL_EDPCSR		(0x1)
#define EDDEVID_IMPL_EDPCSR_EDCIDSR	(0x2)
#define EDDEVID_IMPL_FULL		(0x3)

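/*
 * Poll interval and timeout, in microseconds, used while waiting for the
 * CPU power domain to come up in debug_force_cpu_powered_up() (~32ms total).
 */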
#define DEBUG_WAIT_SLEEP		1000
#define DEBUG_WAIT_TIMEOUT		32000

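/*
 * Per-CPU driver state: @base maps this CPU's debug registers, and the
 * edpcsr/edpcsr_hi/edprsr/edvidsr/edcidsr fields hold the snapshot taken
 * by debug_read_regs() for later decoding in debug_dump_regs().
 */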
struct debug_drvdata {
	void __iomem	*base;
	struct device	*dev;
	int		cpu;

	bool		edpcsr_present;
	bool		edcidsr_present;
	bool		edvidsr_present;
	bool		pc_has_offset;

	u32		edpcsr;
	u32		edpcsr_hi;
	u32		edprsr;
	u32		edvidsr;
	u32		edcidsr;
};

static DEFINE_MUTEX(debug_lock);
static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
static int debug_count;
static struct dentry *debug_debugfs_dir;

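/*
 * The functionality defaults to CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON and
 * can be overridden at boot time (e.g. coresight_cpu_debug.enable=1 on the
 * kernel command line) or at run time via the debugfs knob created below.
 */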
static bool debug_enable = IS_ENABLED(CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON);
module_param_named(enable, debug_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");

static void debug_os_unlock(struct debug_drvdata *drvdata)
{
	/* Unlocks the debug registers */
	writel_relaxed(0x0, drvdata->base + EDOSLAR);

	/* Make sure the registers are unlocked before accessing */
	wmb();
}

/*
 * According to ARM DDI 0487A.k, the access permission must be checked
 * before touching the external debug registers; if either of the
 * conditions below is met, the debug registers must not be accessed,
 * otherwise the system may lock up:
 *
 * - the CPU power domain is powered off;
 * - the OS Double Lock is locked.
 *
 * Both conditions can be detected by reading EDPRSR.
 */
static bool debug_access_permitted(struct debug_drvdata *drvdata)
{
	/* CPU is powered off */
	if (!(drvdata->edprsr & EDPRSR_PU))
		return false;

	/* The OS Double Lock is locked */
	if (drvdata->edprsr & EDPRSR_DLK)
		return false;

	return true;
}

static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
{
	u32 edprcr;

try_again:

	/*
	 * Send a request to the power management controller by asserting
	 * the DBGPWRUPREQ signal; a sane power controller implementation
	 * should then enable the CPU power domain if the CPU is in a low
	 * power state.
	 */
	edprcr = readl_relaxed(drvdata->base + EDPRCR);
	edprcr |= EDPRCR_COREPURQ;
	writel_relaxed(edprcr, drvdata->base + EDPRCR);

	/* Wait for CPU to be powered up (timeout~=32ms) */
	if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
			drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
			DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
		/*
		 * The CPU could not be powered up, so bail out; without
		 * power there is no permission to access the other
		 * registers.  For this case, CPU low power states should
		 * be disabled to ensure the CPU power domain stays enabled.
		 */
		dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
			__func__, drvdata->cpu);
		return;
	}

	/*
	 * At this point the CPU is powered up, so set the no powerdown
	 * request bit so that the core does not lose power; power down
	 * is emulated instead.
	 */
	edprcr = readl_relaxed(drvdata->base + EDPRCR);
	edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
	writel_relaxed(edprcr, drvdata->base + EDPRCR);

	drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);

	/* The core power domain got switched off in the meantime, try again */
	if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
		goto try_again;
}

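/*
 * Snapshot the sampling registers of one CPU; called from the panic
 * notifier below, the captured values are decoded by debug_dump_regs().
 */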
static void debug_read_regs(struct debug_drvdata *drvdata)
{
	u32 save_edprcr;

	CS_UNLOCK(drvdata->base);

	/* Unlock os lock */
	debug_os_unlock(drvdata);

	/* Save EDPRCR register */
	save_edprcr = readl_relaxed(drvdata->base + EDPRCR);

	/*
	 * Ensure the CPU power domain is enabled so that the registers
	 * are accessible.
	 */
	debug_force_cpu_powered_up(drvdata);

	if (!debug_access_permitted(drvdata))
		goto out;

	drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);

	/*
	 * As described in ARM DDI 0487A.k, if the processing
	 * element (PE) is in debug state, or sample-based
	 * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF and
	 * the EDCIDSR, EDVIDSR and EDPCSR_HI registers become
	 * UNKNOWN, so bail out directly in this case.
	 */
	if (drvdata->edpcsr == EDPCSR_PROHIBITED)
		goto out;

	/*
	 * A read of EDPCSR normally has the side-effect of
	 * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
	 * at this point it is safe to read their values.
	 */
	if (IS_ENABLED(CONFIG_64BIT))
		drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);

	if (drvdata->edcidsr_present)
		drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);

	if (drvdata->edvidsr_present)
		drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);

out:
	/* Restore EDPRCR register */
	writel_relaxed(save_edprcr, drvdata->base + EDPRCR);

	CS_LOCK(drvdata->base);
}

#ifdef CONFIG_64BIT
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
	return (unsigned long)drvdata->edpcsr_hi << 32 |
	       (unsigned long)drvdata->edpcsr;
}
#else
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
	unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
	unsigned long pc;

	pc = (unsigned long)drvdata->edpcsr;

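	/*
	 * When EDDEVID1.PCSROffset indicates an instruction-set dependent
	 * offset, the sampled value is the PC plus 8 in ARM state or plus
	 * 4 in Thumb state, so subtract the offset to recover the real PC.
	 */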
	if (drvdata->pc_has_offset) {
		arm_inst_offset = 8;
		thumb_inst_offset = 4;
	}

	/* Handle thumb instruction */
	if (pc & EDPCSR_THUMB) {
		pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
		return pc;
	}

	/*
	 * Handle the ARM instruction offset: if the ARM instruction
	 * is not 4-byte aligned, the offset is possibly implementation
	 * defined; keep the original value for this case and print a
	 * notice.
	 */
	if (pc & BIT(1))
		dev_emerg(drvdata->dev,
			  "Instruction offset is implementation defined\n");
	else
		pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;

	return pc;
}
#endif

static void debug_dump_regs(struct debug_drvdata *drvdata)
{
	struct device *dev = drvdata->dev;
	unsigned long pc;

	dev_emerg(dev, " EDPRSR:  %08x (Power:%s DLK:%s)\n",
		  drvdata->edprsr,
		  drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
		  drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");

	if (!debug_access_permitted(drvdata)) {
		dev_emerg(dev, "No permission to access debug registers!\n");
		return;
	}

	if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
		dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
		return;
	}

	pc = debug_adjust_pc(drvdata);
	dev_emerg(dev, " EDPCSR:  %pS\n", (void *)pc);

	if (drvdata->edcidsr_present)
		dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);

	if (drvdata->edvidsr_present)
		dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
			  drvdata->edvidsr,
			  drvdata->edvidsr & EDVIDSR_NS ?
			  "Non-secure" : "Secure",
			  drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
				(drvdata->edvidsr & EDVIDSR_E2 ?
				 "EL2" : "EL1/0"),
			  drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
			  drvdata->edvidsr & (u32)EDVIDSR_VMID);
}

static void debug_init_arch_data(void *info)
{
	struct debug_drvdata *drvdata = info;
	u32 mode, pcsr_offset;
	u32 eddevid, eddevid1;

	CS_UNLOCK(drvdata->base);

	/* Read device info */
	eddevid  = readl_relaxed(drvdata->base + EDDEVID);
	eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);

	CS_LOCK(drvdata->base);

	/* Parse implementation feature */
	mode = eddevid & EDDEVID_PCSAMPLE_MODE;
	pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;

	drvdata->edpcsr_present  = false;
	drvdata->edcidsr_present = false;
	drvdata->edvidsr_present = false;
	drvdata->pc_has_offset   = false;

	switch (mode) {
	case EDDEVID_IMPL_FULL:
		drvdata->edvidsr_present = true;
		fallthrough;
	case EDDEVID_IMPL_EDPCSR_EDCIDSR:
		drvdata->edcidsr_present = true;
		fallthrough;
	case EDDEVID_IMPL_EDPCSR:
		/*
		 * In ARM DDI 0487A.k, EDDEVID1.PCSROffset defines whether
		 * the PC sample value includes an offset; if the value read
		 * back is EDDEVID1.PCSROffset == 0x2, the debug module does
		 * not sample the instruction set state when an ARMv8 CPU is
		 * in AArch32 state.
		 */
		drvdata->edpcsr_present =
			((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
			 (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));

		drvdata->pc_has_offset =
			(pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
		break;
	default:
		break;
	}
}

/*
 * Dump out information on panic.
 */
static int debug_notifier_call(struct notifier_block *self,
			       unsigned long v, void *p)
{
	int cpu;
	struct debug_drvdata *drvdata;

	/* Bail out if we can't acquire the mutex or the functionality is off */
	if (!mutex_trylock(&debug_lock))
		return NOTIFY_DONE;

	if (!debug_enable)
		goto skip_dump;

	pr_emerg("ARM external debug module:\n");

	for_each_possible_cpu(cpu) {
		drvdata = per_cpu(debug_drvdata, cpu);
		if (!drvdata)
			continue;

		dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);

		debug_read_regs(drvdata);
		debug_dump_regs(drvdata);
	}

skip_dump:
	mutex_unlock(&debug_lock);
	return NOTIFY_DONE;
}

static struct notifier_block debug_notifier = {
	.notifier_call = debug_notifier_call,
};

static int debug_enable_func(void)
{
	struct debug_drvdata *drvdata;
	int cpu, ret = 0;
	cpumask_t mask;

	/*
	 * Use a cpumask to track which debug power domains have been
	 * powered on, so that they can be rolled back in the failure case.
	 */
	cpumask_clear(&mask);

	for_each_possible_cpu(cpu) {
		drvdata = per_cpu(debug_drvdata, cpu);
		if (!drvdata)
			continue;

		ret = pm_runtime_get_sync(drvdata->dev);
		if (ret < 0)
			goto err;
		else
			cpumask_set_cpu(cpu, &mask);
	}

	return 0;

err:
	/*
	 * If pm_runtime_get_sync() has failed, roll back all the other
	 * CPUs that were enabled before the failure.
	 */
	for_each_cpu(cpu, &mask) {
		drvdata = per_cpu(debug_drvdata, cpu);
		pm_runtime_put_noidle(drvdata->dev);
	}

	return ret;
}

static int debug_disable_func(void)
{
	struct debug_drvdata *drvdata;
	int cpu, ret, err = 0;

	/*
	 * Disable the debug power domains; if an error is encountered,
	 * record it and keep iterating through the remaining CPUs.
	 */
	for_each_possible_cpu(cpu) {
		drvdata = per_cpu(debug_drvdata, cpu);
		if (!drvdata)
			continue;

		ret = pm_runtime_put(drvdata->dev);
		if (ret < 0)
			err = ret;
	}

	return err;
}

static ssize_t debug_func_knob_write(struct file *f,
		const char __user *buf, size_t count, loff_t *ppos)
{
	u8 val;
	int ret;

	ret = kstrtou8_from_user(buf, count, 2, &val);
	if (ret)
		return ret;

	mutex_lock(&debug_lock);

	if (val == debug_enable)
		goto out;

	if (val)
		ret = debug_enable_func();
	else
		ret = debug_disable_func();

	if (ret) {
		pr_err("%s: unable to %s debug function: %d\n",
		       __func__, val ? "enable" : "disable", ret);
		goto err;
	}

	debug_enable = val;
out:
	ret = count;
err:
	mutex_unlock(&debug_lock);
	return ret;
}

static ssize_t debug_func_knob_read(struct file *f,
		char __user *ubuf, size_t count, loff_t *ppos)
{
	ssize_t ret;
	char buf[3];

	mutex_lock(&debug_lock);
	snprintf(buf, sizeof(buf), "%d\n", debug_enable);
	mutex_unlock(&debug_lock);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
	return ret;
}

static const struct file_operations debug_func_knob_fops = {
	.open	= simple_open,
	.read	= debug_func_knob_read,
	.write	= debug_func_knob_write,
};

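/*
 * Create the debugfs interface and hook into the panic notifier chain.
 * With debugfs mounted (typically at /sys/kernel/debug), writing 1 or 0 to
 * coresight_cpu_debug/enable turns the debug functionality on or off at
 * run time.
 */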
static int debug_func_init(void)
{
	int ret;

	/* Create debugfs node */
	debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
	debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
			    &debug_func_knob_fops);

	/* Register function to be called for panic */
	ret = atomic_notifier_chain_register(&panic_notifier_list,
					     &debug_notifier);
	if (ret) {
		pr_err("%s: unable to register notifier: %d\n",
		       __func__, ret);
		goto err;
	}

	return 0;

err:
	debugfs_remove_recursive(debug_debugfs_dir);
	return ret;
}

static void debug_func_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &debug_notifier);
	debugfs_remove_recursive(debug_debugfs_dir);
}

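/*
 * One AMBA device is described per CPU debug component, so debug_probe()
 * runs once per CPU: it maps the debug registers, discovers which sampling
 * registers that CPU implements and publishes the per-CPU drvdata.
 */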
static int debug_probe(struct amba_device *adev, const struct amba_id *id)
{
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct debug_drvdata *drvdata;
	struct resource *res = &adev->res;
	int ret;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->cpu = coresight_get_cpu(dev);
	if (drvdata->cpu < 0)
		return drvdata->cpu;

	if (per_cpu(debug_drvdata, drvdata->cpu)) {
		dev_err(dev, "CPU%d drvdata has already been initialized\n",
			drvdata->cpu);
		return -EBUSY;
	}

	drvdata->dev = &adev->dev;
	amba_set_drvdata(adev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	cpus_read_lock();
	per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
	ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
				       drvdata, 1);
	cpus_read_unlock();

	if (ret) {
		dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
		goto err;
	}

	if (!drvdata->edpcsr_present) {
		dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
			drvdata->cpu);
		ret = -ENXIO;
		goto err;
	}

	if (!debug_count++) {
		ret = debug_func_init();
		if (ret)
			goto err_func_init;
	}

	mutex_lock(&debug_lock);
	/* Turn off debug power domain if debugging is disabled */
	if (!debug_enable)
		pm_runtime_put(dev);
	mutex_unlock(&debug_lock);

	dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
	return 0;

err_func_init:
	debug_count--;
err:
	per_cpu(debug_drvdata, drvdata->cpu) = NULL;
	return ret;
}

static void debug_remove(struct amba_device *adev)
{
	struct device *dev = &adev->dev;
	struct debug_drvdata *drvdata = amba_get_drvdata(adev);

	per_cpu(debug_drvdata, drvdata->cpu) = NULL;

	mutex_lock(&debug_lock);
	/* Turn off the debug power domain before the module is removed */
	if (debug_enable)
		pm_runtime_put(dev);
	mutex_unlock(&debug_lock);

	if (!--debug_count)
		debug_func_exit();
}

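/*
 * UCI (Uniform Coresight Identification) match data: the DEVARCH/DEVTYPE
 * values below let the CS_AMBA_UCI_ID() entries distinguish the CPU debug
 * component from other CoreSight devices sharing the same AMBA peripheral ID.
 */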
static const struct amba_cs_uci_id uci_id_debug[] = {
	{
		/*  CPU Debug UCI data */
		.devarch	= 0x47706a15,
		.devarch_mask	= 0xfff0ffff,
		.devtype	= 0x00000015,
	}
};

static const struct amba_id debug_ids[] = {
	CS_AMBA_ID(0x000bbd03),				/* Cortex-A53 */
	CS_AMBA_ID(0x000bbd07),				/* Cortex-A57 */
	CS_AMBA_ID(0x000bbd08),				/* Cortex-A72 */
	CS_AMBA_ID(0x000bbd09),				/* Cortex-A73 */
	CS_AMBA_UCI_ID(0x000f0205, uci_id_debug),	/* Qualcomm Kryo */
	CS_AMBA_UCI_ID(0x000f0211, uci_id_debug),	/* Qualcomm Kryo */
	{},
};

MODULE_DEVICE_TABLE(amba, debug_ids);

static struct amba_driver debug_driver = {
	.drv = {
		.name   = "coresight-cpu-debug",
		.suppress_bind_attrs = true,
	},
	.probe		= debug_probe,
	.remove		= debug_remove,
	.id_table	= debug_ids,
};

module_amba_driver(debug_driver);

MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
MODULE_LICENSE("GPL");