cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

coresight-catu.c (17664B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (C) 2018 Arm Limited. All rights reserved.
      4 *
      5 * Coresight Address Translation Unit support
      6 *
      7 * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
      8 */
      9
     10#include <linux/amba/bus.h>
     11#include <linux/device.h>
     12#include <linux/dma-mapping.h>
     13#include <linux/io.h>
     14#include <linux/kernel.h>
     15#include <linux/slab.h>
     16
     17#include "coresight-catu.h"
     18#include "coresight-priv.h"
     19#include "coresight-tmc.h"
     20
/* Fetch the CATU drvdata stored on the parent of the coresight device */
#define csdev_to_catu_drvdata(csdev)	\
	dev_get_drvdata(csdev->dev.parent)

/* Verbose output for CATU table contents */
#ifdef CATU_DEBUG
#define catu_dbg(x, ...) dev_dbg(x, __VA_ARGS__)
#else
#define catu_dbg(x, ...) do {} while (0)
#endif

/* Name list used by coresight_alloc_device_name() to mint "catuN" names */
DEFINE_CORESIGHT_DEVLIST(catu_devs, "catu");

/*
 * Per-ETR-buffer CATU state: the SG table backing the buffer and the
 * DMA address of its first table page (programmed into SLADDR).
 */
struct catu_etr_buf {
	struct tmc_sg_table *catu_table;	/* CATU translation table */
	dma_addr_t sladdr;			/* DMA address of first table page */
};
     37
     38/*
     39 * CATU uses a page size of 4KB for page tables as well as data pages.
     40 * Each 64bit entry in the table has the following format.
     41 *
     42 *	63			12	1  0
     43 *	------------------------------------
     44 *	|	 Address [63-12] | SBZ	| V|
     45 *	------------------------------------
     46 *
 * Where bit[0] V indicates if the address is valid or not.
 * Each 4K table page has up to 256 data page pointers, occupying up to
 * 2K. There are two link pointers, pointing to the previous and next
 * table pages respectively, at the end of the 4K page (i.e., entries
 * 510 and 511).
     52 *  E.g, a table of two pages could look like :
     53 *
     54 *                 Table Page 0               Table Page 1
     55 * SLADDR ===> x------------------x  x--> x-----------------x
     56 * INADDR    ->|  Page 0      | V |  |    | Page 256    | V | <- INADDR+1M
     57 *             |------------------|  |    |-----------------|
     58 * INADDR+4K ->|  Page 1      | V |  |    |                 |
     59 *             |------------------|  |    |-----------------|
     60 *             |  Page 2      | V |  |    |                 |
     61 *             |------------------|  |    |-----------------|
     62 *             |   ...        | V |  |    |    ...          |
     63 *             |------------------|  |    |-----------------|
     64 * INADDR+1020K|  Page 255    | V |  |    |   Page 511  | V |
     65 * SLADDR+2K==>|------------------|  |    |-----------------|
     66 *             |  UNUSED      |   |  |    |                 |
     67 *             |------------------|  |    |                 |
     68 *             |  UNUSED      |   |  |    |                 |
     69 *             |------------------|  |    |                 |
     70 *             |    ...       |   |  |    |                 |
     71 *             |------------------|  |    |-----------------|
     72 *             |   IGNORED    | 0 |  |    | Table Page 0| 1 |
     73 *             |------------------|  |    |-----------------|
     74 *             |  Table Page 1| 1 |--x    | IGNORED     | 0 |
     75 *             x------------------x       x-----------------x
     76 * SLADDR+4K==>
     77 *
     78 * The base input address (used by the ETR, programmed in INADDR_{LO,HI})
     79 * must be aligned to 1MB (the size addressable by a single page table).
     80 * The CATU maps INADDR{LO:HI} to the first page in the table pointed
     81 * to by SLADDR{LO:HI} and so on.
     82 *
     83 */
/* A single 64bit CATU table entry (format described above) */
typedef u64 cate_t;

#define CATU_PAGE_SHIFT		12
#define CATU_PAGE_SIZE		(1UL << CATU_PAGE_SHIFT)
/* Number of 4K CATU pages that fit in one kernel (system) page */
#define CATU_PAGES_PER_SYSPAGE	(PAGE_SIZE / CATU_PAGE_SIZE)

/* Page pointers are only allocated in the first 2K half */
#define CATU_PTRS_PER_PAGE	((CATU_PAGE_SIZE >> 1) / sizeof(cate_t))
#define CATU_PTRS_PER_SYSPAGE	(CATU_PAGES_PER_SYSPAGE * CATU_PTRS_PER_PAGE)
/* Entries 510 and 511 of each table page link to the prev/next tables */
#define CATU_LINK_PREV		((CATU_PAGE_SIZE / sizeof(cate_t)) - 2)
#define CATU_LINK_NEXT		((CATU_PAGE_SIZE / sizeof(cate_t)) - 1)

#define CATU_ADDR_SHIFT		12
#define CATU_ADDR_MASK		~(((cate_t)1 << CATU_ADDR_SHIFT) - 1)
#define CATU_ENTRY_VALID	((cate_t)0x1)
/* Build a valid table entry from a page-aligned DMA address */
#define CATU_VALID_ENTRY(addr) \
	(((cate_t)(addr) & CATU_ADDR_MASK) | CATU_ENTRY_VALID)
#define CATU_ENTRY_ADDR(entry)	((cate_t)(entry) & ~((cate_t)CATU_ENTRY_VALID))

/* CATU expects the INADDR to be aligned to 1M. */
#define CATU_DEFAULT_INADDR	(1ULL << 20)
    105
    106/*
    107 * catu_get_table : Retrieve the table pointers for the given @offset
    108 * within the buffer. The buffer is wrapped around to a valid offset.
    109 *
    110 * Returns : The CPU virtual address for the beginning of the table
    111 * containing the data page pointer for @offset. If @daddrp is not NULL,
    112 * @daddrp points the DMA address of the beginning of the table.
    113 */
static inline cate_t *catu_get_table(struct tmc_sg_table *catu_table,
				     unsigned long offset,
				     dma_addr_t *daddrp)
{
	unsigned long buf_size = tmc_sg_table_buf_size(catu_table);
	unsigned int table_nr, pg_idx, pg_offset;
	struct tmc_pages *table_pages = &catu_table->table_pages;
	void *ptr;

	/* Make sure offset is within the range */
	offset %= buf_size;

	/*
	 * Each table can address 1MB and a single kernel page can
	 * contain "CATU_PAGES_PER_SYSPAGE" CATU tables.
	 */
	table_nr = offset >> 20;	/* 1MB of data per table */
	/* Find the table page where the table_nr lies in */
	pg_idx = table_nr / CATU_PAGES_PER_SYSPAGE;
	/* Byte offset of that CATU table within the system page */
	pg_offset = (table_nr % CATU_PAGES_PER_SYSPAGE) * CATU_PAGE_SIZE;
	if (daddrp)
		*daddrp = table_pages->daddrs[pg_idx] + pg_offset;
	ptr = page_address(table_pages->pages[pg_idx]);
	return (cate_t *)((unsigned long)ptr + pg_offset);
}
    139
#ifdef CATU_DEBUG
/*
 * Dump every data page entry plus the prev/next links of each
 * sub-table in @catu_table. Debug builds (CATU_DEBUG) only.
 */
static void catu_dump_table(struct tmc_sg_table *catu_table)
{
	int i;
	cate_t *table;
	unsigned long table_end, buf_size, offset = 0;

	buf_size = tmc_sg_table_buf_size(catu_table);
	dev_dbg(catu_table->dev,
		"Dump table %p, tdaddr: %llx\n",
		catu_table, catu_table->table_daddr);

	while (offset < buf_size) {
		/* Each sub-table covers at most 1MB of the buffer */
		table_end = offset + SZ_1M < buf_size ?
			    offset + SZ_1M : buf_size;
		table = catu_get_table(catu_table, offset, NULL);
		for (i = 0; offset < table_end; i++, offset += CATU_PAGE_SIZE)
			dev_dbg(catu_table->dev, "%d: %llx\n", i, table[i]);
		dev_dbg(catu_table->dev, "Prev : %llx, Next: %llx\n",
			table[CATU_LINK_PREV], table[CATU_LINK_NEXT]);
		dev_dbg(catu_table->dev, "== End of sub-table ===");
	}
	dev_dbg(catu_table->dev, "== End of Table ===");
}

#else
/* No-op when the driver is built without CATU_DEBUG */
static inline void catu_dump_table(struct tmc_sg_table *catu_table)
{
}
#endif
    170
    171static inline cate_t catu_make_entry(dma_addr_t addr)
    172{
    173	return addr ? CATU_VALID_ENTRY(addr) : 0;
    174}
    175
    176/*
    177 * catu_populate_table : Populate the given CATU table.
    178 * The table is always populated as a circular table.
    179 * i.e, the "prev" link of the "first" table points to the "last"
    180 * table and the "next" link of the "last" table points to the
    181 * "first" table. The buffer should be made linear by calling
    182 * catu_set_table().
    183 */
/*
 * catu_populate_table : Populate the given CATU table.
 * The table is always populated as a circular table.
 * i.e, the "prev" link of the "first" table points to the "last"
 * table and the "next" link of the "last" table points to the
 * "first" table. The buffer should be made linear by calling
 * catu_set_table().
 *
 * NOTE(review): in the code below, the first table's "prev" and the
 * last table's "next" links are written as 0 (invalid), and
 * catu_set_table() is not visible in this file — confirm the circular
 * description above against the rest of the driver.
 */
static void
catu_populate_table(struct tmc_sg_table *catu_table)
{
	int i;
	int sys_pidx;	/* Index to current system data page */
	int catu_pidx;	/* Index of CATU page within the system data page */
	unsigned long offset, buf_size, table_end;
	dma_addr_t data_daddr;
	dma_addr_t prev_taddr, next_taddr, cur_taddr;
	cate_t *table_ptr, *next_table;

	buf_size = tmc_sg_table_buf_size(catu_table);
	sys_pidx = catu_pidx = 0;
	offset = 0;

	table_ptr = catu_get_table(catu_table, 0, &cur_taddr);
	prev_taddr = 0;	/* Prev link for the first table */

	while (offset < buf_size) {
		/*
		 * The @offset is always 1M aligned here and we have an
		 * empty table @table_ptr to fill. Each table can address
		 * up to 1MB data buffer. The last table may have fewer
		 * entries if the buffer size is not aligned.
		 */
		table_end = (offset + SZ_1M) < buf_size ?
			    (offset + SZ_1M) : buf_size;
		/* Write a data page pointer for every CATU page in range */
		for (i = 0; offset < table_end;
		     i++, offset += CATU_PAGE_SIZE) {

			data_daddr = catu_table->data_pages.daddrs[sys_pidx] +
				     catu_pidx * CATU_PAGE_SIZE;
			catu_dbg(catu_table->dev,
				"[table %5ld:%03d] 0x%llx\n",
				(offset >> 20), i, data_daddr);
			table_ptr[i] = catu_make_entry(data_daddr);
			/* Move the pointers for data pages */
			catu_pidx = (catu_pidx + 1) % CATU_PAGES_PER_SYSPAGE;
			if (catu_pidx == 0)
				sys_pidx++;
		}

		/*
		 * If we have finished all the valid entries, fill the rest of
		 * the table (i.e, last table page) with invalid entries,
		 * to fail the lookups.
		 */
		if (offset == buf_size) {
			memset(&table_ptr[i], 0,
			       sizeof(cate_t) * (CATU_PTRS_PER_PAGE - i));
			next_taddr = 0;
		} else {
			next_table = catu_get_table(catu_table,
						    offset, &next_taddr);
		}

		/* Link this table to its neighbours */
		table_ptr[CATU_LINK_PREV] = catu_make_entry(prev_taddr);
		table_ptr[CATU_LINK_NEXT] = catu_make_entry(next_taddr);

		catu_dbg(catu_table->dev,
			"[table%5ld]: Cur: 0x%llx Prev: 0x%llx, Next: 0x%llx\n",
			(offset >> 20) - 1,  cur_taddr, prev_taddr, next_taddr);

		/* Update the prev/next addresses */
		if (next_taddr) {
			prev_taddr = cur_taddr;
			cur_taddr = next_taddr;
			table_ptr = next_table;
		}
	}

	/* Sync the table for device */
	tmc_sg_table_sync_table(catu_table);
}
    258
/*
 * catu_init_sg_table : Allocate an SG table sized for @size bytes of
 * trace data (the @pages argument is passed straight through to
 * tmc_alloc_sg_table()) and populate it as a CATU table.
 * Returns the table, or an ERR_PTR propagated from the allocator.
 */
static struct tmc_sg_table *
catu_init_sg_table(struct device *catu_dev, int node,
		   ssize_t size, void **pages)
{
	int nr_tpages;
	struct tmc_sg_table *catu_table;

	/*
	 * Each table can address up to 1MB and we can have
	 * CATU_PAGES_PER_SYSPAGE tables in a system page.
	 *
	 * NOTE(review): this division truncates; when PAGE_SIZE > 4K and
	 * DIV_ROUND_UP(size, SZ_1M) is not a multiple of
	 * CATU_PAGES_PER_SYSPAGE, nr_tpages rounds down — confirm against
	 * tmc_alloc_sg_table()'s expectations.
	 */
	nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE;
	catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
					size >> PAGE_SHIFT, pages);
	if (IS_ERR(catu_table))
		return catu_table;

	catu_populate_table(catu_table);
	dev_dbg(catu_dev,
		"Setup table %p, size %ldKB, %d table pages\n",
		catu_table, (unsigned long)size >> 10,  nr_tpages);
	catu_dump_table(catu_table);
	return catu_table;
}
    283
    284static void catu_free_etr_buf(struct etr_buf *etr_buf)
    285{
    286	struct catu_etr_buf *catu_buf;
    287
    288	if (!etr_buf || etr_buf->mode != ETR_MODE_CATU || !etr_buf->private)
    289		return;
    290
    291	catu_buf = etr_buf->private;
    292	tmc_free_sg_table(catu_buf->catu_table);
    293	kfree(catu_buf);
    294}
    295
    296static ssize_t catu_get_data_etr_buf(struct etr_buf *etr_buf, u64 offset,
    297				     size_t len, char **bufpp)
    298{
    299	struct catu_etr_buf *catu_buf = etr_buf->private;
    300
    301	return tmc_sg_table_get_data(catu_buf->catu_table, offset, len, bufpp);
    302}
    303
    304static void catu_sync_etr_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
    305{
    306	struct catu_etr_buf *catu_buf = etr_buf->private;
    307	struct tmc_sg_table *catu_table = catu_buf->catu_table;
    308	u64 r_offset, w_offset;
    309
    310	/*
    311	 * ETR started off at etr_buf->hwaddr. Convert the RRP/RWP to
    312	 * offsets within the trace buffer.
    313	 */
    314	r_offset = rrp - etr_buf->hwaddr;
    315	w_offset = rwp - etr_buf->hwaddr;
    316
    317	if (!etr_buf->full) {
    318		etr_buf->len = w_offset - r_offset;
    319		if (w_offset < r_offset)
    320			etr_buf->len += etr_buf->size;
    321	} else {
    322		etr_buf->len = etr_buf->size;
    323	}
    324
    325	etr_buf->offset = r_offset;
    326	tmc_sg_table_sync_data_range(catu_table, r_offset, etr_buf->len);
    327}
    328
    329static int catu_alloc_etr_buf(struct tmc_drvdata *tmc_drvdata,
    330			      struct etr_buf *etr_buf, int node, void **pages)
    331{
    332	struct coresight_device *csdev;
    333	struct tmc_sg_table *catu_table;
    334	struct catu_etr_buf *catu_buf;
    335
    336	csdev = tmc_etr_get_catu_device(tmc_drvdata);
    337	if (!csdev)
    338		return -ENODEV;
    339	catu_buf = kzalloc(sizeof(*catu_buf), GFP_KERNEL);
    340	if (!catu_buf)
    341		return -ENOMEM;
    342
    343	catu_table = catu_init_sg_table(&csdev->dev, node,
    344					etr_buf->size, pages);
    345	if (IS_ERR(catu_table)) {
    346		kfree(catu_buf);
    347		return PTR_ERR(catu_table);
    348	}
    349
    350	etr_buf->mode = ETR_MODE_CATU;
    351	etr_buf->private = catu_buf;
    352	etr_buf->hwaddr = CATU_DEFAULT_INADDR;
    353
    354	catu_buf->catu_table = catu_table;
    355	/* Get the table base address */
    356	catu_buf->sladdr = catu_table->table_daddr;
    357
    358	return 0;
    359}
    360
/* ETR buffer operations installed into TMC-ETR at module init */
static const struct etr_buf_operations etr_catu_buf_ops = {
	.alloc = catu_alloc_etr_buf,
	.free = catu_free_etr_buf,
	.sync = catu_sync_etr_buf,
	.get_data = catu_get_data_etr_buf,
};
    367
/* Raw register accessors backing the "mgmt" sysfs attribute group */
coresight_simple_reg32(struct catu_drvdata, devid, CORESIGHT_DEVID);
coresight_simple_reg32(struct catu_drvdata, control, CATU_CONTROL);
coresight_simple_reg32(struct catu_drvdata, status, CATU_STATUS);
coresight_simple_reg32(struct catu_drvdata, mode, CATU_MODE);
coresight_simple_reg32(struct catu_drvdata, axictrl, CATU_AXICTRL);
coresight_simple_reg32(struct catu_drvdata, irqen, CATU_IRQEN);
coresight_simple_reg64(struct catu_drvdata, sladdr,
		       CATU_SLADDRLO, CATU_SLADDRHI);
coresight_simple_reg64(struct catu_drvdata, inaddr,
		       CATU_INADDRLO, CATU_INADDRHI);

static struct attribute *catu_mgmt_attrs[] = {
	&dev_attr_devid.attr,
	&dev_attr_control.attr,
	&dev_attr_status.attr,
	&dev_attr_mode.attr,
	&dev_attr_axictrl.attr,
	&dev_attr_irqen.attr,
	&dev_attr_sladdr.attr,
	&dev_attr_inaddr.attr,
	NULL,
};

/* Exposed under <sysfs>/mgmt for the registered coresight device */
static const struct attribute_group catu_mgmt_group = {
	.attrs = catu_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group *catu_groups[] = {
	&catu_mgmt_group,
	NULL,
};
    400
    401
    402static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
    403{
    404	struct csdev_access *csa = &drvdata->csdev->access;
    405
    406	return coresight_timeout(csa, CATU_STATUS, CATU_STATUS_READY, 1);
    407}
    408
/*
 * catu_enable_hw : Program and enable the CATU.
 * When @data is an ETR buffer set up for CATU mode, program the
 * translation table base (SLADDR) and input address (INADDR) and
 * enable address translation; otherwise enable pass-through mode.
 * Returns 0 on success, -EBUSY if the CATU is already enabled, or the
 * error from claiming the device.
 */
static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
{
	int rc;
	u32 control, mode;
	struct etr_buf *etr_buf = data;
	struct device *dev = &drvdata->csdev->dev;
	struct coresight_device *csdev = drvdata->csdev;

	/* Warn but carry on if the device never signalled READY */
	if (catu_wait_for_ready(drvdata))
		dev_warn(dev, "Timeout while waiting for READY\n");

	control = catu_read_control(drvdata);
	if (control & BIT(CATU_CONTROL_ENABLE)) {
		dev_warn(dev, "CATU is already enabled\n");
		return -EBUSY;
	}

	rc = coresight_claim_device_unlocked(csdev);
	if (rc)
		return rc;

	control |= BIT(CATU_CONTROL_ENABLE);

	if (etr_buf && etr_buf->mode == ETR_MODE_CATU) {
		struct catu_etr_buf *catu_buf = etr_buf->private;

		mode = CATU_MODE_TRANSLATE;
		catu_write_axictrl(drvdata, CATU_OS_AXICTRL);
		catu_write_sladdr(drvdata, catu_buf->sladdr);
		catu_write_inaddr(drvdata, CATU_DEFAULT_INADDR);
	} else {
		/* No CATU buffer: let addresses through untranslated */
		mode = CATU_MODE_PASS_THROUGH;
		catu_write_sladdr(drvdata, 0);
		catu_write_inaddr(drvdata, 0);
	}

	/* Configure (interrupts off, mode) before setting the enable bit */
	catu_write_irqen(drvdata, 0);
	catu_write_mode(drvdata, mode);
	catu_write_control(drvdata, control);
	dev_dbg(dev, "Enabled in %s mode\n",
		(mode == CATU_MODE_PASS_THROUGH) ?
		"Pass through" :
		"Translate");
	return 0;
}
    454
    455static int catu_enable(struct coresight_device *csdev, void *data)
    456{
    457	int rc;
    458	struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
    459
    460	CS_UNLOCK(catu_drvdata->base);
    461	rc = catu_enable_hw(catu_drvdata, data);
    462	CS_LOCK(catu_drvdata->base);
    463	return rc;
    464}
    465
    466static int catu_disable_hw(struct catu_drvdata *drvdata)
    467{
    468	int rc = 0;
    469	struct device *dev = &drvdata->csdev->dev;
    470	struct coresight_device *csdev = drvdata->csdev;
    471
    472	catu_write_control(drvdata, 0);
    473	coresight_disclaim_device_unlocked(csdev);
    474	if (catu_wait_for_ready(drvdata)) {
    475		dev_info(dev, "Timeout while waiting for READY\n");
    476		rc = -EAGAIN;
    477	}
    478
    479	dev_dbg(dev, "Disabled\n");
    480	return rc;
    481}
    482
    483static int catu_disable(struct coresight_device *csdev, void *__unused)
    484{
    485	int rc;
    486	struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
    487
    488	CS_UNLOCK(catu_drvdata->base);
    489	rc = catu_disable_hw(catu_drvdata);
    490	CS_LOCK(catu_drvdata->base);
    491	return rc;
    492}
    493
/* CATU is registered as a coresight HELPER device; only helper ops apply. */
static const struct coresight_ops_helper catu_helper_ops = {
	.enable = catu_enable,
	.disable = catu_disable,
};

static const struct coresight_ops catu_ops = {
	.helper_ops = &catu_helper_ops,
};
    502
/*
 * catu_probe : AMBA probe for the CATU.
 * Maps the register space, derives the DMA mask from the DEVID
 * register, and registers the CATU as a coresight helper device.
 * All allocations are devm-managed; errors simply return.
 */
static int catu_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 dma_mask;
	struct catu_drvdata *drvdata;
	struct coresight_desc catu_desc;
	struct coresight_platform_data *pdata = NULL;
	struct device *dev = &adev->dev;
	void __iomem *base;

	catu_desc.name = coresight_alloc_device_name(&catu_devs, dev);
	if (!catu_desc.name)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata) {
		ret = -ENOMEM;
		goto out;
	}

	dev_set_drvdata(dev, drvdata);
	base = devm_ioremap_resource(dev, &adev->res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out;
	}

	/* Setup dma mask for the device */
	dma_mask = readl_relaxed(base + CORESIGHT_DEVID) & 0x3f;
	switch (dma_mask) {
	/* Only these address widths are architecturally valid */
	case 32:
	case 40:
	case 44:
	case 48:
	case 52:
	case 56:
	case 64:
		break;
	default:
		/* Default to the 40bits as supported by TMC-ETR */
		dma_mask = 40;
	}
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_mask));
	if (ret)
		goto out;

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto out;
	}
	dev->platform_data = pdata;

	drvdata->base = base;
	catu_desc.access = CSDEV_ACCESS_IOMEM(base);
	catu_desc.pdata = pdata;
	catu_desc.dev = dev;
	catu_desc.groups = catu_groups;
	catu_desc.type = CORESIGHT_DEV_TYPE_HELPER;
	catu_desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CATU;
	catu_desc.ops = &catu_ops;

	drvdata->csdev = coresight_register(&catu_desc);
	if (IS_ERR(drvdata->csdev))
		ret = PTR_ERR(drvdata->csdev);
	else
		/* NOTE(review): presumably drops the runtime-pm reference
		 * held across probe by the AMBA core — confirm */
		pm_runtime_put(&adev->dev);
out:
	return ret;
}
    573
    574static void catu_remove(struct amba_device *adev)
    575{
    576	struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
    577
    578	coresight_unregister(drvdata->csdev);
    579}
    580
/* AMBA peripheral id(s) this driver binds to */
static struct amba_id catu_ids[] = {
	CS_AMBA_ID(0x000bb9ee),
	{},
};

MODULE_DEVICE_TABLE(amba, catu_ids);
    587
static struct amba_driver catu_driver = {
	.drv = {
		.name			= "coresight-catu",
		.owner			= THIS_MODULE,
		/* Disallow manual sysfs bind/unbind of the device */
		.suppress_bind_attrs	= true,
	},
	.probe				= catu_probe,
	.remove				= catu_remove,
	.id_table			= catu_ids,
};
    598
    599static int __init catu_init(void)
    600{
    601	int ret;
    602
    603	ret = amba_driver_register(&catu_driver);
    604	if (ret)
    605		pr_info("Error registering catu driver\n");
    606	tmc_etr_set_catu_ops(&etr_catu_buf_ops);
    607	return ret;
    608}
    609
static void __exit catu_exit(void)
{
	/* Detach the ETR buf ops from TMC-ETR before the driver goes away */
	tmc_etr_remove_catu_ops();
	amba_driver_unregister(&catu_driver);
}
    615
    616module_init(catu_init);
    617module_exit(catu_exit);
    618
    619MODULE_AUTHOR("Suzuki K Poulose <suzuki.poulose@arm.com>");
    620MODULE_DESCRIPTION("Arm CoreSight Address Translation Unit (CATU) Driver");
    621MODULE_LICENSE("GPL v2");