cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dma_test.c (19241B)


// SPDX-License-Identifier: GPL-2.0
/*
 * DMA traffic test driver
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Isaac Hazan <isaac.hazan@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>

#define DMA_TEST_TX_RING_SIZE		64
#define DMA_TEST_RX_RING_SIZE		256
#define DMA_TEST_FRAME_SIZE		SZ_4K
#define DMA_TEST_DATA_PATTERN		0x0123456789abcdefLL
#define DMA_TEST_MAX_PACKETS		1000

enum dma_test_frame_pdf {
	DMA_TEST_PDF_FRAME_START = 1,
	DMA_TEST_PDF_FRAME_END,
};

struct dma_test_frame {
	struct dma_test *dma_test;
	void *data;
	struct ring_frame frame;
};

enum dma_test_test_error {
	DMA_TEST_NO_ERROR,
	DMA_TEST_INTERRUPTED,
	DMA_TEST_BUFFER_ERROR,
	DMA_TEST_DMA_ERROR,
	DMA_TEST_CONFIG_ERROR,
	DMA_TEST_SPEED_ERROR,
	DMA_TEST_WIDTH_ERROR,
	DMA_TEST_BONDING_ERROR,
	DMA_TEST_PACKET_ERROR,
};

static const char * const dma_test_error_names[] = {
	[DMA_TEST_NO_ERROR] = "no errors",
	[DMA_TEST_INTERRUPTED] = "interrupted by signal",
	[DMA_TEST_BUFFER_ERROR] = "no memory for packet buffers",
	[DMA_TEST_DMA_ERROR] = "DMA ring setup failed",
	[DMA_TEST_CONFIG_ERROR] = "configuration is not valid",
	[DMA_TEST_SPEED_ERROR] = "unexpected link speed",
	[DMA_TEST_WIDTH_ERROR] = "unexpected link width",
	[DMA_TEST_BONDING_ERROR] = "lane bonding configuration error",
	[DMA_TEST_PACKET_ERROR] = "packet check failed",
};

enum dma_test_result {
	DMA_TEST_NOT_RUN,
	DMA_TEST_SUCCESS,
	DMA_TEST_FAIL,
};

static const char * const dma_test_result_names[] = {
	[DMA_TEST_NOT_RUN] = "not run",
	[DMA_TEST_SUCCESS] = "success",
	[DMA_TEST_FAIL] = "failed",
};

/**
 * struct dma_test - DMA test device driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @rx_ring: Software ring holding RX frames
 * @rx_hopid: HopID used for receiving frames
 * @tx_ring: Software ring holding TX frames
 * @tx_hopid: HopID used for sending frames
 * @packets_to_send: Number of packets to send
 * @packets_to_receive: Number of packets to receive
 * @packets_sent: Actual number of packets sent
 * @packets_received: Actual number of packets received
 * @link_speed: Expected link speed (Gb/s), %0 to use whatever is negotiated
 * @link_width: Expected link width (in lanes), %0 to use whatever is negotiated
 * @crc_errors: Number of CRC errors during the test run
 * @buffer_overflow_errors: Number of buffer overflow errors during the test
 *			    run
 * @result: Result of the last run
 * @error_code: Error code of the last run
 * @complete: Used to wait for the Rx to complete
 * @lock: Lock serializing access to this structure
 * @debugfs_dir: dentry of this dma_test
 */
struct dma_test {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_ring *rx_ring;
	int rx_hopid;
	struct tb_ring *tx_ring;
	int tx_hopid;
	unsigned int packets_to_send;
	unsigned int packets_to_receive;
	unsigned int packets_sent;
	unsigned int packets_received;
	unsigned int link_speed;
	unsigned int link_width;
	unsigned int crc_errors;
	unsigned int buffer_overflow_errors;
	enum dma_test_result result;
	enum dma_test_test_error error_code;
	struct completion complete;
	struct mutex lock;
	struct dentry *debugfs_dir;
};

/* DMA test property directory UUID: 3188cd10-6523-4a5a-a682-fdca07a248d8 */
static const uuid_t dma_test_dir_uuid =
	UUID_INIT(0x3188cd10, 0x6523, 0x4a5a,
		  0xa6, 0x82, 0xfd, 0xca, 0x07, 0xa2, 0x48, 0xd8);

static struct tb_property_dir *dma_test_dir;
static void *dma_test_pattern;

static void dma_test_free_rings(struct dma_test *dt)
{
	if (dt->rx_ring) {
		tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
		tb_ring_free(dt->rx_ring);
		dt->rx_ring = NULL;
	}
	if (dt->tx_ring) {
		tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
		tb_ring_free(dt->tx_ring);
		dt->tx_ring = NULL;
	}
}

static int dma_test_start_rings(struct dma_test *dt)
{
	unsigned int flags = RING_FLAG_FRAME;
	struct tb_xdomain *xd = dt->xd;
	int ret, e2e_tx_hop = 0;
	struct tb_ring *ring;

	/*
	 * If we are both sender and receiver (traffic goes over a
	 * special loopback dongle) enable E2E flow control. This avoids
	 * losing packets.
	 */
	if (dt->packets_to_send && dt->packets_to_receive)
		flags |= RING_FLAG_E2E;

	if (dt->packets_to_send) {
		ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE,
					flags);
		if (!ring)
			return -ENOMEM;

		dt->tx_ring = ring;
		e2e_tx_hop = ring->hop;

		ret = tb_xdomain_alloc_out_hopid(xd, -1);
		if (ret < 0) {
			dma_test_free_rings(dt);
			return ret;
		}

		dt->tx_hopid = ret;
	}

	if (dt->packets_to_receive) {
		u16 sof_mask, eof_mask;

		sof_mask = BIT(DMA_TEST_PDF_FRAME_START);
		eof_mask = BIT(DMA_TEST_PDF_FRAME_END);

		ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE,
					flags, e2e_tx_hop, sof_mask, eof_mask,
					NULL, NULL);
		if (!ring) {
			dma_test_free_rings(dt);
			return -ENOMEM;
		}

		dt->rx_ring = ring;

		ret = tb_xdomain_alloc_in_hopid(xd, -1);
		if (ret < 0) {
			dma_test_free_rings(dt);
			return ret;
		}

		dt->rx_hopid = ret;
	}

	ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
				      dt->tx_ring ? dt->tx_ring->hop : 0,
				      dt->rx_hopid,
				      dt->rx_ring ? dt->rx_ring->hop : 0);
	if (ret) {
		dma_test_free_rings(dt);
		return ret;
	}

	if (dt->tx_ring)
		tb_ring_start(dt->tx_ring);
	if (dt->rx_ring)
		tb_ring_start(dt->rx_ring);

	return 0;
}

static void dma_test_stop_rings(struct dma_test *dt)
{
	int ret;

	if (dt->rx_ring)
		tb_ring_stop(dt->rx_ring);
	if (dt->tx_ring)
		tb_ring_stop(dt->tx_ring);

	ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
				       dt->tx_ring ? dt->tx_ring->hop : 0,
				       dt->rx_hopid,
				       dt->rx_ring ? dt->rx_ring->hop : 0);
	if (ret)
		dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");

	dma_test_free_rings(dt);
}

static void dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
				 bool canceled)
{
	struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
	struct dma_test *dt = tf->dma_test;
	struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);

	dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
			 DMA_FROM_DEVICE);
	kfree(tf->data);

	if (canceled) {
		kfree(tf);
		return;
	}

	dt->packets_received++;
	dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received,
		dt->packets_to_receive);

	if (tf->frame.flags & RING_DESC_CRC_ERROR)
		dt->crc_errors++;
	if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN)
		dt->buffer_overflow_errors++;

	kfree(tf);

	if (dt->packets_received == dt->packets_to_receive)
		complete(&dt->complete);
}

static int dma_test_submit_rx(struct dma_test *dt, size_t npackets)
{
	struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
	int i;

	for (i = 0; i < npackets; i++) {
		struct dma_test_frame *tf;
		dma_addr_t dma_addr;

		tf = kzalloc(sizeof(*tf), GFP_KERNEL);
		if (!tf)
			return -ENOMEM;

		tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
		if (!tf->data) {
			kfree(tf);
			return -ENOMEM;
		}

		dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			kfree(tf->data);
			kfree(tf);
			return -ENOMEM;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = dma_test_rx_callback;
		tf->dma_test = dt;
		INIT_LIST_HEAD(&tf->frame.list);

		tb_ring_rx(dt->rx_ring, &tf->frame);
	}

	return 0;
}

static void dma_test_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
				 bool canceled)
{
	struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
	struct dma_test *dt = tf->dma_test;
	struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);

	dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
			 DMA_TO_DEVICE);
	kfree(tf->data);
	kfree(tf);
}

static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
{
	struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
	int i;

	for (i = 0; i < npackets; i++) {
		struct dma_test_frame *tf;
		dma_addr_t dma_addr;

		tf = kzalloc(sizeof(*tf), GFP_KERNEL);
		if (!tf)
			return -ENOMEM;

		tf->frame.size = 0; /* means 4096 */
		tf->dma_test = dt;

		tf->data = kmemdup(dma_test_pattern, DMA_TEST_FRAME_SIZE, GFP_KERNEL);
		if (!tf->data) {
			kfree(tf);
			return -ENOMEM;
		}

		dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			kfree(tf->data);
			kfree(tf);
			return -ENOMEM;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = dma_test_tx_callback;
		tf->frame.sof = DMA_TEST_PDF_FRAME_START;
		tf->frame.eof = DMA_TEST_PDF_FRAME_END;
		INIT_LIST_HEAD(&tf->frame.list);

		dt->packets_sent++;
		dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent,
			dt->packets_to_send);

		tb_ring_tx(dt->tx_ring, &tf->frame);
	}

	return 0;
}

#define DMA_TEST_DEBUGFS_ATTR(__fops, __get, __validate, __set)	\
static int __fops ## _show(void *data, u64 *val)		\
{								\
	struct tb_service *svc = data;				\
	struct dma_test *dt = tb_service_get_drvdata(svc);	\
	int ret;						\
								\
	ret = mutex_lock_interruptible(&dt->lock);		\
	if (ret)						\
		return ret;					\
	__get(dt, val);						\
	mutex_unlock(&dt->lock);				\
	return 0;						\
}								\
static int __fops ## _store(void *data, u64 val)		\
{								\
	struct tb_service *svc = data;				\
	struct dma_test *dt = tb_service_get_drvdata(svc);	\
	int ret;						\
								\
	ret = __validate(val);					\
	if (ret)						\
		return ret;					\
	ret = mutex_lock_interruptible(&dt->lock);		\
	if (ret)						\
		return ret;					\
	__set(dt, val);						\
	mutex_unlock(&dt->lock);				\
	return 0;						\
}								\
DEFINE_DEBUGFS_ATTRIBUTE(__fops ## _fops, __fops ## _show,	\
			 __fops ## _store, "%llu\n")

static void lanes_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->link_width;
}

static int lanes_validate(u64 val)
{
	return val > 2 ? -EINVAL : 0;
}

static void lanes_set(struct dma_test *dt, u64 val)
{
	dt->link_width = val;
}
DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set);

static void speed_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->link_speed;
}

static int speed_validate(u64 val)
{
	switch (val) {
	case 20:
	case 10:
	case 0:
		return 0;
	default:
		return -EINVAL;
	}
}

static void speed_set(struct dma_test *dt, u64 val)
{
	dt->link_speed = val;
}
DMA_TEST_DEBUGFS_ATTR(speed, speed_get, speed_validate, speed_set);

static void packets_to_receive_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->packets_to_receive;
}

static int packets_to_receive_validate(u64 val)
{
	return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_receive_set(struct dma_test *dt, u64 val)
{
	dt->packets_to_receive = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_receive, packets_to_receive_get,
		      packets_to_receive_validate, packets_to_receive_set);

static void packets_to_send_get(const struct dma_test *dt, u64 *val)
{
	*val = dt->packets_to_send;
}

static int packets_to_send_validate(u64 val)
{
	return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_send_set(struct dma_test *dt, u64 val)
{
	dt->packets_to_send = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get,
		      packets_to_send_validate, packets_to_send_set);

static int dma_test_set_bonding(struct dma_test *dt)
{
	switch (dt->link_width) {
	case 2:
		return tb_xdomain_lane_bonding_enable(dt->xd);
	case 1:
		tb_xdomain_lane_bonding_disable(dt->xd);
		fallthrough;
	default:
		return 0;
	}
}

static bool dma_test_validate_config(struct dma_test *dt)
{
	if (!dt->packets_to_send && !dt->packets_to_receive)
		return false;
	if (dt->packets_to_send && dt->packets_to_receive &&
	    dt->packets_to_send != dt->packets_to_receive)
		return false;
	return true;
}

static void dma_test_check_errors(struct dma_test *dt, int ret)
{
	if (!dt->error_code) {
		if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
			dt->error_code = DMA_TEST_SPEED_ERROR;
		} else if (dt->link_width &&
			   dt->xd->link_width != dt->link_width) {
			dt->error_code = DMA_TEST_WIDTH_ERROR;
		} else if (dt->packets_to_send != dt->packets_sent ||
			 dt->packets_to_receive != dt->packets_received ||
			 dt->crc_errors || dt->buffer_overflow_errors) {
			dt->error_code = DMA_TEST_PACKET_ERROR;
		} else {
			return;
		}
	}

	dt->result = DMA_TEST_FAIL;
}

static int test_store(void *data, u64 val)
{
	struct tb_service *svc = data;
	struct dma_test *dt = tb_service_get_drvdata(svc);
	int ret;

	if (val != 1)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dt->lock);
	if (ret)
		return ret;

	dt->packets_sent = 0;
	dt->packets_received = 0;
	dt->crc_errors = 0;
	dt->buffer_overflow_errors = 0;
	dt->result = DMA_TEST_SUCCESS;
	dt->error_code = DMA_TEST_NO_ERROR;

	dev_dbg(&svc->dev, "DMA test starting\n");
	if (dt->link_speed)
		dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed);
	if (dt->link_width)
		dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width);
	dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send);
	dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive);

	if (!dma_test_validate_config(dt)) {
		dev_err(&svc->dev, "invalid test configuration\n");
		dt->error_code = DMA_TEST_CONFIG_ERROR;
		goto out_unlock;
	}

	ret = dma_test_set_bonding(dt);
	if (ret) {
		dev_err(&svc->dev, "failed to set lanes\n");
		dt->error_code = DMA_TEST_BONDING_ERROR;
		goto out_unlock;
	}

	ret = dma_test_start_rings(dt);
	if (ret) {
		dev_err(&svc->dev, "failed to enable DMA rings\n");
		dt->error_code = DMA_TEST_DMA_ERROR;
		goto out_unlock;
	}

	if (dt->packets_to_receive) {
		reinit_completion(&dt->complete);
		ret = dma_test_submit_rx(dt, dt->packets_to_receive);
		if (ret) {
			dev_err(&svc->dev, "failed to submit receive buffers\n");
			dt->error_code = DMA_TEST_BUFFER_ERROR;
			goto out_stop;
		}
	}

	if (dt->packets_to_send) {
		ret = dma_test_submit_tx(dt, dt->packets_to_send);
		if (ret) {
			dev_err(&svc->dev, "failed to submit transmit buffers\n");
			dt->error_code = DMA_TEST_BUFFER_ERROR;
			goto out_stop;
		}
	}

	if (dt->packets_to_receive) {
		ret = wait_for_completion_interruptible(&dt->complete);
		if (ret) {
			dt->error_code = DMA_TEST_INTERRUPTED;
			goto out_stop;
		}
	}

out_stop:
	dma_test_stop_rings(dt);
out_unlock:
	dma_test_check_errors(dt, ret);
	mutex_unlock(&dt->lock);

	dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]);
	return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(test_fops, NULL, test_store, "%llu\n");

static int status_show(struct seq_file *s, void *not_used)
{
	struct tb_service *svc = s->private;
	struct dma_test *dt = tb_service_get_drvdata(svc);
	int ret;

	ret = mutex_lock_interruptible(&dt->lock);
	if (ret)
		return ret;

	seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]);
	if (dt->result == DMA_TEST_NOT_RUN)
		goto out_unlock;

	seq_printf(s, "packets received: %u\n", dt->packets_received);
	seq_printf(s, "packets sent: %u\n", dt->packets_sent);
	seq_printf(s, "CRC errors: %u\n", dt->crc_errors);
	seq_printf(s, "buffer overflow errors: %u\n",
		   dt->buffer_overflow_errors);
	seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]);

out_unlock:
	mutex_unlock(&dt->lock);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(status);

static void dma_test_debugfs_init(struct tb_service *svc)
{
	struct dma_test *dt = tb_service_get_drvdata(svc);

	dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir);

	debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops);
	debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops);
	debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc,
			    &packets_to_receive_fops);
	debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc,
			    &packets_to_send_fops);
	debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops);
	debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops);
}

static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct dma_test *dt;

	dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL);
	if (!dt)
		return -ENOMEM;

	dt->svc = svc;
	dt->xd = xd;
	mutex_init(&dt->lock);
	init_completion(&dt->complete);

	tb_service_set_drvdata(svc, dt);
	dma_test_debugfs_init(svc);

	return 0;
}

static void dma_test_remove(struct tb_service *svc)
{
	struct dma_test *dt = tb_service_get_drvdata(svc);

	mutex_lock(&dt->lock);
	debugfs_remove_recursive(dt->debugfs_dir);
	mutex_unlock(&dt->lock);
}

static int __maybe_unused dma_test_suspend(struct device *dev)
{
	/*
	 * No need to do anything special here. If userspace is writing
	 * to the test attribute when suspend started, it comes out from
	 * wait_for_completion_interruptible() with -ERESTARTSYS and the
	 * DMA test fails tearing down the rings. Once userspace is
	 * thawed the kernel restarts the write syscall effectively
	 * re-running the test.
	 */
	return 0;
}

static int __maybe_unused dma_test_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops dma_test_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dma_test_suspend, dma_test_resume)
};

static const struct tb_service_id dma_test_ids[] = {
	{ TB_SERVICE("dma_test", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, dma_test_ids);

static struct tb_service_driver dma_test_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt_dma_test",
		.pm = &dma_test_pm_ops,
	},
	.probe = dma_test_probe,
	.remove = dma_test_remove,
	.id_table = dma_test_ids,
};

static int __init dma_test_init(void)
{
	u64 data_value = DMA_TEST_DATA_PATTERN;
	int i, ret;

	dma_test_pattern = kmalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
	if (!dma_test_pattern)
		return -ENOMEM;

	/* Fill the whole frame with an incrementing 64-bit pattern */
	for (i = 0; i < DMA_TEST_FRAME_SIZE / sizeof(data_value); i++)
		((u64 *)dma_test_pattern)[i] = data_value++;

	dma_test_dir = tb_property_create_dir(&dma_test_dir_uuid);
	if (!dma_test_dir) {
		ret = -ENOMEM;
		goto err_free_pattern;
	}

	tb_property_add_immediate(dma_test_dir, "prtcid", 1);
	tb_property_add_immediate(dma_test_dir, "prtcvers", 1);
	tb_property_add_immediate(dma_test_dir, "prtcrevs", 0);
	tb_property_add_immediate(dma_test_dir, "prtcstns", 0);

	ret = tb_register_property_dir("dma_test", dma_test_dir);
	if (ret)
		goto err_free_dir;

	ret = tb_register_service_driver(&dma_test_driver);
	if (ret)
		goto err_unregister_dir;

	return 0;

err_unregister_dir:
	tb_unregister_property_dir("dma_test", dma_test_dir);
err_free_dir:
	tb_property_free_dir(dma_test_dir);
err_free_pattern:
	kfree(dma_test_pattern);

	return ret;
}
module_init(dma_test_init);

static void __exit dma_test_exit(void)
{
	tb_unregister_service_driver(&dma_test_driver);
	tb_unregister_property_dir("dma_test", dma_test_dir);
	tb_property_free_dir(dma_test_dir);
	kfree(dma_test_pattern);
}
module_exit(dma_test_exit);

MODULE_AUTHOR("Isaac Hazan <isaac.hazan@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("DMA traffic test driver");
MODULE_LICENSE("GPL v2");