cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ntb_perf.c (39477B)


/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *   Copyright(c) 2017 T-Platforms. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *   Copyright(c) 2017 T-Platforms. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Perf Linux driver
 */

/*
 * How to use this tool, by example.
 *
 * Assuming $DBG_DIR is something like:
 * '/sys/kernel/debug/ntb_perf/0000:00:03.0'
 * Suppose aside from the local device there is at least one remote device
 * connected to the NTB with index 0.
 *-----------------------------------------------------------------------------
 * Eg: install driver with specified chunk/total orders and dma-enabled flag
 *
 * root@self# insmod ntb_perf.ko chunk_order=19 total_order=28 use_dma
 *-----------------------------------------------------------------------------
 * Eg: check NTB ports (index) and MW mapping information
 *
 * root@self# cat $DBG_DIR/info
 *-----------------------------------------------------------------------------
 * Eg: start performance test with peer (index 0) and get the test metrics
 *
 * root@self# echo 0 > $DBG_DIR/run
 * root@self# cat $DBG_DIR/run
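 *-----------------------------------------------------------------------------
 * Eg: optionally adjust the number of measurement threads before a run
 *     (the value 8 below is just an illustration; the default is 1 and
 *     the upper bound is MAX_THREADS_CNT)
 *
 * root@self# echo 8 > $DBG_DIR/threads_count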
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/ntb.h>

#define DRIVER_NAME		"ntb_perf"
#define DRIVER_VERSION		"2.0"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Dave Jiang <dave.jiang@intel.com>");
MODULE_DESCRIPTION("PCIe NTB Performance Measurement Tool");

#define MAX_THREADS_CNT		32
#define DEF_THREADS_CNT		1
#define MAX_CHUNK_SIZE		SZ_1M
#define MAX_CHUNK_ORDER		20 /* no larger than 1M */

#define DMA_TRIES		100
#define DMA_MDELAY		10

#define MSG_TRIES		1000
#define MSG_UDELAY_LOW		1000000
#define MSG_UDELAY_HIGH		2000000

#define PERF_BUF_LEN 1024

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Upper limit of memory window size");

static unsigned char chunk_order = 19; /* 512K */
module_param(chunk_order, byte, 0644);
MODULE_PARM_DESC(chunk_order, "Data chunk order [2^n] to transfer");

static unsigned char total_order = 30; /* 1G */
module_param(total_order, byte, 0644);
MODULE_PARM_DESC(total_order, "Total data order [2^n] to transfer");

static bool use_dma; /* default to 0 */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to measure performance");
/*==============================================================================
 *                         Perf driver data definition
 *==============================================================================
 */

enum perf_cmd {
	PERF_CMD_INVAL = -1,/* invalid spad command */
	PERF_CMD_SSIZE = 0, /* send out buffer size */
	PERF_CMD_RSIZE = 1, /* recv in  buffer size */
	PERF_CMD_SXLAT = 2, /* send in  buffer xlat */
	PERF_CMD_RXLAT = 3, /* recv out buffer xlat */
	PERF_CMD_CLEAR = 4, /* clear allocated memory */
	PERF_STS_DONE  = 5, /* init is done */
	PERF_STS_LNKUP = 6, /* link up state flag */
};
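
/*
 * Note: the non-negative values above double as bit indexes in the
 * peer->sts bitmap (see perf_cmd_exec() and perf_service_work()), so a
 * command is "executed" by setting its bit and queueing the service work.
 */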

struct perf_ctx;

struct perf_peer {
	struct perf_ctx	*perf;
	int pidx;
	int gidx;

	/* Outbound MW params */
	u64 outbuf_xlat;
	resource_size_t outbuf_size;
	void __iomem *outbuf;
	phys_addr_t out_phys_addr;
	dma_addr_t dma_dst_addr;
	/* Inbound MW params */
	dma_addr_t inbuf_xlat;
	resource_size_t inbuf_size;
	void		*inbuf;

	/* NTB connection setup service */
	struct work_struct	service;
	unsigned long		sts;

	struct completion init_comp;
};
#define to_peer_service(__work) \
	container_of(__work, struct perf_peer, service)

struct perf_thread {
	struct perf_ctx *perf;
	int tidx;

	/* DMA-based test sync parameters */
	atomic_t dma_sync;
	wait_queue_head_t dma_wait;
	struct dma_chan *dma_chan;

	/* Data source and measured statistics */
	void *src;
	u64 copied;
	ktime_t duration;
	int status;
	struct work_struct work;
};
#define to_thread_work(__work) \
	container_of(__work, struct perf_thread, work)

struct perf_ctx {
	struct ntb_dev *ntb;

	/* Global device index and peers descriptors */
	int gidx;
	int pcnt;
	struct perf_peer *peers;

	/* Performance measuring work-threads interface */
	unsigned long busy_flag;
	wait_queue_head_t twait;
	atomic_t tsync;
	u8 tcnt;
	struct perf_peer *test_peer;
	struct perf_thread threads[MAX_THREADS_CNT];

	/* Scratchpad/Message IO operations */
	int (*cmd_send)(struct perf_peer *peer, enum perf_cmd cmd, u64 data);
	int (*cmd_recv)(struct perf_ctx *perf, int *pidx, enum perf_cmd *cmd,
			u64 *data);

	struct dentry *dbgfs_dir;
};

/*
 * Scratchpad-based commands interface
 */
#define PERF_SPAD_CNT(_pcnt) \
	(3*((_pcnt) + 1))
#define PERF_SPAD_CMD(_gidx) \
	(3*(_gidx))
#define PERF_SPAD_LDATA(_gidx) \
	(3*(_gidx) + 1)
#define PERF_SPAD_HDATA(_gidx) \
	(3*(_gidx) + 2)
#define PERF_SPAD_NOTIFY(_gidx) \
	(BIT_ULL(_gidx))
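
/*
 * For example, a peer with global index 1 gets its commands via
 * scratchpads 3 (CMD), 4 (LDATA, low 32 bits of data) and 5 (HDATA,
 * high 32 bits of data), and is notified via doorbell bit 0x2.
 */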

/*
 * Message-based commands interface
 */
#define PERF_MSG_CNT		3
#define PERF_MSG_CMD		0
#define PERF_MSG_LDATA		1
#define PERF_MSG_HDATA		2
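
/*
 * Unlike the scratchpads, the message registers don't need to be split
 * per global index: a command is considered fully received only when all
 * three inbound message statuses are set (see the hweight64() check in
 * perf_msg_cmd_recv()).
 */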

/*==============================================================================
 *                           Static data declarations
 *==============================================================================
 */

static struct dentry *perf_dbgfs_topdir;

static struct workqueue_struct *perf_wq __read_mostly;

/*==============================================================================
 *                  NTB cross-link commands execution service
 *==============================================================================
 */

static void perf_terminate_test(struct perf_ctx *perf);

static inline bool perf_link_is_up(struct perf_peer *peer)
{
	u64 link;

	link = ntb_link_is_up(peer->perf->ntb, NULL, NULL);
	return !!(link & BIT_ULL_MASK(peer->pidx));
}

static int perf_spad_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
			      u64 data)
{
	struct perf_ctx *perf = peer->perf;
	int try;
	u32 sts;

	dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);

	/*
	 * Perform a predefined number of attempts before giving up. The
	 * data is sent to the port-specific scratchpad to prevent a
	 * multi-port access race condition. Additionally there is no need
	 * for local locking, since only the thread-safe service work uses
	 * this method.
	 */
	for (try = 0; try < MSG_TRIES; try++) {
		if (!perf_link_is_up(peer))
			return -ENOLINK;

		sts = ntb_peer_spad_read(perf->ntb, peer->pidx,
					 PERF_SPAD_CMD(perf->gidx));
		if (sts != PERF_CMD_INVAL) {
			usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
			continue;
		}

		ntb_peer_spad_write(perf->ntb, peer->pidx,
				    PERF_SPAD_LDATA(perf->gidx),
				    lower_32_bits(data));
		ntb_peer_spad_write(perf->ntb, peer->pidx,
				    PERF_SPAD_HDATA(perf->gidx),
				    upper_32_bits(data));
		ntb_peer_spad_write(perf->ntb, peer->pidx,
				    PERF_SPAD_CMD(perf->gidx),
				    cmd);
		ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));

		dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n",
			PERF_SPAD_NOTIFY(peer->gidx));

		break;
	}

	return try < MSG_TRIES ? 0 : -EAGAIN;
}

static int perf_spad_cmd_recv(struct perf_ctx *perf, int *pidx,
			      enum perf_cmd *cmd, u64 *data)
{
	struct perf_peer *peer;
	u32 val;

	ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));

	/*
	 * We start scanning all over again, since the cleared DB may have
	 * been set by any peer. Yes, this gives peers with smaller indexes
	 * greater service priority, but it keeps the scratchpad and
	 * message code unified and simple.
	 */
	for (*pidx = 0; *pidx < perf->pcnt; (*pidx)++) {
		peer = &perf->peers[*pidx];

		if (!perf_link_is_up(peer))
			continue;

		val = ntb_spad_read(perf->ntb, PERF_SPAD_CMD(peer->gidx));
		if (val == PERF_CMD_INVAL)
			continue;

		*cmd = val;

		val = ntb_spad_read(perf->ntb, PERF_SPAD_LDATA(peer->gidx));
		*data = val;

		val = ntb_spad_read(perf->ntb, PERF_SPAD_HDATA(peer->gidx));
		*data |= (u64)val << 32;

		/* The next command can be retrieved from now on */
		ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx),
			       PERF_CMD_INVAL);

		dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data);

		return 0;
	}

	return -ENODATA;
}

static int perf_msg_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
			     u64 data)
{
	struct perf_ctx *perf = peer->perf;
	int try, ret;
	u64 outbits;

	dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);

	/*
	 * Perform a predefined number of attempts before giving up. Message
	 * registers are free of race-condition problems when accessed from
	 * different ports, so we don't need to split the registers by
	 * global device index. We also don't need local locking, since the
	 * method is used from the service work only.
	 */
	outbits = ntb_msg_outbits(perf->ntb);
	for (try = 0; try < MSG_TRIES; try++) {
		if (!perf_link_is_up(peer))
			return -ENOLINK;

		ret = ntb_msg_clear_sts(perf->ntb, outbits);
		if (ret)
			return ret;

		ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_LDATA,
				   lower_32_bits(data));

		if (ntb_msg_read_sts(perf->ntb) & outbits) {
			usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
			continue;
		}

		ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA,
				   upper_32_bits(data));

		/* This call shall trigger the peer message event */
		ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd);

		break;
	}

	return try < MSG_TRIES ? 0 : -EAGAIN;
}

static int perf_msg_cmd_recv(struct perf_ctx *perf, int *pidx,
			     enum perf_cmd *cmd, u64 *data)
{
	u64 inbits;
	u32 val;

	inbits = ntb_msg_inbits(perf->ntb);

	if (hweight64(ntb_msg_read_sts(perf->ntb) & inbits) < 3)
		return -ENODATA;

	val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_CMD);
	*cmd = val;

	val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_LDATA);
	*data = val;

	val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_HDATA);
	*data |= (u64)val << 32;

	/* The next command can be retrieved from now on */
	ntb_msg_clear_sts(perf->ntb, inbits);

	dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data);

	return 0;
}

static int perf_cmd_send(struct perf_peer *peer, enum perf_cmd cmd, u64 data)
{
	struct perf_ctx *perf = peer->perf;

	if (cmd == PERF_CMD_SSIZE || cmd == PERF_CMD_SXLAT)
		return perf->cmd_send(peer, cmd, data);

	dev_err(&perf->ntb->dev, "Send invalid command\n");
	return -EINVAL;
}

static int perf_cmd_exec(struct perf_peer *peer, enum perf_cmd cmd)
{
	switch (cmd) {
	case PERF_CMD_SSIZE:
	case PERF_CMD_RSIZE:
	case PERF_CMD_SXLAT:
	case PERF_CMD_RXLAT:
	case PERF_CMD_CLEAR:
		break;
	default:
		dev_err(&peer->perf->ntb->dev, "Exec invalid command\n");
		return -EINVAL;
	}

	/* No need for a memory barrier, since bit ops have an internal lock */
	set_bit(cmd, &peer->sts);

	dev_dbg(&peer->perf->ntb->dev, "CMD exec: %d\n", cmd);

	(void)queue_work(system_highpri_wq, &peer->service);

	return 0;
}

static int perf_cmd_recv(struct perf_ctx *perf)
{
	struct perf_peer *peer;
	int ret, pidx, cmd;
	u64 data;

	while (!(ret = perf->cmd_recv(perf, &pidx, &cmd, &data))) {
		peer = &perf->peers[pidx];

		switch (cmd) {
		case PERF_CMD_SSIZE:
			peer->inbuf_size = data;
			return perf_cmd_exec(peer, PERF_CMD_RSIZE);
		case PERF_CMD_SXLAT:
			peer->outbuf_xlat = data;
			return perf_cmd_exec(peer, PERF_CMD_RXLAT);
		default:
			dev_err(&perf->ntb->dev, "Recv invalid command\n");
			return -EINVAL;
		}
	}

	/* Return 0 if no data is left to process, otherwise an error */
	return ret == -ENODATA ? 0 : ret;
}

static void perf_link_event(void *ctx)
{
	struct perf_ctx *perf = ctx;
	struct perf_peer *peer;
	bool lnk_up;
	int pidx;

	for (pidx = 0; pidx < perf->pcnt; pidx++) {
		peer = &perf->peers[pidx];

		lnk_up = perf_link_is_up(peer);

		if (lnk_up &&
		    !test_and_set_bit(PERF_STS_LNKUP, &peer->sts)) {
			perf_cmd_exec(peer, PERF_CMD_SSIZE);
		} else if (!lnk_up &&
			   test_and_clear_bit(PERF_STS_LNKUP, &peer->sts)) {
			perf_cmd_exec(peer, PERF_CMD_CLEAR);
		}
	}
}

static void perf_db_event(void *ctx, int vec)
{
	struct perf_ctx *perf = ctx;

	dev_dbg(&perf->ntb->dev, "DB vec %d mask %#llx bits %#llx\n", vec,
		ntb_db_vector_mask(perf->ntb, vec), ntb_db_read(perf->ntb));

	/* Just receive all available commands */
	(void)perf_cmd_recv(perf);
}

static void perf_msg_event(void *ctx)
{
	struct perf_ctx *perf = ctx;

	dev_dbg(&perf->ntb->dev, "Msg status bits %#llx\n",
		ntb_msg_read_sts(perf->ntb));

	/* Messages are only sent one-by-one */
	(void)perf_cmd_recv(perf);
}

static const struct ntb_ctx_ops perf_ops = {
	.link_event = perf_link_event,
	.db_event = perf_db_event,
	.msg_event = perf_msg_event
};

static void perf_free_outbuf(struct perf_peer *peer)
{
	(void)ntb_peer_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
}

static int perf_setup_outbuf(struct perf_peer *peer)
{
	struct perf_ctx *perf = peer->perf;
	int ret;

	/* The outbuf size can be unaligned due to a custom max_mw_size */
	ret = ntb_peer_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
				    peer->outbuf_xlat, peer->outbuf_size);
	if (ret) {
		dev_err(&perf->ntb->dev, "Failed to set outbuf translation\n");
		return ret;
	}

	/* Initialization is finally done */
	set_bit(PERF_STS_DONE, &peer->sts);
	complete_all(&peer->init_comp);

	return 0;
}

static void perf_free_inbuf(struct perf_peer *peer)
{
	if (!peer->inbuf)
		return;

	(void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
	dma_free_coherent(&peer->perf->ntb->pdev->dev, peer->inbuf_size,
			  peer->inbuf, peer->inbuf_xlat);
	peer->inbuf = NULL;
}

static int perf_setup_inbuf(struct perf_peer *peer)
{
	resource_size_t xlat_align, size_align, size_max;
	struct perf_ctx *perf = peer->perf;
	int ret;

	/* Get inbound MW parameters */
	ret = ntb_mw_get_align(perf->ntb, peer->pidx, perf->gidx,
			       &xlat_align, &size_align, &size_max);
	if (ret) {
		dev_err(&perf->ntb->dev, "Couldn't get inbuf restrictions\n");
		return ret;
	}

	if (peer->inbuf_size > size_max) {
		dev_err(&perf->ntb->dev, "Too big inbuf size %pa > %pa\n",
			&peer->inbuf_size, &size_max);
		return -EINVAL;
	}

	peer->inbuf_size = round_up(peer->inbuf_size, size_align);

	perf_free_inbuf(peer);

	peer->inbuf = dma_alloc_coherent(&perf->ntb->pdev->dev,
					 peer->inbuf_size, &peer->inbuf_xlat,
					 GFP_KERNEL);
	if (!peer->inbuf) {
		dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n",
			&peer->inbuf_size);
		return -ENOMEM;
	}
	if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
		ret = -EINVAL;
		dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
		goto err_free_inbuf;
	}

	ret = ntb_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
			       peer->inbuf_xlat, peer->inbuf_size);
	if (ret) {
		dev_err(&perf->ntb->dev, "Failed to set inbuf translation\n");
		goto err_free_inbuf;
	}

	/*
	 * We submit the inbuf xlat transmission cmd for execution here to
	 * follow the code architecture, even though this method is called
	 * from the service work itself, so the command will be executed
	 * right after it returns.
	 */
	(void)perf_cmd_exec(peer, PERF_CMD_SXLAT);

	return 0;

err_free_inbuf:
	perf_free_inbuf(peer);

	return ret;
}

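/*
 * Buffer initialization handshake, driven by the service work below:
 *   1) SSIZE sends the local outbuf size; the peer saves it as its
 *      inbuf_size and executes RSIZE;
 *   2) RSIZE allocates the inbuf, sets the inbound MW translation and
 *      submits SXLAT;
 *   3) SXLAT sends the inbuf xlat address; the peer saves it as
 *      outbuf_xlat and executes RXLAT;
 *   4) RXLAT sets the outbound MW translation and marks the peer
 *      PERF_STS_DONE.
 */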
static void perf_service_work(struct work_struct *work)
{
	struct perf_peer *peer = to_peer_service(work);

	if (test_and_clear_bit(PERF_CMD_SSIZE, &peer->sts))
		perf_cmd_send(peer, PERF_CMD_SSIZE, peer->outbuf_size);

	if (test_and_clear_bit(PERF_CMD_RSIZE, &peer->sts))
		perf_setup_inbuf(peer);

	if (test_and_clear_bit(PERF_CMD_SXLAT, &peer->sts))
		perf_cmd_send(peer, PERF_CMD_SXLAT, peer->inbuf_xlat);

	if (test_and_clear_bit(PERF_CMD_RXLAT, &peer->sts))
		perf_setup_outbuf(peer);

	if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) {
		init_completion(&peer->init_comp);
		clear_bit(PERF_STS_DONE, &peer->sts);
		if (test_bit(0, &peer->perf->busy_flag) &&
		    peer == peer->perf->test_peer) {
			dev_warn(&peer->perf->ntb->dev,
				"Freeing while test on-fly\n");
			perf_terminate_test(peer->perf);
		}
		perf_free_outbuf(peer);
		perf_free_inbuf(peer);
	}
}

static int perf_init_service(struct perf_ctx *perf)
{
	u64 mask;

	if (ntb_peer_mw_count(perf->ntb) < perf->pcnt) {
		dev_err(&perf->ntb->dev, "Not enough memory windows\n");
		return -EINVAL;
	}

	if (ntb_msg_count(perf->ntb) >= PERF_MSG_CNT) {
		perf->cmd_send = perf_msg_cmd_send;
		perf->cmd_recv = perf_msg_cmd_recv;

		dev_dbg(&perf->ntb->dev, "Message service initialized\n");

		return 0;
	}

	dev_dbg(&perf->ntb->dev, "Message service unsupported\n");

	mask = GENMASK_ULL(perf->pcnt, 0);
	if (ntb_spad_count(perf->ntb) >= PERF_SPAD_CNT(perf->pcnt) &&
	    (ntb_db_valid_mask(perf->ntb) & mask) == mask) {
		perf->cmd_send = perf_spad_cmd_send;
		perf->cmd_recv = perf_spad_cmd_recv;

		dev_dbg(&perf->ntb->dev, "Scratchpad service initialized\n");

		return 0;
	}

	dev_dbg(&perf->ntb->dev, "Scratchpad service unsupported\n");

	dev_err(&perf->ntb->dev, "Command services unsupported\n");

	return -EINVAL;
}

static int perf_enable_service(struct perf_ctx *perf)
{
	u64 mask, incmd_bit;
	int ret, sidx, scnt;

	mask = ntb_db_valid_mask(perf->ntb);
	(void)ntb_db_set_mask(perf->ntb, mask);

	ret = ntb_set_ctx(perf->ntb, perf, &perf_ops);
	if (ret)
		return ret;

	if (perf->cmd_send == perf_msg_cmd_send) {
		u64 inbits, outbits;

		inbits = ntb_msg_inbits(perf->ntb);
		outbits = ntb_msg_outbits(perf->ntb);
		(void)ntb_msg_set_mask(perf->ntb, inbits | outbits);

		incmd_bit = BIT_ULL(__ffs64(inbits));
		ret = ntb_msg_clear_mask(perf->ntb, incmd_bit);

		dev_dbg(&perf->ntb->dev, "MSG sts unmasked %#llx\n", incmd_bit);
	} else {
		scnt = ntb_spad_count(perf->ntb);
		for (sidx = 0; sidx < scnt; sidx++)
			ntb_spad_write(perf->ntb, sidx, PERF_CMD_INVAL);
		incmd_bit = PERF_SPAD_NOTIFY(perf->gidx);
		ret = ntb_db_clear_mask(perf->ntb, incmd_bit);

		dev_dbg(&perf->ntb->dev, "DB bits unmasked %#llx\n", incmd_bit);
	}
	if (ret) {
		ntb_clear_ctx(perf->ntb);
		return ret;
	}

	ntb_link_enable(perf->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	/* Might not be necessary */
	ntb_link_event(perf->ntb);

	return 0;
}

static void perf_disable_service(struct perf_ctx *perf)
{
	int pidx;

	if (perf->cmd_send == perf_msg_cmd_send) {
		u64 inbits;

		inbits = ntb_msg_inbits(perf->ntb);
		(void)ntb_msg_set_mask(perf->ntb, inbits);
	} else {
		(void)ntb_db_set_mask(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
	}

	ntb_clear_ctx(perf->ntb);

	for (pidx = 0; pidx < perf->pcnt; pidx++)
		perf_cmd_exec(&perf->peers[pidx], PERF_CMD_CLEAR);

	for (pidx = 0; pidx < perf->pcnt; pidx++)
		flush_work(&perf->peers[pidx].service);

	for (pidx = 0; pidx < perf->pcnt; pidx++) {
		struct perf_peer *peer = &perf->peers[pidx];

		ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), 0);
	}

	ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));

	ntb_link_disable(perf->ntb);
}

/*==============================================================================
 *                      Performance measuring work-thread
 *==============================================================================
 */

static void perf_dma_copy_callback(void *data)
{
	struct perf_thread *pthr = data;

	atomic_dec(&pthr->dma_sync);
	wake_up(&pthr->dma_wait);
}
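
/*
 * The dma_sync counter tracks in-flight DMA copies: it is incremented for
 * each successfully submitted chunk in perf_copy_chunk() and decremented
 * by the callback above; perf_sync_test() waits for it to drop to zero.
 */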

static int perf_copy_chunk(struct perf_thread *pthr,
			   void __iomem *dst, void *src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct device *dma_dev;
	int try = 0, ret = 0;
	struct perf_peer *peer = pthr->perf->test_peer;
	void __iomem *vbase;
	void __iomem *dst_vaddr;
	dma_addr_t dst_dma_addr;

	if (!use_dma) {
		memcpy_toio(dst, src, len);
		goto ret_check_tsync;
	}

	dma_dev = pthr->dma_chan->device->dev;

	if (!is_dma_copy_aligned(pthr->dma_chan->device, offset_in_page(src),
				 offset_in_page(dst), len))
		return -EIO;

	vbase = peer->outbuf;
	dst_vaddr = dst;
	dst_dma_addr = peer->dma_dst_addr + (dst_vaddr - vbase);

	unmap = dmaengine_get_unmap_data(dma_dev, 1, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dma_dev, virt_to_page(src),
		offset_in_page(src), len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, unmap->addr[0])) {
		ret = -EIO;
		goto err_free_resource;
	}
	unmap->to_cnt = 1;

	do {
		tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, dst_dma_addr,
			unmap->addr[0], len, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			msleep(DMA_MDELAY);
	} while (!tx && (try++ < DMA_TRIES));

	if (!tx) {
		ret = -EIO;
		goto err_free_resource;
	}

	tx->callback = perf_dma_copy_callback;
	tx->callback_param = pthr;
	dma_set_unmap(tx, unmap);

	ret = dma_submit_error(dmaengine_submit(tx));
	if (ret) {
		dmaengine_unmap_put(unmap);
		goto err_free_resource;
	}

	dmaengine_unmap_put(unmap);

	atomic_inc(&pthr->dma_sync);
	dma_async_issue_pending(pthr->dma_chan);

ret_check_tsync:
	return likely(atomic_read(&pthr->perf->tsync) > 0) ? 0 : -EINTR;

err_free_resource:
	dmaengine_unmap_put(unmap);

	return ret;
}

static bool perf_dma_filter(struct dma_chan *chan, void *data)
{
	struct perf_ctx *perf = data;
	int node;

	node = dev_to_node(&perf->ntb->dev);

	return node == NUMA_NO_NODE || node == dev_to_node(chan->device->dev);
}

static int perf_init_test(struct perf_thread *pthr)
{
	struct perf_ctx *perf = pthr->perf;
	dma_cap_mask_t dma_mask;
	struct perf_peer *peer = pthr->perf->test_peer;

	pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL,
				 dev_to_node(&perf->ntb->dev));
	if (!pthr->src)
		return -ENOMEM;

	get_random_bytes(pthr->src, perf->test_peer->outbuf_size);

	if (!use_dma)
		return 0;

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);
	pthr->dma_chan = dma_request_channel(dma_mask, perf_dma_filter, perf);
	if (!pthr->dma_chan) {
		dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n",
			pthr->tidx);
		goto err_free;
	}
	peer->dma_dst_addr =
		dma_map_resource(pthr->dma_chan->device->dev,
				 peer->out_phys_addr, peer->outbuf_size,
				 DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(pthr->dma_chan->device->dev,
			      peer->dma_dst_addr)) {
		dev_err(pthr->dma_chan->device->dev, "%d: Failed to map DMA addr\n",
			pthr->tidx);
		peer->dma_dst_addr = 0;
		dma_release_channel(pthr->dma_chan);
		goto err_free;
	}
	dev_dbg(pthr->dma_chan->device->dev, "%d: Map MMIO %pa to DMA addr %pad\n",
			pthr->tidx,
			&peer->out_phys_addr,
			&peer->dma_dst_addr);

	atomic_set(&pthr->dma_sync, 0);
	return 0;

err_free:
	atomic_dec(&perf->tsync);
	wake_up(&perf->twait);
	kfree(pthr->src);
	return -ENODEV;
}

static int perf_run_test(struct perf_thread *pthr)
{
	struct perf_peer *peer = pthr->perf->test_peer;
	struct perf_ctx *perf = pthr->perf;
	void __iomem *flt_dst, *bnd_dst;
	u64 total_size, chunk_size;
	void *flt_src;
	int ret = 0;

	total_size = 1ULL << total_order;
	chunk_size = 1ULL << chunk_order;
	chunk_size = min_t(u64, peer->outbuf_size, chunk_size);

	flt_src = pthr->src;
	bnd_dst = peer->outbuf + peer->outbuf_size;
	flt_dst = peer->outbuf;

	pthr->duration = ktime_get();

	/* The copied field is cleared at the test launch stage */
	while (pthr->copied < total_size) {
		ret = perf_copy_chunk(pthr, flt_dst, flt_src, chunk_size);
		if (ret) {
			dev_err(&perf->ntb->dev, "%d: Got error %d on test\n",
				pthr->tidx, ret);
			return ret;
		}

		pthr->copied += chunk_size;

		flt_dst += chunk_size;
		flt_src += chunk_size;
		if (flt_dst >= bnd_dst || flt_dst < peer->outbuf) {
			flt_dst = peer->outbuf;
			flt_src = pthr->src;
		}

		/* Yield the CPU to give other threads a chance to use it */
		schedule();
	}

	return 0;
}

static int perf_sync_test(struct perf_thread *pthr)
{
	struct perf_ctx *perf = pthr->perf;

	if (!use_dma)
		goto no_dma_ret;

	wait_event(pthr->dma_wait,
		   (atomic_read(&pthr->dma_sync) == 0 ||
		    atomic_read(&perf->tsync) < 0));

	if (atomic_read(&perf->tsync) < 0)
		return -EINTR;

no_dma_ret:
	pthr->duration = ktime_sub(ktime_get(), pthr->duration);

	dev_dbg(&perf->ntb->dev, "%d: copied %llu bytes\n",
		pthr->tidx, pthr->copied);

	dev_dbg(&perf->ntb->dev, "%d: lasted %llu usecs\n",
		pthr->tidx, ktime_to_us(pthr->duration));

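	/* Bytes copied per microsecond is numerically MBytes/s */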
	dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx,
		div64_u64(pthr->copied, ktime_to_us(pthr->duration)));

	return 0;
}

static void perf_clear_test(struct perf_thread *pthr)
{
	struct perf_ctx *perf = pthr->perf;

	if (!use_dma)
		goto no_dma_notify;

	/*
	 * If the test finished without errors, termination isn't needed,
	 * but we call it anyway just to be sure the transfers have
	 * completed.
	 */
	(void)dmaengine_terminate_sync(pthr->dma_chan);
	if (pthr->perf->test_peer->dma_dst_addr)
		dma_unmap_resource(pthr->dma_chan->device->dev,
				   pthr->perf->test_peer->dma_dst_addr,
				   pthr->perf->test_peer->outbuf_size,
				   DMA_FROM_DEVICE, 0);

	dma_release_channel(pthr->dma_chan);

no_dma_notify:
	atomic_dec(&perf->tsync);
	wake_up(&perf->twait);
	kfree(pthr->src);
}

static void perf_thread_work(struct work_struct *work)
{
	struct perf_thread *pthr = to_thread_work(work);
	int ret;

	/*
	 * Perform the stages in compliance with the use_dma flag value.
	 * The test status is changed only if an error happens; otherwise
	 * the -ENODATA status is kept while the test is on-fly. Results
	 * synchronization is performed only if the test finished without
	 * an error or interruption.
	 */
	ret = perf_init_test(pthr);
	if (ret) {
		pthr->status = ret;
		return;
	}

	ret = perf_run_test(pthr);
	if (ret) {
		pthr->status = ret;
		goto err_clear_test;
	}

	pthr->status = perf_sync_test(pthr);

err_clear_test:
	perf_clear_test(pthr);
}

static int perf_set_tcnt(struct perf_ctx *perf, u8 tcnt)
{
	if (tcnt == 0 || tcnt > MAX_THREADS_CNT)
		return -EINVAL;

	if (test_and_set_bit_lock(0, &perf->busy_flag))
		return -EBUSY;

	perf->tcnt = tcnt;

	clear_bit_unlock(0, &perf->busy_flag);

	return 0;
}

static void perf_terminate_test(struct perf_ctx *perf)
{
	int tidx;

	atomic_set(&perf->tsync, -1);
	wake_up(&perf->twait);

	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
		wake_up(&perf->threads[tidx].dma_wait);
		cancel_work_sync(&perf->threads[tidx].work);
	}
}

static int perf_submit_test(struct perf_peer *peer)
{
	struct perf_ctx *perf = peer->perf;
	struct perf_thread *pthr;
	int tidx, ret;

	ret = wait_for_completion_interruptible(&peer->init_comp);
	if (ret < 0)
		return ret;

	if (test_and_set_bit_lock(0, &perf->busy_flag))
		return -EBUSY;

	perf->test_peer = peer;
	atomic_set(&perf->tsync, perf->tcnt);

	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
		pthr = &perf->threads[tidx];

		pthr->status = -ENODATA;
		pthr->copied = 0;
		pthr->duration = ktime_set(0, 0);
		if (tidx < perf->tcnt)
			(void)queue_work(perf_wq, &pthr->work);
	}

	ret = wait_event_interruptible(perf->twait,
				       atomic_read(&perf->tsync) <= 0);
	if (ret == -ERESTARTSYS) {
		perf_terminate_test(perf);
		ret = -EINTR;
	}

	clear_bit_unlock(0, &perf->busy_flag);

	return ret;
}

static int perf_read_stats(struct perf_ctx *perf, char *buf,
			   size_t size, ssize_t *pos)
{
	struct perf_thread *pthr;
	int tidx;

	if (test_and_set_bit_lock(0, &perf->busy_flag))
		return -EBUSY;

	(*pos) += scnprintf(buf + *pos, size - *pos,
		"    Peer %d test statistics:\n", perf->test_peer->pidx);

	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
		pthr = &perf->threads[tidx];

		if (pthr->status == -ENODATA)
			continue;

		if (pthr->status) {
			(*pos) += scnprintf(buf + *pos, size - *pos,
				"%d: error status %d\n", tidx, pthr->status);
			continue;
		}

		(*pos) += scnprintf(buf + *pos, size - *pos,
			"%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
			tidx, pthr->copied, ktime_to_us(pthr->duration),
			div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
	}

	clear_bit_unlock(0, &perf->busy_flag);

	return 0;
}

static void perf_init_threads(struct perf_ctx *perf)
{
	struct perf_thread *pthr;
	int tidx;

	perf->tcnt = DEF_THREADS_CNT;
	perf->test_peer = &perf->peers[0];
	init_waitqueue_head(&perf->twait);

	for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
		pthr = &perf->threads[tidx];

		pthr->perf = perf;
		pthr->tidx = tidx;
		pthr->status = -ENODATA;
		init_waitqueue_head(&pthr->dma_wait);
		INIT_WORK(&pthr->work, perf_thread_work);
	}
}

static void perf_clear_threads(struct perf_ctx *perf)
{
	perf_terminate_test(perf);
}

/*==============================================================================
 *                               DebugFS nodes
 *==============================================================================
 */

static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
				    size_t size, loff_t *offp)
{
	struct perf_ctx *perf = filep->private_data;
	struct perf_peer *peer;
	size_t buf_size;
	ssize_t pos = 0;
	int ret, pidx;
	char *buf;

	buf_size = min_t(size_t, size, 0x1000U);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, buf_size - pos,
		"    Performance measuring tool info:\n\n");

	pos += scnprintf(buf + pos, buf_size - pos,
		"Local port %d, Global index %d\n", ntb_port_number(perf->ntb),
		perf->gidx);
	pos += scnprintf(buf + pos, buf_size - pos, "Test status: ");
	if (test_bit(0, &perf->busy_flag)) {
		pos += scnprintf(buf + pos, buf_size - pos,
			"on-fly with port %d (%d)\n",
			ntb_peer_port_number(perf->ntb, perf->test_peer->pidx),
			perf->test_peer->pidx);
	} else {
		pos += scnprintf(buf + pos, buf_size - pos, "idle\n");
	}

	for (pidx = 0; pidx < perf->pcnt; pidx++) {
		peer = &perf->peers[pidx];

		pos += scnprintf(buf + pos, buf_size - pos,
			"Port %d (%d), Global index %d:\n",
			ntb_peer_port_number(perf->ntb, peer->pidx), peer->pidx,
			peer->gidx);

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tLink status: %s\n",
			test_bit(PERF_STS_LNKUP, &peer->sts) ? "up" : "down");

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tOut buffer addr 0x%pK\n", peer->outbuf);

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tOut buff phys addr %pa[p]\n", &peer->out_phys_addr);

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tOut buffer size %pa\n", &peer->outbuf_size);

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tOut buffer xlat 0x%016llx[p]\n", peer->outbuf_xlat);

		if (!peer->inbuf) {
			pos += scnprintf(buf + pos, buf_size - pos,
				"\tIn buffer addr: unallocated\n");
			continue;
		}

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tIn buffer addr 0x%pK\n", peer->inbuf);

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tIn buffer size %pa\n", &peer->inbuf_size);

		pos += scnprintf(buf + pos, buf_size - pos,
			"\tIn buffer xlat %pad[p]\n", &peer->inbuf_xlat);
	}

	ret = simple_read_from_buffer(ubuf, size, offp, buf, pos);
	kfree(buf);

	return ret;
}

static const struct file_operations perf_dbgfs_info = {
	.open = simple_open,
	.read = perf_dbgfs_read_info
};

static ssize_t perf_dbgfs_read_run(struct file *filep, char __user *ubuf,
				   size_t size, loff_t *offp)
{
	struct perf_ctx *perf = filep->private_data;
	ssize_t ret, pos = 0;
	char *buf;

	buf = kmalloc(PERF_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = perf_read_stats(perf, buf, PERF_BUF_LEN, &pos);
	if (ret)
		goto err_free;

	ret = simple_read_from_buffer(ubuf, size, offp, buf, pos);
err_free:
	kfree(buf);

	return ret;
}

static ssize_t perf_dbgfs_write_run(struct file *filep, const char __user *ubuf,
				    size_t size, loff_t *offp)
{
	struct perf_ctx *perf = filep->private_data;
	struct perf_peer *peer;
	int pidx, ret;

	ret = kstrtoint_from_user(ubuf, size, 0, &pidx);
	if (ret)
		return ret;

	if (pidx < 0 || pidx >= perf->pcnt)
		return -EINVAL;

	peer = &perf->peers[pidx];

	ret = perf_submit_test(peer);
	if (ret)
		return ret;

	return size;
}

static const struct file_operations perf_dbgfs_run = {
	.open = simple_open,
	.read = perf_dbgfs_read_run,
	.write = perf_dbgfs_write_run
};

static ssize_t perf_dbgfs_read_tcnt(struct file *filep, char __user *ubuf,
				    size_t size, loff_t *offp)
{
	struct perf_ctx *perf = filep->private_data;
	char buf[8];
	ssize_t pos;

	pos = scnprintf(buf, sizeof(buf), "%hhu\n", perf->tcnt);

	return simple_read_from_buffer(ubuf, size, offp, buf, pos);
}

static ssize_t perf_dbgfs_write_tcnt(struct file *filep,
				     const char __user *ubuf,
				     size_t size, loff_t *offp)
{
	struct perf_ctx *perf = filep->private_data;
	int ret;
	u8 val;

	ret = kstrtou8_from_user(ubuf, size, 0, &val);
	if (ret)
		return ret;

	ret = perf_set_tcnt(perf, val);
	if (ret)
		return ret;

	return size;
}

static const struct file_operations perf_dbgfs_tcnt = {
	.open = simple_open,
	.read = perf_dbgfs_read_tcnt,
	.write = perf_dbgfs_write_tcnt
};

static void perf_setup_dbgfs(struct perf_ctx *perf)
{
	struct pci_dev *pdev = perf->ntb->pdev;

	perf->dbgfs_dir = debugfs_create_dir(pci_name(pdev), perf_dbgfs_topdir);
	if (!perf->dbgfs_dir) {
		dev_warn(&perf->ntb->dev, "DebugFS unsupported\n");
		return;
	}

	debugfs_create_file("info", 0600, perf->dbgfs_dir, perf,
			    &perf_dbgfs_info);

	debugfs_create_file("run", 0600, perf->dbgfs_dir, perf,
			    &perf_dbgfs_run);

	debugfs_create_file("threads_count", 0600, perf->dbgfs_dir, perf,
			    &perf_dbgfs_tcnt);

	/* They are made read-only for test execution safety and integrity */
	debugfs_create_u8("chunk_order", 0500, perf->dbgfs_dir, &chunk_order);

	debugfs_create_u8("total_order", 0500, perf->dbgfs_dir, &total_order);

	debugfs_create_bool("use_dma", 0500, perf->dbgfs_dir, &use_dma);
}

static void perf_clear_dbgfs(struct perf_ctx *perf)
{
	debugfs_remove_recursive(perf->dbgfs_dir);
}

/*==============================================================================
 *                        Basic driver initialization
 *==============================================================================
 */

static struct perf_ctx *perf_create_data(struct ntb_dev *ntb)
{
	struct perf_ctx *perf;

	perf = devm_kzalloc(&ntb->dev, sizeof(*perf), GFP_KERNEL);
	if (!perf)
		return ERR_PTR(-ENOMEM);

	perf->pcnt = ntb_peer_port_count(ntb);
	perf->peers = devm_kcalloc(&ntb->dev, perf->pcnt, sizeof(*perf->peers),
				  GFP_KERNEL);
	if (!perf->peers)
		return ERR_PTR(-ENOMEM);

	perf->ntb = ntb;

	return perf;
}

static int perf_setup_peer_mw(struct perf_peer *peer)
{
	struct perf_ctx *perf = peer->perf;
	phys_addr_t phys_addr;
	int ret;

	/* Get outbound MW parameters and map it */
	ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
				   &peer->outbuf_size);
	if (ret)
		return ret;

	peer->outbuf = devm_ioremap_wc(&perf->ntb->dev, phys_addr,
					peer->outbuf_size);
	if (!peer->outbuf)
		return -ENOMEM;

	peer->out_phys_addr = phys_addr;

	if (max_mw_size && peer->outbuf_size > max_mw_size) {
		peer->outbuf_size = max_mw_size;
		dev_warn(&peer->perf->ntb->dev,
			"Peer %d outbuf reduced to %pa\n", peer->pidx,
			&peer->outbuf_size);
	}

	return 0;
}

static int perf_init_peers(struct perf_ctx *perf)
{
	struct perf_peer *peer;
	int pidx, lport, ret;

	lport = ntb_port_number(perf->ntb);
	perf->gidx = -1;
	for (pidx = 0; pidx < perf->pcnt; pidx++) {
		peer = &perf->peers[pidx];

		peer->perf = perf;
		peer->pidx = pidx;
		if (lport < ntb_peer_port_number(perf->ntb, pidx)) {
			if (perf->gidx == -1)
				perf->gidx = pidx;
			peer->gidx = pidx + 1;
		} else {
			peer->gidx = pidx;
		}
		INIT_WORK(&peer->service, perf_service_work);
		init_completion(&peer->init_comp);
	}
	if (perf->gidx == -1)
		perf->gidx = pidx;

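	/*
	 * Example (assuming the peers enumerate in increasing port order):
	 * with local port 2 and peer ports 0, 1 and 3, the peers get
	 * global indexes 0, 1 and 3 respectively, while the local device
	 * takes the gap index 2.
	 */
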
	/*
	 * Hardware with only two ports may not have unique port
	 * numbers. In this case, the gidxs should all be zero.
	 */
	if (perf->pcnt == 1 && ntb_port_number(perf->ntb) == 0 &&
	    ntb_peer_port_number(perf->ntb, 0) == 0) {
		perf->gidx = 0;
		perf->peers[0].gidx = 0;
	}

	for (pidx = 0; pidx < perf->pcnt; pidx++) {
		ret = perf_setup_peer_mw(&perf->peers[pidx]);
		if (ret)
			return ret;
	}

	dev_dbg(&perf->ntb->dev, "Global port index %d\n", perf->gidx);

	return 0;
}

static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct perf_ctx *perf;
	int ret;

	perf = perf_create_data(ntb);
	if (IS_ERR(perf))
		return PTR_ERR(perf);

	ret = perf_init_peers(perf);
	if (ret)
		return ret;

	perf_init_threads(perf);

	ret = perf_init_service(perf);
	if (ret)
		return ret;

	ret = perf_enable_service(perf);
	if (ret)
		return ret;

	perf_setup_dbgfs(perf);

	return 0;
}

static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct perf_ctx *perf = ntb->ctx;

	perf_clear_dbgfs(perf);

	perf_disable_service(perf);

	perf_clear_threads(perf);
}

static struct ntb_client perf_client = {
	.ops = {
		.probe = perf_probe,
		.remove = perf_remove
	}
};

static int __init perf_init(void)
{
	int ret;

	if (chunk_order > MAX_CHUNK_ORDER) {
		chunk_order = MAX_CHUNK_ORDER;
		pr_info("Chunk order reduced to %hhu\n", chunk_order);
	}

	if (total_order < chunk_order) {
		total_order = chunk_order;
		pr_info("Total data order reduced to %hhu\n", total_order);
	}

	perf_wq = alloc_workqueue("perf_wq", WQ_UNBOUND | WQ_SYSFS, 0);
	if (!perf_wq)
		return -ENOMEM;

	if (debugfs_initialized())
		perf_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	ret = ntb_register_client(&perf_client);
	if (ret) {
		debugfs_remove_recursive(perf_dbgfs_topdir);
		destroy_workqueue(perf_wq);
	}

	return ret;
}
module_init(perf_init);

static void __exit perf_exit(void)
{
	ntb_unregister_client(&perf_client);
	debugfs_remove_recursive(perf_dbgfs_topdir);
	destroy_workqueue(perf_wq);
}
module_exit(perf_exit);