cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

msu.c (49688B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Intel(R) Trace Hub Memory Storage Unit
      4 *
      5 * Copyright (C) 2014-2015 Intel Corporation.
      6 */
      7
      8#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
      9
     10#include <linux/types.h>
     11#include <linux/module.h>
     12#include <linux/device.h>
     13#include <linux/uaccess.h>
     14#include <linux/sizes.h>
     15#include <linux/printk.h>
     16#include <linux/slab.h>
     17#include <linux/mm.h>
     18#include <linux/fs.h>
     19#include <linux/io.h>
     20#include <linux/workqueue.h>
     21#include <linux/dma-mapping.h>
     22
     23#ifdef CONFIG_X86
     24#include <asm/set_memory.h>
     25#endif
     26
     27#include <linux/intel_th.h>
     28#include "intel_th.h"
     29#include "msu.h"
     30
     31#define msc_dev(x) (&(x)->thdev->dev)
     32
     33/*
     34 * Lockout state transitions:
     35 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
     36 *                   \-----------/
     37 * WIN_READY:	window can be used by HW
     38 * WIN_INUSE:	window is in use
     39 * WIN_LOCKED:	window is filled up and is being processed by the buffer
     40 * handling code
     41 *
     42 * All state transitions happen automatically, except for the LOCKED->READY,
     43 * which needs to be signalled by the buffer code by calling
     44 * intel_th_msc_window_unlock().
     45 *
     46 * When the interrupt handler has to switch to the next window, it checks
     47 * whether it's READY, and if it is, it performs the switch and tracing
     48 * continues. If it's LOCKED, it stops the trace.
     49 */
     50enum lockout_state {
     51	WIN_READY = 0,
     52	WIN_INUSE,
     53	WIN_LOCKED
     54};
     55
     56/**
     57 * struct msc_window - multiblock mode window descriptor
     58 * @entry:	window list linkage (msc::win_list)
     59 * @pgoff:	page offset into the buffer that this window starts at
      60 * @lockout:	lockout state, see comment above
     61 * @lo_lock:	lockout state serialization
     62 * @nr_blocks:	number of blocks (pages) in this window
     63 * @nr_segs:	number of segments in this window (<= @nr_blocks)
         * @msc:	MSC device that this window belongs to
      64 * @_sgt:	array of block descriptors
      65 * @sgt:	points to @_sgt, or to the buffer sink's own sg_table
     66 */
     67struct msc_window {
     68	struct list_head	entry;
     69	unsigned long		pgoff;
     70	enum lockout_state	lockout;
     71	spinlock_t		lo_lock;
     72	unsigned int		nr_blocks;
     73	unsigned int		nr_segs;
     74	struct msc		*msc;
     75	struct sg_table		_sgt;
     76	struct sg_table		*sgt;
     77};
     78
     79/**
     80 * struct msc_iter - iterator for msc buffer
     81 * @entry:		msc::iter_list linkage
     82 * @msc:		pointer to the MSC device
     83 * @start_win:		oldest window
     84 * @win:		current window
     85 * @offset:		current logical offset into the buffer
     86 * @start_block:	oldest block in the window
      87 * @block:		current block in the window
     88 * @block_off:		offset into current block
      89 * @wrap_count:		remaining passes over the wrapped block, which
         *			holds both the oldest and the newest data
     90 * @eof:		end of buffer reached
     91 */
     92struct msc_iter {
     93	struct list_head	entry;
     94	struct msc		*msc;
     95	struct msc_window	*start_win;
     96	struct msc_window	*win;
     97	unsigned long		offset;
     98	struct scatterlist	*start_block;
     99	struct scatterlist	*block;
    100	unsigned int		block_off;
    101	unsigned int		wrap_count;
    102	unsigned int		eof;
    103};
    104
    105/**
    106 * struct msc - MSC device representation
     107 * @reg_base:		register window base address
         * @msu_base:		MSU container's register base address
     108 * @thdev:		intel_th_device pointer
     109 * @mbuf:		MSU buffer, if assigned
     110 * @mbuf_priv:		MSU buffer's private data, if @mbuf
         * @work:		a work to deactivate the trace outside of interrupt context
     111 * @win_list:		list of windows in multiblock mode
     112 * @single_sgt:		single mode buffer
     113 * @cur_win:		current window
         * @switch_on_unlock:	window to switch to once it gets unlocked
     114 * @nr_pages:		total number of pages allocated for this buffer
     115 * @single_sz:		amount of data in single mode
     116 * @single_wrap:	single mode wrap occurred
     117 * @base:		buffer's base pointer
     118 * @base_addr:		buffer's base address
         * @orig_addr:		MSC0BAR value saved by msc_configure(), restored on disable
         * @orig_sz:		MSC0SIZE value saved by msc_configure(), restored on disable
     119 * @user_count:		number of users of the buffer
     120 * @mmap_count:		number of mappings
     121 * @buf_mutex:		mutex to serialize access to buffer-related bits
     122 * @iter_list:		list of open file descriptors' iterators
         * @stop_on_full:	stop the trace when the next window is locked, instead of
         *			deferring the window switch
     123 * @enabled:		MSC is enabled
     124 * @wrap:		wrapping is enabled
         * @do_irq:		IRQ resource is available, handle interrupts
         * @multi_is_broken:	multiblock mode is broken on this device, use single mode only
     125 * @mode:		MSC operating mode
     126 * @burst_len:		write burst length
     127 * @index:		number of this MSC in the MSU
     128 */
    129struct msc {
    130	void __iomem		*reg_base;
    131	void __iomem		*msu_base;
    132	struct intel_th_device	*thdev;
    133
    134	const struct msu_buffer	*mbuf;
    135	void			*mbuf_priv;
    136
    137	struct work_struct	work;
    138	struct list_head	win_list;
    139	struct sg_table		single_sgt;
    140	struct msc_window	*cur_win;
    141	struct msc_window	*switch_on_unlock;
    142	unsigned long		nr_pages;
    143	unsigned long		single_sz;
    144	unsigned int		single_wrap : 1;
    145	void			*base;
    146	dma_addr_t		base_addr;
    147	u32			orig_addr;
    148	u32			orig_sz;
    149
    150	/* <0: no buffer, 0: no users, >0: active users */
    151	atomic_t		user_count;
    152
    153	atomic_t		mmap_count;
    154	struct mutex		buf_mutex;
    155
    156	struct list_head	iter_list;
    157
    158	bool			stop_on_full;
    159
    160	/* config */
    161	unsigned int		enabled : 1,
    162				wrap	: 1,
    163				do_irq	: 1,
    164				multi_is_broken : 1;
    165	unsigned int		mode;
    166	unsigned int		burst_len;
    167	unsigned int		index;
    168};
    169
    170static LIST_HEAD(msu_buffer_list);
    171static DEFINE_MUTEX(msu_buffer_mutex);
    172
    173/**
    174 * struct msu_buffer_entry - internal MSU buffer bookkeeping
    175 * @entry:	link to msu_buffer_list
    176 * @mbuf:	MSU buffer object
    177 * @owner:	module that provides this MSU buffer
    178 */
    179struct msu_buffer_entry {
    180	struct list_head	entry;
    181	const struct msu_buffer	*mbuf;
    182	struct module		*owner;
    183};
    184
    185static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
    186{
    187	struct msu_buffer_entry *mbe;
    188
    189	lockdep_assert_held(&msu_buffer_mutex);
    190
    191	list_for_each_entry(mbe, &msu_buffer_list, entry) {
    192		if (!strcmp(mbe->mbuf->name, name))
    193			return mbe;
    194	}
    195
    196	return NULL;
    197}
    198
    199static const struct msu_buffer *
    200msu_buffer_get(const char *name)
    201{
    202	struct msu_buffer_entry *mbe;
    203
    204	mutex_lock(&msu_buffer_mutex);
    205	mbe = __msu_buffer_entry_find(name);
    206	if (mbe && !try_module_get(mbe->owner))
    207		mbe = NULL;
    208	mutex_unlock(&msu_buffer_mutex);
    209
    210	return mbe ? mbe->mbuf : NULL;
    211}
    212
    213static void msu_buffer_put(const struct msu_buffer *mbuf)
    214{
    215	struct msu_buffer_entry *mbe;
    216
    217	mutex_lock(&msu_buffer_mutex);
    218	mbe = __msu_buffer_entry_find(mbuf->name);
    219	if (mbe)
    220		module_put(mbe->owner);
    221	mutex_unlock(&msu_buffer_mutex);
    222}
    223
    224int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
    225				 struct module *owner)
    226{
    227	struct msu_buffer_entry *mbe;
    228	int ret = 0;
    229
    230	mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
    231	if (!mbe)
    232		return -ENOMEM;
    233
    234	mutex_lock(&msu_buffer_mutex);
    235	if (__msu_buffer_entry_find(mbuf->name)) {
    236		ret = -EEXIST;
    237		kfree(mbe);
    238		goto unlock;
    239	}
    240
    241	mbe->mbuf = mbuf;
    242	mbe->owner = owner;
    243	list_add_tail(&mbe->entry, &msu_buffer_list);
    244unlock:
    245	mutex_unlock(&msu_buffer_mutex);
    246
    247	return ret;
    248}
    249EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);
    250
    251void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
    252{
    253	struct msu_buffer_entry *mbe;
    254
    255	mutex_lock(&msu_buffer_mutex);
    256	mbe = __msu_buffer_entry_find(mbuf->name);
    257	if (mbe) {
    258		list_del(&mbe->entry);
    259		kfree(mbe);
    260	}
    261	mutex_unlock(&msu_buffer_mutex);
    262}
    263EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
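
        /*
         * Example (editor's illustrative sketch, not part of this driver):
         * a buffer sink module registers a struct msu_buffer with the MSU
         * core via the two calls above. The "my_sink" driver and its
         * private struct below are hypothetical; the ->assign()/->unassign()
         * callback signatures are the ones this file invokes in mode_store()
         * and msc_buffer_unassign().
         *
         *	static void *my_assign(struct device *dev, int *mode)
         *	{
         *		return kzalloc(sizeof(struct my_sink_priv), GFP_KERNEL);
         *	}
         *
         *	static void my_unassign(void *priv)
         *	{
         *		kfree(priv);
         *	}
         *
         *	static const struct msu_buffer my_mbuf = {
         *		.name		= "my_sink",
         *		.assign		= my_assign,
         *		.unassign	= my_unassign,
         *	};
         *
         * The module then calls intel_th_msu_buffer_register(&my_mbuf,
         * THIS_MODULE) on load and intel_th_msu_buffer_unregister(&my_mbuf)
         * on unload; writing "my_sink" to the mode attribute selects it.
         */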
    264
    265static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
    266{
    267	/* header hasn't been written */
    268	if (!bdesc->valid_dw)
    269		return true;
    270
    271	/* valid_dw includes the header */
    272	if (!msc_data_sz(bdesc))
    273		return true;
    274
    275	return false;
    276}
    277
    278static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
    279{
    280	return win->sgt->sgl;
    281}
    282
    283static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
    284{
    285	return sg_virt(msc_win_base_sg(win));
    286}
    287
    288static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
    289{
    290	return sg_dma_address(msc_win_base_sg(win));
    291}
    292
    293static inline unsigned long
    294msc_win_base_pfn(struct msc_window *win)
    295{
    296	return PFN_DOWN(msc_win_base_dma(win));
    297}
    298
    299/**
    300 * msc_is_last_win() - check if a window is the last one for a given MSC
    301 * @win:	window
    302 * Return:	true if @win is the last window in MSC's multiblock buffer
    303 */
    304static inline bool msc_is_last_win(struct msc_window *win)
    305{
    306	return win->entry.next == &win->msc->win_list;
    307}
    308
    309/**
    310 * msc_next_window() - return next window in the multiblock buffer
    311 * @win:	current window
    312 *
    313 * Return:	window following the current one
    314 */
    315static struct msc_window *msc_next_window(struct msc_window *win)
    316{
    317	if (msc_is_last_win(win))
    318		return list_first_entry(&win->msc->win_list, struct msc_window,
    319					entry);
    320
    321	return list_next_entry(win, entry);
    322}
    323
    324static size_t msc_win_total_sz(struct msc_window *win)
    325{
    326	struct scatterlist *sg;
    327	unsigned int blk;
    328	size_t size = 0;
    329
    330	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
    331		struct msc_block_desc *bdesc = sg_virt(sg);
    332
    333		if (msc_block_wrapped(bdesc))
    334			return (size_t)win->nr_blocks << PAGE_SHIFT;
    335
    336		size += msc_total_sz(bdesc);
    337		if (msc_block_last_written(bdesc))
    338			break;
    339	}
    340
    341	return size;
    342}
    343
    344/**
    345 * msc_find_window() - find a window matching a given sg_table
    346 * @msc:	MSC device
    347 * @sgt:	SG table of the window
    348 * @nonempty:	skip over empty windows
    349 *
    350 * Return:	MSC window structure pointer or NULL if the window
    351 *		could not be found.
    352 */
    353static struct msc_window *
    354msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
    355{
    356	struct msc_window *win;
    357	unsigned int found = 0;
    358
    359	if (list_empty(&msc->win_list))
    360		return NULL;
    361
    362	/*
    363	 * we might need a radix tree for this, depending on how
    364	 * many windows a typical user would allocate; ideally it's
    365	 * something like 2, in which case we're good
    366	 */
    367	list_for_each_entry(win, &msc->win_list, entry) {
    368		if (win->sgt == sgt)
    369			found++;
    370
    371		/* skip the empty ones */
    372		if (nonempty && msc_block_is_empty(msc_win_base(win)))
    373			continue;
    374
    375		if (found)
    376			return win;
    377	}
    378
    379	return NULL;
    380}
    381
    382/**
    383 * msc_oldest_window() - locate the window with oldest data
    384 * @msc:	MSC device
    385 *
    386 * This should only be used in multiblock mode. Caller should hold the
    387 * msc::user_count reference.
    388 *
    389 * Return:	the oldest window with valid data
    390 */
    391static struct msc_window *msc_oldest_window(struct msc *msc)
    392{
    393	struct msc_window *win;
    394
    395	if (list_empty(&msc->win_list))
    396		return NULL;
    397
    398	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
    399	if (win)
    400		return win;
    401
    402	return list_first_entry(&msc->win_list, struct msc_window, entry);
    403}
    404
    405/**
    406 * msc_win_oldest_sg() - locate the oldest block in a given window
    407 * @win:	window to look at
    408 *
     409 * Return:	SG element of the block with the oldest data
    410 */
    411static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
    412{
    413	unsigned int blk;
    414	struct scatterlist *sg;
    415	struct msc_block_desc *bdesc = msc_win_base(win);
    416
    417	/* without wrapping, first block is the oldest */
    418	if (!msc_block_wrapped(bdesc))
    419		return msc_win_base_sg(win);
    420
    421	/*
    422	 * with wrapping, last written block contains both the newest and the
    423	 * oldest data for this window.
    424	 */
    425	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
    426		struct msc_block_desc *bdesc = sg_virt(sg);
    427
    428		if (msc_block_last_written(bdesc))
    429			return sg;
    430	}
    431
    432	return msc_win_base_sg(win);
    433}
    434
    435static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
    436{
    437	return sg_virt(iter->block);
    438}
    439
    440static struct msc_iter *msc_iter_install(struct msc *msc)
    441{
    442	struct msc_iter *iter;
    443
    444	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
    445	if (!iter)
    446		return ERR_PTR(-ENOMEM);
    447
    448	mutex_lock(&msc->buf_mutex);
    449
    450	/*
    451	 * Reading and tracing are mutually exclusive; if msc is
    452	 * enabled, open() will fail; otherwise existing readers
    453	 * will prevent enabling the msc and the rest of fops don't
    454	 * need to worry about it.
    455	 */
    456	if (msc->enabled) {
    457		kfree(iter);
    458		iter = ERR_PTR(-EBUSY);
    459		goto unlock;
    460	}
    461
    462	iter->msc = msc;
    463
    464	list_add_tail(&iter->entry, &msc->iter_list);
    465unlock:
    466	mutex_unlock(&msc->buf_mutex);
    467
    468	return iter;
    469}
    470
    471static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
    472{
    473	mutex_lock(&msc->buf_mutex);
    474	list_del(&iter->entry);
    475	mutex_unlock(&msc->buf_mutex);
    476
    477	kfree(iter);
    478}
    479
    480static void msc_iter_block_start(struct msc_iter *iter)
    481{
    482	if (iter->start_block)
    483		return;
    484
    485	iter->start_block = msc_win_oldest_sg(iter->win);
    486	iter->block = iter->start_block;
    487	iter->wrap_count = 0;
    488
    489	/*
    490	 * start with the block with oldest data; if data has wrapped
    491	 * in this window, it should be in this block
    492	 */
    493	if (msc_block_wrapped(msc_iter_bdesc(iter)))
    494		iter->wrap_count = 2;
    496}
    497
    498static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
    499{
    500	/* already started, nothing to do */
    501	if (iter->start_win)
    502		return 0;
    503
    504	iter->start_win = msc_oldest_window(msc);
    505	if (!iter->start_win)
    506		return -EINVAL;
    507
    508	iter->win = iter->start_win;
    509	iter->start_block = NULL;
    510
    511	msc_iter_block_start(iter);
    512
    513	return 0;
    514}
    515
    516static int msc_iter_win_advance(struct msc_iter *iter)
    517{
    518	iter->win = msc_next_window(iter->win);
    519	iter->start_block = NULL;
    520
    521	if (iter->win == iter->start_win) {
    522		iter->eof++;
    523		return 1;
    524	}
    525
    526	msc_iter_block_start(iter);
    527
    528	return 0;
    529}
    530
    531static int msc_iter_block_advance(struct msc_iter *iter)
    532{
    533	iter->block_off = 0;
    534
    535	/* wrapping */
    536	if (iter->wrap_count && iter->block == iter->start_block) {
    537		iter->wrap_count--;
    538		if (!iter->wrap_count)
    539			/* copied newest data from the wrapped block */
    540			return msc_iter_win_advance(iter);
    541	}
    542
    543	/* no wrapping, check for last written block */
    544	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
    545		/* copied newest data for the window */
    546		return msc_iter_win_advance(iter);
    547
    548	/* block advance */
    549	if (sg_is_last(iter->block))
    550		iter->block = msc_win_base_sg(iter->win);
    551	else
    552		iter->block = sg_next(iter->block);
    553
    554	/* no wrapping, sanity check in case there is no last written block */
    555	if (!iter->wrap_count && iter->block == iter->start_block)
    556		return msc_iter_win_advance(iter);
    557
    558	return 0;
    559}
    560
    561/**
    562 * msc_buffer_iterate() - go through multiblock buffer's data
    563 * @iter:	iterator structure
    564 * @size:	amount of data to scan
    565 * @data:	callback's private data
    566 * @fn:		iterator callback
    567 *
    568 * This will start at the window which will be written to next (containing
    569 * the oldest data) and work its way to the current window, calling @fn
    570 * for each chunk of data as it goes.
    571 *
    572 * Caller should have msc::user_count reference to make sure the buffer
    573 * doesn't disappear from under us.
    574 *
    575 * Return:	amount of data actually scanned.
    576 */
    577static ssize_t
    578msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
    579		   unsigned long (*fn)(void *, void *, size_t))
    580{
    581	struct msc *msc = iter->msc;
    582	size_t len = size;
    583	unsigned int advance;
    584
    585	if (iter->eof)
    586		return 0;
    587
    588	/* start with the oldest window */
    589	if (msc_iter_win_start(iter, msc))
    590		return 0;
    591
    592	do {
    593		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
    594		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
    595		size_t tocopy = data_bytes, copied = 0;
    596		size_t remaining = 0;
    597
    598		advance = 1;
    599
    600		/*
    601		 * If block wrapping happened, we need to visit the last block
    602		 * twice, because it contains both the oldest and the newest
    603		 * data in this window.
    604		 *
    605		 * First time (wrap_count==2), in the very beginning, to collect
    606		 * the oldest data, which is in the range
    607		 * (data_bytes..DATA_IN_PAGE).
    608		 *
    609		 * Second time (wrap_count==1), it's just like any other block,
    610		 * containing data in the range of [MSC_BDESC..data_bytes].
    611		 */
    612		if (iter->block == iter->start_block && iter->wrap_count == 2) {
    613			tocopy = DATA_IN_PAGE - data_bytes;
    614			src += data_bytes;
    615		}
    616
    617		if (!tocopy)
    618			goto next_block;
    619
    620		tocopy -= iter->block_off;
    621		src += iter->block_off;
    622
    623		if (len < tocopy) {
    624			tocopy = len;
    625			advance = 0;
    626		}
    627
    628		remaining = fn(data, src, tocopy);
    629
    630		if (remaining)
    631			advance = 0;
    632
    633		copied = tocopy - remaining;
    634		len -= copied;
    635		iter->block_off += copied;
    636		iter->offset += copied;
    637
    638		if (!advance)
    639			break;
    640
    641next_block:
    642		if (msc_iter_block_advance(iter))
    643			break;
    644
    645	} while (len);
    646
    647	return size - len;
    648}
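
        /*
         * Example (editor's illustrative sketch): a minimal callback for
         * msc_buffer_iterate() that drains trace data into a preallocated
         * kernel buffer. Like copy_to_user(), the callback must return the
         * number of bytes it could NOT consume (0 on complete success);
         * the context struct and names here are hypothetical.
         *
         *	struct drain_ctx { void *dst; size_t pos; };
         *
         *	static unsigned long drain_fn(void *data, void *src, size_t len)
         *	{
         *		struct drain_ctx *d = data;
         *
         *		memcpy(d->dst + d->pos, src, len);
         *		d->pos += len;
         *		return 0;	(everything consumed)
         *	}
         *
         * and is invoked as msc_buffer_iterate(iter, avail, &ctx, drain_fn),
         * just like msc_win_to_user() is in intel_th_msc_read() below.
         */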
    649
    650/**
    651 * msc_buffer_clear_hw_header() - clear hw header for multiblock
    652 * @msc:	MSC device
    653 */
    654static void msc_buffer_clear_hw_header(struct msc *msc)
    655{
    656	struct msc_window *win;
    657	struct scatterlist *sg;
    658
    659	list_for_each_entry(win, &msc->win_list, entry) {
    660		unsigned int blk;
    661
    662		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
    663			struct msc_block_desc *bdesc = sg_virt(sg);
    664
    665			memset_startat(bdesc, 0, hw_tag);
    666		}
    667	}
    668}
    669
    670static int intel_th_msu_init(struct msc *msc)
    671{
    672	u32 mintctl, msusts;
    673
    674	if (!msc->do_irq)
    675		return 0;
    676
    677	if (!msc->mbuf)
    678		return 0;
    679
    680	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
    681	mintctl |= msc->index ? M1BLIE : M0BLIE;
    682	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
    683	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
    684		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
    685		msc->do_irq = 0;
    686		return 0;
    687	}
    688
    689	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
    690	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
    691
    692	return 0;
    693}
    694
    695static void intel_th_msu_deinit(struct msc *msc)
    696{
    697	u32 mintctl;
    698
    699	if (!msc->do_irq)
    700		return;
    701
    702	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
    703	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
    704	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
    705}
    706
    707static int msc_win_set_lockout(struct msc_window *win,
    708			       enum lockout_state expect,
    709			       enum lockout_state new)
    710{
    711	enum lockout_state old;
    712	unsigned long flags;
    713	int ret = 0;
    714
    715	if (!win->msc->mbuf)
    716		return 0;
    717
    718	spin_lock_irqsave(&win->lo_lock, flags);
    719	old = win->lockout;
    720
    721	if (old != expect) {
    722		ret = -EINVAL;
    723		goto unlock;
    724	}
    725
    726	win->lockout = new;
    727
    728	if (old == expect && new == WIN_LOCKED)
    729		atomic_inc(&win->msc->user_count);
    730	else if (old == expect && old == WIN_LOCKED)
    731		atomic_dec(&win->msc->user_count);
    732
    733unlock:
    734	spin_unlock_irqrestore(&win->lo_lock, flags);
    735
    736	if (ret) {
    737		if (expect == WIN_READY && old == WIN_LOCKED)
    738			return -EBUSY;
    739
    740		/* from intel_th_msc_window_unlock(), don't warn if not locked */
    741		if (expect == WIN_LOCKED && old == new)
    742			return 0;
    743
    744		dev_warn_ratelimited(msc_dev(win->msc),
    745				     "expected lockout state %d, got %d\n",
    746				     expect, old);
    747	}
    748
    749	return ret;
    750}
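
        /*
         * Example (editor's note): the transitions this driver actually
         * requests through msc_win_set_lockout():
         *
         *	WIN_READY  -> WIN_INUSE   in msc_configure() and in the
         *				  interrupt handler, before switching
         *				  to the next window;
         *	WIN_INUSE  -> WIN_LOCKED  in msc_disable() and in the
         *				  interrupt handler, once a window
         *				  fills up;
         *	WIN_LOCKED -> WIN_READY   in intel_th_msc_window_unlock(),
         *				  when the buffer sink is done with it.
         *
         * A nonzero return from the READY -> INUSE attempt means the window
         * is still locked by the buffer sink and cannot be used yet.
         */
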
    751/**
    752 * msc_configure() - set up MSC hardware
    753 * @msc:	the MSC device to configure
    754 *
    755 * Program storage mode, wrapping, burst length and trace buffer address
    756 * into a given MSC. Then, enable tracing and set msc::enabled.
    757 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
    758 */
    759static int msc_configure(struct msc *msc)
    760{
    761	u32 reg;
    762
    763	lockdep_assert_held(&msc->buf_mutex);
    764
    765	if (msc->mode > MSC_MODE_MULTI)
    766		return -EINVAL;
    767
    768	if (msc->mode == MSC_MODE_MULTI) {
    769		if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
    770			return -EBUSY;
    771
    772		msc_buffer_clear_hw_header(msc);
    773	}
    774
    775	msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
    776	msc->orig_sz   = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);
    777
    778	reg = msc->base_addr >> PAGE_SHIFT;
    779	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);
    780
    781	if (msc->mode == MSC_MODE_SINGLE) {
    782		reg = msc->nr_pages;
    783		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
    784	}
    785
    786	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
    787	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);
    788
    789	reg |= MSC_EN;
    790	reg |= msc->mode << __ffs(MSC_MODE);
    791	reg |= msc->burst_len << __ffs(MSC_LEN);
    792
    793	if (msc->wrap)
    794		reg |= MSC_WRAPEN;
    795
    796	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
    797
    798	intel_th_msu_init(msc);
    799
    800	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
    801	intel_th_trace_enable(msc->thdev);
    802	msc->enabled = 1;
    803
    804	if (msc->mbuf && msc->mbuf->activate)
    805		msc->mbuf->activate(msc->mbuf_priv);
    806
    807	return 0;
    808}
    809
    810/**
    811 * msc_disable() - disable MSC hardware
    812 * @msc:	MSC device to disable
    813 *
    814 * If @msc is enabled, disable tracing on the switch and then disable MSC
    815 * storage. Caller must hold msc::buf_mutex.
    816 */
    817static void msc_disable(struct msc *msc)
    818{
    819	struct msc_window *win = msc->cur_win;
    820	u32 reg;
    821
    822	lockdep_assert_held(&msc->buf_mutex);
    823
    824	if (msc->mode == MSC_MODE_MULTI)
    825		msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
    826
    827	if (msc->mbuf && msc->mbuf->deactivate)
    828		msc->mbuf->deactivate(msc->mbuf_priv);
    829	intel_th_msu_deinit(msc);
    830	intel_th_trace_disable(msc->thdev);
    831
    832	if (msc->mode == MSC_MODE_SINGLE) {
    833		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
    834		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);
    835
    836		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
    837		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
    838		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
    839			reg, msc->single_sz, msc->single_wrap);
    840	}
    841
    842	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
    843	reg &= ~MSC_EN;
    844	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
    845
    846	if (msc->mbuf && msc->mbuf->ready)
    847		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
    848				 msc_win_total_sz(win));
    849
    850	msc->enabled = 0;
    851
    852	iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
    853	iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);
    854
    855	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
    856		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));
    857
    858	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
    859	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
    860
    861	reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
    862	reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
    863	iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
    864}
    865
    866static int intel_th_msc_activate(struct intel_th_device *thdev)
    867{
    868	struct msc *msc = dev_get_drvdata(&thdev->dev);
    869	int ret = -EBUSY;
    870
    871	if (!atomic_inc_unless_negative(&msc->user_count))
    872		return -ENODEV;
    873
    874	mutex_lock(&msc->buf_mutex);
    875
    876	/* if there are readers, refuse */
    877	if (list_empty(&msc->iter_list))
    878		ret = msc_configure(msc);
    879
    880	mutex_unlock(&msc->buf_mutex);
    881
    882	if (ret)
    883		atomic_dec(&msc->user_count);
    884
    885	return ret;
    886}
    887
    888static void intel_th_msc_deactivate(struct intel_th_device *thdev)
    889{
    890	struct msc *msc = dev_get_drvdata(&thdev->dev);
    891
    892	mutex_lock(&msc->buf_mutex);
    893	if (msc->enabled) {
    894		msc_disable(msc);
    895		atomic_dec(&msc->user_count);
    896	}
    897	mutex_unlock(&msc->buf_mutex);
    898}
    899
    900/**
    901 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
    902 * @msc:	MSC device
    903 * @size:	allocation size in bytes
    904 *
    905 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
    906 * caller is expected to hold it.
    907 *
    908 * Return:	0 on success, -errno otherwise.
    909 */
    910static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
    911{
    912	unsigned long nr_pages = size >> PAGE_SHIFT;
    913	unsigned int order = get_order(size);
    914	struct page *page;
    915	int ret;
    916
    917	if (!size)
    918		return 0;
    919
    920	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
    921	if (ret)
    922		goto err_out;
    923
    924	ret = -ENOMEM;
    925	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
    926	if (!page)
    927		goto err_free_sgt;
    928
    929	split_page(page, order);
    930	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
    931
    932	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
    933			 DMA_FROM_DEVICE);
    934	if (ret < 0)
    935		goto err_free_pages;
    936
    937	msc->nr_pages = nr_pages;
    938	msc->base = page_address(page);
    939	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
    940
    941	return 0;
    942
    943err_free_pages:
    944	__free_pages(page, order);
    945
    946err_free_sgt:
    947	sg_free_table(&msc->single_sgt);
    948
    949err_out:
    950	return ret;
    951}
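
        /*
         * Example (editor's note): for a 2 MiB single-mode buffer with
         * 4 KiB pages, nr_pages = 512 and order = get_order(SZ_2M) = 9, so
         * alloc_pages() returns one physically contiguous 512-page chunk;
         * split_page() then makes each page individually refcounted, so the
         * pages can be mmap()ed and later freed one by one in
         * msc_buffer_contig_free().
         */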
    952
    953/**
    954 * msc_buffer_contig_free() - free a contiguous buffer
    955 * @msc:	MSC configured in SINGLE mode
    956 */
    957static void msc_buffer_contig_free(struct msc *msc)
    958{
    959	unsigned long off;
    960
    961	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
    962		     1, DMA_FROM_DEVICE);
    963	sg_free_table(&msc->single_sgt);
    964
    965	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
    966		struct page *page = virt_to_page(msc->base + off);
    967
    968		page->mapping = NULL;
    969		__free_page(page);
    970	}
    971
    972	msc->nr_pages = 0;
    973}
    974
    975/**
    976 * msc_buffer_contig_get_page() - find a page at a given offset
    977 * @msc:	MSC configured in SINGLE mode
    978 * @pgoff:	page offset
    979 *
    980 * Return:	page, if @pgoff is within the range, NULL otherwise.
    981 */
    982static struct page *msc_buffer_contig_get_page(struct msc *msc,
    983					       unsigned long pgoff)
    984{
    985	if (pgoff >= msc->nr_pages)
    986		return NULL;
    987
    988	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
    989}
    990
    991static int __msc_buffer_win_alloc(struct msc_window *win,
    992				  unsigned int nr_segs)
    993{
    994	struct scatterlist *sg_ptr;
    995	void *block;
    996	int i, ret;
    997
    998	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
    999	if (ret)
   1000		return -ENOMEM;
   1001
   1002	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
   1003		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
   1004					  PAGE_SIZE, &sg_dma_address(sg_ptr),
   1005					  GFP_KERNEL);
   1006		if (!block)
   1007			goto err_nomem;
   1008
   1009		sg_set_buf(sg_ptr, block, PAGE_SIZE);
   1010	}
   1011
   1012	return nr_segs;
   1013
   1014err_nomem:
   1015	for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
   1016		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
   1017				  sg_virt(sg_ptr), sg_dma_address(sg_ptr));
   1018
   1019	sg_free_table(win->sgt);
   1020
   1021	return -ENOMEM;
   1022}
   1023
   1024#ifdef CONFIG_X86
   1025static void msc_buffer_set_uc(struct msc *msc)
   1026{
   1027	struct scatterlist *sg_ptr;
   1028	struct msc_window *win;
   1029	int i;
   1030
   1031	if (msc->mode == MSC_MODE_SINGLE) {
   1032		set_memory_uc((unsigned long)msc->base, msc->nr_pages);
   1033		return;
   1034	}
   1035
   1036	list_for_each_entry(win, &msc->win_list, entry) {
   1037		for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
   1038			/* Set the page as uncached */
   1039			set_memory_uc((unsigned long)sg_virt(sg_ptr),
   1040					PFN_DOWN(sg_ptr->length));
   1041		}
   1042	}
   1043}
   1044
   1045static void msc_buffer_set_wb(struct msc *msc)
   1046{
   1047	struct scatterlist *sg_ptr;
   1048	struct msc_window *win;
   1049	int i;
   1050
   1051	if (msc->mode == MSC_MODE_SINGLE) {
   1052		set_memory_wb((unsigned long)msc->base, msc->nr_pages);
   1053		return;
   1054	}
   1055
   1056	list_for_each_entry(win, &msc->win_list, entry) {
   1057		for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
   1058			/* Reset the page to write-back */
   1059			set_memory_wb((unsigned long)sg_virt(sg_ptr),
   1060					PFN_DOWN(sg_ptr->length));
   1061		}
   1062	}
   1063}
   1064#else /* !X86 */
   1065static inline void
   1066msc_buffer_set_uc(struct msc *msc) {}
   1067static inline void msc_buffer_set_wb(struct msc *msc) {}
   1068#endif /* CONFIG_X86 */
   1069
   1070/**
    1071 * msc_buffer_win_alloc() - allocate a window for multiblock mode
   1072 * @msc:	MSC device
   1073 * @nr_blocks:	number of pages in this window
   1074 *
   1075 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
   1076 * to serialize, so the caller is expected to hold it.
   1077 *
   1078 * Return:	0 on success, -errno otherwise.
   1079 */
   1080static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
   1081{
   1082	struct msc_window *win;
   1083	int ret = -ENOMEM;
   1084
   1085	if (!nr_blocks)
   1086		return 0;
   1087
   1088	win = kzalloc(sizeof(*win), GFP_KERNEL);
   1089	if (!win)
   1090		return -ENOMEM;
   1091
   1092	win->msc = msc;
   1093	win->sgt = &win->_sgt;
   1094	win->lockout = WIN_READY;
   1095	spin_lock_init(&win->lo_lock);
   1096
   1097	if (!list_empty(&msc->win_list)) {
   1098		struct msc_window *prev = list_last_entry(&msc->win_list,
   1099							  struct msc_window,
   1100							  entry);
   1101
   1102		win->pgoff = prev->pgoff + prev->nr_blocks;
   1103	}
   1104
   1105	if (msc->mbuf && msc->mbuf->alloc_window)
   1106		ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
   1107					      nr_blocks << PAGE_SHIFT);
   1108	else
   1109		ret = __msc_buffer_win_alloc(win, nr_blocks);
   1110
   1111	if (ret <= 0)
   1112		goto err_nomem;
   1113
   1114	win->nr_segs = ret;
   1115	win->nr_blocks = nr_blocks;
   1116
   1117	if (list_empty(&msc->win_list)) {
   1118		msc->base = msc_win_base(win);
   1119		msc->base_addr = msc_win_base_dma(win);
   1120		msc->cur_win = win;
   1121	}
   1122
   1123	list_add_tail(&win->entry, &msc->win_list);
   1124	msc->nr_pages += nr_blocks;
   1125
   1126	return 0;
   1127
   1128err_nomem:
   1129	kfree(win);
   1130
   1131	return ret;
   1132}
   1133
   1134static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
   1135{
   1136	struct scatterlist *sg;
   1137	int i;
   1138
   1139	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
   1140		struct page *page = sg_page(sg);
   1141
   1142		page->mapping = NULL;
   1143		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
   1144				  sg_virt(sg), sg_dma_address(sg));
   1145	}
   1146	sg_free_table(win->sgt);
   1147}
   1148
   1149/**
   1150 * msc_buffer_win_free() - free a window from MSC's window list
   1151 * @msc:	MSC device
   1152 * @win:	window to free
   1153 *
   1154 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
   1155 * to serialize, so the caller is expected to hold it.
   1156 */
   1157static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
   1158{
   1159	msc->nr_pages -= win->nr_blocks;
   1160
   1161	list_del(&win->entry);
   1162	if (list_empty(&msc->win_list)) {
   1163		msc->base = NULL;
   1164		msc->base_addr = 0;
   1165	}
   1166
   1167	if (msc->mbuf && msc->mbuf->free_window)
   1168		msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
   1169	else
   1170		__msc_buffer_win_free(msc, win);
   1171
   1172	kfree(win);
   1173}
   1174
   1175/**
   1176 * msc_buffer_relink() - set up block descriptors for multiblock mode
   1177 * @msc:	MSC device
   1178 *
   1179 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
   1180 * so the caller is expected to hold it.
   1181 */
   1182static void msc_buffer_relink(struct msc *msc)
   1183{
   1184	struct msc_window *win, *next_win;
   1185
    1186	/* call with msc::buf_mutex locked */
   1187	list_for_each_entry(win, &msc->win_list, entry) {
   1188		struct scatterlist *sg;
   1189		unsigned int blk;
   1190		u32 sw_tag = 0;
   1191
   1192		/*
   1193		 * Last window's next_win should point to the first window
   1194		 * and MSC_SW_TAG_LASTWIN should be set.
   1195		 */
   1196		if (msc_is_last_win(win)) {
   1197			sw_tag |= MSC_SW_TAG_LASTWIN;
   1198			next_win = list_first_entry(&msc->win_list,
   1199						    struct msc_window, entry);
   1200		} else {
   1201			next_win = list_next_entry(win, entry);
   1202		}
   1203
   1204		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
   1205			struct msc_block_desc *bdesc = sg_virt(sg);
   1206
   1207			memset(bdesc, 0, sizeof(*bdesc));
   1208
   1209			bdesc->next_win = msc_win_base_pfn(next_win);
   1210
   1211			/*
   1212			 * Similarly to last window, last block should point
   1213			 * to the first one.
   1214			 */
   1215			if (blk == win->nr_segs - 1) {
   1216				sw_tag |= MSC_SW_TAG_LASTBLK;
   1217				bdesc->next_blk = msc_win_base_pfn(win);
   1218			} else {
   1219				dma_addr_t addr = sg_dma_address(sg_next(sg));
   1220
   1221				bdesc->next_blk = PFN_DOWN(addr);
   1222			}
   1223
   1224			bdesc->sw_tag = sw_tag;
   1225			bdesc->block_sz = sg->length / 64;
   1226		}
   1227	}
   1228
   1229	/*
   1230	 * Make the above writes globally visible before tracing is
   1231	 * enabled to make sure hardware sees them coherently.
   1232	 */
   1233	wmb();
   1234}
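
        /*
         * Example (editor's note): with two windows of two blocks each,
         * msc_buffer_relink() chains the descriptors as follows (all links
         * are PFNs of DMA addresses):
         *
         *	win0/blk0: next_blk = win0/blk1, next_win = win1/blk0
         *	win0/blk1: next_blk = win0/blk0, next_win = win1/blk0, LASTBLK
         *	win1/blk0: next_blk = win1/blk1, next_win = win0/blk0, LASTWIN
         *	win1/blk1: next_blk = win1/blk0, next_win = win0/blk0, LASTWIN|LASTBLK
         *
         * so the hardware can wrap both within a window and across windows.
         */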
   1235
   1236static void msc_buffer_multi_free(struct msc *msc)
   1237{
   1238	struct msc_window *win, *iter;
   1239
   1240	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
   1241		msc_buffer_win_free(msc, win);
   1242}
   1243
   1244static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
   1245				  unsigned int nr_wins)
   1246{
   1247	int ret, i;
   1248
   1249	for (i = 0; i < nr_wins; i++) {
   1250		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
   1251		if (ret) {
   1252			msc_buffer_multi_free(msc);
   1253			return ret;
   1254		}
   1255	}
   1256
   1257	msc_buffer_relink(msc);
   1258
   1259	return 0;
   1260}
   1261
   1262/**
   1263 * msc_buffer_free() - free buffers for MSC
   1264 * @msc:	MSC device
   1265 *
   1266 * Free MSC's storage buffers.
   1267 *
   1268 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
   1269 * serialize, so the caller is expected to hold it.
   1270 */
   1271static void msc_buffer_free(struct msc *msc)
   1272{
   1273	msc_buffer_set_wb(msc);
   1274
   1275	if (msc->mode == MSC_MODE_SINGLE)
   1276		msc_buffer_contig_free(msc);
   1277	else if (msc->mode == MSC_MODE_MULTI)
   1278		msc_buffer_multi_free(msc);
   1279}
   1280
   1281/**
   1282 * msc_buffer_alloc() - allocate a buffer for MSC
   1283 * @msc:	MSC device
    1284 * @nr_pages:	array of window sizes, in pages
         * @nr_wins:	number of windows in @nr_pages
    1285 *
    1286 * Allocate a storage buffer for MSC. Depending on msc::mode, this is done
    1287 * either via msc_buffer_contig_alloc() for SINGLE operation mode or via
    1288 * msc_buffer_multi_alloc() for multiblock operation, which allocates one
    1289 * window per entry of @nr_pages.
   1291 *
   1292 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
   1293 * to serialize, so the caller is expected to hold it.
   1294 *
   1295 * Return:	0 on success, -errno otherwise.
   1296 */
   1297static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
   1298			    unsigned int nr_wins)
   1299{
   1300	int ret;
   1301
   1302	/* -1: buffer not allocated */
   1303	if (atomic_read(&msc->user_count) != -1)
   1304		return -EBUSY;
   1305
   1306	if (msc->mode == MSC_MODE_SINGLE) {
   1307		if (nr_wins != 1)
   1308			return -EINVAL;
   1309
   1310		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
   1311	} else if (msc->mode == MSC_MODE_MULTI) {
   1312		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
   1313	} else {
   1314		ret = -EINVAL;
   1315	}
   1316
   1317	if (!ret) {
   1318		msc_buffer_set_uc(msc);
   1319
   1320		/* allocation should be visible before the counter goes to 0 */
   1321		smp_mb__before_atomic();
   1322
   1323		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
   1324			return -EINVAL;
   1325	}
   1326
   1327	return ret;
   1328}
   1329
   1330/**
   1331 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
   1332 * @msc:	MSC device
   1333 *
    1334 * This frees the MSC buffer unless it is in use; if there is no
    1335 * allocated buffer, it does nothing.
   1336 * Caller needs to hold msc::buf_mutex.
   1337 *
   1338 * Return:	0 on successful deallocation or if there was no buffer to
   1339 *		deallocate, -EBUSY if there are active users.
   1340 */
   1341static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
   1342{
   1343	int count, ret = 0;
   1344
   1345	count = atomic_cmpxchg(&msc->user_count, 0, -1);
   1346
   1347	/* > 0: buffer is allocated and has users */
   1348	if (count > 0)
   1349		ret = -EBUSY;
   1350	/* 0: buffer is allocated, no users */
   1351	else if (!count)
   1352		msc_buffer_free(msc);
   1353	/* < 0: no buffer, nothing to do */
   1354
   1355	return ret;
   1356}
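
        /*
         * Example (editor's note): the msc::user_count protocol that the
         * cmpxchg above relies on:
         *
         *	-1  no buffer allocated (initial state, intel_th_msc_init())
         *	 0  buffer allocated but idle (msc_buffer_alloc(): -1 -> 0)
         *	>0  buffer referenced by readers, mmaps or active tracing
         *
         * Freeing only succeeds through the 0 -> -1 transition; any live
         * reference makes the cmpxchg observe a positive count and -EBUSY
         * is returned instead.
         */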
   1357
   1358/**
   1359 * msc_buffer_free_unless_used() - free a buffer unless it's in use
   1360 * @msc:	MSC device
   1361 *
   1362 * This is a locked version of msc_buffer_unlocked_free_unless_used().
   1363 */
   1364static int msc_buffer_free_unless_used(struct msc *msc)
   1365{
   1366	int ret;
   1367
   1368	mutex_lock(&msc->buf_mutex);
   1369	ret = msc_buffer_unlocked_free_unless_used(msc);
   1370	mutex_unlock(&msc->buf_mutex);
   1371
   1372	return ret;
   1373}
   1374
   1375/**
   1376 * msc_buffer_get_page() - get MSC buffer page at a given offset
   1377 * @msc:	MSC device
   1378 * @pgoff:	page offset into the storage buffer
   1379 *
   1380 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
   1381 * the caller.
   1382 *
   1383 * Return:	page if @pgoff corresponds to a valid buffer page or NULL.
   1384 */
   1385static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
   1386{
   1387	struct msc_window *win;
   1388	struct scatterlist *sg;
   1389	unsigned int blk;
   1390
   1391	if (msc->mode == MSC_MODE_SINGLE)
   1392		return msc_buffer_contig_get_page(msc, pgoff);
   1393
   1394	list_for_each_entry(win, &msc->win_list, entry)
   1395		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
   1396			goto found;
   1397
   1398	return NULL;
   1399
   1400found:
   1401	pgoff -= win->pgoff;
   1402
   1403	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
   1404		struct page *page = sg_page(sg);
   1405		size_t pgsz = PFN_DOWN(sg->length);
   1406
   1407		if (pgoff < pgsz)
   1408			return page + pgoff;
   1409
   1410		pgoff -= pgsz;
   1411	}
   1412
   1413	return NULL;
   1414}
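
        /*
         * Example (editor's note): with two windows of 32 and 64 pages,
         * win0 covers pgoff 0..31 and win1 covers pgoff 32..95; a request
         * for pgoff 40 matches win1, and the segment walk above runs with
         * the window-relative offset 40 - 32 = 8.
         */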
   1415
   1416/**
   1417 * struct msc_win_to_user_struct - data for copy_to_user() callback
   1418 * @buf:	userspace buffer to copy data to
   1419 * @offset:	running offset
   1420 */
   1421struct msc_win_to_user_struct {
   1422	char __user	*buf;
   1423	unsigned long	offset;
   1424};
   1425
   1426/**
   1427 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
   1428 * @data:	callback's private data
   1429 * @src:	source buffer
    1430 * @len:	amount of data to copy from the source buffer
         *
         * Return:	number of bytes that could not be copied, like copy_to_user()
    1431 */
   1432static unsigned long msc_win_to_user(void *data, void *src, size_t len)
   1433{
   1434	struct msc_win_to_user_struct *u = data;
   1435	unsigned long ret;
   1436
   1437	ret = copy_to_user(u->buf + u->offset, src, len);
   1438	u->offset += len - ret;
   1439
   1440	return ret;
   1441}
    1442
   1444/*
   1445 * file operations' callbacks
   1446 */
   1447
   1448static int intel_th_msc_open(struct inode *inode, struct file *file)
   1449{
   1450	struct intel_th_device *thdev = file->private_data;
   1451	struct msc *msc = dev_get_drvdata(&thdev->dev);
   1452	struct msc_iter *iter;
   1453
   1454	if (!capable(CAP_SYS_RAWIO))
   1455		return -EPERM;
   1456
   1457	iter = msc_iter_install(msc);
   1458	if (IS_ERR(iter))
   1459		return PTR_ERR(iter);
   1460
   1461	file->private_data = iter;
   1462
   1463	return nonseekable_open(inode, file);
   1464}
   1465
   1466static int intel_th_msc_release(struct inode *inode, struct file *file)
   1467{
   1468	struct msc_iter *iter = file->private_data;
   1469	struct msc *msc = iter->msc;
   1470
   1471	msc_iter_remove(iter, msc);
   1472
   1473	return 0;
   1474}
   1475
   1476static ssize_t
   1477msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
   1478{
   1479	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
   1480	unsigned long start = off, tocopy = 0;
   1481
   1482	if (msc->single_wrap) {
   1483		start += msc->single_sz;
   1484		if (start < size) {
   1485			tocopy = min(rem, size - start);
   1486			if (copy_to_user(buf, msc->base + start, tocopy))
   1487				return -EFAULT;
   1488
   1489			buf += tocopy;
   1490			rem -= tocopy;
   1491			start += tocopy;
   1492		}
   1493
   1494		start &= size - 1;
   1495		if (rem) {
   1496			tocopy = min(rem, msc->single_sz - start);
   1497			if (copy_to_user(buf, msc->base + start, tocopy))
   1498				return -EFAULT;
   1499
   1500			rem -= tocopy;
   1501		}
   1502
   1503		return len - rem;
   1504	}
   1505
   1506	if (copy_to_user(buf, msc->base + start, rem))
   1507		return -EFAULT;
   1508
   1509	return len;
   1510}
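
        /*
         * Example (editor's note): after a wrap in single mode the write
         * pointer stopped at single_sz, so the buffer holds
         *
         *	[ newest data | oldest data ]
         *	0         single_sz       buffer end
         *
         * which is why the wrapped branch above copies the tail starting at
         * off + single_sz (the oldest part) first, then wraps around to
         * offset 0 for the rest.
         */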
   1511
   1512static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
   1513				 size_t len, loff_t *ppos)
   1514{
   1515	struct msc_iter *iter = file->private_data;
   1516	struct msc *msc = iter->msc;
   1517	size_t size;
   1518	loff_t off = *ppos;
   1519	ssize_t ret = 0;
   1520
   1521	if (!atomic_inc_unless_negative(&msc->user_count))
   1522		return 0;
   1523
   1524	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
   1525		size = msc->single_sz;
   1526	else
   1527		size = msc->nr_pages << PAGE_SHIFT;
   1528
   1529	if (!size)
   1530		goto put_count;
   1531
   1532	if (off >= size)
   1533		goto put_count;
   1534
   1535	if (off + len >= size)
   1536		len = size - off;
   1537
   1538	if (msc->mode == MSC_MODE_SINGLE) {
   1539		ret = msc_single_to_user(msc, buf, off, len);
   1540		if (ret >= 0)
   1541			*ppos += ret;
   1542	} else if (msc->mode == MSC_MODE_MULTI) {
   1543		struct msc_win_to_user_struct u = {
   1544			.buf	= buf,
   1545			.offset	= 0,
   1546		};
   1547
   1548		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
   1549		if (ret >= 0)
   1550			*ppos = iter->offset;
   1551	} else {
   1552		ret = -EINVAL;
   1553	}
   1554
   1555put_count:
   1556	atomic_dec(&msc->user_count);
   1557
   1558	return ret;
   1559}
   1560
   1561/*
   1562 * vm operations callbacks (vm_ops)
   1563 */
   1564
   1565static void msc_mmap_open(struct vm_area_struct *vma)
   1566{
   1567	struct msc_iter *iter = vma->vm_file->private_data;
   1568	struct msc *msc = iter->msc;
   1569
   1570	atomic_inc(&msc->mmap_count);
   1571}
   1572
   1573static void msc_mmap_close(struct vm_area_struct *vma)
   1574{
   1575	struct msc_iter *iter = vma->vm_file->private_data;
   1576	struct msc *msc = iter->msc;
   1577	unsigned long pg;
   1578
   1579	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
   1580		return;
   1581
   1582	/* drop page _refcounts */
   1583	for (pg = 0; pg < msc->nr_pages; pg++) {
   1584		struct page *page = msc_buffer_get_page(msc, pg);
   1585
   1586		if (WARN_ON_ONCE(!page))
   1587			continue;
   1588
   1589		if (page->mapping)
   1590			page->mapping = NULL;
   1591	}
   1592
   1593	/* last mapping -- drop user_count */
   1594	atomic_dec(&msc->user_count);
   1595	mutex_unlock(&msc->buf_mutex);
   1596}
   1597
   1598static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
   1599{
   1600	struct msc_iter *iter = vmf->vma->vm_file->private_data;
   1601	struct msc *msc = iter->msc;
   1602
   1603	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
   1604	if (!vmf->page)
   1605		return VM_FAULT_SIGBUS;
   1606
   1607	get_page(vmf->page);
   1608	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
   1609	vmf->page->index = vmf->pgoff;
   1610
   1611	return 0;
   1612}
   1613
   1614static const struct vm_operations_struct msc_mmap_ops = {
   1615	.open	= msc_mmap_open,
   1616	.close	= msc_mmap_close,
   1617	.fault	= msc_mmap_fault,
   1618};
   1619
   1620static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
   1621{
   1622	unsigned long size = vma->vm_end - vma->vm_start;
   1623	struct msc_iter *iter = vma->vm_file->private_data;
   1624	struct msc *msc = iter->msc;
   1625	int ret = -EINVAL;
   1626
   1627	if (!size || offset_in_page(size))
   1628		return -EINVAL;
   1629
   1630	if (vma->vm_pgoff)
   1631		return -EINVAL;
   1632
   1633	/* grab user_count once per mmap; drop in msc_mmap_close() */
   1634	if (!atomic_inc_unless_negative(&msc->user_count))
   1635		return -EINVAL;
   1636
   1637	if (msc->mode != MSC_MODE_SINGLE &&
   1638	    msc->mode != MSC_MODE_MULTI)
   1639		goto out;
   1640
   1641	if (size >> PAGE_SHIFT != msc->nr_pages)
   1642		goto out;
   1643
   1644	atomic_set(&msc->mmap_count, 1);
   1645	ret = 0;
   1646
   1647out:
   1648	if (ret)
   1649		atomic_dec(&msc->user_count);
   1650
   1651	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
   1652	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
   1653	vma->vm_ops = &msc_mmap_ops;
   1654	return ret;
   1655}
   1656
   1657static const struct file_operations intel_th_msc_fops = {
   1658	.open		= intel_th_msc_open,
   1659	.release	= intel_th_msc_release,
   1660	.read		= intel_th_msc_read,
   1661	.mmap		= intel_th_msc_mmap,
   1662	.llseek		= no_llseek,
   1663	.owner		= THIS_MODULE,
   1664};
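
        /*
         * Example (editor's illustrative sketch): draining a stopped trace
         * from userspace through the character device backed by these fops;
         * the device path is an assumption and varies between systems, and
         * the caller needs CAP_SYS_RAWIO (see intel_th_msc_open()).
         *
         *	int fd = open("/dev/intel_th0/msc0", O_RDONLY);
         *	char buf[4096];
         *	ssize_t n;
         *
         *	while ((n = read(fd, buf, sizeof(buf))) > 0)
         *		consume(buf, n);	(hypothetical consumer)
         *	close(fd);
         *
         * open() fails with -EBUSY while the MSC is enabled, since reading
         * and tracing are mutually exclusive (see msc_iter_install()).
         */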
   1665
   1666static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
   1667{
   1668	struct msc *msc = dev_get_drvdata(&thdev->dev);
   1669	unsigned long count;
   1670	u32 reg;
   1671
   1672	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
   1673	     count && !(reg & MSCSTS_PLE); count--) {
   1674		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
   1675		cpu_relax();
   1676	}
   1677
   1678	if (!count)
   1679		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
   1680}
   1681
   1682static int intel_th_msc_init(struct msc *msc)
   1683{
   1684	atomic_set(&msc->user_count, -1);
   1685
   1686	msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
   1687	mutex_init(&msc->buf_mutex);
   1688	INIT_LIST_HEAD(&msc->win_list);
   1689	INIT_LIST_HEAD(&msc->iter_list);
   1690
   1691	msc->burst_len =
   1692		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
   1693		__ffs(MSC_LEN);
   1694
   1695	return 0;
   1696}
   1697
   1698static int msc_win_switch(struct msc *msc)
   1699{
   1700	struct msc_window *first;
   1701
   1702	if (list_empty(&msc->win_list))
   1703		return -EINVAL;
   1704
   1705	first = list_first_entry(&msc->win_list, struct msc_window, entry);
   1706
   1707	if (msc_is_last_win(msc->cur_win))
   1708		msc->cur_win = first;
   1709	else
   1710		msc->cur_win = list_next_entry(msc->cur_win, entry);
   1711
   1712	msc->base = msc_win_base(msc->cur_win);
   1713	msc->base_addr = msc_win_base_dma(msc->cur_win);
   1714
   1715	intel_th_trace_switch(msc->thdev);
   1716
   1717	return 0;
   1718}
   1719
   1720/**
    1721 * intel_th_msc_window_unlock() - put the window back in rotation
    1722 * @dev:	MSC device to which this relates
    1723 * @sgt:	buffer's sg_table for the window; if NULL, this function does nothing
   1724 */
   1725void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
   1726{
   1727	struct msc *msc = dev_get_drvdata(dev);
   1728	struct msc_window *win;
   1729
   1730	if (!sgt)
   1731		return;
   1732
   1733	win = msc_find_window(msc, sgt, false);
   1734	if (!win)
   1735		return;
   1736
   1737	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
   1738	if (msc->switch_on_unlock == win) {
   1739		msc->switch_on_unlock = NULL;
   1740		msc_win_switch(msc);
   1741	}
   1742}
   1743EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
   1744
   1745static void msc_work(struct work_struct *work)
   1746{
   1747	struct msc *msc = container_of(work, struct msc, work);
   1748
   1749	intel_th_msc_deactivate(msc->thdev);
   1750}
   1751
   1752static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
   1753{
   1754	struct msc *msc = dev_get_drvdata(&thdev->dev);
   1755	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
   1756	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
   1757	struct msc_window *win, *next_win;
   1758
   1759	if (!msc->do_irq || !msc->mbuf)
   1760		return IRQ_NONE;
   1761
   1762	msusts &= mask;
   1763
   1764	if (!msusts)
   1765		return msc->enabled ? IRQ_HANDLED : IRQ_NONE;
   1766
   1767	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
   1768
   1769	if (!msc->enabled)
   1770		return IRQ_NONE;
   1771
   1772	/* grab the window before we do the switch */
   1773	win = msc->cur_win;
   1774	if (!win)
   1775		return IRQ_HANDLED;
   1776	next_win = msc_next_window(win);
   1777	if (!next_win)
   1778		return IRQ_HANDLED;
   1779
    1780	/* next window: if READY, proceed; if LOCKED, stop or defer the switch */
   1781	if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
   1782		if (msc->stop_on_full)
   1783			schedule_work(&msc->work);
   1784		else
   1785			msc->switch_on_unlock = next_win;
   1786
   1787		return IRQ_HANDLED;
   1788	}
   1789
   1790	/* current window: INUSE -> LOCKED */
   1791	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
   1792
   1793	msc_win_switch(msc);
   1794
   1795	if (msc->mbuf && msc->mbuf->ready)
   1796		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
   1797				 msc_win_total_sz(win));
   1798
   1799	return IRQ_HANDLED;
   1800}
   1801
   1802static const char * const msc_mode[] = {
   1803	[MSC_MODE_SINGLE]	= "single",
   1804	[MSC_MODE_MULTI]	= "multi",
   1805	[MSC_MODE_EXI]		= "ExI",
   1806	[MSC_MODE_DEBUG]	= "debug",
   1807};
   1808
   1809static ssize_t
   1810wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
   1811{
   1812	struct msc *msc = dev_get_drvdata(dev);
   1813
   1814	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
   1815}
   1816
   1817static ssize_t
   1818wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
   1819	   size_t size)
   1820{
   1821	struct msc *msc = dev_get_drvdata(dev);
   1822	unsigned long val;
   1823	int ret;
   1824
   1825	ret = kstrtoul(buf, 10, &val);
   1826	if (ret)
   1827		return ret;
   1828
   1829	msc->wrap = !!val;
   1830
   1831	return size;
   1832}
   1833
   1834static DEVICE_ATTR_RW(wrap);
   1835
   1836static void msc_buffer_unassign(struct msc *msc)
   1837{
   1838	lockdep_assert_held(&msc->buf_mutex);
   1839
   1840	if (!msc->mbuf)
   1841		return;
   1842
   1843	msc->mbuf->unassign(msc->mbuf_priv);
   1844	msu_buffer_put(msc->mbuf);
   1845	msc->mbuf_priv = NULL;
   1846	msc->mbuf = NULL;
   1847}
   1848
   1849static ssize_t
   1850mode_show(struct device *dev, struct device_attribute *attr, char *buf)
   1851{
   1852	struct msc *msc = dev_get_drvdata(dev);
   1853	const char *mode = msc_mode[msc->mode];
   1854	ssize_t ret;
   1855
   1856	mutex_lock(&msc->buf_mutex);
   1857	if (msc->mbuf)
   1858		mode = msc->mbuf->name;
   1859	ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
   1860	mutex_unlock(&msc->buf_mutex);
   1861
   1862	return ret;
   1863}
   1864
   1865static ssize_t
   1866mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
   1867	   size_t size)
   1868{
   1869	const struct msu_buffer *mbuf = NULL;
   1870	struct msc *msc = dev_get_drvdata(dev);
   1871	size_t len = size;
   1872	char *cp, *mode;
   1873	int i, ret;
   1874
   1875	if (!capable(CAP_SYS_RAWIO))
   1876		return -EPERM;
   1877
   1878	cp = memchr(buf, '\n', len);
   1879	if (cp)
   1880		len = cp - buf;
   1881
   1882	mode = kstrndup(buf, len, GFP_KERNEL);
   1883	if (!mode)
   1884		return -ENOMEM;
   1885
   1886	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
   1887	if (i >= 0) {
   1888		kfree(mode);
   1889		goto found;
   1890	}
   1891
   1892	/* Buffer sinks only work with a usable IRQ */
   1893	if (!msc->do_irq) {
   1894		kfree(mode);
   1895		return -EINVAL;
   1896	}
   1897
   1898	mbuf = msu_buffer_get(mode);
   1899	kfree(mode);
   1900	if (mbuf)
   1901		goto found;
   1902
   1903	return -EINVAL;
   1904
   1905found:
   1906	if (i == MSC_MODE_MULTI && msc->multi_is_broken)
   1907		return -EOPNOTSUPP;
   1908
   1909	mutex_lock(&msc->buf_mutex);
   1910	ret = 0;
   1911
   1912	/* Same buffer: do nothing */
   1913	if (mbuf && mbuf == msc->mbuf) {
   1914		/* put the extra reference we just got */
   1915		msu_buffer_put(mbuf);
   1916		goto unlock;
   1917	}
   1918
   1919	ret = msc_buffer_unlocked_free_unless_used(msc);
   1920	if (ret)
   1921		goto unlock;
   1922
   1923	if (mbuf) {
   1924		void *mbuf_priv = mbuf->assign(dev, &i);
   1925
   1926		if (!mbuf_priv) {
   1927			ret = -ENOMEM;
   1928			goto unlock;
   1929		}
   1930
   1931		msc_buffer_unassign(msc);
   1932		msc->mbuf_priv = mbuf_priv;
   1933		msc->mbuf = mbuf;
   1934	} else {
   1935		msc_buffer_unassign(msc);
   1936	}
   1937
   1938	msc->mode = i;
   1939
   1940unlock:
   1941	if (ret && mbuf)
   1942		msu_buffer_put(mbuf);
   1943	mutex_unlock(&msc->buf_mutex);
   1944
   1945	return ret ? ret : size;
   1946}
   1947
   1948static DEVICE_ATTR_RW(mode);
   1949
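        /*
         * Single mode: the total buffer size in pages; multi mode: a
         * comma-separated list of per-window sizes in pages.
         */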
   1950static ssize_t
   1951nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
   1952{
   1953	struct msc *msc = dev_get_drvdata(dev);
   1954	struct msc_window *win;
   1955	size_t count = 0;
   1956
   1957	mutex_lock(&msc->buf_mutex);
   1958
   1959	if (msc->mode == MSC_MODE_SINGLE)
    1960		count = scnprintf(buf, PAGE_SIZE, "%lu\n", msc->nr_pages);
   1961	else if (msc->mode == MSC_MODE_MULTI) {
   1962		list_for_each_entry(win, &msc->win_list, entry) {
   1963			count += scnprintf(buf + count, PAGE_SIZE - count,
   1964					   "%d%c", win->nr_blocks,
   1965					   msc_is_last_win(win) ? '\n' : ',');
   1966		}
   1967	} else {
   1968		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
   1969	}
   1970
   1971	mutex_unlock(&msc->buf_mutex);
   1972
   1973	return count;
   1974}
   1975
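        /*
         * Parse a comma-separated list of window sizes (in pages) and
         * allocate the buffer accordingly; in single mode, only a single
         * number is accepted.
         */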
   1976static ssize_t
   1977nr_pages_store(struct device *dev, struct device_attribute *attr,
   1978	       const char *buf, size_t size)
   1979{
   1980	struct msc *msc = dev_get_drvdata(dev);
   1981	unsigned long val, *win = NULL, *rewin;
   1982	size_t len = size;
   1983	const char *p = buf;
   1984	char *end, *s;
   1985	int ret, nr_wins = 0;
   1986
   1987	if (!capable(CAP_SYS_RAWIO))
   1988		return -EPERM;
   1989
   1990	ret = msc_buffer_free_unless_used(msc);
   1991	if (ret)
   1992		return ret;
   1993
   1994	/* scan the comma-separated list of allocation sizes */
   1995	end = memchr(buf, '\n', len);
   1996	if (end)
   1997		len = end - buf;
   1998
   1999	do {
   2000		end = memchr(p, ',', len);
   2001		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
   2002		if (!s) {
   2003			ret = -ENOMEM;
   2004			goto free_win;
   2005		}
   2006
   2007		ret = kstrtoul(s, 10, &val);
   2008		kfree(s);
   2009
   2010		if (ret || !val)
   2011			goto free_win;
   2012
   2013		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
   2014			ret = -EINVAL;
   2015			goto free_win;
   2016		}
   2017
   2018		nr_wins++;
   2019		rewin = krealloc_array(win, nr_wins, sizeof(*win), GFP_KERNEL);
   2020		if (!rewin) {
   2021			kfree(win);
   2022			return -ENOMEM;
   2023		}
   2024
   2025		win = rewin;
   2026		win[nr_wins - 1] = val;
   2027
   2028		if (!end)
   2029			break;
   2030
   2031		/* consume the number and the following comma, hence +1 */
   2032		len -= end - p + 1;
   2033		p = end + 1;
   2034	} while (len);
   2035
   2036	mutex_lock(&msc->buf_mutex);
   2037	ret = msc_buffer_alloc(msc, win, nr_wins);
   2038	mutex_unlock(&msc->buf_mutex);
   2039
   2040free_win:
   2041	kfree(win);
   2042
   2043	return ret ? ret : size;
   2044}
   2045
   2046static DEVICE_ATTR_RW(nr_pages);
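
        /*
         * Example: configuring a two-window multiblock buffer with 64 pages
         * per window through the attributes above (the sysfs path is
         * illustrative; the actual device name depends on the platform's
         * Trace Hub topology):
         *
         *	# echo multi > /sys/bus/intel_th/devices/0-msc0/mode
         *	# echo 64,64 > /sys/bus/intel_th/devices/0-msc0/nr_pages
         */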
   2047
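        /* writing 1 forces a window switch (multi mode, no buffer sink) */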
   2048static ssize_t
   2049win_switch_store(struct device *dev, struct device_attribute *attr,
   2050		 const char *buf, size_t size)
   2051{
   2052	struct msc *msc = dev_get_drvdata(dev);
   2053	unsigned long val;
   2054	int ret;
   2055
   2056	ret = kstrtoul(buf, 10, &val);
   2057	if (ret)
   2058		return ret;
   2059
   2060	if (val != 1)
   2061		return -EINVAL;
   2062
   2063	ret = -EINVAL;
   2064	mutex_lock(&msc->buf_mutex);
    2065	/*
    2066	 * A window switch can only happen in the "multi" mode.
    2067	 * If an external buffer sink is engaged, it has full
    2068	 * control over window switching.
    2069	 */
   2070	if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
   2071		ret = msc_win_switch(msc);
   2072	mutex_unlock(&msc->buf_mutex);
   2073
   2074	return ret ? ret : size;
   2075}
   2076
   2077static DEVICE_ATTR_WO(win_switch);
   2078
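        /*
         * stop_on_full: if set, the trace is stopped when the next window is
         * still locked; otherwise the switch is deferred until that window
         * is unlocked (see intel_th_msc_interrupt()).
         */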
   2079static ssize_t stop_on_full_show(struct device *dev,
   2080				 struct device_attribute *attr, char *buf)
   2081{
   2082	struct msc *msc = dev_get_drvdata(dev);
   2083
    2084	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->stop_on_full);
   2085}
   2086
   2087static ssize_t stop_on_full_store(struct device *dev,
   2088				  struct device_attribute *attr,
   2089				  const char *buf, size_t size)
   2090{
   2091	struct msc *msc = dev_get_drvdata(dev);
   2092	int ret;
   2093
   2094	ret = kstrtobool(buf, &msc->stop_on_full);
   2095	if (ret)
   2096		return ret;
   2097
   2098	return size;
   2099}
   2100
   2101static DEVICE_ATTR_RW(stop_on_full);
   2102
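        /* sysfs attributes of the MSC output device */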
   2103static struct attribute *msc_output_attrs[] = {
   2104	&dev_attr_wrap.attr,
   2105	&dev_attr_mode.attr,
   2106	&dev_attr_nr_pages.attr,
   2107	&dev_attr_win_switch.attr,
   2108	&dev_attr_stop_on_full.attr,
   2109	NULL,
   2110};
   2111
   2112static const struct attribute_group msc_output_group = {
   2113	.attrs	= msc_output_attrs,
   2114};
   2115
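        /* map the MSU register space and set up the per-MSC state */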
   2116static int intel_th_msc_probe(struct intel_th_device *thdev)
   2117{
   2118	struct device *dev = &thdev->dev;
   2119	struct resource *res;
   2120	struct msc *msc;
   2121	void __iomem *base;
   2122	int err;
   2123
   2124	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
   2125	if (!res)
   2126		return -ENODEV;
   2127
   2128	base = devm_ioremap(dev, res->start, resource_size(res));
   2129	if (!base)
   2130		return -ENOMEM;
   2131
   2132	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
   2133	if (!msc)
   2134		return -ENOMEM;
   2135
   2136	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
   2137	if (!res)
   2138		msc->do_irq = 1;
   2139
   2140	if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
   2141		msc->multi_is_broken = 1;
   2142
   2143	msc->index = thdev->id;
   2144
   2145	msc->thdev = thdev;
   2146	msc->reg_base = base + msc->index * 0x100;
   2147	msc->msu_base = base;
   2148
   2149	INIT_WORK(&msc->work, msc_work);
   2150	err = intel_th_msc_init(msc);
   2151	if (err)
   2152		return err;
   2153
   2154	dev_set_drvdata(dev, msc);
   2155
   2156	return 0;
   2157}
   2158
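        /* deactivate the MSC and release its buffer on device removal */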
   2159static void intel_th_msc_remove(struct intel_th_device *thdev)
   2160{
   2161	struct msc *msc = dev_get_drvdata(&thdev->dev);
   2162	int ret;
   2163
   2164	intel_th_msc_deactivate(thdev);
   2165
   2166	/*
   2167	 * Buffers should not be used at this point except if the
   2168	 * output character device is still open and the parent
   2169	 * device gets detached from its bus, which is a FIXME.
   2170	 */
   2171	ret = msc_buffer_free_unless_used(msc);
   2172	WARN_ON_ONCE(ret);
   2173}
   2174
   2175static struct intel_th_driver intel_th_msc_driver = {
   2176	.probe	= intel_th_msc_probe,
   2177	.remove	= intel_th_msc_remove,
   2178	.irq		= intel_th_msc_interrupt,
   2179	.wait_empty	= intel_th_msc_wait_empty,
   2180	.activate	= intel_th_msc_activate,
   2181	.deactivate	= intel_th_msc_deactivate,
   2182	.fops	= &intel_th_msc_fops,
   2183	.attr_group	= &msc_output_group,
   2184	.driver	= {
   2185		.name	= "msc",
   2186		.owner	= THIS_MODULE,
   2187	},
   2188};
   2189
   2190module_driver(intel_th_msc_driver,
   2191	      intel_th_driver_register,
   2192	      intel_th_driver_unregister);
   2193
   2194MODULE_LICENSE("GPL v2");
   2195MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
   2196MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");