cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

core.c (36040B)


// SPDX-License-Identifier: GPL-2.0
/*
 * core.c - Implementation of core module of MOST Linux driver stack
 *
 * Copyright (C) 2013-2020 Microchip Technology Germany II GmbH & Co. KG
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/sysfs.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/most.h>

#define MAX_CHANNELS	64
#define STRING_SIZE	80

static struct ida mdev_id;
static int dummy_num_buffers;
static struct list_head comp_list;

struct pipe {
	struct most_component *comp;
	int refs;
	int num_buffers;
};

struct most_channel {
	struct device dev;
	struct completion cleanup;
	atomic_t mbo_ref;
	atomic_t mbo_nq_level;
	u16 channel_id;
	char name[STRING_SIZE];
	bool is_poisoned;
	struct mutex start_mutex; /* channel activation synchronization */
	struct mutex nq_mutex; /* nq thread synchronization */
	int is_starving;
	struct most_interface *iface;
	struct most_channel_config cfg;
	bool keep_mbo;
	bool enqueue_halt;
	struct list_head fifo;
	spinlock_t fifo_lock; /* fifo access synchronization */
	struct list_head halt_fifo;
	struct list_head list;
	struct pipe pipe0;
	struct pipe pipe1;
	struct list_head trash_fifo;
	struct task_struct *hdm_enqueue_task;
	wait_queue_head_t hdm_fifo_wq;
};

#define to_channel(d) container_of(d, struct most_channel, dev)

struct interface_private {
	int dev_id;
	char name[STRING_SIZE];
	struct most_channel *channel[MAX_CHANNELS];
	struct list_head channel_list;
};

static const struct {
	int most_ch_data_type;
	const char *name;
} ch_data_type[] = {
	{ MOST_CH_CONTROL, "control" },
	{ MOST_CH_ASYNC, "async" },
	{ MOST_CH_SYNC, "sync" },
	{ MOST_CH_ISOC, "isoc"},
	{ MOST_CH_ISOC, "isoc_avp"},
};

/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
	list_del(&_mbo->list);						\
	_mbo;								\
})

/**
 * most_free_mbo_coherent - free an MBO and its coherent buffer
 * @mbo: most buffer
 */
static void most_free_mbo_coherent(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;
	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	if (c->iface->dma_free)
		c->iface->dma_free(mbo, coherent_buf_size);
	else
		kfree(mbo->virt_address);
	kfree(mbo);
	if (atomic_sub_and_test(1, &c->mbo_ref))
		complete(&c->cleanup);
}

/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 */
static void flush_channel_fifos(struct most_channel *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
		dev_warn(&c->dev, "Channel or halt fifo not empty\n");
}

/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 */
static int flush_trash_fifo(struct most_channel *c)
{
	struct mbo *mbo, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return 0;
}

static ssize_t available_directions_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
		strcat(buf, "rx ");
	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
		strcat(buf, "tx ");
	strcat(buf, "\n");
	return strlen(buf);
}

static ssize_t available_datatypes_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
		strcat(buf, "control ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
		strcat(buf, "async ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
		strcat(buf, "sync ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
		strcat(buf, "isoc ");
	strcat(buf, "\n");
	return strlen(buf);
}

static ssize_t number_of_packet_buffers_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_packet);
}

static ssize_t number_of_stream_buffers_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_streaming);
}

static ssize_t size_of_packet_buffer_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_packet);
}

static ssize_t size_of_stream_buffer_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_streaming);
}

static ssize_t channel_starving_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
}

static ssize_t set_number_of_buffers_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
}

static ssize_t set_buffer_size_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
}

static ssize_t set_direction_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct most_channel *c = to_channel(dev);

	if (c->cfg.direction & MOST_CH_TX)
		return snprintf(buf, PAGE_SIZE, "tx\n");
	else if (c->cfg.direction & MOST_CH_RX)
		return snprintf(buf, PAGE_SIZE, "rx\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t set_datatype_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	int i;
	struct most_channel *c = to_channel(dev);

	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
			return snprintf(buf, PAGE_SIZE, "%s\n",
					ch_data_type[i].name);
	}
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t set_subbuffer_size_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
}

static ssize_t set_packets_per_xact_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
}

static ssize_t set_dbr_size_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
}

#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
static umode_t channel_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int index)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	struct most_channel *c = to_channel(dev);

	if (!strcmp(dev_attr->attr.name, "set_dbr_size") &&
	    (c->iface->interface != ITYPE_MEDIALB_DIM2))
		return 0;
	if (!strcmp(dev_attr->attr.name, "set_packets_per_xact") &&
	    (c->iface->interface != ITYPE_USB))
		return 0;

	return attr->mode;
}

#define DEV_ATTR(_name)  (&dev_attr_##_name.attr)

static DEVICE_ATTR_RO(available_directions);
static DEVICE_ATTR_RO(available_datatypes);
static DEVICE_ATTR_RO(number_of_packet_buffers);
static DEVICE_ATTR_RO(number_of_stream_buffers);
static DEVICE_ATTR_RO(size_of_stream_buffer);
static DEVICE_ATTR_RO(size_of_packet_buffer);
static DEVICE_ATTR_RO(channel_starving);
static DEVICE_ATTR_RO(set_buffer_size);
static DEVICE_ATTR_RO(set_number_of_buffers);
static DEVICE_ATTR_RO(set_direction);
static DEVICE_ATTR_RO(set_datatype);
static DEVICE_ATTR_RO(set_subbuffer_size);
static DEVICE_ATTR_RO(set_packets_per_xact);
static DEVICE_ATTR_RO(set_dbr_size);

static struct attribute *channel_attrs[] = {
	DEV_ATTR(available_directions),
	DEV_ATTR(available_datatypes),
	DEV_ATTR(number_of_packet_buffers),
	DEV_ATTR(number_of_stream_buffers),
	DEV_ATTR(size_of_stream_buffer),
	DEV_ATTR(size_of_packet_buffer),
	DEV_ATTR(channel_starving),
	DEV_ATTR(set_buffer_size),
	DEV_ATTR(set_number_of_buffers),
	DEV_ATTR(set_direction),
	DEV_ATTR(set_datatype),
	DEV_ATTR(set_subbuffer_size),
	DEV_ATTR(set_packets_per_xact),
	DEV_ATTR(set_dbr_size),
	NULL,
};

static const struct attribute_group channel_attr_group = {
	.attrs = channel_attrs,
	.is_visible = channel_attr_is_visible,
};

static const struct attribute_group *channel_attr_groups[] = {
	&channel_attr_group,
	NULL,
};

static ssize_t description_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct most_interface *iface = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
}

static ssize_t interface_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct most_interface *iface = dev_get_drvdata(dev);

	switch (iface->interface) {
	case ITYPE_LOOPBACK:
		return snprintf(buf, PAGE_SIZE, "loopback\n");
	case ITYPE_I2C:
		return snprintf(buf, PAGE_SIZE, "i2c\n");
	case ITYPE_I2S:
		return snprintf(buf, PAGE_SIZE, "i2s\n");
	case ITYPE_TSI:
		return snprintf(buf, PAGE_SIZE, "tsi\n");
	case ITYPE_HBI:
		return snprintf(buf, PAGE_SIZE, "hbi\n");
	case ITYPE_MEDIALB_DIM:
		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
	case ITYPE_MEDIALB_DIM2:
		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
	case ITYPE_USB:
		return snprintf(buf, PAGE_SIZE, "usb\n");
	case ITYPE_PCIE:
		return snprintf(buf, PAGE_SIZE, "pcie\n");
	}
	return snprintf(buf, PAGE_SIZE, "unknown\n");
}

static DEVICE_ATTR_RO(description);
static DEVICE_ATTR_RO(interface);

static struct attribute *interface_attrs[] = {
	DEV_ATTR(description),
	DEV_ATTR(interface),
	NULL,
};

static const struct attribute_group interface_attr_group = {
	.attrs = interface_attrs,
};

static const struct attribute_group *interface_attr_groups[] = {
	&interface_attr_group,
	NULL,
};

static struct most_component *match_component(char *name)
{
	struct most_component *comp;

	list_for_each_entry(comp, &comp_list, list) {
		if (!strcmp(comp->name, name))
			return comp;
	}
	return NULL;
}

struct show_links_data {
	int offs;
	char *buf;
};

static int print_links(struct device *dev, void *data)
{
	struct show_links_data *d = data;
	int offs = d->offs;
	char *buf = d->buf;
	struct most_channel *c;
	struct most_interface *iface = dev_get_drvdata(dev);

	list_for_each_entry(c, &iface->p->channel_list, list) {
		if (c->pipe0.comp) {
			offs += scnprintf(buf + offs,
					  PAGE_SIZE - offs,
					  "%s:%s:%s\n",
					  c->pipe0.comp->name,
					  dev_name(iface->dev),
					  dev_name(&c->dev));
		}
		if (c->pipe1.comp) {
			offs += scnprintf(buf + offs,
					  PAGE_SIZE - offs,
					  "%s:%s:%s\n",
					  c->pipe1.comp->name,
					  dev_name(iface->dev),
					  dev_name(&c->dev));
		}
	}
	d->offs = offs;
	return 0;
}

static int most_match(struct device *dev, struct device_driver *drv)
{
	if (!strcmp(dev_name(dev), "most"))
		return 0;
	else
		return 1;
}

static struct bus_type mostbus = {
	.name = "most",
	.match = most_match,
};

static ssize_t links_show(struct device_driver *drv, char *buf)
{
	struct show_links_data d = { .buf = buf };

	bus_for_each_dev(&mostbus, NULL, &d, print_links);
	return d.offs;
}

static ssize_t components_show(struct device_driver *drv, char *buf)
{
	struct most_component *comp;
	int offs = 0;

	list_for_each_entry(comp, &comp_list, list) {
		offs += scnprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
				  comp->name);
	}
	return offs;
}

/**
 * get_channel - get pointer to channel
 * @mdev: name of the device interface
 * @mdev_ch: name of channel
 */
static struct most_channel *get_channel(char *mdev, char *mdev_ch)
{
	struct device *dev = NULL;
	struct most_interface *iface;
	struct most_channel *c, *tmp;

	dev = bus_find_device_by_name(&mostbus, NULL, mdev);
	if (!dev)
		return NULL;
	put_device(dev);
	iface = dev_get_drvdata(dev);
	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
		if (!strcmp(dev_name(&c->dev), mdev_ch))
			return c;
	}
	return NULL;
}

static
inline int link_channel_to_component(struct most_channel *c,
				     struct most_component *comp,
				     char *name,
				     char *comp_param)
{
	int ret;
	struct most_component **comp_ptr;

	if (!c->pipe0.comp)
		comp_ptr = &c->pipe0.comp;
	else if (!c->pipe1.comp)
		comp_ptr = &c->pipe1.comp;
	else
		return -ENOSPC;

	*comp_ptr = comp;
	ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, name,
				  comp_param);
	if (ret) {
		*comp_ptr = NULL;
		return ret;
	}
	return 0;
}

int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.buffer_size = val;
	return 0;
}

int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.subbuffer_size = val;
	return 0;
}

int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.dbr_size = val;
	return 0;
}

int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.num_buffers = val;
	return 0;
}

int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
{
	int i;
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (!strcmp(buf, ch_data_type[i].name)) {
			c->cfg.data_type = ch_data_type[i].most_ch_data_type;
			break;
		}
	}

	if (i == ARRAY_SIZE(ch_data_type))
		dev_warn(&c->dev, "Invalid attribute settings\n");
	return 0;
}

int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	if (!strcmp(buf, "dir_rx")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "rx")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "dir_tx")) {
		c->cfg.direction = MOST_CH_TX;
	} else if (!strcmp(buf, "tx")) {
		c->cfg.direction = MOST_CH_TX;
	} else {
		dev_err(&c->dev, "Invalid direction\n");
		return -ENODATA;
	}
	return 0;
}

int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.packets_per_xact = val;
	return 0;
}

int most_cfg_complete(char *comp_name)
{
	struct most_component *comp;

	comp = match_component(comp_name);
	if (!comp)
		return -ENODEV;

	return comp->cfg_complete();
}

int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
		  char *comp_param)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);
	struct most_component *comp = match_component(comp_name);

	if (!c || !comp)
		return -ENODEV;

	return link_channel_to_component(c, comp, link_name, comp_param);
}

int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
{
	struct most_channel *c;
	struct most_component *comp;

	comp = match_component(comp_name);
	if (!comp)
		return -ENODEV;
	c = get_channel(mdev, mdev_ch);
	if (!c)
		return -ENODEV;

	if (comp->disconnect_channel(c->iface, c->channel_id))
		return -EIO;
	if (c->pipe0.comp == comp)
		c->pipe0.comp = NULL;
	if (c->pipe1.comp == comp)
		c->pipe1.comp = NULL;
	return 0;
}
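
/*
 * Example (illustrative sketch, not part of the driver): the driver's
 * configfs code calls these helpers to wire a registered component to a
 * channel. The device, channel and link names below are hypothetical.
 *
 *	ret = most_add_link("mdev0", "ep8f", "cdev", "my_rx_channel", NULL);
 *	...
 *	ret = most_remove_link("mdev0", "ep8f", "cdev");
 */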

#define DRV_ATTR(_name)  (&driver_attr_##_name.attr)

static DRIVER_ATTR_RO(links);
static DRIVER_ATTR_RO(components);

static struct attribute *mc_attrs[] = {
	DRV_ATTR(links),
	DRV_ATTR(components),
	NULL,
};

static const struct attribute_group mc_attr_group = {
	.attrs = mc_attrs,
};

static const struct attribute_group *mc_attr_groups[] = {
	&mc_attr_group,
	NULL,
};

static struct device_driver mostbus_driver = {
	.name = "most_core",
	.bus = &mostbus,
	.groups = mc_attr_groups,
};

static inline void trash_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add(&mbo->list, &c->trash_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
}

static bool hdm_mbo_ready(struct most_channel *c)
{
	bool empty;

	if (c->enqueue_halt)
		return false;

	spin_lock_irq(&c->fifo_lock);
	empty = list_empty(&c->halt_fifo);
	spin_unlock_irq(&c->fifo_lock);

	return !empty;
}

static void nq_hdm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	wake_up_interruptible(&c->hdm_fifo_wq);
}

static int hdm_enqueue_thread(void *data)
{
	struct most_channel *c = data;
	struct mbo *mbo;
	int ret;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 hdm_mbo_ready(c) ||
					 kthread_should_stop());

		mutex_lock(&c->nq_mutex);
		spin_lock_irq(&c->fifo_lock);
		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
			spin_unlock_irq(&c->fifo_lock);
			mutex_unlock(&c->nq_mutex);
			continue;
		}

		mbo = list_pop_mbo(&c->halt_fifo);
		spin_unlock_irq(&c->fifo_lock);

		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
		mutex_unlock(&c->nq_mutex);

		if (unlikely(ret)) {
			dev_err(&c->dev, "Buffer enqueue failed\n");
			nq_hdm_mbo(mbo);
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}

static int run_enqueue_thread(struct most_channel *c, int channel_id)
{
	struct task_struct *task =
		kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
			    channel_id);

	if (IS_ERR(task))
		return PTR_ERR(task);

	c->hdm_enqueue_task = task;
	return 0;
}

/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: most buffer
 *
 * This puts an MBO back on the list to have it ready for upcoming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached component.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c;

	c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	if (c->pipe0.refs && c->pipe0.comp->tx_completion)
		c->pipe0.comp->tx_completion(c->iface, c->channel_id);

	if (c->pipe1.refs && c->pipe1.comp->tx_completion)
		c->pipe1.comp->tx_completion(c->iface, c->channel_id);
}

/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs.
 */
static int arm_mbo_chain(struct most_channel *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	struct mbo *mbo;
	unsigned long flags;
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo)
			goto flush_fifos;

		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		if (c->iface->dma_alloc) {
			mbo->virt_address =
				c->iface->dma_alloc(mbo, coherent_buf_size);
		} else {
			mbo->virt_address =
				kzalloc(coherent_buf_size, GFP_KERNEL);
		}
		if (!mbo->virt_address)
			goto release_mbo;

		mbo->complete = compl;
		mbo->num_buffers_ptr = &dummy_num_buffers;
		if (dir == MOST_CH_RX) {
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			spin_lock_irqsave(&c->fifo_lock, flags);
			list_add_tail(&mbo->list, &c->fifo);
			spin_unlock_irqrestore(&c->fifo_lock, flags);
		}
	}
	return c->cfg.num_buffers;

release_mbo:
	kfree(mbo);

flush_fifos:
	flush_channel_fifos(c);
	return 0;
}

/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: most buffer
 */
void most_submit_mbo(struct mbo *mbo)
{
	if (WARN_ONCE(!mbo || !mbo->context,
		      "Bad buffer or missing channel reference\n"))
		return;

	nq_hdm_mbo(mbo);
}
EXPORT_SYMBOL_GPL(most_submit_mbo);

/**
 * most_write_completion - write completion handler
 * @mbo: most buffer
 *
 * This recycles the MBO for further usage. In case the channel has been
 * poisoned, the MBO is scheduled to be trashed.
 */
static void most_write_completion(struct mbo *mbo)
{
	struct most_channel *c;

	c = mbo->context;
	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
		trash_mbo(mbo);
	else
		arm_mbo(mbo);
}

int channel_has_mbo(struct most_interface *iface, int id,
		    struct most_component *comp)
{
	struct most_channel *c = iface->p->channel[id];
	unsigned long flags;
	int empty;

	if (unlikely(!c))
		return -EINVAL;

	if (c->pipe0.refs && c->pipe1.refs &&
	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
		return 0;

	spin_lock_irqsave(&c->fifo_lock, flags);
	empty = list_empty(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return !empty;
}
EXPORT_SYMBOL_GPL(channel_has_mbo);

/**
 * most_get_mbo - get pointer to an MBO from the pool
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * This attempts to get a free buffer out of the channel fifo.
 * Returns a pointer to MBO on success or NULL otherwise.
 */
struct mbo *most_get_mbo(struct most_interface *iface, int id,
			 struct most_component *comp)
{
	struct mbo *mbo;
	struct most_channel *c;
	unsigned long flags;
	int *num_buffers_ptr;

	c = iface->p->channel[id];
	if (unlikely(!c))
		return NULL;

	if (c->pipe0.refs && c->pipe1.refs &&
	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
		return NULL;

	if (comp == c->pipe0.comp)
		num_buffers_ptr = &c->pipe0.num_buffers;
	else if (comp == c->pipe1.comp)
		num_buffers_ptr = &c->pipe1.num_buffers;
	else
		num_buffers_ptr = &dummy_num_buffers;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (list_empty(&c->fifo)) {
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		return NULL;
	}
	mbo = list_pop_mbo(&c->fifo);
	--*num_buffers_ptr;
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	mbo->num_buffers_ptr = num_buffers_ptr;
	mbo->buffer_length = c->cfg.buffer_size;
	return mbo;
}
EXPORT_SYMBOL_GPL(most_get_mbo);
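
/*
 * Example (illustrative sketch): a component's typical TX path checks
 * for a free buffer, fetches it, fills it and hands it back to the
 * core. "my_comp" is a hypothetical registered component; "data" and
 * "len" stand for the payload to transmit.
 *
 *	if (channel_has_mbo(iface, id, &my_comp) > 0) {
 *		struct mbo *mbo = most_get_mbo(iface, id, &my_comp);
 *
 *		if (mbo) {
 *			memcpy(mbo->virt_address, data, len);
 *			mbo->buffer_length = len;
 *			most_submit_mbo(mbo);
 *		}
 *	}
 */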

/**
 * most_put_mbo - return buffer to pool
 * @mbo: most buffer
 */
void most_put_mbo(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;

	if (c->cfg.direction == MOST_CH_TX) {
		arm_mbo(mbo);
		return;
	}
	nq_hdm_mbo(mbo);
	atomic_inc(&c->mbo_nq_level);
}
EXPORT_SYMBOL_GPL(most_put_mbo);

/**
 * most_read_completion - read completion handler
 * @mbo: most buffer
 *
 * This function is called by the HDM when data has been received from the
 * hardware and copied to the buffer of the MBO.
 *
 * In case the channel has been poisoned it puts the buffer in the trash queue.
 * Otherwise, it passes the buffer to a component for further processing.
 */
static void most_read_completion(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;

	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
		trash_mbo(mbo);
		return;
	}

	if (mbo->status == MBO_E_INVAL) {
		nq_hdm_mbo(mbo);
		atomic_inc(&c->mbo_nq_level);
		return;
	}

	if (atomic_sub_and_test(1, &c->mbo_nq_level))
		c->is_starving = 1;

	if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
	    c->pipe0.comp->rx_completion(mbo) == 0)
		return;

	if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
	    c->pipe1.comp->rx_completion(mbo) == 0)
		return;

	most_put_mbo(mbo);
}

/**
 * most_start_channel - prepares a channel for communication
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * This prepares the channel for usage. Cross-checks whether the
 * channel's been properly configured.
 *
 * Returns 0 on success or error code otherwise.
 */
int most_start_channel(struct most_interface *iface, int id,
		       struct most_component *comp)
{
	int num_buffer;
	int ret;
	struct most_channel *c = iface->p->channel[id];

	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->pipe0.refs + c->pipe1.refs > 0)
		goto out; /* already started by another component */

	if (!try_module_get(iface->mod)) {
		dev_err(&c->dev, "Failed to acquire HDM lock\n");
		mutex_unlock(&c->start_mutex);
		return -ENOLCK;
	}

	c->cfg.extra_len = 0;
	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
		dev_err(&c->dev, "Channel configuration failed. Go check settings...\n");
		ret = -EINVAL;
		goto err_put_module;
	}

	init_waitqueue_head(&c->hdm_fifo_wq);

	if (c->cfg.direction == MOST_CH_RX)
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_read_completion);
	else
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_write_completion);
	if (unlikely(!num_buffer)) {
		ret = -ENOMEM;
		goto err_put_module;
	}

	ret = run_enqueue_thread(c, id);
	if (ret)
		goto err_put_module;

	c->is_starving = 0;
	c->pipe0.num_buffers = c->cfg.num_buffers / 2;
	c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
	atomic_set(&c->mbo_ref, num_buffer);

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs++;
	if (comp == c->pipe1.comp)
		c->pipe1.refs++;
	mutex_unlock(&c->start_mutex);
	return 0;

err_put_module:
	module_put(iface->mod);
	mutex_unlock(&c->start_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);

/**
 * most_stop_channel - stops a running channel
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 */
int most_stop_channel(struct most_interface *iface, int id,
		      struct most_component *comp)
{
	struct most_channel *c;

	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
		pr_err("Bad interface or index out of range\n");
		return -EINVAL;
	}
	c = iface->p->channel[id];
	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->pipe0.refs + c->pipe1.refs >= 2)
		goto out;

	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;

	if (iface->mod)
		module_put(iface->mod);

	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		dev_err(&c->dev, "Failed to stop channel %d of interface %s\n", c->channel_id,
			c->iface->description);
		mutex_unlock(&c->start_mutex);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		dev_err(&c->dev, "Interrupted while cleaning up channel %d\n", c->channel_id);
		mutex_unlock(&c->start_mutex);
		return -EINTR;
	}
#else
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs--;
	if (comp == c->pipe1.comp)
		c->pipe1.refs--;
	mutex_unlock(&c->start_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);
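
/*
 * Example (illustrative sketch): components bracket their I/O with a
 * start/stop pair; the per-pipe reference counting above keeps the
 * channel running while at least one component still uses it.
 * "my_comp" is a hypothetical registered component.
 *
 *	ret = most_start_channel(iface, id, &my_comp);
 *	if (ret)
 *		return ret;
 *	...
 *	most_stop_channel(iface, id, &my_comp);
 */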

/**
 * most_register_component - registers a driver component with the core
 * @comp: driver component
 */
int most_register_component(struct most_component *comp)
{
	if (!comp) {
		pr_err("Bad component\n");
		return -EINVAL;
	}
	list_add_tail(&comp->list, &comp_list);
	return 0;
}
EXPORT_SYMBOL_GPL(most_register_component);
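
/*
 * Example (illustrative sketch, assuming the struct most_component
 * layout from <linux/most.h>; only members dereferenced in this file
 * are shown): a component fills in its callbacks and registers once at
 * module init. All "my_*" names are hypothetical.
 *
 *	static struct most_component my_comp = {
 *		.name = "my_comp",
 *		.probe_channel = my_probe_channel,
 *		.disconnect_channel = my_disconnect_channel,
 *		.rx_completion = my_rx_completion,
 *		.tx_completion = my_tx_completion,
 *	};
 *
 *	static int __init my_comp_init(void)
 *	{
 *		return most_register_component(&my_comp);
 *	}
 */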

static int disconnect_channels(struct device *dev, void *data)
{
	struct most_interface *iface;
	struct most_channel *c, *tmp;
	struct most_component *comp = data;

	iface = dev_get_drvdata(dev);
	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
		if (c->pipe0.comp == comp || c->pipe1.comp == comp)
			comp->disconnect_channel(c->iface, c->channel_id);
		if (c->pipe0.comp == comp)
			c->pipe0.comp = NULL;
		if (c->pipe1.comp == comp)
			c->pipe1.comp = NULL;
	}
	return 0;
}

/**
 * most_deregister_component - deregisters a driver component with the core
 * @comp: driver component
 */
int most_deregister_component(struct most_component *comp)
{
	if (!comp) {
		pr_err("Bad component\n");
		return -EINVAL;
	}

	bus_for_each_dev(&mostbus, NULL, comp, disconnect_channels);
	list_del(&comp->list);
	return 0;
}
EXPORT_SYMBOL_GPL(most_deregister_component);

static void release_channel(struct device *dev)
{
	struct most_channel *c = to_channel(dev);

	kfree(c);
}

/**
 * most_register_interface - registers an interface with core
 * @iface: device interface
 *
 * Allocates and initializes a new interface instance and all of its channels.
 * Returns 0 on success or a negative error code otherwise.
 */
int most_register_interface(struct most_interface *iface)
{
	unsigned int i;
	int id;
	struct most_channel *c;

	if (!iface || !iface->enqueue || !iface->configure ||
	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS))
		return -EINVAL;

	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
	if (id < 0) {
		dev_err(iface->dev, "Failed to allocate device ID\n");
		return id;
	}

	iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
	if (!iface->p) {
		ida_simple_remove(&mdev_id, id);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&iface->p->channel_list);
	iface->p->dev_id = id;
	strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
	iface->dev->bus = &mostbus;
	iface->dev->groups = interface_attr_groups;
	dev_set_drvdata(iface->dev, iface);
	if (device_register(iface->dev)) {
		dev_err(iface->dev, "Failed to register interface device\n");
		kfree(iface->p);
		put_device(iface->dev);
		ida_simple_remove(&mdev_id, id);
		return -ENOMEM;
	}

	for (i = 0; i < iface->num_channels; i++) {
		const char *name_suffix = iface->channel_vector[i].name_suffix;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c)
			goto err_free_resources;
		if (!name_suffix)
			snprintf(c->name, STRING_SIZE, "ch%d", i);
		else
			snprintf(c->name, STRING_SIZE, "%s", name_suffix);
		c->dev.init_name = c->name;
		c->dev.parent = iface->dev;
		c->dev.groups = channel_attr_groups;
		c->dev.release = release_channel;
		iface->p->channel[i] = c;
		c->is_starving = 0;
		c->iface = iface;
		c->channel_id = i;
		c->keep_mbo = false;
		c->enqueue_halt = false;
		c->is_poisoned = false;
		c->cfg.direction = 0;
		c->cfg.data_type = 0;
		c->cfg.num_buffers = 0;
		c->cfg.buffer_size = 0;
		c->cfg.subbuffer_size = 0;
		c->cfg.packets_per_xact = 0;
		spin_lock_init(&c->fifo_lock);
		INIT_LIST_HEAD(&c->fifo);
		INIT_LIST_HEAD(&c->trash_fifo);
		INIT_LIST_HEAD(&c->halt_fifo);
		init_completion(&c->cleanup);
		atomic_set(&c->mbo_ref, 0);
		mutex_init(&c->start_mutex);
		mutex_init(&c->nq_mutex);
		list_add_tail(&c->list, &iface->p->channel_list);
		if (device_register(&c->dev)) {
			dev_err(&c->dev, "Failed to register channel device\n");
			goto err_free_most_channel;
		}
	}
	most_interface_register_notify(iface->description);
	return 0;

err_free_most_channel:
	put_device(&c->dev);

err_free_resources:
	while (i > 0) {
		c = iface->p->channel[--i];
		device_unregister(&c->dev);
	}
	kfree(iface->p);
	device_unregister(iface->dev);
	ida_simple_remove(&mdev_id, id);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(most_register_interface);
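
/*
 * Example (illustrative sketch): an HDM describes its hardware in a
 * struct most_interface and hands it to the core. Only members that
 * this file actually dereferences are shown; the "my_*" callbacks,
 * channel table and parent device are hypothetical HDM code.
 *
 *	static struct most_interface my_iface = {
 *		.interface = ITYPE_USB,
 *		.description = "usb_mdev",
 *		.num_channels = 2,
 *		.channel_vector = my_channels,
 *		.configure = my_configure,
 *		.enqueue = my_enqueue,
 *		.poison_channel = my_poison_channel,
 *	};
 *
 *	my_iface.dev = &my_parent_dev;
 *	ret = most_register_interface(&my_iface);
 */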

/**
 * most_deregister_interface - deregisters an interface with core
 * @iface: device interface
 *
 * Before removing an interface instance from the list, all running
 * channels are stopped and poisoned.
 */
void most_deregister_interface(struct most_interface *iface)
{
	int i;
	struct most_channel *c;

	for (i = 0; i < iface->num_channels; i++) {
		c = iface->p->channel[i];
		if (c->pipe0.comp)
			c->pipe0.comp->disconnect_channel(c->iface,
							c->channel_id);
		if (c->pipe1.comp)
			c->pipe1.comp->disconnect_channel(c->iface,
							c->channel_id);
		c->pipe0.comp = NULL;
		c->pipe1.comp = NULL;
		list_del(&c->list);
		device_unregister(&c->dev);
	}

	ida_simple_remove(&mdev_id, iface->p->dev_id);
	kfree(iface->p);
	device_unregister(iface->dev);
}
EXPORT_SYMBOL_GPL(most_deregister_interface);

/**
 * most_stop_enqueue - prevents core from enqueueing MBOs
 * @iface: pointer to interface
 * @id: channel id
 *
 * This is called by an HDM that _cannot_ attend to its duties and
 * is about to be overrun by the core. The core will not enqueue
 * any further packets unless the flagging HDM calls
 * most_resume_enqueue().
 */
void most_stop_enqueue(struct most_interface *iface, int id)
{
	struct most_channel *c = iface->p->channel[id];

	if (!c)
		return;

	mutex_lock(&c->nq_mutex);
	c->enqueue_halt = true;
	mutex_unlock(&c->nq_mutex);
}
EXPORT_SYMBOL_GPL(most_stop_enqueue);

/**
 * most_resume_enqueue - allow core to enqueue MBOs again
 * @iface: pointer to interface
 * @id: channel id
 *
 * This clears the enqueue halt flag and enqueues all MBOs currently
 * sitting in the wait fifo.
 */
void most_resume_enqueue(struct most_interface *iface, int id)
{
	struct most_channel *c = iface->p->channel[id];

	if (!c)
		return;

	mutex_lock(&c->nq_mutex);
	c->enqueue_halt = false;
	mutex_unlock(&c->nq_mutex);

	wake_up_interruptible(&c->hdm_fifo_wq);
}
EXPORT_SYMBOL_GPL(most_resume_enqueue);
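
/*
 * Example (illustrative sketch): an HDM pauses the enqueue machinery
 * around an operation it cannot service, then lets it resume; the
 * wake-up in most_resume_enqueue() restarts the hdm_enqueue_thread.
 * reconfigure_hardware() is a hypothetical HDM-internal step.
 *
 *	most_stop_enqueue(iface, id);
 *	reconfigure_hardware();
 *	most_resume_enqueue(iface, id);
 */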

static int __init most_init(void)
{
	int err;

	INIT_LIST_HEAD(&comp_list);
	ida_init(&mdev_id);

	err = bus_register(&mostbus);
	if (err) {
		pr_err("Failed to register most bus\n");
		return err;
	}
	err = driver_register(&mostbus_driver);
	if (err) {
		pr_err("Failed to register core driver\n");
		goto err_unregister_bus;
	}
	configfs_init();
	return 0;

err_unregister_bus:
	bus_unregister(&mostbus);
	return err;
}

static void __exit most_exit(void)
{
	driver_unregister(&mostbus_driver);
	bus_unregister(&mostbus);
	ida_destroy(&mdev_id);
}

subsys_initcall(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");