cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xen_snd_front_evtchnl.c (11684B)


// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual sound device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_cfg.h"
#include "xen_snd_front_evtchnl.h"

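/*
 * Interrupt handler for the request channel: walk the response ring,
 * record the status (and HW parameter data, if any) of the response the
 * channel is waiting on and wake up the waiter.
 */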
static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xen_snd_front_info *front_info = channel->front_info;
	struct xensnd_resp *resp;
	RING_IDX i, rp;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

again:
	rp = channel->u.req.ring.sring->rsp_prod;
	/* Ensure we see queued responses up to rp. */
	rmb();

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on frontend side
	 * are required.
	 */
	for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
		if (resp->id != channel->evt_id)
			continue;
		switch (resp->operation) {
		case XENSND_OP_OPEN:
		case XENSND_OP_CLOSE:
		case XENSND_OP_READ:
		case XENSND_OP_WRITE:
		case XENSND_OP_TRIGGER:
			channel->u.req.resp_status = resp->status;
			complete(&channel->u.req.completion);
			break;
		case XENSND_OP_HW_PARAM_QUERY:
			channel->u.req.resp_status = resp->status;
			channel->u.req.resp.hw_param =
					resp->resp.hw_param;
			complete(&channel->u.req.completion);
			break;

		default:
			dev_err(&front_info->xb_dev->dev,
				"Operation %d is not supported\n",
				resp->operation);
			break;
		}
	}

	channel->u.req.ring.rsp_cons = i;
	if (i != channel->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		channel->u.req.ring.sring->rsp_event = i + 1;
	}

	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}

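/*
 * Interrupt handler for the event channel: consume in-events published
 * by the backend and forward stream position updates to the ALSA layer.
 */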
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xensnd_event_page *page = channel->u.evt.page;
	u32 cons, prod;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

	prod = page->in_prod;
	/* Ensure we see ring contents up to prod. */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on frontend side
	 * are required.
	 */
	for (cons = page->in_cons; cons != prod; cons++) {
		struct xensnd_evt *event;

		event = &XENSND_IN_RING_REF(page, cons);
		if (unlikely(event->id != channel->evt_id++))
			continue;

		switch (event->type) {
		case XENSND_EVT_CUR_POS:
			xen_snd_front_alsa_handle_cur_pos(channel,
							  event->op.cur_pos.position);
			break;
		}
	}

	page->in_cons = cons;
	/* Ensure ring contents. */
	virt_wmb();

out:
	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}

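/*
 * Push the request that was just placed on the ring and notify the
 * backend if it is waiting for new requests.
 */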
void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
{
	int notify;

	channel->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(channel->irq);
}

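/*
 * Tear down a single channel: wake up any pending waiters with -EIO,
 * unbind the IRQ, free the Xen event channel and the shared ring page,
 * then clear the structure.
 */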
static void evtchnl_free(struct xen_snd_front_info *front_info,
			 struct xen_snd_front_evtchnl *channel)
{
	void *page = NULL;

	if (channel->type == EVTCHNL_TYPE_REQ)
		page = channel->u.req.ring.sring;
	else if (channel->type == EVTCHNL_TYPE_EVT)
		page = channel->u.evt.page;

	if (!page)
		return;

	channel->state = EVTCHNL_STATE_DISCONNECTED;
	if (channel->type == EVTCHNL_TYPE_REQ) {
		/* Release all who still wait for a response, if any. */
		channel->u.req.resp_status = -EIO;
		complete_all(&channel->u.req.completion);
	}

	if (channel->irq)
		unbind_from_irqhandler(channel->irq, channel);

	if (channel->port)
		xenbus_free_evtchn(front_info->xb_dev, channel->port);

	/* End access and free the page. */
	xenbus_teardown_ring(&page, 1, &channel->gref);

	memset(channel, 0, sizeof(*channel));
}

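/* Free both channels of every allocated request/event pair. */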
void xen_snd_front_evtchnl_free_all(struct xen_snd_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}

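/*
 * Allocate a single channel: set up the shared ring page, allocate a Xen
 * event channel, bind it to an IRQ and install the interrupt handler
 * matching the channel type.
 */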
static int evtchnl_alloc(struct xen_snd_front_info *front_info, int index,
			 struct xen_snd_front_evtchnl *channel,
			 enum xen_snd_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	void *page;
	irq_handler_t handler;
	char *handler_name = NULL;
	int ret;

	memset(channel, 0, sizeof(*channel));
	channel->type = type;
	channel->index = index;
	channel->front_info = front_info;
	channel->state = EVTCHNL_STATE_DISCONNECTED;
	ret = xenbus_setup_ring(xb_dev, GFP_KERNEL, &page, 1, &channel->gref);
	if (ret)
		goto fail;

	handler_name = kasprintf(GFP_KERNEL, "%s-%s", XENSND_DRIVER_NAME,
				 type == EVTCHNL_TYPE_REQ ?
				 XENSND_FIELD_RING_REF :
				 XENSND_FIELD_EVT_RING_REF);
	if (!handler_name) {
		ret = -ENOMEM;
		goto fail;
	}

	mutex_init(&channel->ring_io_lock);

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_sndif_sring *sring = page;

		init_completion(&channel->u.req.completion);
		mutex_init(&channel->u.req.req_io_lock);
		XEN_FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);

		handler = evtchnl_interrupt_req;
	} else {
		channel->u.evt.page = page;
		handler = evtchnl_interrupt_evt;
	}

	ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irq(channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev,
			"Failed to bind IRQ for domid %d port %d: %d\n",
			front_info->xb_dev->otherend_id, channel->port, ret);
		goto fail;
	}

	channel->irq = ret;

	ret = request_threaded_irq(channel->irq, NULL, handler,
				   IRQF_ONESHOT, handler_name, channel);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Failed to request IRQ %d: %d\n",
			channel->irq, ret);
		goto fail;
	}

	kfree(handler_name);
	return 0;

fail:
	kfree(handler_name);
	dev_err(&xb_dev->dev, "Failed to allocate ring: %d\n", ret);
	return ret;
}

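/*
 * Allocate a request/event channel pair for every configured playback
 * and capture stream.
 */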
int xen_snd_front_evtchnl_create_all(struct xen_snd_front_info *front_info,
				     int num_streams)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct device *dev = &front_info->xb_dev->dev;
	int d, ret = 0;

	front_info->evt_pairs =
			kcalloc(num_streams,
				sizeof(struct xen_snd_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs)
		return -ENOMEM;

	/* Iterate over devices and their streams and create event channels. */
	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}
	}

	front_info->num_evt_pairs = num_streams;
	return 0;

fail:
	xen_snd_front_evtchnl_free_all(front_info);
	return ret;
}

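/*
 * Write a channel's ring reference and event channel port to the given
 * XenStore path within the current transaction.
 */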
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_snd_front_evtchnl *channel,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = channel->front_info->xb_dev;
	int ret;

	/* Write the ring reference. */
	ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret);
		return ret;
	}

	/* Write the event channel. */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing event channel: %d\n", ret);
		return ret;
	}

	return 0;
}

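/*
 * Publish the ring references and event channel ports of all stream
 * channels to XenStore in a single transaction, retrying on -EAGAIN.
 */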
int xen_snd_front_evtchnl_publish_all(struct xen_snd_front_info *front_info)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct xenbus_transaction xbt;
	int ret, d;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}
	}
	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}
	return 0;
fail:
	xenbus_transaction_end(xbt, 1);
fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing XenStore");
	return ret;
}

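/*
 * Update the connection state of both channels of a pair, taking each
 * channel's ring lock.
 */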
void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair *evt_pair,
					      bool is_connected)
{
	enum xen_snd_front_evtchnl_state state;

	if (is_connected)
		state = EVTCHNL_STATE_CONNECTED;
	else
		state = EVTCHNL_STATE_DISCONNECTED;

	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.state = state;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.state = state;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}

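/* Reset the next expected request/event IDs for both channels of a pair. */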
void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
{
	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.evt_next_id = 0;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.evt_next_id = 0;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}