cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xen_drm_front.c (20655B)


// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_cfg.h"
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"

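/*
 * Per display buffer bookkeeping: the cookies identify the buffer and
 * its framebuffer in requests to the backend, while shbuf describes the
 * pages shared with the backend (its grant-reference directory is
 * passed in the XENDISPL_OP_DBUF_CREATE request).
 */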
struct xen_drm_front_dbuf {
	struct list_head list;
	u64 dbuf_cookie;
	u64 fb_cookie;

	struct xen_front_pgdir_shbuf shbuf;
};

static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
			     struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
	dbuf->dbuf_cookie = dbuf_cookie;
	list_add(&dbuf->list, &front_info->dbuf_list);
}

static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
					   u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}

static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie) {
			list_del(&buf->list);
			xen_front_pgdir_shbuf_unmap(&buf->shbuf);
			xen_front_pgdir_shbuf_free(&buf->shbuf);
			kfree(buf);
			break;
		}
}

static void dbuf_free_all(struct list_head *dbuf_list)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list) {
		list_del(&buf->list);
		xen_front_pgdir_shbuf_unmap(&buf->shbuf);
		xen_front_pgdir_shbuf_free(&buf->shbuf);
		kfree(buf);
	}
}

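/*
 * Request ring helpers: be_prepare_req() takes the next free slot of the
 * shared ring and assigns it a unique id, be_stream_do_io() kicks the
 * request to the backend, and be_stream_wait_io() waits (with a timeout)
 * for the matching response and returns its status. Callers hold
 * front_info->io_lock while touching the ring and serialize whole
 * request/response transactions with u.req.req_io_lock.
 */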
static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
	struct xendispl_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}

static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
			   struct xendispl_req *req)
{
	reinit_completion(&evtchnl->u.req.completion);
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}

static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}

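/*
 * All synchronous backend requests below follow the same pattern:
 * take req_io_lock, build the request and kick the ring under io_lock,
 * then wait for the backend's response outside of the spinlock.
 */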
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, u32 offset,
			      struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *dbuf;
	struct xendispl_req *req;
	struct xen_front_pgdir_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf_cfg.pages = pages;
	buf_cfg.pgdir = &dbuf->shbuf;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
	if (ret < 0)
		goto fail_shbuf_alloc;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.data_ofs = offset;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}

static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
				      u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For a backend-allocated buffer, release the references now so the
	 * backend can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of communication status with the backend:
	 * if we cannot remove remote resources, remove what we can locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
					fb_cookie);
}

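/*
 * The backend is only told to destroy the buffer if the DRM device is
 * still plugged; otherwise only the local bookkeeping is released.
 */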
void xen_drm_front_gem_object_free(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}

static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two-stage process: first we create a fully
	 * constructed GEM object, which is communicated to the backend, and
	 * only after that we create the GEM handle. This avoids a race:
	 * once a handle is created it is immediately visible to user-space,
	 * which could then try to access an object that has no pages yet.
	 * For details also see drm_gem_handle_create().
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size, 0,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop reference from allocate */
	drm_gem_object_put(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}

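/* Called once the last reference to the DRM device has been dropped. */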
static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}

DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops);

static const struct drm_driver xen_drm_driver = {
	.driver_features           = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.release                   = xen_drm_drv_release,
	.prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.gem_prime_mmap            = drm_gem_prime_mmap,
	.dumb_create               = xen_drm_drv_dumb_create,
	.fops                      = &xen_drm_dev_fops,
	.name                      = "xendrm-du",
	.desc                      = "Xen PV DRM Display Unit",
	.date                      = "20180221",
	.major                     = 1,
	.minor                     = 0,

};

static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail_dev;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto fail_register;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 xen_drm_driver.name, xen_drm_driver.major,
		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
		 xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_register:
	drm_dev_unregister(drm_dev);
fail_modeset:
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
	drm_dev_put(drm_dev);
fail_dev:
	kfree(drm_info);
	front_info->drm_info = NULL;
fail:
	return ret;
}

static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);
	drm_dev_put(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}

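/*
 * XenBus state machine: the backend drives the handshake through
 * displback_changed(). On InitWait the frontend reads its configuration
 * and publishes the event channels, on Connected it creates the DRM
 * device, and on backend closure it tears everything down again.
 */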
static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
	/* Create event channels for all connectors and publish */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}

static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}

static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}

static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after an unexpected backend closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after an unexpected backend closure */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * In this state the backend starts freeing resources, so
		 * let it go into the closed state so we can also remove
		 * ours.
		 */
		break;

	case XenbusStateUnknown:
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}

static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
		return ret;
	}

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

static int xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal the device is disconnected from XenBus, so no
	 * backend state change events arrive via the .otherend_changed
	 * callback. This prevents us from exiting gracefully, e.g.
	 * signaling the backend to free event channels, waiting for its
	 * state to change to XenbusStateClosed and cleaning up at our end.
	 * Normally, when the front driver is removed, the backend will
	 * finally go into the XenbusStateInitWait state.
	 *
	 * Workaround: read the backend's state manually and wait with a
	 * timeout.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
				     --to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
	return 0;
}

static const struct xenbus_device_id xen_driver_ids[] = {
	{ XENDISPL_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_driver_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = displback_changed,
	.not_essential = true,
};

static int __init xen_drv_init(void)
{
	/* At the moment we only support the case where XEN_PAGE_SIZE == PAGE_SIZE */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
			  XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
	return xenbus_register_frontend(&xen_driver);
}

static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);