cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xenbus.c (29866B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Xenbus code for netif backend
      4 *
      5 * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
      6 * Copyright (C) 2005 XenSource Ltd
      7 */
      8
      9#include "common.h"
     10#include <linux/vmalloc.h>
     11#include <linux/rtnetlink.h>
     12
     13static int connect_data_rings(struct backend_info *be,
     14			      struct xenvif_queue *queue);
     15static void connect(struct backend_info *be);
     16static int read_xenbus_vif_flags(struct backend_info *be);
     17static int backend_create_xenvif(struct backend_info *be);
     18static void unregister_hotplug_status_watch(struct backend_info *be);
     19static void xen_unregister_watchers(struct xenvif *vif);
     20static void set_backend_state(struct backend_info *be,
     21			      enum xenbus_state state);
     22
     23#ifdef CONFIG_DEBUG_FS
     24struct dentry *xen_netback_dbg_root = NULL;
     25
     26static int xenvif_read_io_ring(struct seq_file *m, void *v)
     27{
     28	struct xenvif_queue *queue = m->private;
     29	struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
     30	struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
     31	struct netdev_queue *dev_queue;
     32
     33	if (tx_ring->sring) {
     34		struct xen_netif_tx_sring *sring = tx_ring->sring;
     35
     36		seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
     37			   tx_ring->nr_ents);
     38		seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
     39			   sring->req_prod,
     40			   sring->req_prod - sring->rsp_prod,
     41			   tx_ring->req_cons,
     42			   tx_ring->req_cons - sring->rsp_prod,
     43			   sring->req_event,
     44			   sring->req_event - sring->rsp_prod);
     45		seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
     46			   sring->rsp_prod,
     47			   tx_ring->rsp_prod_pvt,
     48			   tx_ring->rsp_prod_pvt - sring->rsp_prod,
     49			   sring->rsp_event,
     50			   sring->rsp_event - sring->rsp_prod);
     51		seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
     52			   queue->pending_prod,
     53			   queue->pending_cons,
     54			   nr_pending_reqs(queue));
     55		seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
     56			   queue->dealloc_prod,
     57			   queue->dealloc_cons,
     58			   queue->dealloc_prod - queue->dealloc_cons);
     59	}
     60
     61	if (rx_ring->sring) {
     62		struct xen_netif_rx_sring *sring = rx_ring->sring;
     63
     64		seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
     65		seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
     66			   sring->req_prod,
     67			   sring->req_prod - sring->rsp_prod,
     68			   rx_ring->req_cons,
     69			   rx_ring->req_cons - sring->rsp_prod,
     70			   sring->req_event,
     71			   sring->req_event - sring->rsp_prod);
     72		seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
     73			   sring->rsp_prod,
     74			   rx_ring->rsp_prod_pvt,
     75			   rx_ring->rsp_prod_pvt - sring->rsp_prod,
     76			   sring->rsp_event,
     77			   sring->rsp_event - sring->rsp_prod);
     78	}
     79
     80	seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
     81		   "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
     82		   "remaining: %lu, expires: %lu, now: %lu\n",
     83		   queue->napi.state, queue->napi.weight,
     84		   skb_queue_len(&queue->tx_queue),
     85		   timer_pending(&queue->credit_timeout),
     86		   queue->credit_bytes,
     87		   queue->credit_usec,
     88		   queue->remaining_credit,
     89		   queue->credit_timeout.expires,
     90		   jiffies);
     91
     92	dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);
     93
     94	seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
     95		   queue->rx_queue_len, queue->rx_queue_max,
     96		   skb_queue_len(&queue->rx_queue),
     97		   netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");
     98
     99	return 0;
    100}
    101
    102#define XENVIF_KICK_STR "kick"
    103#define BUFFER_SIZE     32
    104
    105static ssize_t
    106xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
    107		     loff_t *ppos)
    108{
    109	struct xenvif_queue *queue =
    110		((struct seq_file *)filp->private_data)->private;
    111	int len;
    112	char write[BUFFER_SIZE];
    113
    114	/* don't allow partial writes and check the length */
    115	if (*ppos != 0)
    116		return 0;
    117	if (count >= sizeof(write))
    118		return -ENOSPC;
    119
    120	len = simple_write_to_buffer(write,
    121				     sizeof(write) - 1,
    122				     ppos,
    123				     buf,
    124				     count);
    125	if (len < 0)
    126		return len;
    127
    128	write[len] = '\0';
    129
    130	if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
    131		xenvif_interrupt(0, (void *)queue);
    132	else {
    133		pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
    134			queue->id);
    135		count = -EINVAL;
    136	}
    137	return count;
    138}
    139
    140static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
    141{
    142	int ret;
    143	void *queue = NULL;
    144
    145	if (inode->i_private)
    146		queue = inode->i_private;
    147	ret = single_open(filp, xenvif_read_io_ring, queue);
    148	filp->f_mode |= FMODE_PWRITE;
    149	return ret;
    150}
    151
    152static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
    153	.owner = THIS_MODULE,
    154	.open = xenvif_io_ring_open,
    155	.read = seq_read,
    156	.llseek = seq_lseek,
    157	.release = single_release,
    158	.write = xenvif_write_io_ring,
    159};
    160
    161static int xenvif_ctrl_show(struct seq_file *m, void *v)
    162{
    163	struct xenvif *vif = m->private;
    164
    165	xenvif_dump_hash_info(vif, m);
    166
    167	return 0;
    168}
    169DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);
    170
    171static void xenvif_debugfs_addif(struct xenvif *vif)
    172{
    173	int i;
    174
    175	vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
    176						  xen_netback_dbg_root);
    177	for (i = 0; i < vif->num_queues; ++i) {
    178		char filename[sizeof("io_ring_q") + 4];
    179
    180		snprintf(filename, sizeof(filename), "io_ring_q%d", i);
    181		debugfs_create_file(filename, 0600, vif->xenvif_dbg_root,
    182				    &vif->queues[i],
    183				    &xenvif_dbg_io_ring_ops_fops);
    184	}
    185
    186	if (vif->ctrl_irq)
    187		debugfs_create_file("ctrl", 0400, vif->xenvif_dbg_root, vif,
    188				    &xenvif_ctrl_fops);
    189}
    190
    191static void xenvif_debugfs_delif(struct xenvif *vif)
    192{
    193	debugfs_remove_recursive(vif->xenvif_dbg_root);
    194	vif->xenvif_dbg_root = NULL;
    195}
    196#endif /* CONFIG_DEBUG_FS */
    197
    198/*
    199 * Handle the creation of the hotplug script environment.  We add the script
    200 * and vif variables to the environment, for the benefit of the vif-* hotplug
    201 * scripts.
    202 */
    203static int netback_uevent(struct xenbus_device *xdev,
    204			  struct kobj_uevent_env *env)
    205{
    206	struct backend_info *be = dev_get_drvdata(&xdev->dev);
    207
    208	if (!be)
    209		return 0;
    210
    211	if (add_uevent_var(env, "script=%s", be->hotplug_script))
    212		return -ENOMEM;
    213
    214	if (!be->vif)
    215		return 0;
    216
    217	return add_uevent_var(env, "vif=%s", be->vif->dev->name);
    218}
    219
    220
    221static int backend_create_xenvif(struct backend_info *be)
    222{
    223	int err;
    224	long handle;
    225	struct xenbus_device *dev = be->dev;
    226	struct xenvif *vif;
    227
    228	if (be->vif != NULL)
    229		return 0;
    230
    231	err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
    232	if (err != 1) {
    233		xenbus_dev_fatal(dev, err, "reading handle");
    234		return (err < 0) ? err : -EINVAL;
    235	}
    236
    237	vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
    238	if (IS_ERR(vif)) {
    239		err = PTR_ERR(vif);
    240		xenbus_dev_fatal(dev, err, "creating interface");
    241		return err;
    242	}
    243	be->vif = vif;
    244	vif->be = be;
    245
    246	kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
    247	return 0;
    248}
    249
    250static void backend_disconnect(struct backend_info *be)
    251{
    252	struct xenvif *vif = be->vif;
    253
    254	if (vif) {
    255		unsigned int num_queues = vif->num_queues;
    256		unsigned int queue_index;
    257
    258		xen_unregister_watchers(vif);
    259		xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
    260#ifdef CONFIG_DEBUG_FS
    261		xenvif_debugfs_delif(vif);
    262#endif /* CONFIG_DEBUG_FS */
    263		xenvif_disconnect_data(vif);
    264
    265		/* At this point some of the handlers may still be active
    266		 * so we need to have additional synchronization here.
    267		 */
    268		vif->num_queues = 0;
    269		synchronize_net();
    270
    271		for (queue_index = 0; queue_index < num_queues; ++queue_index)
    272			xenvif_deinit_queue(&vif->queues[queue_index]);
    273
    274		vfree(vif->queues);
    275		vif->queues = NULL;
    276
    277		xenvif_disconnect_ctrl(vif);
    278	}
    279}
    280
    281static void backend_connect(struct backend_info *be)
    282{
    283	if (be->vif)
    284		connect(be);
    285}
    286
    287static inline void backend_switch_state(struct backend_info *be,
    288					enum xenbus_state state)
    289{
    290	struct xenbus_device *dev = be->dev;
    291
    292	pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
    293	be->state = state;
    294
    295	/* If we are waiting for a hotplug script then defer the
    296	 * actual xenbus state change.
    297	 */
    298	if (!be->have_hotplug_status_watch)
    299		xenbus_switch_state(dev, state);
    300}
    301
    302/* Handle backend state transitions:
    303 *
    304 * The backend state starts in Initialising and the following transitions are
    305 * allowed.
    306 *
    307 * Initialising -> InitWait -> Connected
    308 *          \
    309 *           \        ^    \         |
    310 *            \       |     \        |
    311 *             \      |      \       |
    312 *              \     |       \      |
    313 *               \    |        \     |
    314 *                \   |         \    |
    315 *                 V  |          V   V
    316 *
    317 *                  Closed  <-> Closing
    318 *
    319 * The state argument specifies the eventual state of the backend and the
    320 * function transitions to that state via the shortest path.
    321 */
    322static void set_backend_state(struct backend_info *be,
    323			      enum xenbus_state state)
    324{
    325	while (be->state != state) {
    326		switch (be->state) {
    327		case XenbusStateInitialising:
    328			switch (state) {
    329			case XenbusStateInitWait:
    330			case XenbusStateConnected:
    331			case XenbusStateClosing:
    332				backend_switch_state(be, XenbusStateInitWait);
    333				break;
    334			case XenbusStateClosed:
    335				backend_switch_state(be, XenbusStateClosed);
    336				break;
    337			default:
    338				BUG();
    339			}
    340			break;
    341		case XenbusStateClosed:
    342			switch (state) {
    343			case XenbusStateInitWait:
    344			case XenbusStateConnected:
    345				backend_switch_state(be, XenbusStateInitWait);
    346				break;
    347			case XenbusStateClosing:
    348				backend_switch_state(be, XenbusStateClosing);
    349				break;
    350			default:
    351				BUG();
    352			}
    353			break;
    354		case XenbusStateInitWait:
    355			switch (state) {
    356			case XenbusStateConnected:
    357				backend_connect(be);
    358				backend_switch_state(be, XenbusStateConnected);
    359				break;
    360			case XenbusStateClosing:
    361			case XenbusStateClosed:
    362				backend_switch_state(be, XenbusStateClosing);
    363				break;
    364			default:
    365				BUG();
    366			}
    367			break;
    368		case XenbusStateConnected:
    369			switch (state) {
    370			case XenbusStateInitWait:
    371			case XenbusStateClosing:
    372			case XenbusStateClosed:
    373				backend_disconnect(be);
    374				backend_switch_state(be, XenbusStateClosing);
    375				break;
    376			default:
    377				BUG();
    378			}
    379			break;
    380		case XenbusStateClosing:
    381			switch (state) {
    382			case XenbusStateInitWait:
    383			case XenbusStateConnected:
    384			case XenbusStateClosed:
    385				backend_switch_state(be, XenbusStateClosed);
    386				break;
    387			default:
    388				BUG();
    389			}
    390			break;
    391		default:
    392			BUG();
    393		}
    394	}
    395}
    396
    397static void read_xenbus_frontend_xdp(struct backend_info *be,
    398				      struct xenbus_device *dev)
    399{
    400	struct xenvif *vif = be->vif;
    401	u16 headroom;
    402	int err;
    403
    404	err = xenbus_scanf(XBT_NIL, dev->otherend,
    405			   "xdp-headroom", "%hu", &headroom);
    406	if (err != 1) {
    407		vif->xdp_headroom = 0;
    408		return;
    409	}
    410	if (headroom > XEN_NETIF_MAX_XDP_HEADROOM)
    411		headroom = XEN_NETIF_MAX_XDP_HEADROOM;
    412	vif->xdp_headroom = headroom;
    413}
    414
    415/*
    416 * Callback received when the frontend's state changes.
    417 */
    418static void frontend_changed(struct xenbus_device *dev,
    419			     enum xenbus_state frontend_state)
    420{
    421	struct backend_info *be = dev_get_drvdata(&dev->dev);
    422
    423	pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));
    424
    425	be->frontend_state = frontend_state;
    426
    427	switch (frontend_state) {
    428	case XenbusStateInitialising:
    429		set_backend_state(be, XenbusStateInitWait);
    430		break;
    431
    432	case XenbusStateInitialised:
    433		break;
    434
    435	case XenbusStateConnected:
    436		set_backend_state(be, XenbusStateConnected);
    437		break;
    438
    439	case XenbusStateReconfiguring:
    440		read_xenbus_frontend_xdp(be, dev);
    441		xenbus_switch_state(dev, XenbusStateReconfigured);
    442		break;
    443
    444	case XenbusStateClosing:
    445		set_backend_state(be, XenbusStateClosing);
    446		break;
    447
    448	case XenbusStateClosed:
    449		set_backend_state(be, XenbusStateClosed);
    450		if (xenbus_dev_is_online(dev))
    451			break;
    452		fallthrough;	/* if not online */
    453	case XenbusStateUnknown:
    454		set_backend_state(be, XenbusStateClosed);
    455		device_unregister(&dev->dev);
    456		break;
    457
    458	default:
    459		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
    460				 frontend_state);
    461		break;
    462	}
    463}
    464
    465
    466static void xen_net_read_rate(struct xenbus_device *dev,
    467			      unsigned long *bytes, unsigned long *usec)
    468{
    469	char *s, *e;
    470	unsigned long b, u;
    471	char *ratestr;
    472
    473	/* Default to unlimited bandwidth. */
    474	*bytes = ~0UL;
    475	*usec = 0;
    476
    477	ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
    478	if (IS_ERR(ratestr))
    479		return;
    480
    481	s = ratestr;
    482	b = simple_strtoul(s, &e, 10);
    483	if ((s == e) || (*e != ','))
    484		goto fail;
    485
    486	s = e + 1;
    487	u = simple_strtoul(s, &e, 10);
    488	if ((s == e) || (*e != '\0'))
    489		goto fail;
    490
    491	*bytes = b;
    492	*usec = u;
    493
    494	kfree(ratestr);
    495	return;
    496
    497 fail:
    498	pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
    499	kfree(ratestr);
    500}
    501
    502static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
    503{
    504	char *s, *e, *macstr;
    505	int i;
    506
    507	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
    508	if (IS_ERR(macstr))
    509		return PTR_ERR(macstr);
    510
    511	for (i = 0; i < ETH_ALEN; i++) {
    512		mac[i] = simple_strtoul(s, &e, 16);
    513		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
    514			kfree(macstr);
    515			return -ENOENT;
    516		}
    517		s = e+1;
    518	}
    519
    520	kfree(macstr);
    521	return 0;
    522}
    523
    524static void xen_net_rate_changed(struct xenbus_watch *watch,
    525				 const char *path, const char *token)
    526{
    527	struct xenvif *vif = container_of(watch, struct xenvif, credit_watch);
    528	struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
    529	unsigned long   credit_bytes;
    530	unsigned long   credit_usec;
    531	unsigned int queue_index;
    532
    533	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
    534	for (queue_index = 0; queue_index < vif->num_queues; queue_index++) {
    535		struct xenvif_queue *queue = &vif->queues[queue_index];
    536
    537		queue->credit_bytes = credit_bytes;
    538		queue->credit_usec = credit_usec;
    539		if (!mod_timer_pending(&queue->credit_timeout, jiffies) &&
    540			queue->remaining_credit > queue->credit_bytes) {
    541			queue->remaining_credit = queue->credit_bytes;
    542		}
    543	}
    544}
    545
    546static int xen_register_credit_watch(struct xenbus_device *dev,
    547				     struct xenvif *vif)
    548{
    549	int err = 0;
    550	char *node;
    551	unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
    552
    553	if (vif->credit_watch.node)
    554		return -EADDRINUSE;
    555
    556	node = kmalloc(maxlen, GFP_KERNEL);
    557	if (!node)
    558		return -ENOMEM;
    559	snprintf(node, maxlen, "%s/rate", dev->nodename);
    560	vif->credit_watch.node = node;
    561	vif->credit_watch.will_handle = NULL;
    562	vif->credit_watch.callback = xen_net_rate_changed;
    563	err = register_xenbus_watch(&vif->credit_watch);
    564	if (err) {
    565		pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
    566		kfree(node);
    567		vif->credit_watch.node = NULL;
    568		vif->credit_watch.will_handle = NULL;
    569		vif->credit_watch.callback = NULL;
    570	}
    571	return err;
    572}
    573
    574static void xen_unregister_credit_watch(struct xenvif *vif)
    575{
    576	if (vif->credit_watch.node) {
    577		unregister_xenbus_watch(&vif->credit_watch);
    578		kfree(vif->credit_watch.node);
    579		vif->credit_watch.node = NULL;
    580	}
    581}
    582
    583static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
    584				   const char *path, const char *token)
    585{
    586	struct xenvif *vif = container_of(watch, struct xenvif,
    587					  mcast_ctrl_watch);
    588	struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
    589
    590	vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
    591					"request-multicast-control", 0);
    592}
    593
    594static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
    595					 struct xenvif *vif)
    596{
    597	int err = 0;
    598	char *node;
    599	unsigned maxlen = strlen(dev->otherend) +
    600		sizeof("/request-multicast-control");
    601
    602	if (vif->mcast_ctrl_watch.node) {
    603		pr_err_ratelimited("Watch is already registered\n");
    604		return -EADDRINUSE;
    605	}
    606
    607	node = kmalloc(maxlen, GFP_KERNEL);
    608	if (!node) {
    609		pr_err("Failed to allocate memory for watch\n");
    610		return -ENOMEM;
    611	}
    612	snprintf(node, maxlen, "%s/request-multicast-control",
    613		 dev->otherend);
    614	vif->mcast_ctrl_watch.node = node;
    615	vif->mcast_ctrl_watch.will_handle = NULL;
    616	vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
    617	err = register_xenbus_watch(&vif->mcast_ctrl_watch);
    618	if (err) {
    619		pr_err("Failed to set watcher %s\n",
    620		       vif->mcast_ctrl_watch.node);
    621		kfree(node);
    622		vif->mcast_ctrl_watch.node = NULL;
    623		vif->mcast_ctrl_watch.will_handle = NULL;
    624		vif->mcast_ctrl_watch.callback = NULL;
    625	}
    626	return err;
    627}
    628
    629static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
    630{
    631	if (vif->mcast_ctrl_watch.node) {
    632		unregister_xenbus_watch(&vif->mcast_ctrl_watch);
    633		kfree(vif->mcast_ctrl_watch.node);
    634		vif->mcast_ctrl_watch.node = NULL;
    635	}
    636}
    637
    638static void xen_register_watchers(struct xenbus_device *dev,
    639				  struct xenvif *vif)
    640{
    641	xen_register_credit_watch(dev, vif);
    642	xen_register_mcast_ctrl_watch(dev, vif);
    643}
    644
    645static void xen_unregister_watchers(struct xenvif *vif)
    646{
    647	xen_unregister_mcast_ctrl_watch(vif);
    648	xen_unregister_credit_watch(vif);
    649}
    650
    651static void unregister_hotplug_status_watch(struct backend_info *be)
    652{
    653	if (be->have_hotplug_status_watch) {
    654		unregister_xenbus_watch(&be->hotplug_status_watch);
    655		kfree(be->hotplug_status_watch.node);
    656	}
    657	be->have_hotplug_status_watch = 0;
    658}
    659
    660static void hotplug_status_changed(struct xenbus_watch *watch,
    661				   const char *path,
    662				   const char *token)
    663{
    664	struct backend_info *be = container_of(watch,
    665					       struct backend_info,
    666					       hotplug_status_watch);
    667	char *str;
    668	unsigned int len;
    669
    670	str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
    671	if (IS_ERR(str))
    672		return;
    673	if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
    674		/* Complete any pending state change */
    675		xenbus_switch_state(be->dev, be->state);
    676
    677		/* Not interested in this watch anymore. */
    678		unregister_hotplug_status_watch(be);
    679	}
    680	kfree(str);
    681}
    682
    683static int connect_ctrl_ring(struct backend_info *be)
    684{
    685	struct xenbus_device *dev = be->dev;
    686	struct xenvif *vif = be->vif;
    687	unsigned int val;
    688	grant_ref_t ring_ref;
    689	unsigned int evtchn;
    690	int err;
    691
    692	err = xenbus_scanf(XBT_NIL, dev->otherend,
    693			   "ctrl-ring-ref", "%u", &val);
    694	if (err < 0)
    695		goto done; /* The frontend does not have a control ring */
    696
    697	ring_ref = val;
    698
    699	err = xenbus_scanf(XBT_NIL, dev->otherend,
    700			   "event-channel-ctrl", "%u", &val);
    701	if (err < 0) {
    702		xenbus_dev_fatal(dev, err,
    703				 "reading %s/event-channel-ctrl",
    704				 dev->otherend);
    705		goto fail;
    706	}
    707
    708	evtchn = val;
    709
    710	err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
    711	if (err) {
    712		xenbus_dev_fatal(dev, err,
    713				 "mapping shared-frame %u port %u",
    714				 ring_ref, evtchn);
    715		goto fail;
    716	}
    717
    718done:
    719	return 0;
    720
    721fail:
    722	return err;
    723}
    724
    725static void connect(struct backend_info *be)
    726{
    727	int err;
    728	struct xenbus_device *dev = be->dev;
    729	unsigned long credit_bytes, credit_usec;
    730	unsigned int queue_index;
    731	unsigned int requested_num_queues;
    732	struct xenvif_queue *queue;
    733
    734	/* Check whether the frontend requested multiple queues
    735	 * and read the number requested.
    736	 */
    737	requested_num_queues = xenbus_read_unsigned(dev->otherend,
    738					"multi-queue-num-queues", 1);
    739	if (requested_num_queues > xenvif_max_queues) {
    740		/* buggy or malicious guest */
    741		xenbus_dev_fatal(dev, -EINVAL,
    742				 "guest requested %u queues, exceeding the maximum of %u.",
    743				 requested_num_queues, xenvif_max_queues);
    744		return;
    745	}
    746
    747	err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
    748	if (err) {
    749		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
    750		return;
    751	}
    752
    753	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
    754	xen_unregister_watchers(be->vif);
    755	xen_register_watchers(dev, be->vif);
    756	read_xenbus_vif_flags(be);
    757
    758	err = connect_ctrl_ring(be);
    759	if (err) {
    760		xenbus_dev_fatal(dev, err, "connecting control ring");
    761		return;
    762	}
    763
    764	/* Use the number of queues requested by the frontend */
    765	be->vif->queues = vzalloc(array_size(requested_num_queues,
    766					     sizeof(struct xenvif_queue)));
    767	if (!be->vif->queues) {
    768		xenbus_dev_fatal(dev, -ENOMEM,
    769				 "allocating queues");
    770		return;
    771	}
    772
    773	be->vif->num_queues = requested_num_queues;
    774	be->vif->stalled_queues = requested_num_queues;
    775
    776	for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
    777		queue = &be->vif->queues[queue_index];
    778		queue->vif = be->vif;
    779		queue->id = queue_index;
    780		snprintf(queue->name, sizeof(queue->name), "%s-q%u",
    781				be->vif->dev->name, queue->id);
    782
    783		err = xenvif_init_queue(queue);
    784		if (err) {
    785			/* xenvif_init_queue() cleans up after itself on
    786			 * failure, but we need to clean up any previously
    787			 * initialised queues. Set num_queues to i so that
    788			 * earlier queues can be destroyed using the regular
    789			 * disconnect logic.
    790			 */
    791			be->vif->num_queues = queue_index;
    792			goto err;
    793		}
    794
    795		queue->credit_bytes = credit_bytes;
    796		queue->remaining_credit = credit_bytes;
    797		queue->credit_usec = credit_usec;
    798
    799		err = connect_data_rings(be, queue);
    800		if (err) {
    801			/* connect_data_rings() cleans up after itself on
    802			 * failure, but we need to clean up after
    803			 * xenvif_init_queue() here, and also clean up any
    804			 * previously initialised queues.
    805			 */
    806			xenvif_deinit_queue(queue);
    807			be->vif->num_queues = queue_index;
    808			goto err;
    809		}
    810	}
    811
    812#ifdef CONFIG_DEBUG_FS
    813	xenvif_debugfs_addif(be->vif);
    814#endif /* CONFIG_DEBUG_FS */
    815
    816	/* Initialisation completed, tell core driver the number of
    817	 * active queues.
    818	 */
    819	rtnl_lock();
    820	netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
    821	netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
    822	rtnl_unlock();
    823
    824	xenvif_carrier_on(be->vif);
    825
    826	unregister_hotplug_status_watch(be);
    827	err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
    828				   hotplug_status_changed,
    829				   "%s/%s", dev->nodename, "hotplug-status");
    830	if (!err)
    831		be->have_hotplug_status_watch = 1;
    832
    833	netif_tx_wake_all_queues(be->vif->dev);
    834
    835	return;
    836
    837err:
    838	if (be->vif->num_queues > 0)
    839		xenvif_disconnect_data(be->vif); /* Clean up existing queues */
    840	for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
    841		xenvif_deinit_queue(&be->vif->queues[queue_index]);
    842	vfree(be->vif->queues);
    843	be->vif->queues = NULL;
    844	be->vif->num_queues = 0;
    845	xenvif_disconnect_ctrl(be->vif);
    846	return;
    847}
    848
    849
    850static int connect_data_rings(struct backend_info *be,
    851			      struct xenvif_queue *queue)
    852{
    853	struct xenbus_device *dev = be->dev;
    854	unsigned int num_queues = queue->vif->num_queues;
    855	unsigned long tx_ring_ref, rx_ring_ref;
    856	unsigned int tx_evtchn, rx_evtchn;
    857	int err;
    858	char *xspath;
    859	size_t xspathsize;
    860	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
    861
    862	/* If the frontend requested 1 queue, or we have fallen back
    863	 * to single queue due to lack of frontend support for multi-
    864	 * queue, expect the remaining XenStore keys in the toplevel
    865	 * directory. Otherwise, expect them in a subdirectory called
    866	 * queue-N.
    867	 */
    868	if (num_queues == 1) {
    869		xspath = kzalloc(strlen(dev->otherend) + 1, GFP_KERNEL);
    870		if (!xspath) {
    871			xenbus_dev_fatal(dev, -ENOMEM,
    872					 "reading ring references");
    873			return -ENOMEM;
    874		}
    875		strcpy(xspath, dev->otherend);
    876	} else {
    877		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
    878		xspath = kzalloc(xspathsize, GFP_KERNEL);
    879		if (!xspath) {
    880			xenbus_dev_fatal(dev, -ENOMEM,
    881					 "reading ring references");
    882			return -ENOMEM;
    883		}
    884		snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
    885			 queue->id);
    886	}
    887
    888	err = xenbus_gather(XBT_NIL, xspath,
    889			    "tx-ring-ref", "%lu", &tx_ring_ref,
    890			    "rx-ring-ref", "%lu", &rx_ring_ref, NULL);
    891	if (err) {
    892		xenbus_dev_fatal(dev, err,
    893				 "reading %s/ring-ref",
    894				 xspath);
    895		goto err;
    896	}
    897
    898	/* Try split event channels first, then single event channel. */
    899	err = xenbus_gather(XBT_NIL, xspath,
    900			    "event-channel-tx", "%u", &tx_evtchn,
    901			    "event-channel-rx", "%u", &rx_evtchn, NULL);
    902	if (err < 0) {
    903		err = xenbus_scanf(XBT_NIL, xspath,
    904				   "event-channel", "%u", &tx_evtchn);
    905		if (err < 0) {
    906			xenbus_dev_fatal(dev, err,
    907					 "reading %s/event-channel(-tx/rx)",
    908					 xspath);
    909			goto err;
    910		}
    911		rx_evtchn = tx_evtchn;
    912	}
    913
    914	/* Map the shared frame, irq etc. */
    915	err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
    916				  tx_evtchn, rx_evtchn);
    917	if (err) {
    918		xenbus_dev_fatal(dev, err,
    919				 "mapping shared-frames %lu/%lu port tx %u rx %u",
    920				 tx_ring_ref, rx_ring_ref,
    921				 tx_evtchn, rx_evtchn);
    922		goto err;
    923	}
    924
    925	err = 0;
    926err: /* Regular return falls through with err == 0 */
    927	kfree(xspath);
    928	return err;
    929}
    930
    931static int read_xenbus_vif_flags(struct backend_info *be)
    932{
    933	struct xenvif *vif = be->vif;
    934	struct xenbus_device *dev = be->dev;
    935	unsigned int rx_copy;
    936	int err;
    937
    938	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
    939			   &rx_copy);
    940	if (err == -ENOENT) {
    941		err = 0;
    942		rx_copy = 0;
    943	}
    944	if (err < 0) {
    945		xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
    946				 dev->otherend);
    947		return err;
    948	}
    949	if (!rx_copy)
    950		return -EOPNOTSUPP;
    951
    952	if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
    953		/* - Reduce drain timeout to poll more frequently for
    954		 *   Rx requests.
    955		 * - Disable Rx stall detection.
    956		 */
    957		be->vif->drain_timeout = msecs_to_jiffies(30);
    958		be->vif->stall_timeout = 0;
    959	}
    960
    961	vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);
    962
    963	vif->gso_mask = 0;
    964
    965	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
    966		vif->gso_mask |= GSO_BIT(TCPV4);
    967
    968	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
    969		vif->gso_mask |= GSO_BIT(TCPV6);
    970
    971	vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
    972					     "feature-no-csum-offload", 0);
    973
    974	vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
    975						"feature-ipv6-csum-offload", 0);
    976
    977	read_xenbus_frontend_xdp(be, dev);
    978
    979	return 0;
    980}
    981
    982static int netback_remove(struct xenbus_device *dev)
    983{
    984	struct backend_info *be = dev_get_drvdata(&dev->dev);
    985
    986	unregister_hotplug_status_watch(be);
    987	if (be->vif) {
    988		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
    989		backend_disconnect(be);
    990		xenvif_free(be->vif);
    991		be->vif = NULL;
    992	}
    993	kfree(be->hotplug_script);
    994	kfree(be);
    995	dev_set_drvdata(&dev->dev, NULL);
    996	return 0;
    997}
    998
    999/*
   1000 * Entry point to this code when a new device is created.  Allocate the basic
   1001 * structures and switch to InitWait.
   1002 */
   1003static int netback_probe(struct xenbus_device *dev,
   1004			 const struct xenbus_device_id *id)
   1005{
   1006	const char *message;
   1007	struct xenbus_transaction xbt;
   1008	int err;
   1009	int sg;
   1010	const char *script;
   1011	struct backend_info *be = kzalloc(sizeof(*be), GFP_KERNEL);
   1012
   1013	if (!be) {
   1014		xenbus_dev_fatal(dev, -ENOMEM,
   1015				 "allocating backend structure");
   1016		return -ENOMEM;
   1017	}
   1018
   1019	be->dev = dev;
   1020	dev_set_drvdata(&dev->dev, be);
   1021
   1022	sg = 1;
   1023
   1024	do {
   1025		err = xenbus_transaction_start(&xbt);
   1026		if (err) {
   1027			xenbus_dev_fatal(dev, err, "starting transaction");
   1028			goto fail;
   1029		}
   1030
   1031		err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
   1032		if (err) {
   1033			message = "writing feature-sg";
   1034			goto abort_transaction;
   1035		}
   1036
   1037		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
   1038				    "%d", sg);
   1039		if (err) {
   1040			message = "writing feature-gso-tcpv4";
   1041			goto abort_transaction;
   1042		}
   1043
   1044		err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
   1045				    "%d", sg);
   1046		if (err) {
   1047			message = "writing feature-gso-tcpv6";
   1048			goto abort_transaction;
   1049		}
   1050
   1051		/* We support partial checksum setup for IPv6 packets */
   1052		err = xenbus_printf(xbt, dev->nodename,
   1053				    "feature-ipv6-csum-offload",
   1054				    "%d", 1);
   1055		if (err) {
   1056			message = "writing feature-ipv6-csum-offload";
   1057			goto abort_transaction;
   1058		}
   1059
   1060		/* We support rx-copy path. */
   1061		err = xenbus_printf(xbt, dev->nodename,
   1062				    "feature-rx-copy", "%d", 1);
   1063		if (err) {
   1064			message = "writing feature-rx-copy";
   1065			goto abort_transaction;
   1066		}
   1067
   1068		/* we can adjust a headroom for netfront XDP processing */
   1069		err = xenbus_printf(xbt, dev->nodename,
   1070				    "feature-xdp-headroom", "%d",
   1071				    provides_xdp_headroom);
   1072		if (err) {
   1073			message = "writing feature-xdp-headroom";
   1074			goto abort_transaction;
   1075		}
   1076
   1077		/* We don't support rx-flip path (except old guests who
   1078		 * don't grok this feature flag).
   1079		 */
   1080		err = xenbus_printf(xbt, dev->nodename,
   1081				    "feature-rx-flip", "%d", 0);
   1082		if (err) {
   1083			message = "writing feature-rx-flip";
   1084			goto abort_transaction;
   1085		}
   1086
   1087		/* We support dynamic multicast-control. */
   1088		err = xenbus_printf(xbt, dev->nodename,
   1089				    "feature-multicast-control", "%d", 1);
   1090		if (err) {
   1091			message = "writing feature-multicast-control";
   1092			goto abort_transaction;
   1093		}
   1094
   1095		err = xenbus_printf(xbt, dev->nodename,
   1096				    "feature-dynamic-multicast-control",
   1097				    "%d", 1);
   1098		if (err) {
   1099			message = "writing feature-dynamic-multicast-control";
   1100			goto abort_transaction;
   1101		}
   1102
   1103		err = xenbus_transaction_end(xbt, 0);
   1104	} while (err == -EAGAIN);
   1105
   1106	if (err) {
   1107		xenbus_dev_fatal(dev, err, "completing transaction");
   1108		goto fail;
   1109	}
   1110
   1111	/* Split event channels support, this is optional so it is not
   1112	 * put inside the above loop.
   1113	 */
   1114	err = xenbus_printf(XBT_NIL, dev->nodename,
   1115			    "feature-split-event-channels",
   1116			    "%u", separate_tx_rx_irq);
   1117	if (err)
   1118		pr_debug("Error writing feature-split-event-channels\n");
   1119
   1120	/* Multi-queue support: This is an optional feature. */
   1121	err = xenbus_printf(XBT_NIL, dev->nodename,
   1122			    "multi-queue-max-queues", "%u", xenvif_max_queues);
   1123	if (err)
   1124		pr_debug("Error writing multi-queue-max-queues\n");
   1125
   1126	err = xenbus_printf(XBT_NIL, dev->nodename,
   1127			    "feature-ctrl-ring",
   1128			    "%u", true);
   1129	if (err)
   1130		pr_debug("Error writing feature-ctrl-ring\n");
   1131
   1132	backend_switch_state(be, XenbusStateInitWait);
   1133
   1134	script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
   1135	if (IS_ERR(script)) {
   1136		err = PTR_ERR(script);
   1137		xenbus_dev_fatal(dev, err, "reading script");
   1138		goto fail;
   1139	}
   1140
   1141	be->hotplug_script = script;
   1142
   1143	/* This kicks hotplug scripts, so do it immediately. */
   1144	err = backend_create_xenvif(be);
   1145	if (err)
   1146		goto fail;
   1147
   1148	return 0;
   1149
   1150abort_transaction:
   1151	xenbus_transaction_end(xbt, 1);
   1152	xenbus_dev_fatal(dev, err, "%s", message);
   1153fail:
   1154	pr_debug("failed\n");
   1155	netback_remove(dev);
   1156	return err;
   1157}
   1158
   1159static const struct xenbus_device_id netback_ids[] = {
   1160	{ "vif" },
   1161	{ "" }
   1162};
   1163
   1164static struct xenbus_driver netback_driver = {
   1165	.ids = netback_ids,
   1166	.probe = netback_probe,
   1167	.remove = netback_remove,
   1168	.uevent = netback_uevent,
   1169	.otherend_changed = frontend_changed,
   1170	.allow_rebind = true,
   1171};
   1172
   1173int xenvif_xenbus_init(void)
   1174{
   1175	return xenbus_register_backend(&netback_driver);
   1176}
   1177
   1178void xenvif_xenbus_fini(void)
   1179{
   1180	return xenbus_unregister_driver(&netback_driver);
   1181}