cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vfio_ccw_drv.c (13525B)


// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
static struct kmem_cache *vfio_ccw_schib_region;
static struct kmem_cache *vfio_ccw_crw_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;

/*
 * Helpers
 */
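/*
 * Quiesce the subchannel: disable it, and if that returns -EBUSY,
 * repeatedly cancel/halt/clear pending I/O (waiting up to 3s for the
 * interrupt handler to signal completion) before retrying the disable.
 */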
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	spin_lock_irq(sch->lock);
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;

	iretry = 255;
	do {

		ret = cio_cancel_halt_clear(sch, &iretry);

		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		private->completion = NULL;
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);
out_unlock:
	private->state = VFIO_CCW_STATE_NOT_OPER;
	spin_unlock_irq(sch->lock);
	return ret;
}

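/*
 * Workqueue handler for I/O interrupts: update the channel program from
 * the IRB, copy the IRB into the I/O region for userspace, and signal
 * the io_trigger eventfd so userspace can relay the interrupt.
 */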
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;
	struct irb *irb;
	bool is_final;
	bool cp_is_finished = false;

	private = container_of(work, struct vfio_ccw_private, io_work);
	irb = &private->irb;

	is_final = !(scsw_actl(&irb->scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
	if (scsw_is_solicited(&irb->scsw)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
			cp_free(&private->cp);
			cp_is_finished = true;
		}
	}
	mutex_lock(&private->io_mutex);
	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
	mutex_unlock(&private->io_mutex);

	/*
	 * Reset to IDLE only if processing of a channel program
	 * has finished. Do not overwrite a possible processing
	 * state if the final interrupt was for HSCH or CSCH.
	 */
	if (private->mdev && cp_is_finished)
		private->state = VFIO_CCW_STATE_IDLE;

	if (private->io_trigger)
		eventfd_signal(private->io_trigger, 1);
}

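/* Workqueue handler for CRWs: notify userspace if any CRWs are queued. */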
static void vfio_ccw_crw_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;

	private = container_of(work, struct vfio_ccw_private, crw_work);

	if (!list_empty(&private->crw) && private->crw_trigger)
		eventfd_signal(private->crw_trigger, 1);
}

/*
 * Css driver callbacks
 */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	inc_irq_stat(IRQIO_CIO);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

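/*
 * Allocate the private structure plus the regions shared with userspace
 * (I/O, cmd, schib and CRW), unwinding all allocations on failure.
 */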
static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
{
	struct vfio_ccw_private *private;

	private = kzalloc(sizeof(*private), GFP_KERNEL);
	if (!private)
		return ERR_PTR(-ENOMEM);

	private->sch = sch;
	mutex_init(&private->io_mutex);
	private->state = VFIO_CCW_STATE_NOT_OPER;
	INIT_LIST_HEAD(&private->crw);
	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
	atomic_set(&private->avail, 1);

	private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
				       GFP_KERNEL);
	if (!private->cp.guest_cp)
		goto out_free_private;

	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
					       GFP_KERNEL | GFP_DMA);
	if (!private->io_region)
		goto out_free_cp;

	private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
						GFP_KERNEL | GFP_DMA);
	if (!private->cmd_region)
		goto out_free_io;

	private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
						  GFP_KERNEL | GFP_DMA);

	if (!private->schib_region)
		goto out_free_cmd;

	private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
						GFP_KERNEL | GFP_DMA);

	if (!private->crw_region)
		goto out_free_schib;
	return private;

out_free_schib:
	kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
out_free_cmd:
	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
out_free_io:
	kmem_cache_free(vfio_ccw_io_region, private->io_region);
out_free_cp:
	kfree(private->cp.guest_cp);
out_free_private:
	mutex_destroy(&private->io_mutex);
	kfree(private);
	return ERR_PTR(-ENOMEM);
}

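/* Free any CRWs still queued, then release everything allocated above. */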
static void vfio_ccw_free_private(struct vfio_ccw_private *private)
{
	struct vfio_ccw_crw *crw, *temp;

	list_for_each_entry_safe(crw, temp, &private->crw, next) {
		list_del(&crw->next);
		kfree(crw);
	}

	kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
	kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
	kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private->cp.guest_cp);
	mutex_destroy(&private->io_mutex);
	kfree(private);
}

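/*
 * Probe callback: QDIO subchannels are not supported; otherwise allocate
 * the private data, enable the subchannel and register the mdev device.
 */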
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
	struct pmcw *pmcw = &sch->schib.pmcw;
	struct vfio_ccw_private *private;
	int ret = -ENOMEM;

	if (pmcw->qf) {
		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
			 dev_name(&sch->dev));
		return -ENODEV;
	}

	private = vfio_ccw_alloc_private(sch);
	if (IS_ERR(private))
		return PTR_ERR(private);

	dev_set_drvdata(&sch->dev, private);

	spin_lock_irq(sch->lock);
	sch->isc = VFIO_CCW_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	spin_unlock_irq(sch->lock);
	if (ret)
		goto out_free;

	private->state = VFIO_CCW_STATE_STANDBY;

	ret = vfio_ccw_mdev_reg(sch);
	if (ret)
		goto out_disable;

	VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
	return 0;

out_disable:
	cio_disable_subchannel(sch);
out_free:
	dev_set_drvdata(&sch->dev, NULL);
	vfio_ccw_free_private(private);
	return ret;
}

static void vfio_ccw_sch_remove(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	vfio_ccw_sch_quiesce(sch);
	vfio_ccw_mdev_unreg(sch);

	dev_set_drvdata(&sch->dev, NULL);

	vfio_ccw_free_private(private);

	VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
		rc = 0;
		goto out_unlock;
	}

	private = dev_get_drvdata(&sch->dev);
	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
				 VFIO_CCW_STATE_STANDBY;
	}
	rc = 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return rc;
}

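/*
 * Queue a channel report word (CRW) for delivery to userspace and kick
 * the CRW workqueue handler.
 */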
static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
			       unsigned int rsc,
			       unsigned int erc,
			       unsigned int rsid)
{
	struct vfio_ccw_crw *crw;

	/*
	 * If unable to allocate a CRW, just drop the event and
	 * carry on.  The guest will either see a later one or
	 * learn when it issues its own store subchannel.
	 */
	crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
	if (!crw)
		return;

	/*
	 * Build the CRW based on the inputs given to us.
	 */
	crw->crw.rsc = rsc;
	crw->crw.erc = erc;
	crw->crw.rsid = rsid;

	list_add_tail(&crw->next, &private->crw);
	queue_work(vfio_ccw_work_q, &private->crw_work);
}

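/*
 * Channel-path event handler: adjust the subchannel's path masks for
 * vary on/off, and queue a CRW for paths that went away or came back.
 */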
static int vfio_ccw_chp_event(struct subchannel *sch,
			      struct chp_link *link, int event)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	int mask = chp_ssd_get_mask(&sch->ssd_info, link);
	int retry = 255;

	if (!private || !mask)
		return 0;

	trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
	VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
			   mdev_uuid(private->mdev), sch->schid.cssid,
			   sch->schid.ssid, sch->schid.sch_no,
			   mask, event);

	if (cio_update_schib(sch))
		return -ENODEV;

	switch (event) {
	case CHP_VARY_OFF:
		/* Path logically turned off */
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		break;
	case CHP_OFFLINE:
		/* Path is gone */
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
				   link->chpid.id);
		break;
	case CHP_VARY_ON:
		/* Path logically turned on */
		sch->opm |= mask;
		sch->lpm |= mask;
		break;
	case CHP_ONLINE:
		/* Path became available */
		sch->lpm |= mask & sch->opm;
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
				   link->chpid.id);
		break;
	}

	return 0;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
	.chp_event = vfio_ccw_chp_event,
};

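/*
 * Set up the two s390 debug feature areas: sprintf-formatted messages
 * and hex/ASCII trace data. The shared error path relies on
 * debug_unregister() tolerating a NULL id.
 */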
static int __init vfio_ccw_debug_init(void)
{
	vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
					       11 * sizeof(long));
	if (!vfio_ccw_debug_msg_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
	debug_set_level(vfio_ccw_debug_msg_id, 2);
	vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
	if (!vfio_ccw_debug_trace_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(vfio_ccw_debug_trace_id, 2);
	return 0;

out_unregister:
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
	return -1;
}

static void vfio_ccw_debug_exit(void)
{
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
}

static void vfio_ccw_destroy_regions(void)
{
	kmem_cache_destroy(vfio_ccw_crw_region);
	kmem_cache_destroy(vfio_ccw_schib_region);
	kmem_cache_destroy(vfio_ccw_cmd_region);
	kmem_cache_destroy(vfio_ccw_io_region);
}

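/*
 * Module init: create the workqueue and the four usercopy-enabled slab
 * caches backing the userspace regions, then register the mdev driver,
 * the interruption subclass and the css driver, unwinding on failure.
 */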
static int __init vfio_ccw_sch_init(void)
{
	int ret;

	ret = vfio_ccw_debug_init();
	if (ret)
		return ret;

	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
	if (!vfio_ccw_work_q) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
					sizeof(struct ccw_io_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_io_region), NULL);
	if (!vfio_ccw_io_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
					sizeof(struct ccw_cmd_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_cmd_region), NULL);
	if (!vfio_ccw_cmd_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
					sizeof(struct ccw_schib_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_schib_region), NULL);

	if (!vfio_ccw_schib_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
					sizeof(struct ccw_crw_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_crw_region), NULL);

	if (!vfio_ccw_crw_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	ret = mdev_register_driver(&vfio_ccw_mdev_driver);
	if (ret)
		goto out_regions;

	isc_register(VFIO_CCW_ISC);
	ret = css_driver_register(&vfio_ccw_sch_driver);
	if (ret) {
		isc_unregister(VFIO_CCW_ISC);
		goto out_driver;
	}

	return ret;

out_driver:
	mdev_unregister_driver(&vfio_ccw_mdev_driver);
out_regions:
	vfio_ccw_destroy_regions();
	/* The workqueue may not exist yet if its creation failed above. */
	if (vfio_ccw_work_q)
		destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
	return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	mdev_unregister_driver(&vfio_ccw_mdev_driver);
	isc_unregister(VFIO_CCW_ISC);
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");