cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

eadm_sch.c (8686B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for s390 eadm subchannels
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/isc.h>
#include <asm/cio.h>
#include <asm/scsw.h>
#include <asm/eadm.h>

#include "eadm_sch.h"
#include "ioasm.h"
#include "cio.h"
#include "css.h"
#include "orb.h"

MODULE_DESCRIPTION("driver for s390 eadm subchannels");
MODULE_LICENSE("GPL");

#define EADM_TIMEOUT (7 * HZ)
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(eadm_list);

static debug_info_t *eadm_debug;

#define EADM_LOG(imp, txt) do {					\
		debug_text_event(eadm_debug, imp, txt);		\
	} while (0)

static void EADM_LOG_HEX(int level, void *data, int length)
{
	debug_event(eadm_debug, level, data, length);
}

static void orb_init(union orb *orb)
{
	memset(orb, 0, sizeof(union orb));
	orb->eadm.compat1 = 1;
	orb->eadm.compat2 = 1;
	orb->eadm.fmt = 1;
	orb->eadm.x = 1;
}

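/*
 * Start an eadm operation: point the ORB at the AOB, then issue a start
 * subchannel (ssch) instruction and map its condition code to an errno.
 */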
static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
{
	union orb *orb = &get_eadm_private(sch)->orb;
	int cc;

	orb_init(orb);
	orb->eadm.aob = (u32)__pa(aob);
	orb->eadm.intparm = (u32)(addr_t)sch;
	orb->eadm.key = PAGE_DEFAULT_KEY >> 4;

	EADM_LOG(6, "start");
	EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));

	cc = ssch(sch->schid, orb);
	switch (cc) {
	case 0:
		sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
		break;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	case 3:		/* not operational */
		return -ENODEV;
	}
	return 0;
}

static int eadm_subchannel_clear(struct subchannel *sch)
{
	int cc;

	cc = csch(sch->schid);
	if (cc)
		return -ENODEV;

	sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
	return 0;
}

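/*
 * Timer callback, armed for EADM_TIMEOUT when an operation is started:
 * try to terminate the overdue operation with a clear subchannel.
 */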
static void eadm_subchannel_timeout(struct timer_list *t)
{
	struct eadm_private *private = from_timer(private, t, timer);
	struct subchannel *sch = private->sch;

	spin_lock_irq(sch->lock);
	EADM_LOG(1, "timeout");
	EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
	if (eadm_subchannel_clear(sch))
		EADM_LOG(0, "clear failed");
	spin_unlock_irq(sch->lock);
}

static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
{
	struct eadm_private *private = get_eadm_private(sch);

	if (expires == 0)
		del_timer(&private->timer);
	else
		mod_timer(&private->timer, jiffies + expires);
}

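/*
 * Interrupt handler: translate the subchannel status into a blk_status_t,
 * stop the timeout timer, hand the completed AOB to the SCM layer and mark
 * the subchannel idle again (or schedule re-evaluation on unsolicited irqs).
 */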
static void eadm_subchannel_irq(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
	struct irb *irb = this_cpu_ptr(&cio_irb);
	blk_status_t error = BLK_STS_OK;

	EADM_LOG(6, "irq");
	EADM_LOG_HEX(6, irb, sizeof(*irb));

	inc_irq_stat(IRQIO_ADM);

	if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
	    && scsw->eswf == 1 && irb->esw.eadm.erw.r)
		error = BLK_STS_IOERR;

	if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
		error = BLK_STS_TIMEOUT;

	eadm_subchannel_set_timeout(sch, 0);

	if (private->state != EADM_BUSY) {
		EADM_LOG(1, "irq unsol");
		EADM_LOG_HEX(1, irb, sizeof(*irb));
		private->state = EADM_NOT_OPER;
		css_sched_sch_todo(sch, SCH_TODO_EVAL);
		return;
	}
	scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
	private->state = EADM_IDLE;

	if (private->completion)
		complete(private->completion);
}

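/*
 * Pick an idle eadm subchannel, mark it busy and move it to the tail of
 * eadm_list so that requests are spread round-robin over all subchannels.
 */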
static struct subchannel *eadm_get_idle_sch(void)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_for_each_entry(private, &eadm_list, head) {
		sch = private->sch;
		spin_lock(sch->lock);
		if (private->state == EADM_IDLE) {
			private->state = EADM_BUSY;
			list_move_tail(&private->head, &eadm_list);
			spin_unlock(sch->lock);
			spin_unlock_irqrestore(&list_lock, flags);

			return sch;
		}
		spin_unlock(sch->lock);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	return NULL;
}

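/*
 * Exported entry point: start the given AOB on an idle eadm subchannel with
 * the EADM_TIMEOUT timer armed; returns -EBUSY if no subchannel is available.
 */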
int eadm_start_aob(struct aob *aob)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;
	int ret;

	sch = eadm_get_idle_sch();
	if (!sch)
		return -EBUSY;

	spin_lock_irqsave(sch->lock, flags);
	eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
	ret = eadm_subchannel_start(sch, aob);
	if (!ret)
		goto out_unlock;

	/* Handle start subchannel failure. */
	eadm_subchannel_set_timeout(sch, 0);
	private = get_eadm_private(sch);
	private->state = EADM_NOT_OPER;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(eadm_start_aob);

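/*
 * Probe: allocate the per-subchannel private data, enable the subchannel on
 * the eadm interruption subclass and add it to the list of usable subchannels.
 */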
static int eadm_subchannel_probe(struct subchannel *sch)
{
	struct eadm_private *private;
	int ret;

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	INIT_LIST_HEAD(&private->head);
	timer_setup(&private->timer, eadm_subchannel_timeout, 0);

	spin_lock_irq(sch->lock);
	set_eadm_private(sch, private);
	private->state = EADM_IDLE;
	private->sch = sch;
	sch->isc = EADM_SCH_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret) {
		set_eadm_private(sch, NULL);
		spin_unlock_irq(sch->lock);
		kfree(private);
		goto out;
	}
	spin_unlock_irq(sch->lock);

	spin_lock_irq(&list_lock);
	list_add(&private->head, &eadm_list);
	spin_unlock_irq(&list_lock);
out:
	return ret;
}

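/*
 * Quiesce a subchannel: if an operation is in flight, clear it and wait for
 * the interrupt handler to signal completion, then disable the subchannel.
 */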
static void eadm_quiesce(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	DECLARE_COMPLETION_ONSTACK(completion);
	int ret;

	spin_lock_irq(sch->lock);
	if (private->state != EADM_BUSY)
		goto disable;

	if (eadm_subchannel_clear(sch))
		goto disable;

	private->completion = &completion;
	spin_unlock_irq(sch->lock);

	wait_for_completion_io(&completion);

	spin_lock_irq(sch->lock);
	private->completion = NULL;

disable:
	eadm_subchannel_set_timeout(sch, 0);
	do {
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);

	spin_unlock_irq(sch->lock);
}

static void eadm_subchannel_remove(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);

	spin_lock_irq(&list_lock);
	list_del(&private->head);
	spin_unlock_irq(&list_lock);

	eadm_quiesce(sch);

	spin_lock_irq(sch->lock);
	set_eadm_private(sch, NULL);
	spin_unlock_irq(sch->lock);

	kfree(private);
}

static void eadm_subchannel_shutdown(struct subchannel *sch)
{
	eadm_quiesce(sch);
}

/**
 * eadm_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
{
	struct eadm_private *private;
	unsigned long flags;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		css_sched_sch_todo(sch, SCH_TODO_UNREG);
		goto out_unlock;
	}
	private = get_eadm_private(sch);
	if (private->state == EADM_NOT_OPER)
		private->state = EADM_IDLE;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return 0;
}

static struct css_device_id eadm_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);

static struct css_driver eadm_subchannel_driver = {
	.drv = {
		.name = "eadm_subchannel",
		.owner = THIS_MODULE,
	},
	.subchannel_type = eadm_subchannel_ids,
	.irq = eadm_subchannel_irq,
	.probe = eadm_subchannel_probe,
	.remove = eadm_subchannel_remove,
	.shutdown = eadm_subchannel_shutdown,
	.sch_event = eadm_subchannel_sch_event,
};

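/*
 * Module init: bail out if the eadm facility is not available, set up the
 * s390 debug feature, register the interruption subclass and the css driver.
 */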
static int __init eadm_sch_init(void)
{
	int ret;

	if (!css_general_characteristics.eadm)
		return -ENXIO;

	eadm_debug = debug_register("eadm_log", 16, 1, 16);
	if (!eadm_debug)
		return -ENOMEM;

	debug_register_view(eadm_debug, &debug_hex_ascii_view);
	debug_set_level(eadm_debug, 2);

	isc_register(EADM_SCH_ISC);
	ret = css_driver_register(&eadm_subchannel_driver);
	if (ret)
		goto cleanup;

	return ret;

cleanup:
	isc_unregister(EADM_SCH_ISC);
	debug_unregister(eadm_debug);
	return ret;
}

static void __exit eadm_sch_exit(void)
{
	css_driver_unregister(&eadm_subchannel_driver);
	isc_unregister(EADM_SCH_ISC);
	debug_unregister(eadm_debug);
}
module_init(eadm_sch_init);
module_exit(eadm_sch_exit);