cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vmlogrdr.c (21709B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 *	character device driver for reading z/VM system service records
      4 *
      5 *
      6 *	Copyright IBM Corp. 2004, 2009
      7 *	character device driver for reading z/VM system service records,
      8 *	Version 1.0
      9 *	Author(s): Xenia Tkatschow <xenia@us.ibm.com>
     10 *		   Stefan Weinhuber <wein@de.ibm.com>
     11 *
     12 */
     13
     14#define KMSG_COMPONENT "vmlogrdr"
     15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
     16
     17#include <linux/module.h>
     18#include <linux/init.h>
     19#include <linux/slab.h>
     20#include <linux/errno.h>
     21#include <linux/types.h>
     22#include <linux/interrupt.h>
     23#include <linux/spinlock.h>
     24#include <linux/atomic.h>
     25#include <linux/uaccess.h>
     26#include <asm/cpcmd.h>
     27#include <asm/debug.h>
     28#include <asm/ebcdic.h>
     29#include <net/iucv/iucv.h>
     30#include <linux/kmod.h>
     31#include <linux/cdev.h>
     32#include <linux/device.h>
     33#include <linux/string.h>
     34
     35MODULE_AUTHOR
     36	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
     37	 "                            Stefan Weinhuber (wein@de.ibm.com)");
     38MODULE_DESCRIPTION ("Character device driver for reading z/VM "
     39		    "system service records.");
     40MODULE_LICENSE("GPL");
     41
     42
     43/*
     44 * The size of the buffer for iucv data transfer is one page,
     45 * but in addition to the data we read from iucv we also
     46 * place an integer and some characters into that buffer,
     47 * so the maximum size for record data is a little less than
     48 * one page.
     49 */
     50#define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))
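       /*
        * Layout of one record as handed to user space: an int holding the
        * record length plus sizeof(FENCE), then the record data received
        * via IUCV, then the FENCE marker "EOR".  See vmlogrdr_receive_data().
        */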
     51
     52/*
     53 * The elements that are concurrently accessed by bottom halves are
     54 * connection_established, iucv_path_severed, local_interrupt_buffer
     55 * and receive_ready. The first three can be protected by
     56 * priv_lock.  receive_ready is atomic, so it can be incremented and
     57 * decremented without holding a lock.
     58 * The variable dev_in_use needs to be protected by the lock, since
     59 * it's a flag used by open to make sure that the device is opened only
     60 * by one user at the same time.
     61 */
     62struct vmlogrdr_priv_t {
     63	char system_service[8];
     64	char internal_name[8];
     65	char recording_name[8];
     66	struct iucv_path *path;
     67	int connection_established;
     68	int iucv_path_severed;
     69	struct iucv_message local_interrupt_buffer;
     70	atomic_t receive_ready;
     71	int minor_num;
     72	char * buffer;
     73	char * current_position;
     74	int remaining;
     75	ulong residual_length;
     76	int buffer_free;
     77	int dev_in_use; /* 1: already opened, 0: not opened*/
     78	spinlock_t priv_lock;
     79	struct device  *device;
     80	struct device  *class_device;
     81	int autorecording;
     82	int autopurge;
     83};
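       /*
        * buffer points to one zeroed page used for record transfer;
        * current_position and remaining track how much of the assembled
        * record is still to be copied to user space, while residual_length
        * holds the amount of IUCV data of the current record that has not
        * been received yet.
        */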
     84
     85
     86/*
     87 * File operation structure for vmlogrdr devices
     88 */
     89static int vmlogrdr_open(struct inode *, struct file *);
     90static int vmlogrdr_release(struct inode *, struct file *);
     91static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
     92			      size_t count, loff_t * ppos);
     93
     94static const struct file_operations vmlogrdr_fops = {
     95	.owner   = THIS_MODULE,
     96	.open    = vmlogrdr_open,
     97	.release = vmlogrdr_release,
     98	.read    = vmlogrdr_read,
     99	.llseek  = no_llseek,
    100};
    101
    102
    103static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
    104static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
    105static void vmlogrdr_iucv_message_pending(struct iucv_path *,
    106					  struct iucv_message *);
    107
    108
    109static struct iucv_handler vmlogrdr_iucv_handler = {
    110	.path_complete	 = vmlogrdr_iucv_path_complete,
    111	.path_severed	 = vmlogrdr_iucv_path_severed,
    112	.message_pending = vmlogrdr_iucv_message_pending,
    113};
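       /*
        * These callbacks run in the IUCV bottom half, so they take priv_lock
        * with plain spin_lock(); process-context users (open, read) take the
        * same lock with spin_lock_bh().
        */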
    114
    115
    116static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
    117static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
    118
    119/*
    120 * pointer to system service private structure
    121 * minor number 0 --> logrec
    122 * minor number 1 --> account
    123 * minor number 2 --> symptom
    124 */
    125
    126static struct vmlogrdr_priv_t sys_ser[] = {
    127	{ .system_service = "*LOGREC ",
    128	  .internal_name  = "logrec",
    129	  .recording_name = "EREP",
    130	  .minor_num      = 0,
    131	  .buffer_free    = 1,
    132	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
    133	  .autorecording  = 1,
    134	  .autopurge      = 1,
    135	},
    136	{ .system_service = "*ACCOUNT",
    137	  .internal_name  = "account",
    138	  .recording_name = "ACCOUNT",
    139	  .minor_num      = 1,
    140	  .buffer_free    = 1,
    141	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
    142	  .autorecording  = 1,
    143	  .autopurge      = 1,
    144	},
    145	{ .system_service = "*SYMPTOM",
    146	  .internal_name  = "symptom",
    147	  .recording_name = "SYMPTOM",
    148	  .minor_num      = 2,
    149	  .buffer_free    = 1,
    150	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
    151	  .autorecording  = 1,
    152	  .autopurge      = 1,
    153	}
    154};
    155
    156#define MAXMINOR  ARRAY_SIZE(sys_ser)
    157
    158static char FENCE[] = {"EOR"};
    159static int vmlogrdr_major = 0;
    160static struct cdev  *vmlogrdr_cdev = NULL;
    161static int recording_class_AB;
    162
    163
    164static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
    165{
    166	struct vmlogrdr_priv_t * logptr = path->private;
    167
    168	spin_lock(&logptr->priv_lock);
    169	logptr->connection_established = 1;
    170	spin_unlock(&logptr->priv_lock);
    171	wake_up(&conn_wait_queue);
    172}
    173
    174
    175static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
    176{
    177	struct vmlogrdr_priv_t * logptr = path->private;
    178	u8 reason = (u8) ipuser[8];
    179
    180	pr_err("vmlogrdr: connection severed with reason %i\n", reason);
    181
    182	iucv_path_sever(path, NULL);
    183	kfree(path);
    184	logptr->path = NULL;
    185
    186	spin_lock(&logptr->priv_lock);
    187	logptr->connection_established = 0;
    188	logptr->iucv_path_severed = 1;
    189	spin_unlock(&logptr->priv_lock);
    190
    191	wake_up(&conn_wait_queue);
    192	/* just in case we're sleeping waiting for a record */
    193	wake_up_interruptible(&read_wait_queue);
    194}
    195
    196
    197static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
    198					  struct iucv_message *msg)
    199{
    200	struct vmlogrdr_priv_t * logptr = path->private;
    201
    202	/*
    203	 * This function is the bottom half so it should be quick.
    204	 * Copy the external interrupt data into our local eib (external
    205	 * interrupt buffer) and increment the usage count.
    206	 */
    207	spin_lock(&logptr->priv_lock);
    208	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
    209	atomic_inc(&logptr->receive_ready);
    210	spin_unlock(&logptr->priv_lock);
    211	wake_up_interruptible(&read_wait_queue);
    212}
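       /*
        * Note that no data is transferred here: the handler only saves the
        * message descriptor and bumps receive_ready.  The actual
        * iucv_message_receive() is done later, from vmlogrdr_receive_data()
        * on behalf of a read() on the character device.
        */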
    213
    214
    215static int vmlogrdr_get_recording_class_AB(void)
    216{
    217	static const char cp_command[] = "QUERY COMMAND RECORDING ";
    218	char cp_response[80];
    219	char *tail;
    220	int len,i;
    221
    222	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
    223	len = strnlen(cp_response,sizeof(cp_response));
    224	// now the parsing
    225	tail=strnchr(cp_response,len,'=');
    226	if (!tail)
    227		return 0;
    228	tail++;
    229	if (!strncmp("ANY",tail,3))
    230		return 1;
    231	if (!strncmp("NONE",tail,4))
    232		return 0;
    233	/*
    234	 * expect comma separated list of classes here, if one of them
    235	 * is A or B return 1 otherwise 0
    236	 */
    237	for (i=tail-cp_response; i<len; i++)
    238		if ( cp_response[i]=='A' || cp_response[i]=='B' )
    239			return 1;
    240	return 0;
    241}
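       /*
        * Illustrative only (the exact wording of the CP response is not
        * spelled out here): a reply ending in "= ANY" yields 1, "= NONE"
        * yields 0, and a class list after the '=' yields 1 iff it contains
        * an 'A' or a 'B'.
        */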
    242
    243
    244static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
    245			      int action, int purge)
    246{
    247
    248	char cp_command[80];
    249	char cp_response[160];
    250	char *onoff, *qid_string;
    251	int rc;
    252
    253	onoff = ((action == 1) ? "ON" : "OFF");
    254	qid_string = ((recording_class_AB == 1) ? " QID * " : "");
    255
    256	/*
    257	 * The recording commands need to be called with option QID
    258	 * for guests that have privilege classes A or B.
    259	 * Purging has to be done as a separate step, because recording
    260	 * can't be switched on as long as records are on the queue.
    261	 * Doing both at the same time doesn't work.
    262	 */
    263	if (purge && (action == 1)) {
    264		memset(cp_command, 0x00, sizeof(cp_command));
    265		memset(cp_response, 0x00, sizeof(cp_response));
    266		snprintf(cp_command, sizeof(cp_command),
    267			 "RECORDING %s PURGE %s",
    268			 logptr->recording_name,
    269			 qid_string);
    270		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
    271	}
    272
    273	memset(cp_command, 0x00, sizeof(cp_command));
    274	memset(cp_response, 0x00, sizeof(cp_response));
    275	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
    276		logptr->recording_name,
    277		onoff,
    278		qid_string);
    279	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
    280	/* The recording command will usually answer with 'Command complete'
    281	 * on success, but when the specific service has never been connected
    282	 * before, there may be an additional informational message
    283	 * 'HCPCRC8072I Recording entry not found' before the
    284	 * 'Command complete'. So we use strstr() rather than strncmp().
    285	 */
    286	if (strstr(cp_response,"Command complete"))
    287		rc = 0;
    288	else
    289		rc = -EIO;
    290	/*
    291	 * If we turn recording off, we have to purge any remaining records
    292	 * afterwards, as a large number of queued records may impact z/VM
    293	 * performance.
    294	 */
    295	if (purge && (action == 0)) {
    296		memset(cp_command, 0x00, sizeof(cp_command));
    297		memset(cp_response, 0x00, sizeof(cp_response));
    298		snprintf(cp_command, sizeof(cp_command),
    299			 "RECORDING %s PURGE %s",
    300			 logptr->recording_name,
    301			 qid_string);
    302		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
    303	}
    304
    305	return rc;
    306}
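       /*
        * Example: switching on recording for the logrec service on a class
        * A/B guest with purge requested issues roughly
        *	RECORDING EREP PURGE  QID *
        *	RECORDING EREP ON  QID *
        * while guests without class A/B privileges get the same commands
        * without the QID option.
        */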
    307
    308
    309static int vmlogrdr_open (struct inode *inode, struct file *filp)
    310{
    311	int dev_num = 0;
    312	struct vmlogrdr_priv_t * logptr = NULL;
    313	int connect_rc = 0;
    314	int ret;
    315
    316	dev_num = iminor(inode);
    317	if (dev_num >= MAXMINOR)
    318		return -ENODEV;
    319	logptr = &sys_ser[dev_num];
    320
    321	/*
    322	 * only allow for blocking reads to be open
    323	 */
    324	if (filp->f_flags & O_NONBLOCK)
    325		return -EOPNOTSUPP;
    326
    327	/* Be sure this device hasn't already been opened */
    328	spin_lock_bh(&logptr->priv_lock);
    329	if (logptr->dev_in_use)	{
    330		spin_unlock_bh(&logptr->priv_lock);
    331		return -EBUSY;
    332	}
    333	logptr->dev_in_use = 1;
    334	logptr->connection_established = 0;
    335	logptr->iucv_path_severed = 0;
    336	atomic_set(&logptr->receive_ready, 0);
    337	logptr->buffer_free = 1;
    338	spin_unlock_bh(&logptr->priv_lock);
    339
    340	/* set the file options */
    341	filp->private_data = logptr;
    342
    343	/* start recording for this service*/
    344	if (logptr->autorecording) {
    345		ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
    346		if (ret)
    347			pr_warn("vmlogrdr: failed to start recording automatically\n");
    348	}
    349
    350	/* create connection to the system service */
    351	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
    352	if (!logptr->path)
    353		goto out_dev;
    354	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
    355				       logptr->system_service, NULL, NULL,
    356				       logptr);
    357	if (connect_rc) {
    358		pr_err("vmlogrdr: iucv connection to %s "
    359		       "failed with rc %i \n",
    360		       logptr->system_service, connect_rc);
    361		goto out_path;
    362	}
    363
    364	/* We've issued the connect and now we must wait for a
    365	 * ConnectionComplete or ConnectionSevered Interrupt
    366	 * before we can continue to process.
    367	 */
    368	wait_event(conn_wait_queue, (logptr->connection_established)
    369		   || (logptr->iucv_path_severed));
    370	if (logptr->iucv_path_severed)
    371		goto out_record;
    372	nonseekable_open(inode, filp);
    373	return 0;
    374
    375out_record:
    376	if (logptr->autorecording)
    377		vmlogrdr_recording(logptr,0,logptr->autopurge);
    378out_path:
    379	kfree(logptr->path);	/* kfree(NULL) is ok. */
    380	logptr->path = NULL;
    381out_dev:
    382	logptr->dev_in_use = 0;
    383	return -EIO;
    384}
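       /*
        * open() therefore serializes access per minor via dev_in_use,
        * optionally switches recording on, connects an IUCV path to the
        * requested *LOGREC/*ACCOUNT/*SYMPTOM service and sleeps until the
        * path is either established or severed; failures while setting up
        * the path are reported as -EIO.
        */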
    385
    386
    387static int vmlogrdr_release (struct inode *inode, struct file *filp)
    388{
    389	int ret;
    390
    391	struct vmlogrdr_priv_t * logptr = filp->private_data;
    392
    393	iucv_path_sever(logptr->path, NULL);
    394	kfree(logptr->path);
    395	logptr->path = NULL;
    396	if (logptr->autorecording) {
    397		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
    398		if (ret)
    399			pr_warn("vmlogrdr: failed to stop recording automatically\n");
    400	}
    401	logptr->dev_in_use = 0;
    402
    403	return 0;
    404}
    405
    406
    407static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
    408{
    409	int rc, *temp;
    410	/* we need to keep track of two data sizes here:
    411	 * The number of bytes we need to receive from iucv and
    412	 * the total number of bytes we actually write into the buffer.
    413	 */
    414	int user_data_count, iucv_data_count;
    415	char * buffer;
    416
    417	if (atomic_read(&priv->receive_ready)) {
    418		spin_lock_bh(&priv->priv_lock);
    419		if (priv->residual_length){
    420			/* receive second half of a record */
    421			iucv_data_count = priv->residual_length;
    422			user_data_count = 0;
    423			buffer = priv->buffer;
    424		} else {
    425			/* receive a new record:
    426			 * We need to return the total length of the record
    427		 * + size of FENCE in the first 4 bytes of the buffer.
    428		 */
    429			iucv_data_count = priv->local_interrupt_buffer.length;
    430			user_data_count = sizeof(int);
    431			temp = (int*)priv->buffer;
    432			*temp= iucv_data_count + sizeof(FENCE);
    433			buffer = priv->buffer + sizeof(int);
    434		}
    435		/*
    436		 * If the record is bigger than our buffer, we receive only
    437		 * a part of it. We can get the rest later.
    438		 */
    439		if (iucv_data_count > NET_BUFFER_SIZE)
    440			iucv_data_count = NET_BUFFER_SIZE;
    441		rc = iucv_message_receive(priv->path,
    442					  &priv->local_interrupt_buffer,
    443					  0, buffer, iucv_data_count,
    444					  &priv->residual_length);
    445		spin_unlock_bh(&priv->priv_lock);
    446		/* An rc of 5 indicates that the record was bigger than
    447		 * the buffer, which is OK for us. A 9 indicates that the
    448		 * record was purged before we could receive it.
    449		 */
    450		if (rc == 5)
    451			rc = 0;
    452		if (rc == 9)
    453			atomic_set(&priv->receive_ready, 0);
    454	} else {
    455		rc = 1;
    456	}
    457	if (!rc) {
    458		priv->buffer_free = 0;
    459		user_data_count += iucv_data_count;
    460		priv->current_position = priv->buffer;
    461		if (priv->residual_length == 0){
    462			/* the whole record has been captured,
    463			 * now add the fence */
    464			atomic_dec(&priv->receive_ready);
    465			buffer = priv->buffer + user_data_count;
    466			memcpy(buffer, FENCE, sizeof(FENCE));
    467			user_data_count += sizeof(FENCE);
    468		}
    469		priv->remaining = user_data_count;
    470	}
    471
    472	return rc;
    473}
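       /*
        * Returns 0 when the page buffer now holds (part of) a record and
        * priv->remaining bytes are ready to be copied to user space;
        * non-zero when nothing was received (no message pending, or an IUCV
        * error other than the tolerated rc 5 "record larger than buffer").
        */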
    474
    475
    476static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
    477			     size_t count, loff_t * ppos)
    478{
    479	int rc;
    480	struct vmlogrdr_priv_t * priv = filp->private_data;
    481
    482	while (priv->buffer_free) {
    483		rc = vmlogrdr_receive_data(priv);
    484		if (rc) {
    485			rc = wait_event_interruptible(read_wait_queue,
    486					atomic_read(&priv->receive_ready));
    487			if (rc)
    488				return rc;
    489		}
    490	}
    491	/* copy only up to end of record */
    492	if (count > priv->remaining)
    493		count = priv->remaining;
    494
    495	if (copy_to_user(data, priv->current_position, count))
    496		return -EFAULT;
    497
    498	*ppos += count;
    499	priv->current_position += count;
    500	priv->remaining -= count;
    501
    502	/* if all data has been transferred, set buffer free */
    503	if (priv->remaining == 0)
    504		priv->buffer_free = 1;
    505
    506	return count;
    507}
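       /*
        * From user space each record is thus seen as a 4-byte length,
        * followed by the record data, followed by "EOR".  Minimal sketch of
        * a reader, assuming a device node /dev/account created for minor 1
        * (the actual node name and path depend on udev rules):
        *
        *	fd = open("/dev/account", O_RDONLY);	// O_NONBLOCK is rejected
        *	n = read(fd, buf, count);		// blocks until a record arrives
        */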
    508
    509static ssize_t vmlogrdr_autopurge_store(struct device * dev,
    510					struct device_attribute *attr,
    511					const char * buf, size_t count)
    512{
    513	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
    514	ssize_t ret = count;
    515
    516	switch (buf[0]) {
    517	case '0':
    518		priv->autopurge=0;
    519		break;
    520	case '1':
    521		priv->autopurge=1;
    522		break;
    523	default:
    524		ret = -EINVAL;
    525	}
    526	return ret;
    527}
    528
    529
    530static ssize_t vmlogrdr_autopurge_show(struct device *dev,
    531				       struct device_attribute *attr,
    532				       char *buf)
    533{
    534	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
    535	return sprintf(buf, "%u\n", priv->autopurge);
    536}
    537
    538
    539static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
    540		   vmlogrdr_autopurge_store);
    541
    542
    543static ssize_t vmlogrdr_purge_store(struct device * dev,
    544				    struct device_attribute *attr,
    545				    const char * buf, size_t count)
    546{
    547
    548	char cp_command[80];
    549	char cp_response[80];
    550	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
    551
    552	if (buf[0] != '1')
    553		return -EINVAL;
    554
    555	memset(cp_command, 0x00, sizeof(cp_command));
    556	memset(cp_response, 0x00, sizeof(cp_response));
    557
    558	/*
    559	 * The recording command needs to be called with option QID
    560	 * for guests that have privilege classes A or B.
    561	 * Other guests will not recognize the command and we have to
    562	 * issue the same command without the QID parameter.
    563	 */
    564
    565	if (recording_class_AB)
    566		snprintf(cp_command, sizeof(cp_command),
    567			 "RECORDING %s PURGE QID * ",
    568			 priv->recording_name);
    569	else
    570		snprintf(cp_command, sizeof(cp_command),
    571			 "RECORDING %s PURGE ",
    572			 priv->recording_name);
    573
    574	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
    575
    576	return count;
    577}
    578
    579
    580static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
    581
    582
    583static ssize_t vmlogrdr_autorecording_store(struct device *dev,
    584					    struct device_attribute *attr,
    585					    const char *buf, size_t count)
    586{
    587	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
    588	ssize_t ret = count;
    589
    590	switch (buf[0]) {
    591	case '0':
    592		priv->autorecording=0;
    593		break;
    594	case '1':
    595		priv->autorecording=1;
    596		break;
    597	default:
    598		ret = -EINVAL;
    599	}
    600	return ret;
    601}
    602
    603
    604static ssize_t vmlogrdr_autorecording_show(struct device *dev,
    605					   struct device_attribute *attr,
    606					   char *buf)
    607{
    608	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
    609	return sprintf(buf, "%u\n", priv->autorecording);
    610}
    611
    612
    613static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
    614		   vmlogrdr_autorecording_store);
    615
    616
    617static ssize_t vmlogrdr_recording_store(struct device * dev,
    618					struct device_attribute *attr,
    619					const char * buf, size_t count)
    620{
    621	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
    622	ssize_t ret;
    623
    624	switch (buf[0]) {
    625	case '0':
    626		ret = vmlogrdr_recording(priv,0,0);
    627		break;
    628	case '1':
    629		ret = vmlogrdr_recording(priv,1,0);
    630		break;
    631	default:
    632		ret = -EINVAL;
    633	}
    634	if (ret)
    635		return ret;
    636	else
    637		return count;
    638
    639}
    640
    641
    642static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
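       /*
        * Per-device sysfs interface: autopurge and autorecording (0/1)
        * control the automatic actions in open()/release(), purge ('1' only)
        * drops queued records immediately, and recording ('0'/'1') switches
        * the CP RECORDING service off/on.  Illustrative path, assuming the
        * device shows up on the iucv bus:
        *
        *	echo 1 > /sys/bus/iucv/devices/account/purge
        */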
    643
    644
    645static ssize_t recording_status_show(struct device_driver *driver, char *buf)
    646{
    647	static const char cp_command[] = "QUERY RECORDING ";
    648	int len;
    649
    650	cpcmd(cp_command, buf, 4096, NULL);
    651	len = strlen(buf);
    652	return len;
    653}
    654static DRIVER_ATTR_RO(recording_status);
    655static struct attribute *vmlogrdr_drv_attrs[] = {
    656	&driver_attr_recording_status.attr,
    657	NULL,
    658};
    659static struct attribute_group vmlogrdr_drv_attr_group = {
    660	.attrs = vmlogrdr_drv_attrs,
    661};
    662static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
    663	&vmlogrdr_drv_attr_group,
    664	NULL,
    665};
    666
    667static struct attribute *vmlogrdr_attrs[] = {
    668	&dev_attr_autopurge.attr,
    669	&dev_attr_purge.attr,
    670	&dev_attr_autorecording.attr,
    671	&dev_attr_recording.attr,
    672	NULL,
    673};
    674static struct attribute_group vmlogrdr_attr_group = {
    675	.attrs = vmlogrdr_attrs,
    676};
    677static const struct attribute_group *vmlogrdr_attr_groups[] = {
    678	&vmlogrdr_attr_group,
    679	NULL,
    680};
    681
    682static struct class *vmlogrdr_class;
    683static struct device_driver vmlogrdr_driver = {
    684	.name = "vmlogrdr",
    685	.bus  = &iucv_bus,
    686	.groups = vmlogrdr_drv_attr_groups,
    687};
    688
    689static int vmlogrdr_register_driver(void)
    690{
    691	int ret;
    692
    693	/* Register with iucv driver */
    694	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
    695	if (ret)
    696		goto out;
    697
    698	ret = driver_register(&vmlogrdr_driver);
    699	if (ret)
    700		goto out_iucv;
    701
    702	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
    703	if (IS_ERR(vmlogrdr_class)) {
    704		ret = PTR_ERR(vmlogrdr_class);
    705		vmlogrdr_class = NULL;
    706		goto out_driver;
    707	}
    708	return 0;
    709
    710out_driver:
    711	driver_unregister(&vmlogrdr_driver);
    712out_iucv:
    713	iucv_unregister(&vmlogrdr_iucv_handler, 1);
    714out:
    715	return ret;
    716}
    717
    718
    719static void vmlogrdr_unregister_driver(void)
    720{
    721	class_destroy(vmlogrdr_class);
    722	vmlogrdr_class = NULL;
    723	driver_unregister(&vmlogrdr_driver);
    724	iucv_unregister(&vmlogrdr_iucv_handler, 1);
    725}
    726
    727
    728static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
    729{
    730	struct device *dev;
    731	int ret;
    732
    733	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
    734	if (dev) {
    735		dev_set_name(dev, "%s", priv->internal_name);
    736		dev->bus = &iucv_bus;
    737		dev->parent = iucv_root;
    738		dev->driver = &vmlogrdr_driver;
    739		dev->groups = vmlogrdr_attr_groups;
    740		dev_set_drvdata(dev, priv);
    741		/*
    742		 * The release function could be called after the
    743		 * module has been unloaded. Its _only_ task is to
    744		 * free the struct. Therefore, we specify kfree()
    745		 * directly here. (Probably a little bit obfuscating
    746		 * but legitimate ...).
    747		 */
    748		dev->release = (void (*)(struct device *))kfree;
    749	} else
    750		return -ENOMEM;
    751	ret = device_register(dev);
    752	if (ret) {
    753		put_device(dev);
    754		return ret;
    755	}
    756
    757	priv->class_device = device_create(vmlogrdr_class, dev,
    758					   MKDEV(vmlogrdr_major,
    759						 priv->minor_num),
    760					   priv, "%s", dev_name(dev));
    761	if (IS_ERR(priv->class_device)) {
    762		ret = PTR_ERR(priv->class_device);
    763		priv->class_device=NULL;
    764		device_unregister(dev);
    765		return ret;
    766	}
    767	priv->device = dev;
    768	return 0;
    769}
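       /*
        * Each minor ends up with two devices: a bus device on the iucv bus
        * carrying the sysfs attributes above, and a class device under the
        * "vmlogrdr" class from which udev can create the character device
        * node for MKDEV(vmlogrdr_major, minor_num).
        */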
    770
    771
    772static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
    773{
    774	device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
    775	if (priv->device != NULL) {
    776		device_unregister(priv->device);
    777		priv->device=NULL;
    778	}
    779	return 0;
    780}
    781
    782
    783static int vmlogrdr_register_cdev(dev_t dev)
    784{
    785	int rc = 0;
    786	vmlogrdr_cdev = cdev_alloc();
    787	if (!vmlogrdr_cdev) {
    788		return -ENOMEM;
    789	}
    790	vmlogrdr_cdev->owner = THIS_MODULE;
    791	vmlogrdr_cdev->ops = &vmlogrdr_fops;
    792	rc = cdev_add(vmlogrdr_cdev, dev, MAXMINOR);
    793	if (!rc)
    794		return 0;
    795
    796	// cleanup: cdev is not fully registered, no cdev_del here!
    797	kobject_put(&vmlogrdr_cdev->kobj);
    798	vmlogrdr_cdev=NULL;
    799	return rc;
    800}
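       /*
        * A single cdev covers all MAXMINOR minors; vmlogrdr_fops dispatches
        * to the right sys_ser[] entry via iminor() in vmlogrdr_open().
        */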
    801
    802
    803static void vmlogrdr_cleanup(void)
    804{
    805	int i;
    806
    807	if (vmlogrdr_cdev) {
    808		cdev_del(vmlogrdr_cdev);
    809		vmlogrdr_cdev=NULL;
    810	}
    811	for (i=0; i < MAXMINOR; ++i ) {
    812		vmlogrdr_unregister_device(&sys_ser[i]);
    813		free_page((unsigned long)sys_ser[i].buffer);
    814	}
    815	vmlogrdr_unregister_driver();
    816	if (vmlogrdr_major) {
    817		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
    818		vmlogrdr_major=0;
    819	}
    820}
    821
    822
    823static int __init vmlogrdr_init(void)
    824{
    825	int rc;
    826	int i;
    827	dev_t dev;
    828
    829	if (! MACHINE_IS_VM) {
    830		pr_err("not running under VM, driver not loaded.\n");
    831		return -ENODEV;
    832	}
    833
    834	recording_class_AB = vmlogrdr_get_recording_class_AB();
    835
    836	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
    837	if (rc)
    838		return rc;
    839	vmlogrdr_major = MAJOR(dev);
    840
    841	rc=vmlogrdr_register_driver();
    842	if (rc)
    843		goto cleanup;
    844
    845	for (i=0; i < MAXMINOR; ++i ) {
    846		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
    847		if (!sys_ser[i].buffer) {
    848			rc = -ENOMEM;
    849			break;
    850		}
    851		sys_ser[i].current_position = sys_ser[i].buffer;
    852		rc=vmlogrdr_register_device(&sys_ser[i]);
    853		if (rc)
    854			break;
    855	}
    856	if (rc)
    857		goto cleanup;
    858
    859	rc = vmlogrdr_register_cdev(dev);
    860	if (rc)
    861		goto cleanup;
    862	return 0;
    863
    864cleanup:
    865	vmlogrdr_cleanup();
    866	return rc;
    867}
    868
    869
    870static void __exit vmlogrdr_exit(void)
    871{
    872	vmlogrdr_cleanup();
    873	return;
    874}
    875
    876
    877module_init(vmlogrdr_init);
    878module_exit(vmlogrdr_exit);