cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vme.c (53387B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * VME Bridge Framework
      4 *
      5 * Author: Martyn Welch <martyn.welch@ge.com>
      6 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
      7 *
      8 * Based on work by Tom Armistead and Ajit Prem
      9 * Copyright 2004 Motorola Inc.
     10 */
     11
     12#include <linux/init.h>
     13#include <linux/export.h>
     14#include <linux/mm.h>
     15#include <linux/types.h>
     16#include <linux/kernel.h>
     17#include <linux/errno.h>
     18#include <linux/pci.h>
     19#include <linux/poll.h>
     20#include <linux/highmem.h>
     21#include <linux/interrupt.h>
     22#include <linux/pagemap.h>
     23#include <linux/device.h>
     24#include <linux/dma-mapping.h>
     25#include <linux/syscalls.h>
     26#include <linux/mutex.h>
     27#include <linux/spinlock.h>
     28#include <linux/slab.h>
     29#include <linux/vme.h>
     30
     31#include "vme_bridge.h"
     32
     33/* Bitmask and list of registered buses, both protected by a common mutex */
     34static unsigned int vme_bus_numbers;
     35static LIST_HEAD(vme_bus_list);
     36static DEFINE_MUTEX(vme_buses_lock);
     37
     38static int __init vme_init(void);
     39
     40static struct vme_dev *dev_to_vme_dev(struct device *dev)
     41{
     42	return container_of(dev, struct vme_dev, dev);
     43}
     44
     45/*
     46 * Find the bridge that the resource is associated with.
     47 */
     48static struct vme_bridge *find_bridge(struct vme_resource *resource)
     49{
     50	/* Get list to search */
     51	switch (resource->type) {
     52	case VME_MASTER:
     53		return list_entry(resource->entry, struct vme_master_resource,
     54			list)->parent;
     55	case VME_SLAVE:
     56		return list_entry(resource->entry, struct vme_slave_resource,
     57			list)->parent;
     58	case VME_DMA:
     59		return list_entry(resource->entry, struct vme_dma_resource,
     60			list)->parent;
     61	case VME_LM:
     62		return list_entry(resource->entry, struct vme_lm_resource,
     63			list)->parent;
     64	default:
     65		printk(KERN_ERR "Unknown resource type\n");
     66		return NULL;
     67	}
     68}
     69
     70/**
     71 * vme_alloc_consistent - Allocate contiguous memory.
     72 * @resource: Pointer to VME resource.
     73 * @size: Size of allocation required.
     74 * @dma: Pointer to variable to store physical address of allocation.
     75 *
     76 * Allocate a contiguous block of memory for use by the driver. This is used to
     77 * create the buffers for the slave windows.
     78 *
     79 * Return: Virtual address of allocation on success, NULL on failure.
     80 */
     81void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
     82	dma_addr_t *dma)
     83{
     84	struct vme_bridge *bridge;
     85
     86	if (!resource) {
     87		printk(KERN_ERR "No resource\n");
     88		return NULL;
     89	}
     90
     91	bridge = find_bridge(resource);
     92	if (!bridge) {
     93		printk(KERN_ERR "Can't find bridge\n");
     94		return NULL;
     95	}
     96
     97	if (!bridge->parent) {
     98		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
     99		return NULL;
    100	}
    101
    102	if (!bridge->alloc_consistent) {
    103		printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
    104		       bridge->name);
    105		return NULL;
    106	}
    107
    108	return bridge->alloc_consistent(bridge->parent, size, dma);
    109}
    110EXPORT_SYMBOL(vme_alloc_consistent);
    111
    112/**
    113 * vme_free_consistent - Free previously allocated memory.
    114 * @resource: Pointer to VME resource.
    115 * @size: Size of allocation to free.
    116 * @vaddr: Virtual address of allocation.
    117 * @dma: Physical address of allocation.
    118 *
    119 * Free previously allocated block of contiguous memory.
    120 */
    121void vme_free_consistent(struct vme_resource *resource, size_t size,
    122	void *vaddr, dma_addr_t dma)
    123{
    124	struct vme_bridge *bridge;
    125
    126	if (!resource) {
    127		printk(KERN_ERR "No resource\n");
    128		return;
    129	}
    130
    131	bridge = find_bridge(resource);
    132	if (!bridge) {
    133		printk(KERN_ERR "Can't find bridge\n");
    134		return;
    135	}
    136
    137	if (!bridge->parent) {
    138		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
    139		return;
    140	}
    141
    142	if (!bridge->free_consistent) {
    143		printk(KERN_ERR "free_consistent not supported by bridge %s\n",
    144		       bridge->name);
    145		return;
    146	}
    147
    148	bridge->free_consistent(bridge->parent, size, vaddr, dma);
    149}
    150EXPORT_SYMBOL(vme_free_consistent);
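
/*
 * Usage sketch (illustration only, compiled out): vme_alloc_consistent() and
 * vme_free_consistent() are typically paired around the lifetime of a slave
 * window buffer. The resource is assumed to come from vme_slave_request();
 * the "example_" names are hypothetical.
 */
#if 0
static int example_alloc_slave_buffer(struct vme_resource *slave_res,
				      size_t len, void **vaddr,
				      dma_addr_t *dma)
{
	/* Allocate a physically contiguous buffer backing the slave window */
	*vaddr = vme_alloc_consistent(slave_res, len, dma);
	if (!*vaddr)
		return -ENOMEM;

	return 0;
}

static void example_free_slave_buffer(struct vme_resource *slave_res,
				      size_t len, void *vaddr, dma_addr_t dma)
{
	vme_free_consistent(slave_res, len, vaddr, dma);
}
#endif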
    151
    152/**
    153 * vme_get_size - Helper function returning size of a VME window
    154 * @resource: Pointer to VME slave or master resource.
    155 *
    156 * Determine the size of the VME window provided. This is a helper
    157 * function, wrapping the call to vme_master_get or vme_slave_get
    158 * depending on the type of window resource handed to it.
    159 *
    160 * Return: Size of the window on success, zero on failure.
    161 */
    162size_t vme_get_size(struct vme_resource *resource)
    163{
    164	int enabled, retval;
    165	unsigned long long base, size;
    166	dma_addr_t buf_base;
    167	u32 aspace, cycle, dwidth;
    168
    169	switch (resource->type) {
    170	case VME_MASTER:
    171		retval = vme_master_get(resource, &enabled, &base, &size,
    172			&aspace, &cycle, &dwidth);
    173		if (retval)
    174			return 0;
    175
    176		return size;
    177	case VME_SLAVE:
    178		retval = vme_slave_get(resource, &enabled, &base, &size,
    179			&buf_base, &aspace, &cycle);
    180		if (retval)
    181			return 0;
    182
    183		return size;
    184	case VME_DMA:
    185		return 0;
    186	default:
    187		printk(KERN_ERR "Unknown resource type\n");
    188		return 0;
    189	}
    190}
    191EXPORT_SYMBOL(vme_get_size);
    192
    193int vme_check_window(u32 aspace, unsigned long long vme_base,
    194		     unsigned long long size)
    195{
    196	int retval = 0;
    197
    198	if (vme_base + size < size)
    199		return -EINVAL;
    200
    201	switch (aspace) {
    202	case VME_A16:
    203		if (vme_base + size > VME_A16_MAX)
    204			retval = -EFAULT;
    205		break;
    206	case VME_A24:
    207		if (vme_base + size > VME_A24_MAX)
    208			retval = -EFAULT;
    209		break;
    210	case VME_A32:
    211		if (vme_base + size > VME_A32_MAX)
    212			retval = -EFAULT;
    213		break;
    214	case VME_A64:
    215		/* The VME_A64_MAX limit is actually U64_MAX + 1 */
    216		break;
    217	case VME_CRCSR:
    218		if (vme_base + size > VME_CRCSR_MAX)
    219			retval = -EFAULT;
    220		break;
    221	case VME_USER1:
    222	case VME_USER2:
    223	case VME_USER3:
    224	case VME_USER4:
    225		/* User Defined */
    226		break;
    227	default:
    228		printk(KERN_ERR "Invalid address space\n");
    229		retval = -EINVAL;
    230		break;
    231	}
    232
    233	return retval;
    234}
    235EXPORT_SYMBOL(vme_check_window);
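
/*
 * Usage sketch (illustration only, compiled out): vme_check_window() rejects
 * windows that wrap past zero (-EINVAL) or overflow the selected address
 * space (-EFAULT). The A24 example below is an assumption; the address space
 * constants come from <linux/vme.h>.
 */
#if 0
static int example_validate_a24_window(unsigned long long base,
				       unsigned long long size)
{
	/* Returns -EFAULT if base + size exceeds the 16MB A24 space */
	return vme_check_window(VME_A24, base, size);
}
#endif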
    236
    237static u32 vme_get_aspace(int am)
    238{
    239	switch (am) {
    240	case 0x29:
    241	case 0x2D:
    242		return VME_A16;
    243	case 0x38:
    244	case 0x39:
    245	case 0x3A:
    246	case 0x3B:
    247	case 0x3C:
    248	case 0x3D:
    249	case 0x3E:
    250	case 0x3F:
    251		return VME_A24;
    252	case 0x8:
    253	case 0x9:
    254	case 0xA:
    255	case 0xB:
    256	case 0xC:
    257	case 0xD:
    258	case 0xE:
    259	case 0xF:
    260		return VME_A32;
    261	case 0x0:
    262	case 0x1:
    263	case 0x3:
    264		return VME_A64;
    265	}
    266
    267	return 0;
    268}
    269
    270/**
    271 * vme_slave_request - Request a VME slave window resource.
    272 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
    273 * @address: Required VME address space.
    274 * @cycle: Required VME data transfer cycle type.
    275 *
    276 * Request use of a VME window resource capable of being set for the requested
    277 * address space and data transfer cycle.
    278 *
    279 * Return: Pointer to VME resource on success, NULL on failure.
    280 */
    281struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
    282	u32 cycle)
    283{
    284	struct vme_bridge *bridge;
    285	struct list_head *slave_pos = NULL;
    286	struct vme_slave_resource *allocated_image = NULL;
    287	struct vme_slave_resource *slave_image = NULL;
    288	struct vme_resource *resource = NULL;
    289
    290	bridge = vdev->bridge;
    291	if (!bridge) {
    292		printk(KERN_ERR "Can't find VME bus\n");
    293		goto err_bus;
    294	}
    295
    296	/* Loop through slave resources */
    297	list_for_each(slave_pos, &bridge->slave_resources) {
    298		slave_image = list_entry(slave_pos,
    299			struct vme_slave_resource, list);
    300
    301		if (!slave_image) {
    302			printk(KERN_ERR "Registered NULL Slave resource\n");
    303			continue;
    304		}
    305
    306		/* Find an unlocked and compatible image */
    307		mutex_lock(&slave_image->mtx);
    308		if (((slave_image->address_attr & address) == address) &&
    309			((slave_image->cycle_attr & cycle) == cycle) &&
    310			(slave_image->locked == 0)) {
    311
    312			slave_image->locked = 1;
    313			mutex_unlock(&slave_image->mtx);
    314			allocated_image = slave_image;
    315			break;
    316		}
    317		mutex_unlock(&slave_image->mtx);
    318	}
    319
    320	/* No free image */
    321	if (!allocated_image)
    322		goto err_image;
    323
    324	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
    325	if (!resource)
    326		goto err_alloc;
    327
    328	resource->type = VME_SLAVE;
    329	resource->entry = &allocated_image->list;
    330
    331	return resource;
    332
    333err_alloc:
    334	/* Unlock image */
    335	mutex_lock(&slave_image->mtx);
    336	slave_image->locked = 0;
    337	mutex_unlock(&slave_image->mtx);
    338err_image:
    339err_bus:
    340	return NULL;
    341}
    342EXPORT_SYMBOL(vme_slave_request);
    343
    344/**
    345 * vme_slave_set - Set VME slave window configuration.
    346 * @resource: Pointer to VME slave resource.
    347 * @enabled: State to which the window should be configured.
    348 * @vme_base: Base address for the window.
    349 * @size: Size of the VME window.
    350 * @buf_base: Base address of buffer used to provide VME slave window storage.
    351 * @aspace: VME address space for the VME window.
    352 * @cycle: VME data transfer cycle type for the VME window.
    353 *
    354 * Set configuration for provided VME slave window.
    355 *
    356 * Return: Zero on success, -EINVAL if operation is not supported on this
    357 *         device, if an invalid resource has been provided or invalid
    358 *         attributes are provided. Hardware specific errors may also be
    359 *         returned.
    360 */
    361int vme_slave_set(struct vme_resource *resource, int enabled,
    362	unsigned long long vme_base, unsigned long long size,
    363	dma_addr_t buf_base, u32 aspace, u32 cycle)
    364{
    365	struct vme_bridge *bridge = find_bridge(resource);
    366	struct vme_slave_resource *image;
    367	int retval;
    368
    369	if (resource->type != VME_SLAVE) {
    370		printk(KERN_ERR "Not a slave resource\n");
    371		return -EINVAL;
    372	}
    373
    374	image = list_entry(resource->entry, struct vme_slave_resource, list);
    375
    376	if (!bridge->slave_set) {
    377		printk(KERN_ERR "vme_slave_set not supported\n");
    378		return -EINVAL;
    379	}
    380
    381	if (!(((image->address_attr & aspace) == aspace) &&
    382		((image->cycle_attr & cycle) == cycle))) {
    383		printk(KERN_ERR "Invalid attributes\n");
    384		return -EINVAL;
    385	}
    386
    387	retval = vme_check_window(aspace, vme_base, size);
    388	if (retval)
    389		return retval;
    390
    391	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
    392		aspace, cycle);
    393}
    394EXPORT_SYMBOL(vme_slave_set);
    395
    396/**
    397 * vme_slave_get - Retrieve VME slave window configuration.
    398 * @resource: Pointer to VME slave resource.
    399 * @enabled: Pointer to variable for storing state.
    400 * @vme_base: Pointer to variable for storing window base address.
    401 * @size: Pointer to variable for storing window size.
    402 * @buf_base: Pointer to variable for storing slave buffer base address.
    403 * @aspace: Pointer to variable for storing VME address space.
    404 * @cycle: Pointer to variable for storing VME data transfer cycle type.
    405 *
    406 * Return configuration for provided VME slave window.
    407 *
    408 * Return: Zero on success, -EINVAL if operation is not supported on this
    409 *         device or if an invalid resource has been provided.
    410 */
    411int vme_slave_get(struct vme_resource *resource, int *enabled,
    412	unsigned long long *vme_base, unsigned long long *size,
    413	dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
    414{
    415	struct vme_bridge *bridge = find_bridge(resource);
    416	struct vme_slave_resource *image;
    417
    418	if (resource->type != VME_SLAVE) {
    419		printk(KERN_ERR "Not a slave resource\n");
    420		return -EINVAL;
    421	}
    422
    423	image = list_entry(resource->entry, struct vme_slave_resource, list);
    424
    425	if (!bridge->slave_get) {
    426		printk(KERN_ERR "vme_slave_get not supported\n");
    427		return -EINVAL;
    428	}
    429
    430	return bridge->slave_get(image, enabled, vme_base, size, buf_base,
    431		aspace, cycle);
    432}
    433EXPORT_SYMBOL(vme_slave_get);
    434
    435/**
    436 * vme_slave_free - Free VME slave window
    437 * @resource: Pointer to VME slave resource.
    438 *
    439 * Free the provided slave resource so that it may be reallocated.
    440 */
    441void vme_slave_free(struct vme_resource *resource)
    442{
    443	struct vme_slave_resource *slave_image;
    444
    445	if (resource->type != VME_SLAVE) {
    446		printk(KERN_ERR "Not a slave resource\n");
    447		return;
    448	}
    449
    450	slave_image = list_entry(resource->entry, struct vme_slave_resource,
    451		list);
    452	if (!slave_image) {
    453		printk(KERN_ERR "Can't find slave resource\n");
    454		return;
    455	}
    456
    457	/* Unlock image */
    458	mutex_lock(&slave_image->mtx);
    459	if (slave_image->locked == 0)
    460		printk(KERN_ERR "Image is already free\n");
    461
    462	slave_image->locked = 0;
    463	mutex_unlock(&slave_image->mtx);
    464
    465	/* Free up resource memory */
    466	kfree(resource);
    467}
    468EXPORT_SYMBOL(vme_slave_free);
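
/*
 * Usage sketch (illustration only, compiled out): a typical slave window
 * setup requests a window, allocates its backing buffer, then programs it
 * with vme_slave_set(). The VME_A24/VME_SCT/VME_USER/VME_DATA constants come
 * from <linux/vme.h>; the base address, size and "example_" name are
 * assumptions.
 */
#if 0
static int example_setup_slave_window(struct vme_dev *vdev)
{
	struct vme_resource *res;
	dma_addr_t buf_base;
	void *buf;
	int retval;

	res = vme_slave_request(vdev, VME_A24, VME_SCT | VME_USER | VME_DATA);
	if (!res)
		return -ENODEV;

	buf = vme_alloc_consistent(res, 0x10000, &buf_base);
	if (!buf) {
		vme_slave_free(res);
		return -ENOMEM;
	}

	/* Respond to A24 accesses at 0x400000..0x40ffff from the buffer */
	retval = vme_slave_set(res, 1, 0x400000, 0x10000, buf_base,
			       VME_A24, VME_SCT | VME_USER | VME_DATA);
	if (retval) {
		vme_free_consistent(res, 0x10000, buf, buf_base);
		vme_slave_free(res);
	}

	return retval;
}
#endif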
    469
    470/**
    471 * vme_master_request - Request a VME master window resource.
    472 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
    473 * @address: Required VME address space.
    474 * @cycle: Required VME data transfer cycle type.
    475 * @dwidth: Required VME data transfer width.
    476 *
    477 * Request use of a VME window resource capable of being set for the requested
    478 * address space, data transfer cycle and width.
    479 *
    480 * Return: Pointer to VME resource on success, NULL on failure.
    481 */
    482struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
    483	u32 cycle, u32 dwidth)
    484{
    485	struct vme_bridge *bridge;
    486	struct list_head *master_pos = NULL;
    487	struct vme_master_resource *allocated_image = NULL;
    488	struct vme_master_resource *master_image = NULL;
    489	struct vme_resource *resource = NULL;
    490
    491	bridge = vdev->bridge;
    492	if (!bridge) {
    493		printk(KERN_ERR "Can't find VME bus\n");
    494		goto err_bus;
    495	}
    496
    497	/* Loop through master resources */
    498	list_for_each(master_pos, &bridge->master_resources) {
    499		master_image = list_entry(master_pos,
    500			struct vme_master_resource, list);
    501
    502		if (!master_image) {
    503			printk(KERN_WARNING "Registered NULL master resource\n");
    504			continue;
    505		}
    506
    507		/* Find an unlocked and compatible image */
    508		spin_lock(&master_image->lock);
    509		if (((master_image->address_attr & address) == address) &&
    510			((master_image->cycle_attr & cycle) == cycle) &&
    511			((master_image->width_attr & dwidth) == dwidth) &&
    512			(master_image->locked == 0)) {
    513
    514			master_image->locked = 1;
    515			spin_unlock(&master_image->lock);
    516			allocated_image = master_image;
    517			break;
    518		}
    519		spin_unlock(&master_image->lock);
    520	}
    521
    522	/* Check to see if we found a resource */
    523	if (!allocated_image) {
    524		printk(KERN_ERR "Can't find a suitable resource\n");
    525		goto err_image;
    526	}
    527
    528	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
    529	if (!resource)
    530		goto err_alloc;
    531
    532	resource->type = VME_MASTER;
    533	resource->entry = &allocated_image->list;
    534
    535	return resource;
    536
    537err_alloc:
    538	/* Unlock image */
    539	spin_lock(&master_image->lock);
    540	master_image->locked = 0;
    541	spin_unlock(&master_image->lock);
    542err_image:
    543err_bus:
    544	return NULL;
    545}
    546EXPORT_SYMBOL(vme_master_request);
    547
    548/**
    549 * vme_master_set - Set VME master window configuration.
    550 * @resource: Pointer to VME master resource.
    551 * @enabled: State to which the window should be configured.
    552 * @vme_base: Base address for the window.
    553 * @size: Size of the VME window.
    554 * @aspace: VME address space for the VME window.
    555 * @cycle: VME data transfer cycle type for the VME window.
    556 * @dwidth: VME data transfer width for the VME window.
    557 *
    558 * Set configuration for provided VME master window.
    559 *
    560 * Return: Zero on success, -EINVAL if operation is not supported on this
    561 *         device, if an invalid resource has been provided or invalid
    562 *         attributes are provided. Hardware specific errors may also be
    563 *         returned.
    564 */
    565int vme_master_set(struct vme_resource *resource, int enabled,
    566	unsigned long long vme_base, unsigned long long size, u32 aspace,
    567	u32 cycle, u32 dwidth)
    568{
    569	struct vme_bridge *bridge = find_bridge(resource);
    570	struct vme_master_resource *image;
    571	int retval;
    572
    573	if (resource->type != VME_MASTER) {
    574		printk(KERN_ERR "Not a master resource\n");
    575		return -EINVAL;
    576	}
    577
    578	image = list_entry(resource->entry, struct vme_master_resource, list);
    579
    580	if (!bridge->master_set) {
    581		printk(KERN_WARNING "vme_master_set not supported\n");
    582		return -EINVAL;
    583	}
    584
    585	if (!(((image->address_attr & aspace) == aspace) &&
    586		((image->cycle_attr & cycle) == cycle) &&
    587		((image->width_attr & dwidth) == dwidth))) {
    588		printk(KERN_WARNING "Invalid attributes\n");
    589		return -EINVAL;
    590	}
    591
    592	retval = vme_check_window(aspace, vme_base, size);
    593	if (retval)
    594		return retval;
    595
    596	return bridge->master_set(image, enabled, vme_base, size, aspace,
    597		cycle, dwidth);
    598}
    599EXPORT_SYMBOL(vme_master_set);
    600
    601/**
    602 * vme_master_get - Retrieve VME master window configuration.
    603 * @resource: Pointer to VME master resource.
    604 * @enabled: Pointer to variable for storing state.
    605 * @vme_base: Pointer to variable for storing window base address.
    606 * @size: Pointer to variable for storing window size.
    607 * @aspace: Pointer to variable for storing VME address space.
    608 * @cycle: Pointer to variable for storing VME data transfer cycle type.
    609 * @dwidth: Pointer to variable for storing VME data transfer width.
    610 *
    611 * Return configuration for provided VME master window.
    612 *
    613 * Return: Zero on success, -EINVAL if operation is not supported on this
    614 *         device or if an invalid resource has been provided.
    615 */
    616int vme_master_get(struct vme_resource *resource, int *enabled,
    617	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
    618	u32 *cycle, u32 *dwidth)
    619{
    620	struct vme_bridge *bridge = find_bridge(resource);
    621	struct vme_master_resource *image;
    622
    623	if (resource->type != VME_MASTER) {
    624		printk(KERN_ERR "Not a master resource\n");
    625		return -EINVAL;
    626	}
    627
    628	image = list_entry(resource->entry, struct vme_master_resource, list);
    629
    630	if (!bridge->master_get) {
    631		printk(KERN_WARNING "%s not supported\n", __func__);
    632		return -EINVAL;
    633	}
    634
    635	return bridge->master_get(image, enabled, vme_base, size, aspace,
    636		cycle, dwidth);
    637}
    638EXPORT_SYMBOL(vme_master_get);
    639
    640/**
    641 * vme_master_read - Read data from VME space into a buffer.
    642 * @resource: Pointer to VME master resource.
    643 * @buf: Pointer to buffer where data should be transferred.
    644 * @count: Number of bytes to transfer.
    645 * @offset: Offset into VME master window at which to start transfer.
    646 *
    647 * Perform read of count bytes of data from location on VME bus which maps into
    648 * the VME master window at offset to buf.
    649 *
    650 * Return: Number of bytes read, -EINVAL if resource is not a VME master
    651 *         resource or read operation is not supported. -EFAULT returned if
    652 *         invalid offset is provided. Hardware specific errors may also be
    653 *         returned.
    654 */
    655ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
    656	loff_t offset)
    657{
    658	struct vme_bridge *bridge = find_bridge(resource);
    659	struct vme_master_resource *image;
    660	size_t length;
    661
    662	if (!bridge->master_read) {
    663		printk(KERN_WARNING "Reading from resource not supported\n");
    664		return -EINVAL;
    665	}
    666
    667	if (resource->type != VME_MASTER) {
    668		printk(KERN_ERR "Not a master resource\n");
    669		return -EINVAL;
    670	}
    671
    672	image = list_entry(resource->entry, struct vme_master_resource, list);
    673
    674	length = vme_get_size(resource);
    675
    676	if (offset > length) {
    677		printk(KERN_WARNING "Invalid Offset\n");
    678		return -EFAULT;
    679	}
    680
    681	if ((offset + count) > length)
    682		count = length - offset;
    683
    684	return bridge->master_read(image, buf, count, offset);
    685
    686}
    687EXPORT_SYMBOL(vme_master_read);
    688
    689/**
    690 * vme_master_write - Write data out to VME space from a buffer.
    691 * @resource: Pointer to VME master resource.
    692 * @buf: Pointer to buffer holding data to transfer.
    693 * @count: Number of bytes to transfer.
    694 * @offset: Offset into VME master window at which to start transfer.
    695 *
    696 * Perform write of count bytes of data from buf to location on VME bus which
    697 * maps into the VME master window at offset.
    698 *
    699 * Return: Number of bytes written, -EINVAL if resource is not a VME master
    700 *         resource or write operation is not supported. -EFAULT returned if
    701 *         invalid offset is provided. Hardware specific errors may also be
    702 *         returned.
    703 */
    704ssize_t vme_master_write(struct vme_resource *resource, void *buf,
    705	size_t count, loff_t offset)
    706{
    707	struct vme_bridge *bridge = find_bridge(resource);
    708	struct vme_master_resource *image;
    709	size_t length;
    710
    711	if (!bridge->master_write) {
    712		printk(KERN_WARNING "Writing to resource not supported\n");
    713		return -EINVAL;
    714	}
    715
    716	if (resource->type != VME_MASTER) {
    717		printk(KERN_ERR "Not a master resource\n");
    718		return -EINVAL;
    719	}
    720
    721	image = list_entry(resource->entry, struct vme_master_resource, list);
    722
    723	length = vme_get_size(resource);
    724
    725	if (offset > length) {
    726		printk(KERN_WARNING "Invalid Offset\n");
    727		return -EFAULT;
    728	}
    729
    730	if ((offset + count) > length)
    731		count = length - offset;
    732
    733	return bridge->master_write(image, buf, count, offset);
    734}
    735EXPORT_SYMBOL(vme_master_write);
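
/*
 * Usage sketch (illustration only, compiled out): reading a register through
 * a master window means requesting a window, configuring it with
 * vme_master_set() and then calling vme_master_read() at an offset inside
 * the window. Cycle/width constants come from <linux/vme.h>; the VME base
 * address, window size and register offset are assumptions.
 */
#if 0
static int example_read_remote_reg(struct vme_dev *vdev, u32 *value)
{
	struct vme_resource *res;
	ssize_t count;
	int retval;

	res = vme_master_request(vdev, VME_A32, VME_SCT | VME_USER | VME_DATA,
				 VME_D32);
	if (!res)
		return -ENODEV;

	/* Map a 64KB window onto the remote board at A32 0x80000000 */
	retval = vme_master_set(res, 1, 0x80000000, 0x10000, VME_A32,
				VME_SCT | VME_USER | VME_DATA, VME_D32);
	if (retval)
		goto out_free;

	count = vme_master_read(res, value, sizeof(*value), 0x100);
	retval = (count == sizeof(*value)) ? 0 : -EIO;

out_free:
	vme_master_free(res);
	return retval;
}
#endif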
    736
    737/**
    738 * vme_master_rmw - Perform read-modify-write cycle.
    739 * @resource: Pointer to VME master resource.
    740 * @mask: Bits to be compared and swapped in operation.
    741 * @compare: Bits to be compared with data read from offset.
    742 * @swap: Bits to be swapped in data read from offset.
    743 * @offset: Offset into VME master window at which to perform operation.
    744 *
    745 * Perform read-modify-write cycle on provided location:
    746 * - Location on VME bus is read.
    747 * - Bits selected by mask are compared with compare.
    748 * - Where a selected bit matches that in compare and is selected in swap,
    749 * the bit is swapped.
    750 * - Result written back to location on VME bus.
    751 *
    752 * Return: Bytes written on success, -EINVAL if resource is not a VME master
    753 *         resource or RMW operation is not supported. Hardware specific
    754 *         errors may also be returned.
    755 */
    756unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
    757	unsigned int compare, unsigned int swap, loff_t offset)
    758{
    759	struct vme_bridge *bridge = find_bridge(resource);
    760	struct vme_master_resource *image;
    761
    762	if (!bridge->master_rmw) {
    763		printk(KERN_WARNING "RMW operations not supported\n");
    764		return -EINVAL;
    765	}
    766
    767	if (resource->type != VME_MASTER) {
    768		printk(KERN_ERR "Not a master resource\n");
    769		return -EINVAL;
    770	}
    771
    772	image = list_entry(resource->entry, struct vme_master_resource, list);
    773
    774	return bridge->master_rmw(image, mask, compare, swap, offset);
    775}
    776EXPORT_SYMBOL(vme_master_rmw);
    777
    778/**
    779 * vme_master_mmap - Mmap region of VME master window.
    780 * @resource: Pointer to VME master resource.
    781 * @vma: Pointer to definition of user mapping.
    782 *
    783 * Memory map a region of the VME master window into user space.
    784 *
    785 * Return: Zero on success, -EINVAL if resource is not a VME master
    786 *         resource or -EFAULT if map exceeds window size. Other generic mmap
    787 *         errors may also be returned.
    788 */
    789int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
    790{
    791	struct vme_master_resource *image;
    792	phys_addr_t phys_addr;
    793	unsigned long vma_size;
    794
    795	if (resource->type != VME_MASTER) {
    796		pr_err("Not a master resource\n");
    797		return -EINVAL;
    798	}
    799
    800	image = list_entry(resource->entry, struct vme_master_resource, list);
    801	phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
    802	vma_size = vma->vm_end - vma->vm_start;
    803
    804	if (phys_addr + vma_size > image->bus_resource.end + 1) {
    805		pr_err("Map size cannot exceed the window size\n");
    806		return -EFAULT;
    807	}
    808
    809	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    810
    811	return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
    812}
    813EXPORT_SYMBOL(vme_master_mmap);
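
/*
 * Usage sketch (illustration only, compiled out): vme_master_mmap() is meant
 * to be called from a character device's mmap file operation (the staging
 * vme_user driver uses it this way). Keeping the master resource in
 * file->private_data is an assumption of this sketch, as is the inclusion of
 * <linux/fs.h> in the driver using it.
 */
#if 0
static int example_fops_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vme_resource *res = file->private_data;

	return vme_master_mmap(res, vma);
}
#endif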
    814
    815/**
    816 * vme_master_free - Free VME master window
    817 * @resource: Pointer to VME master resource.
    818 *
    819 * Free the provided master resource so that it may be reallocated.
    820 */
    821void vme_master_free(struct vme_resource *resource)
    822{
    823	struct vme_master_resource *master_image;
    824
    825	if (resource->type != VME_MASTER) {
    826		printk(KERN_ERR "Not a master resource\n");
    827		return;
    828	}
    829
    830	master_image = list_entry(resource->entry, struct vme_master_resource,
    831		list);
    832	if (!master_image) {
    833		printk(KERN_ERR "Can't find master resource\n");
    834		return;
    835	}
    836
    837	/* Unlock image */
    838	spin_lock(&master_image->lock);
    839	if (master_image->locked == 0)
    840		printk(KERN_ERR "Image is already free\n");
    841
    842	master_image->locked = 0;
    843	spin_unlock(&master_image->lock);
    844
    845	/* Free up resource memory */
    846	kfree(resource);
    847}
    848EXPORT_SYMBOL(vme_master_free);
    849
    850/**
    851 * vme_dma_request - Request a DMA controller.
    852 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
    853 * @route: Required src/destination combination.
    854 *
    855 * Request a VME DMA controller capable of performing transfers between the
    856 * requested source/destination combination.
    857 *
    858 * Return: Pointer to VME DMA resource on success, NULL on failure.
    859 */
    860struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
    861{
    862	struct vme_bridge *bridge;
    863	struct list_head *dma_pos = NULL;
    864	struct vme_dma_resource *allocated_ctrlr = NULL;
    865	struct vme_dma_resource *dma_ctrlr = NULL;
    866	struct vme_resource *resource = NULL;
    867
    868	/* XXX Not checking resource attributes */
    869	printk(KERN_ERR "No VME resource Attribute tests done\n");
    870
    871	bridge = vdev->bridge;
    872	if (!bridge) {
    873		printk(KERN_ERR "Can't find VME bus\n");
    874		goto err_bus;
    875	}
    876
    877	/* Loop through DMA resources */
    878	list_for_each(dma_pos, &bridge->dma_resources) {
    879		dma_ctrlr = list_entry(dma_pos,
    880			struct vme_dma_resource, list);
    881		if (!dma_ctrlr) {
    882			printk(KERN_ERR "Registered NULL DMA resource\n");
    883			continue;
    884		}
    885
    886		/* Find an unlocked and compatible controller */
    887		mutex_lock(&dma_ctrlr->mtx);
    888		if (((dma_ctrlr->route_attr & route) == route) &&
    889			(dma_ctrlr->locked == 0)) {
    890
    891			dma_ctrlr->locked = 1;
    892			mutex_unlock(&dma_ctrlr->mtx);
    893			allocated_ctrlr = dma_ctrlr;
    894			break;
    895		}
    896		mutex_unlock(&dma_ctrlr->mtx);
    897	}
    898
    899	/* Check to see if we found a resource */
    900	if (!allocated_ctrlr)
    901		goto err_ctrlr;
    902
    903	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
    904	if (!resource)
    905		goto err_alloc;
    906
    907	resource->type = VME_DMA;
    908	resource->entry = &allocated_ctrlr->list;
    909
    910	return resource;
    911
    912err_alloc:
    913	/* Unlock image */
    914	mutex_lock(&dma_ctrlr->mtx);
    915	dma_ctrlr->locked = 0;
    916	mutex_unlock(&dma_ctrlr->mtx);
    917err_ctrlr:
    918err_bus:
    919	return NULL;
    920}
    921EXPORT_SYMBOL(vme_dma_request);
    922
    923/**
    924 * vme_new_dma_list - Create new VME DMA list.
    925 * @resource: Pointer to VME DMA resource.
    926 *
    927 * Create a new VME DMA list. It is the responsibility of the user to free
    928 * the list once it is no longer required with vme_dma_list_free().
    929 *
    930 * Return: Pointer to new VME DMA list, NULL on allocation failure or invalid
    931 *         VME DMA resource.
    932 */
    933struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
    934{
    935	struct vme_dma_list *dma_list;
    936
    937	if (resource->type != VME_DMA) {
    938		printk(KERN_ERR "Not a DMA resource\n");
    939		return NULL;
    940	}
    941
    942	dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
    943	if (!dma_list)
    944		return NULL;
    945
    946	INIT_LIST_HEAD(&dma_list->entries);
    947	dma_list->parent = list_entry(resource->entry,
    948				      struct vme_dma_resource,
    949				      list);
    950	mutex_init(&dma_list->mtx);
    951
    952	return dma_list;
    953}
    954EXPORT_SYMBOL(vme_new_dma_list);
    955
    956/**
    957 * vme_dma_pattern_attribute - Create "Pattern" type VME DMA list attribute.
    958 * @pattern: Value to be used as the pattern.
    959 * @type: Type of pattern to be written.
    960 *
    961 * Create VME DMA list attribute for pattern generation. It is the
    962 * responsibility of the user to free used attributes using
    963 * vme_dma_free_attribute().
    964 *
    965 * Return: Pointer to VME DMA attribute, NULL on failure.
    966 */
    967struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
    968{
    969	struct vme_dma_attr *attributes;
    970	struct vme_dma_pattern *pattern_attr;
    971
    972	attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
    973	if (!attributes)
    974		goto err_attr;
    975
    976	pattern_attr = kmalloc(sizeof(*pattern_attr), GFP_KERNEL);
    977	if (!pattern_attr)
    978		goto err_pat;
    979
    980	attributes->type = VME_DMA_PATTERN;
    981	attributes->private = (void *)pattern_attr;
    982
    983	pattern_attr->pattern = pattern;
    984	pattern_attr->type = type;
    985
    986	return attributes;
    987
    988err_pat:
    989	kfree(attributes);
    990err_attr:
    991	return NULL;
    992}
    993EXPORT_SYMBOL(vme_dma_pattern_attribute);
    994
    995/**
    996 * vme_dma_pci_attribute - Create "PCI" type VME DMA list attribute.
    997 * @address: PCI base address for DMA transfer.
    998 *
    999 * Create VME DMA list attribute pointing to a location on PCI for DMA
   1000 * transfers. It is the responsibility of the user to free used attributes
   1001 * using vme_dma_free_attribute().
   1002 *
   1003 * Return: Pointer to VME DMA attribute, NULL on failure.
   1004 */
   1005struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
   1006{
   1007	struct vme_dma_attr *attributes;
   1008	struct vme_dma_pci *pci_attr;
   1009
   1010	/* XXX Run some sanity checks here */
   1011
   1012	attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
   1013	if (!attributes)
   1014		goto err_attr;
   1015
   1016	pci_attr = kmalloc(sizeof(*pci_attr), GFP_KERNEL);
   1017	if (!pci_attr)
   1018		goto err_pci;
   1019
   1020	attributes->type = VME_DMA_PCI;
   1021	attributes->private = (void *)pci_attr;
   1022
   1023	pci_attr->address = address;
   1024
   1025	return attributes;
   1026
   1027err_pci:
   1028	kfree(attributes);
   1029err_attr:
   1030	return NULL;
   1031}
   1032EXPORT_SYMBOL(vme_dma_pci_attribute);
   1033
   1034/**
   1035 * vme_dma_vme_attribute - Create "VME" type VME DMA list attribute.
   1036 * @address: VME base address for DMA transfer.
   1037 * @aspace: VME address space to use for DMA transfer.
   1038 * @cycle: VME bus cycle to use for DMA transfer.
   1039 * @dwidth: VME data width to use for DMA transfer.
   1040 *
   1041 * Create VME DMA list attribute pointing to a location on the VME bus for DMA
   1042 * transfers. It is the responsibility of the user to free used attributes
   1043 * using vme_dma_free_attribute().
   1044 *
   1045 * Return: Pointer to VME DMA attribute, NULL on failure.
   1046 */
   1047struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
   1048	u32 aspace, u32 cycle, u32 dwidth)
   1049{
   1050	struct vme_dma_attr *attributes;
   1051	struct vme_dma_vme *vme_attr;
   1052
   1053	attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
   1054	if (!attributes)
   1055		goto err_attr;
   1056
   1057	vme_attr = kmalloc(sizeof(*vme_attr), GFP_KERNEL);
   1058	if (!vme_attr)
   1059		goto err_vme;
   1060
   1061	attributes->type = VME_DMA_VME;
   1062	attributes->private = (void *)vme_attr;
   1063
   1064	vme_attr->address = address;
   1065	vme_attr->aspace = aspace;
   1066	vme_attr->cycle = cycle;
   1067	vme_attr->dwidth = dwidth;
   1068
   1069	return attributes;
   1070
   1071err_vme:
   1072	kfree(attributes);
   1073err_attr:
   1074	return NULL;
   1075}
   1076EXPORT_SYMBOL(vme_dma_vme_attribute);
   1077
   1078/**
   1079 * vme_dma_free_attribute - Free DMA list attribute.
   1080 * @attributes: Pointer to DMA list attribute.
   1081 *
   1082 * Free VME DMA list attribute. VME DMA list attributes can be safely freed
   1083 * once vme_dma_list_add() has returned.
   1084 */
   1085void vme_dma_free_attribute(struct vme_dma_attr *attributes)
   1086{
   1087	kfree(attributes->private);
   1088	kfree(attributes);
   1089}
   1090EXPORT_SYMBOL(vme_dma_free_attribute);
   1091
   1092/**
   1093 * vme_dma_list_add - Add entry to a VME DMA list.
   1094 * @list: Pointer to VME list.
   1095 * @src: Pointer to DMA list attribute to use as source.
   1096 * @dest: Pointer to DMA list attribute to use as destination.
   1097 * @count: Number of bytes to transfer.
   1098 *
   1099 * Add an entry to the provided VME DMA list. Entry requires pointers to source
   1100 * and destination DMA attributes and a count.
   1101 *
   1102 * Please note, the attributes supported as source and destinations for
   1103 * transfers are hardware dependent.
   1104 *
   1105 * Return: Zero on success, -EINVAL if operation is not supported on this
   1106 *         device or if the link list has already been submitted for execution.
   1107 *         Hardware specific errors also possible.
   1108 */
   1109int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
   1110	struct vme_dma_attr *dest, size_t count)
   1111{
   1112	struct vme_bridge *bridge = list->parent->parent;
   1113	int retval;
   1114
   1115	if (!bridge->dma_list_add) {
   1116		printk(KERN_WARNING "Link List DMA generation not supported\n");
   1117		return -EINVAL;
   1118	}
   1119
   1120	if (!mutex_trylock(&list->mtx)) {
   1121		printk(KERN_ERR "Link List already submitted\n");
   1122		return -EINVAL;
   1123	}
   1124
   1125	retval = bridge->dma_list_add(list, src, dest, count);
   1126
   1127	mutex_unlock(&list->mtx);
   1128
   1129	return retval;
   1130}
   1131EXPORT_SYMBOL(vme_dma_list_add);
   1132
   1133/**
   1134 * vme_dma_list_exec - Queue a VME DMA list for execution.
   1135 * @list: Pointer to VME list.
   1136 *
   1137 * Queue the provided VME DMA list for execution. The call will return once the
   1138 * list has been executed.
   1139 *
   1140 * Return: Zero on success, -EINVAL if operation is not supported on this
   1141 *         device. Hardware specific errors also possible.
   1142 */
   1143int vme_dma_list_exec(struct vme_dma_list *list)
   1144{
   1145	struct vme_bridge *bridge = list->parent->parent;
   1146	int retval;
   1147
   1148	if (!bridge->dma_list_exec) {
   1149		printk(KERN_ERR "Link List DMA execution not supported\n");
   1150		return -EINVAL;
   1151	}
   1152
   1153	mutex_lock(&list->mtx);
   1154
   1155	retval = bridge->dma_list_exec(list);
   1156
   1157	mutex_unlock(&list->mtx);
   1158
   1159	return retval;
   1160}
   1161EXPORT_SYMBOL(vme_dma_list_exec);
   1162
   1163/**
   1164 * vme_dma_list_free - Free a VME DMA list.
   1165 * @list: Pointer to VME list.
   1166 *
   1167 * Free the provided DMA list and all its entries.
   1168 *
   1169 * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
   1170 *         is still in use. Hardware specific errors also possible.
   1171 */
   1172int vme_dma_list_free(struct vme_dma_list *list)
   1173{
   1174	struct vme_bridge *bridge = list->parent->parent;
   1175	int retval;
   1176
   1177	if (!bridge->dma_list_empty) {
   1178		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
   1179		return -EINVAL;
   1180	}
   1181
   1182	if (!mutex_trylock(&list->mtx)) {
   1183		printk(KERN_ERR "Link List in use\n");
   1184		return -EBUSY;
   1185	}
   1186
   1187	/*
   1188	 * Empty out all of the entries from the DMA list. We need to go to the
   1189	 * low level driver as DMA entries are driver specific.
   1190	 */
   1191	retval = bridge->dma_list_empty(list);
   1192	if (retval) {
   1193		printk(KERN_ERR "Unable to empty link-list entries\n");
   1194		mutex_unlock(&list->mtx);
   1195		return retval;
   1196	}
   1197	mutex_unlock(&list->mtx);
   1198	kfree(list);
   1199
   1200	return retval;
   1201}
   1202EXPORT_SYMBOL(vme_dma_list_free);
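
/*
 * Usage sketch (illustration only, compiled out): a DMA transfer is described
 * as a list of (source, destination, count) entries built from the attribute
 * helpers above and executed synchronously. The VME_DMA_MEM_TO_VME route and
 * the cycle/width constants come from <linux/vme.h>; addresses and names are
 * assumptions.
 */
#if 0
static int example_dma_to_vme(struct vme_dev *vdev, dma_addr_t src_bus,
			      unsigned long long dst_vme, size_t len)
{
	struct vme_resource *res;
	struct vme_dma_list *list;
	struct vme_dma_attr *src, *dst;
	int retval = -ENOMEM;

	res = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
	if (!res)
		return -ENODEV;

	list = vme_new_dma_list(res);
	if (!list)
		goto out_res;

	src = vme_dma_pci_attribute(src_bus);
	dst = vme_dma_vme_attribute(dst_vme, VME_A32,
				    VME_SCT | VME_USER | VME_DATA, VME_D32);
	if (!src || !dst)
		goto out_attr;

	retval = vme_dma_list_add(list, src, dst, len);
	if (!retval)
		retval = vme_dma_list_exec(list);	/* Blocks until done */

out_attr:
	if (src)
		vme_dma_free_attribute(src);
	if (dst)
		vme_dma_free_attribute(dst);
	vme_dma_list_free(list);
out_res:
	vme_dma_free(res);
	return retval;
}
#endif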
   1203
   1204/**
   1205 * vme_dma_free - Free a VME DMA resource.
   1206 * @resource: Pointer to VME DMA resource.
   1207 *
   1208 * Free the provided DMA resource so that it may be reallocated.
   1209 *
   1210 * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
   1211 *         is still active.
   1212 */
   1213int vme_dma_free(struct vme_resource *resource)
   1214{
   1215	struct vme_dma_resource *ctrlr;
   1216
   1217	if (resource->type != VME_DMA) {
   1218		printk(KERN_ERR "Not a DMA resource\n");
   1219		return -EINVAL;
   1220	}
   1221
   1222	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
   1223
   1224	if (!mutex_trylock(&ctrlr->mtx)) {
   1225		printk(KERN_ERR "Resource busy, can't free\n");
   1226		return -EBUSY;
   1227	}
   1228
   1229	if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
   1230		printk(KERN_WARNING "Resource still processing transfers\n");
   1231		mutex_unlock(&ctrlr->mtx);
   1232		return -EBUSY;
   1233	}
   1234
   1235	ctrlr->locked = 0;
   1236
   1237	mutex_unlock(&ctrlr->mtx);
   1238
   1239	kfree(resource);
   1240
   1241	return 0;
   1242}
   1243EXPORT_SYMBOL(vme_dma_free);
   1244
   1245void vme_bus_error_handler(struct vme_bridge *bridge,
   1246			   unsigned long long address, int am)
   1247{
   1248	struct list_head *handler_pos = NULL;
   1249	struct vme_error_handler *handler;
   1250	int handler_triggered = 0;
   1251	u32 aspace = vme_get_aspace(am);
   1252
   1253	list_for_each(handler_pos, &bridge->vme_error_handlers) {
   1254		handler = list_entry(handler_pos, struct vme_error_handler,
   1255				     list);
   1256		if ((aspace == handler->aspace) &&
   1257		    (address >= handler->start) &&
   1258		    (address < handler->end)) {
   1259			if (!handler->num_errors)
   1260				handler->first_error = address;
   1261			if (handler->num_errors != UINT_MAX)
   1262				handler->num_errors++;
   1263			handler_triggered = 1;
   1264		}
   1265	}
   1266
   1267	if (!handler_triggered)
   1268		dev_err(bridge->parent,
   1269			"Unhandled VME access error at address 0x%llx\n",
   1270			address);
   1271}
   1272EXPORT_SYMBOL(vme_bus_error_handler);
   1273
   1274struct vme_error_handler *vme_register_error_handler(
   1275	struct vme_bridge *bridge, u32 aspace,
   1276	unsigned long long address, size_t len)
   1277{
   1278	struct vme_error_handler *handler;
   1279
   1280	handler = kmalloc(sizeof(*handler), GFP_ATOMIC);
   1281	if (!handler)
   1282		return NULL;
   1283
   1284	handler->aspace = aspace;
   1285	handler->start = address;
   1286	handler->end = address + len;
   1287	handler->num_errors = 0;
   1288	handler->first_error = 0;
   1289	list_add_tail(&handler->list, &bridge->vme_error_handlers);
   1290
   1291	return handler;
   1292}
   1293EXPORT_SYMBOL(vme_register_error_handler);
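
/*
 * Usage sketch (illustration only, compiled out): bridge drivers register an
 * error handler covering a window before a transfer and inspect num_errors
 * afterwards; vme_bus_error_handler() updates the counters when the hardware
 * reports a bus error. The window base and length are assumptions.
 */
#if 0
static unsigned int example_count_window_errors(struct vme_bridge *bridge)
{
	struct vme_error_handler *handler;
	unsigned int errors;

	handler = vme_register_error_handler(bridge, VME_A32, 0x80000000,
					     0x10000);
	if (!handler)
		return 0;

	/* ... perform the accesses to be monitored here ... */

	errors = handler->num_errors;
	vme_unregister_error_handler(handler);

	return errors;
}
#endif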
   1294
   1295void vme_unregister_error_handler(struct vme_error_handler *handler)
   1296{
   1297	list_del(&handler->list);
   1298	kfree(handler);
   1299}
   1300EXPORT_SYMBOL(vme_unregister_error_handler);
   1301
   1302void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
   1303{
   1304	void (*call)(int, int, void *);
   1305	void *priv_data;
   1306
   1307	call = bridge->irq[level - 1].callback[statid].func;
   1308	priv_data = bridge->irq[level - 1].callback[statid].priv_data;
   1309	if (call)
   1310		call(level, statid, priv_data);
   1311	else
   1312		printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
   1313		       level, statid);
   1314}
   1315EXPORT_SYMBOL(vme_irq_handler);
   1316
   1317/**
   1318 * vme_irq_request - Request a specific VME interrupt.
   1319 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
   1320 * @level: Interrupt priority being requested.
   1321 * @statid: Interrupt vector being requested.
   1322 * @callback: Pointer to callback function called when VME interrupt/vector
   1323 *            received.
   1324 * @priv_data: Generic pointer that will be passed to the callback function.
   1325 *
   1326 * Request callback to be attached as a handler for VME interrupts with provided
   1327 * level and statid.
   1328 *
   1329 * Return: Zero on success, -EINVAL on invalid vme device, level or if the
   1330 *         function is not supported, -EBUSY if the level/statid combination is
   1331 *         already in use. Hardware specific errors also possible.
   1332 */
   1333int vme_irq_request(struct vme_dev *vdev, int level, int statid,
   1334	void (*callback)(int, int, void *),
   1335	void *priv_data)
   1336{
   1337	struct vme_bridge *bridge;
   1338
   1339	bridge = vdev->bridge;
   1340	if (!bridge) {
   1341		printk(KERN_ERR "Can't find VME bus\n");
   1342		return -EINVAL;
   1343	}
   1344
   1345	if ((level < 1) || (level > 7)) {
   1346		printk(KERN_ERR "Invalid interrupt level\n");
   1347		return -EINVAL;
   1348	}
   1349
   1350	if (!bridge->irq_set) {
   1351		printk(KERN_ERR "Configuring interrupts not supported\n");
   1352		return -EINVAL;
   1353	}
   1354
   1355	mutex_lock(&bridge->irq_mtx);
   1356
   1357	if (bridge->irq[level - 1].callback[statid].func) {
   1358		mutex_unlock(&bridge->irq_mtx);
   1359		printk(KERN_WARNING "VME Interrupt already taken\n");
   1360		return -EBUSY;
   1361	}
   1362
   1363	bridge->irq[level - 1].count++;
   1364	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
   1365	bridge->irq[level - 1].callback[statid].func = callback;
   1366
   1367	/* Enable IRQ level */
   1368	bridge->irq_set(bridge, level, 1, 1);
   1369
   1370	mutex_unlock(&bridge->irq_mtx);
   1371
   1372	return 0;
   1373}
   1374EXPORT_SYMBOL(vme_irq_request);
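
/*
 * Usage sketch (illustration only, compiled out): attaching a handler for
 * VME level 3, vector 0x20 interrupts. The level/vector values and the
 * "example_" names are assumptions; the callback may run in interrupt
 * context, depending on the bridge driver.
 */
#if 0
static void example_vme_isr(int level, int statid, void *priv_data)
{
	/* priv_data is the pointer that was passed to vme_irq_request() */
	dev_info(priv_data, "VME IRQ level %d, vector 0x%x\n", level, statid);
}

static int example_attach_irq(struct vme_dev *vdev)
{
	return vme_irq_request(vdev, 3, 0x20, example_vme_isr, &vdev->dev);
}
#endif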
   1375
   1376/**
   1377 * vme_irq_free - Free a VME interrupt.
   1378 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
   1379 * @level: Interrupt priority of interrupt being freed.
   1380 * @statid: Interrupt vector of interrupt being freed.
   1381 *
   1382 * Remove previously attached callback from VME interrupt priority/vector.
   1383 */
   1384void vme_irq_free(struct vme_dev *vdev, int level, int statid)
   1385{
   1386	struct vme_bridge *bridge;
   1387
   1388	bridge = vdev->bridge;
   1389	if (!bridge) {
   1390		printk(KERN_ERR "Can't find VME bus\n");
   1391		return;
   1392	}
   1393
   1394	if ((level < 1) || (level > 7)) {
   1395		printk(KERN_ERR "Invalid interrupt level\n");
   1396		return;
   1397	}
   1398
   1399	if (!bridge->irq_set) {
   1400		printk(KERN_ERR "Configuring interrupts not supported\n");
   1401		return;
   1402	}
   1403
   1404	mutex_lock(&bridge->irq_mtx);
   1405
   1406	bridge->irq[level - 1].count--;
   1407
   1408	/* Disable IRQ level if no more interrupts are attached at this level */
   1409	if (bridge->irq[level - 1].count == 0)
   1410		bridge->irq_set(bridge, level, 0, 1);
   1411
   1412	bridge->irq[level - 1].callback[statid].func = NULL;
   1413	bridge->irq[level - 1].callback[statid].priv_data = NULL;
   1414
   1415	mutex_unlock(&bridge->irq_mtx);
   1416}
   1417EXPORT_SYMBOL(vme_irq_free);
   1418
   1419/**
   1420 * vme_irq_generate - Generate VME interrupt.
   1421 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
   1422 * @level: Interrupt priority at which to assert the interrupt.
   1423 * @statid: Interrupt vector to associate with the interrupt.
   1424 *
   1425 * Generate a VME interrupt of the provided level and with the provided
   1426 * statid.
   1427 *
   1428 * Return: Zero on success, -EINVAL on invalid vme device, level or if the
   1429 *         function is not supported. Hardware specific errors also possible.
   1430 */
   1431int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
   1432{
   1433	struct vme_bridge *bridge;
   1434
   1435	bridge = vdev->bridge;
   1436	if (!bridge) {
   1437		printk(KERN_ERR "Can't find VME bus\n");
   1438		return -EINVAL;
   1439	}
   1440
   1441	if ((level < 1) || (level > 7)) {
   1442		printk(KERN_WARNING "Invalid interrupt level\n");
   1443		return -EINVAL;
   1444	}
   1445
   1446	if (!bridge->irq_generate) {
   1447		printk(KERN_WARNING "Interrupt generation not supported\n");
   1448		return -EINVAL;
   1449	}
   1450
   1451	return bridge->irq_generate(bridge, level, statid);
   1452}
   1453EXPORT_SYMBOL(vme_irq_generate);
   1454
   1455/**
   1456 * vme_lm_request - Request a VME location monitor
   1457 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
   1458 *
   1459 * Allocate a location monitor resource to the driver. A location monitor
   1460 * allows the driver to monitor accesses to a contiguous block of
   1461 * addresses on the VME bus.
   1462 *
   1463 * Return: Pointer to a VME resource on success or NULL on failure.
   1464 */
   1465struct vme_resource *vme_lm_request(struct vme_dev *vdev)
   1466{
   1467	struct vme_bridge *bridge;
   1468	struct list_head *lm_pos = NULL;
   1469	struct vme_lm_resource *allocated_lm = NULL;
   1470	struct vme_lm_resource *lm = NULL;
   1471	struct vme_resource *resource = NULL;
   1472
   1473	bridge = vdev->bridge;
   1474	if (!bridge) {
   1475		printk(KERN_ERR "Can't find VME bus\n");
   1476		goto err_bus;
   1477	}
   1478
   1479	/* Loop through LM resources */
   1480	list_for_each(lm_pos, &bridge->lm_resources) {
   1481		lm = list_entry(lm_pos,
   1482			struct vme_lm_resource, list);
   1483		if (!lm) {
   1484			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
   1485			continue;
   1486		}
   1487
   1488		/* Find an unlocked controller */
   1489		mutex_lock(&lm->mtx);
   1490		if (lm->locked == 0) {
   1491			lm->locked = 1;
   1492			mutex_unlock(&lm->mtx);
   1493			allocated_lm = lm;
   1494			break;
   1495		}
   1496		mutex_unlock(&lm->mtx);
   1497	}
   1498
   1499	/* Check to see if we found a resource */
   1500	if (!allocated_lm)
   1501		goto err_lm;
   1502
   1503	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
   1504	if (!resource)
   1505		goto err_alloc;
   1506
   1507	resource->type = VME_LM;
   1508	resource->entry = &allocated_lm->list;
   1509
   1510	return resource;
   1511
   1512err_alloc:
   1513	/* Unlock image */
   1514	mutex_lock(&lm->mtx);
   1515	lm->locked = 0;
   1516	mutex_unlock(&lm->mtx);
   1517err_lm:
   1518err_bus:
   1519	return NULL;
   1520}
   1521EXPORT_SYMBOL(vme_lm_request);
   1522
   1523/**
   1524 * vme_lm_count - Determine number of VME Addresses monitored
   1525 * @resource: Pointer to VME location monitor resource.
   1526 *
   1527 * The number of contiguous addresses monitored is hardware dependent.
   1528 * Return the number of contiguous addresses monitored by the
   1529 * location monitor.
   1530 *
   1531 * Return: Count of addresses monitored or -EINVAL when provided with an
   1532 *	   invalid location monitor resource.
   1533 */
   1534int vme_lm_count(struct vme_resource *resource)
   1535{
   1536	struct vme_lm_resource *lm;
   1537
   1538	if (resource->type != VME_LM) {
   1539		printk(KERN_ERR "Not a Location Monitor resource\n");
   1540		return -EINVAL;
   1541	}
   1542
   1543	lm = list_entry(resource->entry, struct vme_lm_resource, list);
   1544
   1545	return lm->monitors;
   1546}
   1547EXPORT_SYMBOL(vme_lm_count);
   1548
   1549/**
   1550 * vme_lm_set - Configure location monitor
   1551 * @resource: Pointer to VME location monitor resource.
   1552 * @lm_base: Base address to monitor.
   1553 * @aspace: VME address space to monitor.
   1554 * @cycle: VME bus cycle type to monitor.
   1555 *
   1556 * Set the base address, address space and cycle type of accesses to be
   1557 * monitored by the location monitor.
   1558 *
   1559 * Return: Zero on success, -EINVAL when provided with an invalid location
   1560 *	   monitor resource or function is not supported. Hardware specific
   1561 *	   errors may also be returned.
   1562 */
   1563int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
   1564	u32 aspace, u32 cycle)
   1565{
   1566	struct vme_bridge *bridge = find_bridge(resource);
   1567	struct vme_lm_resource *lm;
   1568
   1569	if (resource->type != VME_LM) {
   1570		printk(KERN_ERR "Not a Location Monitor resource\n");
   1571		return -EINVAL;
   1572	}
   1573
   1574	lm = list_entry(resource->entry, struct vme_lm_resource, list);
   1575
   1576	if (!bridge->lm_set) {
   1577		printk(KERN_ERR "vme_lm_set not supported\n");
   1578		return -EINVAL;
   1579	}
   1580
   1581	return bridge->lm_set(lm, lm_base, aspace, cycle);
   1582}
   1583EXPORT_SYMBOL(vme_lm_set);
   1584
   1585/**
   1586 * vme_lm_get - Retrieve location monitor settings
   1587 * @resource: Pointer to VME location monitor resource.
   1588 * @lm_base: Pointer used to output the base address monitored.
   1589 * @aspace: Pointer used to output the address space monitored.
   1590 * @cycle: Pointer used to output the VME bus cycle type monitored.
   1591 *
   1592 * Retrieve the base address, address space and cycle type of accesses to
   1593 * be monitored by the location monitor.
   1594 *
   1595 * Return: Zero on success, -EINVAL when provided with an invalid location
   1596 *	   monitor resource or function is not supported. Hardware specific
   1597 *	   errors may also be returned.
   1598 */
   1599int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
   1600	u32 *aspace, u32 *cycle)
   1601{
   1602	struct vme_bridge *bridge = find_bridge(resource);
   1603	struct vme_lm_resource *lm;
   1604
   1605	if (resource->type != VME_LM) {
   1606		printk(KERN_ERR "Not a Location Monitor resource\n");
   1607		return -EINVAL;
   1608	}
   1609
   1610	lm = list_entry(resource->entry, struct vme_lm_resource, list);
   1611
   1612	if (!bridge->lm_get) {
   1613		printk(KERN_ERR "vme_lm_get not supported\n");
   1614		return -EINVAL;
   1615	}
   1616
   1617	return bridge->lm_get(lm, lm_base, aspace, cycle);
   1618}
   1619EXPORT_SYMBOL(vme_lm_get);
   1620
   1621/**
   1622 * vme_lm_attach - Provide callback for location monitor address
   1623 * @resource: Pointer to VME location monitor resource.
   1624 * @monitor: Offset to which callback should be attached.
   1625 * @callback: Pointer to callback function called when triggered.
   1626 * @data: Generic pointer that will be passed to the callback function.
   1627 *
   1628 * Attach a callback to the specified offset into the location monitor's
   1629 * monitored addresses. A generic pointer is provided to allow data to be
   1630 * passed to the callback when called.
   1631 *
   1632 * Return: Zero on success, -EINVAL when provided with an invalid location
   1633 *	   monitor resource or function is not supported. Hardware specific
   1634 *	   errors may also be returned.
   1635 */
   1636int vme_lm_attach(struct vme_resource *resource, int monitor,
   1637	void (*callback)(void *), void *data)
   1638{
   1639	struct vme_bridge *bridge = find_bridge(resource);
   1640	struct vme_lm_resource *lm;
   1641
   1642	if (resource->type != VME_LM) {
   1643		printk(KERN_ERR "Not a Location Monitor resource\n");
   1644		return -EINVAL;
   1645	}
   1646
   1647	lm = list_entry(resource->entry, struct vme_lm_resource, list);
   1648
   1649	if (!bridge->lm_attach) {
   1650		printk(KERN_ERR "vme_lm_attach not supported\n");
   1651		return -EINVAL;
   1652	}
   1653
   1654	return bridge->lm_attach(lm, monitor, callback, data);
   1655}
   1656EXPORT_SYMBOL(vme_lm_attach);
   1657
   1658/**
   1659 * vme_lm_detach - Remove callback for location monitor address
   1660 * @resource: Pointer to VME location monitor resource.
   1661 * @monitor: Offset from which the callback should be removed.
   1662 *
   1663 * Remove the callback associated with the specified offset into the
   1664 * location monitor's monitored addresses.
   1665 *
   1666 * Return: Zero on success, -EINVAL when provided with an invalid location
   1667 *	   monitor resource or function is not supported. Hardware specific
   1668 *	   errors may also be returned.
   1669 */
   1670int vme_lm_detach(struct vme_resource *resource, int monitor)
   1671{
   1672	struct vme_bridge *bridge = find_bridge(resource);
   1673	struct vme_lm_resource *lm;
   1674
   1675	if (resource->type != VME_LM) {
   1676		printk(KERN_ERR "Not a Location Monitor resource\n");
   1677		return -EINVAL;
   1678	}
   1679
   1680	lm = list_entry(resource->entry, struct vme_lm_resource, list);
   1681
   1682	if (!bridge->lm_detach) {
   1683		printk(KERN_ERR "vme_lm_detach not supported\n");
   1684		return -EINVAL;
   1685	}
   1686
   1687	return bridge->lm_detach(lm, monitor);
   1688}
   1689EXPORT_SYMBOL(vme_lm_detach);
   1690
   1691/**
   1692 * vme_lm_free - Free allocated VME location monitor
   1693 * @resource: Pointer to VME location monitor resource.
   1694 *
   1695 * Free allocation of a VME location monitor.
   1696 *
   1697 * WARNING: This function currently expects that any callbacks that have
   1698 *          been attached to the location monitor have been removed.
   1702 */
   1703void vme_lm_free(struct vme_resource *resource)
   1704{
   1705	struct vme_lm_resource *lm;
   1706
   1707	if (resource->type != VME_LM) {
   1708		printk(KERN_ERR "Not a Location Monitor resource\n");
   1709		return;
   1710	}
   1711
   1712	lm = list_entry(resource->entry, struct vme_lm_resource, list);
   1713
   1714	mutex_lock(&lm->mtx);
   1715
    1716	/* XXX
    1717	 * Check to see that there aren't any callbacks still attached; if
    1718	 * there are, we should probably be detaching them!
    1719	 */
   1720
   1721	lm->locked = 0;
   1722
   1723	mutex_unlock(&lm->mtx);
   1724
   1725	kfree(resource);
   1726}
   1727EXPORT_SYMBOL(vme_lm_free);
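
/*
 * Usage sketch (editorial illustration, not part of the original driver):
 * tear down a location monitor. As the warning above notes, any attached
 * callbacks must be detached before vme_lm_free() is called; "lm_res" is a
 * hypothetical resource obtained via vme_lm_request().
 */
static void __maybe_unused example_lm_teardown(struct vme_resource *lm_res)
{
	int i;

	/* Detach every monitor offset before freeing the resource */
	for (i = 0; i < vme_lm_count(lm_res); i++)
		vme_lm_detach(lm_res, i);

	vme_lm_free(lm_res);
}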
   1728
   1729/**
   1730 * vme_slot_num - Retrieve slot ID
   1731 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
   1732 *
   1733 * Retrieve the slot ID associated with the provided VME device.
   1734 *
   1735 * Return: The slot ID on success, -EINVAL if VME bridge cannot be determined
   1736 *         or the function is not supported. Hardware specific errors may also
   1737 *         be returned.
   1738 */
   1739int vme_slot_num(struct vme_dev *vdev)
   1740{
   1741	struct vme_bridge *bridge;
   1742
   1743	bridge = vdev->bridge;
   1744	if (!bridge) {
   1745		printk(KERN_ERR "Can't find VME bus\n");
   1746		return -EINVAL;
   1747	}
   1748
   1749	if (!bridge->slot_get) {
   1750		printk(KERN_WARNING "vme_slot_num not supported\n");
   1751		return -EINVAL;
   1752	}
   1753
   1754	return bridge->slot_get(bridge);
   1755}
   1756EXPORT_SYMBOL(vme_slot_num);
   1757
   1758/**
   1759 * vme_bus_num - Retrieve bus number
   1760 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
   1761 *
   1762 * Retrieve the bus enumeration associated with the provided VME device.
   1763 *
   1764 * Return: The bus number on success, -EINVAL if VME bridge cannot be
   1765 *         determined.
   1766 */
   1767int vme_bus_num(struct vme_dev *vdev)
   1768{
   1769	struct vme_bridge *bridge;
   1770
   1771	bridge = vdev->bridge;
   1772	if (!bridge) {
   1773		pr_err("Can't find VME bus\n");
   1774		return -EINVAL;
   1775	}
   1776
   1777	return bridge->num;
   1778}
   1779EXPORT_SYMBOL(vme_bus_num);
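
/*
 * Usage sketch (editorial illustration, not part of the original driver):
 * a device driver's probe routine logging where its device was enumerated.
 * The function name is hypothetical.
 */
static int __maybe_unused example_report_location(struct vme_dev *vdev)
{
	int slot = vme_slot_num(vdev);
	int bus = vme_bus_num(vdev);

	if (slot < 0 || bus < 0)
		return -ENODEV;

	dev_info(&vdev->dev, "device on VME bus %d, slot %d\n", bus, slot);
	return 0;
}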
   1780
   1781/* - Bridge Registration --------------------------------------------------- */
   1782
   1783static void vme_dev_release(struct device *dev)
   1784{
   1785	kfree(dev_to_vme_dev(dev));
   1786}
   1787
   1788/* Common bridge initialization */
   1789struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
   1790{
   1791	INIT_LIST_HEAD(&bridge->vme_error_handlers);
   1792	INIT_LIST_HEAD(&bridge->master_resources);
   1793	INIT_LIST_HEAD(&bridge->slave_resources);
   1794	INIT_LIST_HEAD(&bridge->dma_resources);
   1795	INIT_LIST_HEAD(&bridge->lm_resources);
   1796	mutex_init(&bridge->irq_mtx);
   1797
   1798	return bridge;
   1799}
   1800EXPORT_SYMBOL(vme_init_bridge);
   1801
   1802int vme_register_bridge(struct vme_bridge *bridge)
   1803{
   1804	int i;
   1805	int ret = -1;
   1806
   1807	mutex_lock(&vme_buses_lock);
   1808	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
    1809		if ((vme_bus_numbers & (1U << i)) == 0) {
    1810			vme_bus_numbers |= (1U << i);
   1811			bridge->num = i;
   1812			INIT_LIST_HEAD(&bridge->devices);
   1813			list_add_tail(&bridge->bus_list, &vme_bus_list);
   1814			ret = 0;
   1815			break;
   1816		}
   1817	}
   1818	mutex_unlock(&vme_buses_lock);
   1819
   1820	return ret;
   1821}
   1822EXPORT_SYMBOL(vme_register_bridge);
   1823
   1824void vme_unregister_bridge(struct vme_bridge *bridge)
   1825{
   1826	struct vme_dev *vdev;
   1827	struct vme_dev *tmp;
   1828
   1829	mutex_lock(&vme_buses_lock);
    1830	vme_bus_numbers &= ~(1U << bridge->num);
   1831	list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
   1832		list_del(&vdev->drv_list);
   1833		list_del(&vdev->bridge_list);
   1834		device_unregister(&vdev->dev);
   1835	}
   1836	list_del(&bridge->bus_list);
   1837	mutex_unlock(&vme_buses_lock);
   1838}
   1839EXPORT_SYMBOL(vme_unregister_bridge);
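
/*
 * Usage sketch (editorial illustration, not part of the original driver):
 * the order a bridge driver (e.g. a PCI-to-VME chip driver) would typically
 * follow when bringing a bridge up and down. Resource and callback setup is
 * elided; "my_bridge" is a hypothetical, already-allocated structure.
 */
static int __maybe_unused example_bridge_setup(struct vme_bridge *my_bridge,
	struct device *parent)
{
	vme_init_bridge(my_bridge);
	my_bridge->parent = parent;
	/* ... fill in resource lists and hardware callbacks here ... */

	return vme_register_bridge(my_bridge);
}

static void __maybe_unused example_bridge_teardown(struct vme_bridge *my_bridge)
{
	vme_unregister_bridge(my_bridge);
}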
   1840
   1841/* - Driver Registration --------------------------------------------------- */
   1842
   1843static int __vme_register_driver_bus(struct vme_driver *drv,
   1844	struct vme_bridge *bridge, unsigned int ndevs)
   1845{
   1846	int err;
   1847	unsigned int i;
   1848	struct vme_dev *vdev;
   1849	struct vme_dev *tmp;
   1850
   1851	for (i = 0; i < ndevs; i++) {
   1852		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
   1853		if (!vdev) {
   1854			err = -ENOMEM;
   1855			goto err_devalloc;
   1856		}
   1857		vdev->num = i;
   1858		vdev->bridge = bridge;
   1859		vdev->dev.platform_data = drv;
   1860		vdev->dev.release = vme_dev_release;
   1861		vdev->dev.parent = bridge->parent;
   1862		vdev->dev.bus = &vme_bus_type;
   1863		dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
   1864			vdev->num);
   1865
   1866		err = device_register(&vdev->dev);
   1867		if (err)
   1868			goto err_reg;
   1869
   1870		if (vdev->dev.platform_data) {
   1871			list_add_tail(&vdev->drv_list, &drv->devices);
   1872			list_add_tail(&vdev->bridge_list, &bridge->devices);
   1873		} else
   1874			device_unregister(&vdev->dev);
   1875	}
   1876	return 0;
   1877
   1878err_reg:
   1879	put_device(&vdev->dev);
   1880err_devalloc:
   1881	list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
   1882		list_del(&vdev->drv_list);
   1883		list_del(&vdev->bridge_list);
   1884		device_unregister(&vdev->dev);
   1885	}
   1886	return err;
   1887}
   1888
   1889static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
   1890{
   1891	struct vme_bridge *bridge;
   1892	int err = 0;
   1893
   1894	mutex_lock(&vme_buses_lock);
   1895	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
    1896		/*
    1897		 * This cannot cause trouble as we already hold vme_buses_lock
    1898		 * and if the bridge is removed, it will have to go through
    1899		 * vme_unregister_bridge() to do so (which calls remove() on
    1900		 * the bridge, which in turn tries to acquire vme_buses_lock
    1901		 * and will have to wait).
    1902		 */
   1903		err = __vme_register_driver_bus(drv, bridge, ndevs);
   1904		if (err)
   1905			break;
   1906	}
   1907	mutex_unlock(&vme_buses_lock);
   1908	return err;
   1909}
   1910
   1911/**
   1912 * vme_register_driver - Register a VME driver
   1913 * @drv: Pointer to VME driver structure to register.
   1914 * @ndevs: Maximum number of devices to allow to be enumerated.
   1915 *
   1916 * Register a VME device driver with the VME subsystem.
   1917 *
   1918 * Return: Zero on success, error value on registration failure.
   1919 */
   1920int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
   1921{
   1922	int err;
   1923
   1924	drv->driver.name = drv->name;
   1925	drv->driver.bus = &vme_bus_type;
   1926	INIT_LIST_HEAD(&drv->devices);
   1927
   1928	err = driver_register(&drv->driver);
   1929	if (err)
   1930		return err;
   1931
   1932	err = __vme_register_driver(drv, ndevs);
   1933	if (err)
   1934		driver_unregister(&drv->driver);
   1935
   1936	return err;
   1937}
   1938EXPORT_SYMBOL(vme_register_driver);
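
/*
 * Usage sketch (editorial illustration, not part of the original driver):
 * a minimal VME device driver registering itself, allowing one device per
 * bridge. All names are hypothetical.
 */
static int example_match(struct vme_dev *vdev)
{
	/* Claim only the first enumerated device on each bus */
	return vdev->num == 0;
}

static int example_probe(struct vme_dev *vdev)
{
	dev_info(&vdev->dev, "example driver bound\n");
	return 0;
}

static struct vme_driver example_driver = {
	.name = "vme_example",
	.match = example_match,
	.probe = example_probe,
};

static int __maybe_unused example_driver_init(void)
{
	/* Allow at most one enumerated device per registered bridge */
	return vme_register_driver(&example_driver, 1);
}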
   1939
   1940/**
   1941 * vme_unregister_driver - Unregister a VME driver
   1942 * @drv: Pointer to VME driver structure to unregister.
   1943 *
   1944 * Unregister a VME device driver from the VME subsystem.
   1945 */
   1946void vme_unregister_driver(struct vme_driver *drv)
   1947{
   1948	struct vme_dev *dev, *dev_tmp;
   1949
   1950	mutex_lock(&vme_buses_lock);
   1951	list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
   1952		list_del(&dev->drv_list);
   1953		list_del(&dev->bridge_list);
   1954		device_unregister(&dev->dev);
   1955	}
   1956	mutex_unlock(&vme_buses_lock);
   1957
   1958	driver_unregister(&drv->driver);
   1959}
   1960EXPORT_SYMBOL(vme_unregister_driver);
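
/*
 * Usage sketch (editorial illustration): the matching teardown for the
 * hypothetical example driver above, as would be run from its module exit
 * path.
 */
static void __maybe_unused example_driver_exit(void)
{
	vme_unregister_driver(&example_driver);
}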
   1961
   1962/* - Bus Registration ------------------------------------------------------ */
   1963
   1964static int vme_bus_match(struct device *dev, struct device_driver *drv)
   1965{
   1966	struct vme_driver *vme_drv;
   1967
   1968	vme_drv = container_of(drv, struct vme_driver, driver);
   1969
   1970	if (dev->platform_data == vme_drv) {
   1971		struct vme_dev *vdev = dev_to_vme_dev(dev);
   1972
   1973		if (vme_drv->match && vme_drv->match(vdev))
   1974			return 1;
   1975
   1976		dev->platform_data = NULL;
   1977	}
   1978	return 0;
   1979}
   1980
   1981static int vme_bus_probe(struct device *dev)
   1982{
   1983	struct vme_driver *driver;
   1984	struct vme_dev *vdev = dev_to_vme_dev(dev);
   1985
   1986	driver = dev->platform_data;
   1987	if (driver->probe)
   1988		return driver->probe(vdev);
   1989
   1990	return -ENODEV;
   1991}
   1992
   1993static void vme_bus_remove(struct device *dev)
   1994{
   1995	struct vme_driver *driver;
   1996	struct vme_dev *vdev = dev_to_vme_dev(dev);
   1997
   1998	driver = dev->platform_data;
   1999	if (driver->remove)
   2000		driver->remove(vdev);
   2001}
   2002
   2003struct bus_type vme_bus_type = {
   2004	.name = "vme",
   2005	.match = vme_bus_match,
   2006	.probe = vme_bus_probe,
   2007	.remove = vme_bus_remove,
   2008};
   2009EXPORT_SYMBOL(vme_bus_type);
   2010
   2011static int __init vme_init(void)
   2012{
   2013	return bus_register(&vme_bus_type);
   2014}
   2015subsys_initcall(vme_init);