cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dim2.c (28398B)


// SPDX-License-Identifier: GPL-2.0
/*
 * dim2.c - MediaLB DIM2 Hardware Dependent Module
 *
 * Copyright (C) 2015-2016, Microchip Technology Germany II GmbH & Co. KG
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/printk.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/most.h>
#include "hal.h"
#include "errors.h"
#include "sysfs.h"

#define DMA_CHANNELS (32 - 1)  /* channel 0 is a system channel */

#define MAX_BUFFERS_PACKET      32
#define MAX_BUFFERS_STREAMING   32
#define MAX_BUF_SIZE_PACKET     2048
#define MAX_BUF_SIZE_STREAMING  (8 * 1024)

/*
 * The parameter representing the number of frames per sub-buffer for
 * synchronous channels.  Valid values: [0 .. 6].
 *
 * The values 0 .. 6 select 1, 2, 4, 8, 16, 32 or 64 frames per sub-buffer,
 * respectively.
 */
static u8 fcnt = 4;  /* (1 << fcnt) frames per subbuffer */
module_param(fcnt, byte, 0000);
MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a power of 2");
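/* Example: the default fcnt = 4 selects 1 << 4 = 16 frames per sub-buffer. */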

static DEFINE_SPINLOCK(dim_lock);

/**
 * struct hdm_channel - private structure to keep channel specific data
 * @name: channel name
 * @is_initialized: identifier to know whether the channel is initialized
 * @ch: HAL specific channel data
 * @reset_dbr_size: reset DBR data buffer size
 * @pending_list: list to keep MBO's before starting transfer
 * @started_list: list to keep MBO's after starting transfer
 * @direction: channel direction (TX or RX)
 * @data_type: channel data type
 */
struct hdm_channel {
	char name[sizeof "caNNN"];
	bool is_initialized;
	struct dim_channel ch;
	u16 *reset_dbr_size;
	struct list_head pending_list;	/* before dim_enqueue_buffer() */
	struct list_head started_list;	/* after dim_enqueue_buffer() */
	enum most_channel_direction direction;
	enum most_channel_data_type data_type;
};

/**
 * struct dim2_hdm - private structure to keep interface specific data
 * @dev: device to be registered with mostcore
 * @hch: an array of channel specific data
 * @capabilities: an array of channel capability data
 * @most_iface: most interface structure
 * @name: device name, used as the interface description
 * @io_base: I/O register base address
 * @clk_speed: MediaLB clock speed, one of the CLK_{NUMBER}FS values
 * @clk: module clock
 * @clk_pll: PLL clock, used for clock speeds of CLK_2048FS and above
 * @netinfo_task: thread to deliver network status
 * @netinfo_waitq: waitq for the thread to sleep
 * @deliver_netinfo: to identify whether network status received
 * @mac_addrs: INIC mac address
 * @link_state: network link state
 * @atx_idx: index of async tx channel
 * @bus: medialb bus structure used by the HAL
 * @on_netinfo: callback to deliver network status to mostcore
 * @disable_platform: platform specific disable function
 */
struct dim2_hdm {
	struct device dev;
	struct hdm_channel hch[DMA_CHANNELS];
	struct most_channel_capability capabilities[DMA_CHANNELS];
	struct most_interface most_iface;
	char name[16 + sizeof "dim2-"];
	void __iomem *io_base;
	u8 clk_speed;
	struct clk *clk;
	struct clk *clk_pll;
	struct task_struct *netinfo_task;
	wait_queue_head_t netinfo_waitq;
	int deliver_netinfo;
	unsigned char mac_addrs[6];
	unsigned char link_state;
	int atx_idx;
	struct medialb_bus bus;
	void (*on_netinfo)(struct most_interface *most_iface,
			   unsigned char link_state, unsigned char *addrs);
	void (*disable_platform)(struct platform_device *pdev);
};

struct dim2_platform_data {
	int (*enable)(struct platform_device *pdev);
	void (*disable)(struct platform_device *pdev);
	u8 fcnt;
};

#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)

/* Macro to identify a network status message */
#define PACKET_IS_NET_INFO(p)  \
	(((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
	 ((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))
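/*
 * The tested bytes are fixed header fields of the INIC network status
 * message; the payload behind them is decoded in retrieve_netinfo()
 * (node address at bytes 16..17, link state at byte 18, MAC address at
 * bytes 19..24).
 */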

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	bool state;
	unsigned long flags;

	spin_lock_irqsave(&dim_lock, flags);
	state = dim_get_lock_state();
	spin_unlock_irqrestore(&dim_lock, flags);

	return sysfs_emit(buf, "%s\n", state ? "locked" : "");
}

static DEVICE_ATTR_RO(state);

static struct attribute *dim2_attrs[] = {
	&dev_attr_state.attr,
	NULL,
};

ATTRIBUTE_GROUPS(dim2);
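/* ATTRIBUTE_GROUPS() defines dim2_groups, wired up below via .dev_groups */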

/**
 * dimcb_on_error - callback from HAL to report miscommunication between
 * HDM and HAL
 * @error_id: Error ID
 * @error_message: Error message, free-form text
 */
void dimcb_on_error(u8 error_id, const char *error_message)
{
	pr_err("%s: error_id - %d, error_message - %s\n", __func__, error_id,
	       error_message);
}

/**
 * try_start_dim_transfer - try to transfer a buffer on a channel
 * @hdm_ch: channel specific data
 *
 * Transfer a buffer from pending_list if the channel is ready
 */
static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
{
	u16 buf_size;
	struct list_head *head = &hdm_ch->pending_list;
	struct mbo *mbo;
	unsigned long flags;
	struct dim_ch_state_t st;

	BUG_ON(!hdm_ch);
	BUG_ON(!hdm_ch->is_initialized);

	spin_lock_irqsave(&dim_lock, flags);
	if (list_empty(head)) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return -EAGAIN;
	}

	if (!dim_get_channel_state(&hdm_ch->ch, &st)->ready) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return -EAGAIN;
	}

	mbo = list_first_entry(head, struct mbo, list);
	buf_size = mbo->buffer_length;

	if (dim_dbr_space(&hdm_ch->ch) < buf_size) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return -EAGAIN;
	}

	BUG_ON(mbo->bus_address == 0);
	if (!dim_enqueue_buffer(&hdm_ch->ch, mbo->bus_address, buf_size)) {
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);
		mbo->processed_length = 0;
		mbo->status = MBO_E_INVAL;
		mbo->complete(mbo);
		return -EFAULT;
	}

	list_move_tail(head->next, &hdm_ch->started_list);
	spin_unlock_irqrestore(&dim_lock, flags);

	return 0;
}

/**
 * deliver_netinfo_thread - thread to deliver network status to mostcore
 * @data: private data
 *
 * Wait for network status and deliver it to mostcore once it is received
 */
static int deliver_netinfo_thread(void *data)
{
	struct dim2_hdm *dev = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(dev->netinfo_waitq,
					 dev->deliver_netinfo ||
					 kthread_should_stop());

		if (dev->deliver_netinfo) {
			dev->deliver_netinfo--;
			if (dev->on_netinfo) {
				dev->on_netinfo(&dev->most_iface,
						dev->link_state,
						dev->mac_addrs);
			}
		}
	}

	return 0;
}

/**
 * retrieve_netinfo - retrieve network status from received buffer
 * @dev: private data
 * @mbo: received MBO
 *
 * Parse the message in buffer and get node address, link state, MAC address.
 * Wake up a thread to deliver this status to mostcore
 */
static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
{
	u8 *data = mbo->virt_address;

	pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
	dev->link_state = data[18];
	pr_info("NIState: %d\n", dev->link_state);
	memcpy(dev->mac_addrs, data + 19, 6);
	dev->deliver_netinfo++;
	wake_up_interruptible(&dev->netinfo_waitq);
}

/**
 * service_done_flag - handle completed buffers
 * @dev: private data
 * @ch_idx: channel index
 *
 * Return the completed buffers to mostcore using the completion callback
 */
static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
{
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	struct dim_ch_state_t st;
	struct list_head *head;
	struct mbo *mbo;
	int done_buffers;
	unsigned long flags;
	u8 *data;

	BUG_ON(!hdm_ch);
	BUG_ON(!hdm_ch->is_initialized);

	spin_lock_irqsave(&dim_lock, flags);

	done_buffers = dim_get_channel_state(&hdm_ch->ch, &st)->done_buffers;
	if (!done_buffers) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return;
	}

	if (!dim_detach_buffers(&hdm_ch->ch, done_buffers)) {
		spin_unlock_irqrestore(&dim_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dim_lock, flags);

	head = &hdm_ch->started_list;

	while (done_buffers) {
		spin_lock_irqsave(&dim_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&dim_lock, flags);
			pr_crit("hard error: started_mbo list is empty whereas DIM2 has sent buffers\n");
			break;
		}

		mbo = list_first_entry(head, struct mbo, list);
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);

		data = mbo->virt_address;

		if (hdm_ch->data_type == MOST_CH_ASYNC &&
		    hdm_ch->direction == MOST_CH_RX &&
		    PACKET_IS_NET_INFO(data)) {
			retrieve_netinfo(dev, mbo);

			spin_lock_irqsave(&dim_lock, flags);
			list_add_tail(&mbo->list, &hdm_ch->pending_list);
			spin_unlock_irqrestore(&dim_lock, flags);
		} else {
			if (hdm_ch->data_type == MOST_CH_CONTROL ||
			    hdm_ch->data_type == MOST_CH_ASYNC) {
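				/*
				 * Control/async messages carry their length
				 * in the first two bytes (the big-endian PML
				 * field, cf. request_netinfo()); the +2
				 * accounts for the PML bytes themselves.
				 */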
				u32 const data_size =
					(u32)data[0] * 256 + data[1] + 2;

				mbo->processed_length =
					min_t(u32, data_size,
					      mbo->buffer_length);
			} else {
				mbo->processed_length = mbo->buffer_length;
			}
			mbo->status = MBO_SUCCESS;
			mbo->complete(mbo);
		}

		done_buffers--;
	}
}

static struct dim_channel **get_active_channels(struct dim2_hdm *dev,
						struct dim_channel **buffer)
{
	int idx = 0;
	int ch_idx;

	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
		if (dev->hch[ch_idx].is_initialized)
			buffer[idx++] = &dev->hch[ch_idx].ch;
	}
	buffer[idx++] = NULL;

	return buffer;
}

static irqreturn_t dim2_mlb_isr(int irq, void *_dev)
{
	struct dim2_hdm *dev = _dev;
	unsigned long flags;

	spin_lock_irqsave(&dim_lock, flags);
	dim_service_mlb_int_irq();
	spin_unlock_irqrestore(&dim_lock, flags);

	if (dev->atx_idx >= 0 && dev->hch[dev->atx_idx].is_initialized)
		while (!try_start_dim_transfer(dev->hch + dev->atx_idx))
			continue;

	return IRQ_HANDLED;
}

static irqreturn_t dim2_task_irq(int irq, void *_dev)
{
	struct dim2_hdm *dev = _dev;
	unsigned long flags;
	int ch_idx;

	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
		if (!dev->hch[ch_idx].is_initialized)
			continue;

		spin_lock_irqsave(&dim_lock, flags);
		dim_service_channel(&dev->hch[ch_idx].ch);
		spin_unlock_irqrestore(&dim_lock, flags);

		service_done_flag(dev, ch_idx);
		while (!try_start_dim_transfer(dev->hch + ch_idx))
			continue;
	}

	return IRQ_HANDLED;
}

/**
 * dim2_ahb_isr - interrupt service routine
 * @irq: irq number
 * @_dev: private data
 *
 * Acknowledge the interrupt and service each initialized channel,
 * if needed, in task context.
 */
static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
{
	struct dim2_hdm *dev = _dev;
	struct dim_channel *buffer[DMA_CHANNELS + 1];
	unsigned long flags;

	spin_lock_irqsave(&dim_lock, flags);
	dim_service_ahb_int_irq(get_active_channels(dev, buffer));
	spin_unlock_irqrestore(&dim_lock, flags);

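	/* IRQ_WAKE_THREAD schedules the threaded handler dim2_task_irq() */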
	return IRQ_WAKE_THREAD;
}

/**
 * complete_all_mbos - complete MBO's in a list
 * @head: list head
 *
 * Delete all entries from the list and return the MBOs to mostcore using
 * the completion callback.
 */
static void complete_all_mbos(struct list_head *head)
{
	unsigned long flags;
	struct mbo *mbo;

	for (;;) {
		spin_lock_irqsave(&dim_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&dim_lock, flags);
			break;
		}

		mbo = list_first_entry(head, struct mbo, list);
		list_del(head->next);
		spin_unlock_irqrestore(&dim_lock, flags);

		mbo->processed_length = 0;
		mbo->status = MBO_E_CLOSE;
		mbo->complete(mbo);
	}
}

/**
 * configure_channel - initialize a channel
 * @most_iface: interface the channel belongs to
 * @ch_idx: channel index to be configured
 * @ccfg: structure that holds the configuration information
 *
 * Receives configuration information from mostcore and initializes
 * the corresponding channel. Return 0 on success, negative on failure.
 */
static int configure_channel(struct most_interface *most_iface, int ch_idx,
			     struct most_channel_config *ccfg)
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	bool const is_tx = ccfg->direction == MOST_CH_TX;
	u16 const sub_size = ccfg->subbuffer_size;
	u16 const buf_size = ccfg->buffer_size;
	u16 new_size;
	unsigned long flags;
	u8 hal_ret;
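	/*
	 * MediaLB channel addresses are even and start at 2; address 0 is
	 * reserved for the system channel (cf. DMA_CHANNELS above).
	 */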
	int const ch_addr = ch_idx * 2 + 2;
	struct hdm_channel *const hdm_ch = dev->hch + ch_idx;

	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);

	if (hdm_ch->is_initialized)
		return -EPERM;

	/* do not reset if the property was set by user, see poison_channel */
	hdm_ch->reset_dbr_size = ccfg->dbr_size ? NULL : &ccfg->dbr_size;

	/* zero value is default dbr_size, see dim2 hal */
	hdm_ch->ch.dbr_size = ccfg->dbr_size;

	switch (ccfg->data_type) {
	case MOST_CH_CONTROL:
		new_size = dim_norm_ctrl_async_buffer_size(buf_size);
		if (new_size == 0) {
			pr_err("%s: too small buffer size\n", hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
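		/*
		 * TX control/async channels get twice the buffer size of
		 * DBR space, apparently to allow double buffering; the
		 * MOST_CH_ASYNC case below follows the same pattern.
		 */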
		hal_ret = dim_init_control(&hdm_ch->ch, is_tx, ch_addr,
					   is_tx ? new_size * 2 : new_size);
		break;
	case MOST_CH_ASYNC:
		new_size = dim_norm_ctrl_async_buffer_size(buf_size);
		if (new_size == 0) {
			pr_err("%s: too small buffer size\n", hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_async(&hdm_ch->ch, is_tx, ch_addr,
					 is_tx ? new_size * 2 : new_size);
		break;
	case MOST_CH_ISOC:
		new_size = dim_norm_isoc_buffer_size(buf_size, sub_size);
		if (new_size == 0) {
			pr_err("%s: invalid sub-buffer size or too small buffer size\n",
			       hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_isoc(&hdm_ch->ch, is_tx, ch_addr, sub_size);
		break;
	case MOST_CH_SYNC:
		new_size = dim_norm_sync_buffer_size(buf_size, sub_size);
		if (new_size == 0) {
			pr_err("%s: invalid sub-buffer size or too small buffer size\n",
			       hdm_ch->name);
			return -EINVAL;
		}
		ccfg->buffer_size = new_size;
		if (new_size != buf_size)
			pr_warn("%s: fixed buffer size (%d -> %d)\n",
				hdm_ch->name, buf_size, new_size);
		spin_lock_irqsave(&dim_lock, flags);
		hal_ret = dim_init_sync(&hdm_ch->ch, is_tx, ch_addr, sub_size);
		break;
	default:
		pr_err("%s: configure failed, bad channel type: %d\n",
		       hdm_ch->name, ccfg->data_type);
		return -EINVAL;
	}

	if (hal_ret != DIM_NO_ERROR) {
		spin_unlock_irqrestore(&dim_lock, flags);
		pr_err("%s: configure failed (%d), type: %d, is_tx: %d\n",
		       hdm_ch->name, hal_ret, ccfg->data_type, (int)is_tx);
		return -ENODEV;
	}

	hdm_ch->data_type = ccfg->data_type;
	hdm_ch->direction = ccfg->direction;
	hdm_ch->is_initialized = true;

	if (hdm_ch->data_type == MOST_CH_ASYNC &&
	    hdm_ch->direction == MOST_CH_TX &&
	    dev->atx_idx < 0)
		dev->atx_idx = ch_idx;

	spin_unlock_irqrestore(&dim_lock, flags);
	ccfg->dbr_size = hdm_ch->ch.dbr_size;

	return 0;
}

/**
 * enqueue - enqueue a buffer for data transfer
 * @most_iface: intended interface
 * @ch_idx: ID of the channel the buffer is intended for
 * @mbo: pointer to the buffer object
 *
 * Push the buffer into pending_list and try to transfer one buffer from
 * pending_list. Return 0 on success, negative on failure.
 */
static int enqueue(struct most_interface *most_iface, int ch_idx,
		   struct mbo *mbo)
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	unsigned long flags;

	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);

	if (!hdm_ch->is_initialized)
		return -EPERM;

	if (mbo->bus_address == 0)
		return -EFAULT;

	spin_lock_irqsave(&dim_lock, flags);
	list_add_tail(&mbo->list, &hdm_ch->pending_list);
	spin_unlock_irqrestore(&dim_lock, flags);

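	/*
	 * The return value is ignored on purpose: -EAGAIN only means the
	 * channel is not ready yet, and a failed enqueue completes the MBO
	 * with MBO_E_INVAL inside try_start_dim_transfer().
	 */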
	(void)try_start_dim_transfer(hdm_ch);

	return 0;
}

/**
 * request_netinfo - triggers retrieval of network info
 * @most_iface: pointer to the interface
 * @ch_idx: corresponding channel ID
 * @on_netinfo: call-back used to deliver network status to mostcore
 *
 * Send a command to INIC which triggers retrieval of network info by means of
 * "Message exchange over MDP/MEP".
 */
static void request_netinfo(struct most_interface *most_iface, int ch_idx,
			    void (*on_netinfo)(struct most_interface *,
					       unsigned char, unsigned char *))
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	struct mbo *mbo;
	u8 *data;

	dev->on_netinfo = on_netinfo;
	if (!on_netinfo)
		return;

	if (dev->atx_idx < 0) {
		pr_err("Async Tx Not initialized\n");
		return;
	}

	mbo = most_get_mbo(&dev->most_iface, dev->atx_idx, NULL);
	if (!mbo)
		return;

	mbo->buffer_length = 5;

	data = mbo->virt_address;

	data[0] = 0x00; /* PML High byte */
	data[1] = 0x03; /* PML Low byte */
	data[2] = 0x02; /* PMHL */
	data[3] = 0x08; /* FPH */
	data[4] = 0x40; /* FMF (FIFO cmd msg - Triggers NAOverMDP) */

	most_submit_mbo(mbo);
}

/**
 * poison_channel - poison buffers of a channel
 * @most_iface: pointer to the interface the channel to be poisoned belongs to
 * @ch_idx: corresponding channel ID
 *
 * Destroy a channel and complete all the buffers in both started_list &
 * pending_list. Return 0 on success, negative on failure.
 */
static int poison_channel(struct most_interface *most_iface, int ch_idx)
{
	struct dim2_hdm *dev = iface_to_hdm(most_iface);
	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
	unsigned long flags;
	u8 hal_ret;
	int ret = 0;

	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);

	if (!hdm_ch->is_initialized)
		return -EPERM;

	spin_lock_irqsave(&dim_lock, flags);
	hal_ret = dim_destroy_channel(&hdm_ch->ch);
	hdm_ch->is_initialized = false;
	if (ch_idx == dev->atx_idx)
		dev->atx_idx = -1;
	spin_unlock_irqrestore(&dim_lock, flags);
	if (hal_ret != DIM_NO_ERROR) {
		pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
		ret = -EFAULT;
	}

	complete_all_mbos(&hdm_ch->started_list);
	complete_all_mbos(&hdm_ch->pending_list);
	if (hdm_ch->reset_dbr_size)
		*hdm_ch->reset_dbr_size = 0;

	return ret;
}

static void *dma_alloc(struct mbo *mbo, u32 size)
{
	struct device *dev = mbo->ifp->driver_dev;

	return dma_alloc_coherent(dev, size, &mbo->bus_address, GFP_KERNEL);
}

static void dma_free(struct mbo *mbo, u32 size)
{
	struct device *dev = mbo->ifp->driver_dev;

	dma_free_coherent(dev, size, mbo->virt_address, mbo->bus_address);
}

static const struct of_device_id dim2_of_match[];
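/* forward declaration; the table itself is defined below, after the platform helpers */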

static struct {
	const char *clock_speed;
	u8 clk_speed;
} clk_mt[] = {
	{ "256fs", CLK_256FS },
	{ "512fs", CLK_512FS },
	{ "1024fs", CLK_1024FS },
	{ "2048fs", CLK_2048FS },
	{ "3072fs", CLK_3072FS },
	{ "4096fs", CLK_4096FS },
	{ "6144fs", CLK_6144FS },
	{ "8192fs", CLK_8192FS },
};

/**
 * get_dim2_clk_speed - converts string to DIM2 clock speed value
 *
 * @clock_speed: string in the format "{NUMBER}fs"
 * @val: pointer to get one of the CLK_{NUMBER}FS values
 *
 * On success stores one of the CLK_{NUMBER}FS values in *val and returns 0,
 * otherwise returns -EINVAL.
 */
static int get_dim2_clk_speed(const char *clock_speed, u8 *val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(clk_mt); i++) {
		if (!strcmp(clock_speed, clk_mt[i].clock_speed)) {
			*val = clk_mt[i].clk_speed;
			return 0;
		}
	}
	return -EINVAL;
}

static void dim2_release(struct device *d)
{
	struct dim2_hdm *dev = container_of(d, struct dim2_hdm, dev);
	unsigned long flags;

	kthread_stop(dev->netinfo_task);

	spin_lock_irqsave(&dim_lock, flags);
	dim_shutdown();
	spin_unlock_irqrestore(&dim_lock, flags);

	if (dev->disable_platform)
		dev->disable_platform(to_platform_device(d->parent));

	kfree(dev);
}

/*
 * dim2_probe - dim2 probe handler
 * @pdev: platform device structure
 *
 * Register the dim2 interface with mostcore and initialize it.
 * Return 0 on success, negative on failure.
 */
static int dim2_probe(struct platform_device *pdev)
{
	const struct dim2_platform_data *pdata;
	const struct of_device_id *of_id;
	const char *clock_speed;
	struct dim2_hdm *dev;
	struct resource *res;
	int ret, i;
	u8 hal_ret;
	u8 dev_fcnt = fcnt;
	int irq;

	enum { MLB_INT_IDX, AHB0_INT_IDX };

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->atx_idx = -1;

	platform_set_drvdata(pdev, dev);

	ret = of_property_read_string(pdev->dev.of_node,
				      "microchip,clock-speed", &clock_speed);
	if (ret) {
		dev_err(&pdev->dev, "missing dt property clock-speed\n");
		goto err_free_dev;
	}

	ret = get_dim2_clk_speed(clock_speed, &dev->clk_speed);
	if (ret) {
		dev_err(&pdev->dev, "bad dt property clock-speed\n");
		goto err_free_dev;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->io_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->io_base)) {
		ret = PTR_ERR(dev->io_base);
		goto err_free_dev;
	}

	of_id = of_match_node(dim2_of_match, pdev->dev.of_node);
	pdata = of_id->data;
	if (pdata) {
		if (pdata->enable) {
			ret = pdata->enable(pdev);
			if (ret)
				goto err_free_dev;
		}
		dev->disable_platform = pdata->disable;
		if (pdata->fcnt)
			dev_fcnt = pdata->fcnt;
	}

	dev_info(&pdev->dev, "sync: num of frames per sub-buffer: %u\n",
		 dev_fcnt);
	hal_ret = dim_startup(dev->io_base, dev->clk_speed, dev_fcnt);
	if (hal_ret != DIM_NO_ERROR) {
		dev_err(&pdev->dev, "dim_startup failed: %d\n", hal_ret);
		ret = -ENODEV;
		goto err_disable_platform;
	}

	irq = platform_get_irq(pdev, AHB0_INT_IDX);
	if (irq < 0) {
		ret = irq;
		goto err_shutdown_dim;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, dim2_ahb_isr,
					dim2_task_irq, 0, "dim2_ahb0_int", dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request ahb0_int irq %d\n", irq);
		goto err_shutdown_dim;
	}

	irq = platform_get_irq(pdev, MLB_INT_IDX);
	if (irq < 0) {
		ret = irq;
		goto err_shutdown_dim;
	}

	ret = devm_request_irq(&pdev->dev, irq, dim2_mlb_isr, 0,
			       "dim2_mlb_int", dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request mlb_int irq %d\n", irq);
		goto err_shutdown_dim;
	}

	init_waitqueue_head(&dev->netinfo_waitq);
	dev->deliver_netinfo = 0;
	dev->netinfo_task = kthread_run(&deliver_netinfo_thread, dev,
					"dim2_netinfo");
	if (IS_ERR(dev->netinfo_task)) {
		ret = PTR_ERR(dev->netinfo_task);
		goto err_shutdown_dim;
	}

	for (i = 0; i < DMA_CHANNELS; i++) {
		struct most_channel_capability *cap = dev->capabilities + i;
		struct hdm_channel *hdm_ch = dev->hch + i;

		INIT_LIST_HEAD(&hdm_ch->pending_list);
		INIT_LIST_HEAD(&hdm_ch->started_list);
		hdm_ch->is_initialized = false;
		snprintf(hdm_ch->name, sizeof(hdm_ch->name), "ca%d", i * 2 + 2);

		cap->name_suffix = hdm_ch->name;
		cap->direction = MOST_CH_RX | MOST_CH_TX;
		cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
				 MOST_CH_ISOC | MOST_CH_SYNC;
		cap->num_buffers_packet = MAX_BUFFERS_PACKET;
		cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
		cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
		cap->buffer_size_streaming = MAX_BUF_SIZE_STREAMING;
	}

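	/*
	 * res->start has type resource_size_t, whose width is platform
	 * dependent, hence the runtime selection of the format string below.
	 */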
	{
		const char *fmt;

		if (sizeof(res->start) == sizeof(long long))
			fmt = "dim2-%016llx";
		else if (sizeof(res->start) == sizeof(long))
			fmt = "dim2-%016lx";
		else
			fmt = "dim2-%016x";

		snprintf(dev->name, sizeof(dev->name), fmt, res->start);
	}

	dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
	dev->most_iface.description = dev->name;
	dev->most_iface.num_channels = DMA_CHANNELS;
	dev->most_iface.channel_vector = dev->capabilities;
	dev->most_iface.configure = configure_channel;
	dev->most_iface.enqueue = enqueue;
	dev->most_iface.dma_alloc = dma_alloc;
	dev->most_iface.dma_free = dma_free;
	dev->most_iface.poison_channel = poison_channel;
	dev->most_iface.request_netinfo = request_netinfo;
	dev->most_iface.driver_dev = &pdev->dev;
	dev->most_iface.dev = &dev->dev;
	dev->dev.init_name = dev->name;
	dev->dev.parent = &pdev->dev;
	dev->dev.release = dim2_release;

	return most_register_interface(&dev->most_iface);

err_shutdown_dim:
	dim_shutdown();
err_disable_platform:
	if (dev->disable_platform)
		dev->disable_platform(pdev);
err_free_dev:
	kfree(dev);

	return ret;
}

/**
 * dim2_remove - dim2 remove handler
 * @pdev: platform device structure
 *
 * Unregister the interface from mostcore
 */
static int dim2_remove(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	most_deregister_interface(&dev->most_iface);

	return 0;
}

/* platform specific functions [[ */

static int fsl_mx6_enable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	int ret;

	dev->clk = devm_clk_get(&pdev->dev, "mlb");
	if (IS_ERR_OR_NULL(dev->clk)) {
		dev_err(&pdev->dev, "unable to get mlb clock\n");
		return -EFAULT;
	}

	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "%s\n", "clk_prepare_enable failed");
		return ret;
	}

	if (dev->clk_speed >= CLK_2048FS) {
		/* enable pll */
		dev->clk_pll = devm_clk_get(&pdev->dev, "pll8_mlb");
		if (IS_ERR_OR_NULL(dev->clk_pll)) {
			dev_err(&pdev->dev, "unable to get mlb pll clock\n");
			clk_disable_unprepare(dev->clk);
			return -EFAULT;
		}

		writel(0x888, dev->io_base + 0x38);
		clk_prepare_enable(dev->clk_pll);
	}

	return 0;
}

static void fsl_mx6_disable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	if (dev->clk_speed >= CLK_2048FS)
		clk_disable_unprepare(dev->clk_pll);

	clk_disable_unprepare(dev->clk);
}

static int rcar_gen2_enable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	int ret;

	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		return PTR_ERR(dev->clk);
	}

	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "%s\n", "clk_prepare_enable failed");
		return ret;
	}

	if (dev->clk_speed >= CLK_2048FS) {
		/* enable MLP pll and LVDS drivers */
		writel(0x03, dev->io_base + 0x600);
		/* set bias */
		writel(0x888, dev->io_base + 0x38);
	} else {
		/* PLL */
		writel(0x04, dev->io_base + 0x600);
	}

	/* BBCR = 0b11 */
	writel(0x03, dev->io_base + 0x500);
	writel(0x0002FF02, dev->io_base + 0x508);

	return 0;
}

static void rcar_gen2_disable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	clk_disable_unprepare(dev->clk);

	/* disable PLLs and LVDS drivers */
	writel(0x0, dev->io_base + 0x600);
}

static int rcar_gen3_enable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);
	u32 enable_512fs = dev->clk_speed == CLK_512FS;
	int ret;

	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		return PTR_ERR(dev->clk);
	}

	ret = clk_prepare_enable(dev->clk);
	if (ret) {
		dev_err(&pdev->dev, "%s\n", "clk_prepare_enable failed");
		return ret;
	}

	/* PLL */
	writel(0x04, dev->io_base + 0x600);

	writel(enable_512fs, dev->io_base + 0x604);

	/* BBCR = 0b11 */
	writel(0x03, dev->io_base + 0x500);
	writel(0x0002FF02, dev->io_base + 0x508);

	return 0;
}

static void rcar_gen3_disable(struct platform_device *pdev)
{
	struct dim2_hdm *dev = platform_get_drvdata(pdev);

	clk_disable_unprepare(dev->clk);

	/* disable PLLs and LVDS drivers */
	writel(0x0, dev->io_base + 0x600);
}

/* ]] platform specific functions */

enum dim2_platforms { FSL_MX6, RCAR_GEN2, RCAR_GEN3 };

static struct dim2_platform_data plat_data[] = {
	[FSL_MX6] = {
		.enable = fsl_mx6_enable,
		.disable = fsl_mx6_disable,
	},
	[RCAR_GEN2] = {
		.enable = rcar_gen2_enable,
		.disable = rcar_gen2_disable,
	},
	[RCAR_GEN3] = {
		.enable = rcar_gen3_enable,
		.disable = rcar_gen3_disable,
		.fcnt = 3,
	},
};

static const struct of_device_id dim2_of_match[] = {
	{
		.compatible = "fsl,imx6q-mlb150",
		.data = plat_data + FSL_MX6
	},
	{
		.compatible = "renesas,mlp",
		.data = plat_data + RCAR_GEN2
	},
	{
		.compatible = "renesas,rcar-gen3-mlp",
		.data = plat_data + RCAR_GEN3
	},
	{
		.compatible = "xlnx,axi4-os62420_3pin-1.00.a",
	},
	{
		.compatible = "xlnx,axi4-os62420_6pin-1.00.a",
	},
	{},
};
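/*
 * A minimal sketch of a device-tree node matched by this table (all values
 * are placeholders, not taken from a real board):
 *
 *	mlb@0 {
 *		compatible = "renesas,mlp";
 *		reg = <0x0 0x1000>;
 *		interrupts = <... mlb ...>, <... ahb0 ...>;
 *		microchip,clock-speed = "1024fs";
 *	};
 *
 * dim2_probe() maps the first memory resource, requests the interrupts in
 * the order MLB_INT_IDX, AHB0_INT_IDX and matches "microchip,clock-speed"
 * against the clk_mt table above.
 */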

MODULE_DEVICE_TABLE(of, dim2_of_match);

static struct platform_driver dim2_driver = {
	.probe = dim2_probe,
	.remove = dim2_remove,
	.driver = {
		.name = "hdm_dim2",
		.of_match_table = dim2_of_match,
		.dev_groups = dim2_groups,
	},
};

module_platform_driver(dim2_driver);

MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
MODULE_DESCRIPTION("MediaLB DIM2 Hardware Dependent Module");
MODULE_LICENSE("GPL");