cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ti_sci.c (101891B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Texas Instruments System Control Interface Protocol Driver
      4 *
      5 * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
      6 *	Nishanth Menon
      7 */
      8
      9#define pr_fmt(fmt) "%s: " fmt, __func__
     10
     11#include <linux/bitmap.h>
     12#include <linux/debugfs.h>
     13#include <linux/export.h>
     14#include <linux/io.h>
     15#include <linux/iopoll.h>
     16#include <linux/kernel.h>
     17#include <linux/mailbox_client.h>
     18#include <linux/module.h>
     19#include <linux/of_device.h>
     20#include <linux/semaphore.h>
     21#include <linux/slab.h>
     22#include <linux/soc/ti/ti-msgmgr.h>
     23#include <linux/soc/ti/ti_sci_protocol.h>
     24#include <linux/reboot.h>
     25
     26#include "ti_sci.h"
     27
     28/* List of all TI SCI devices active in system */
     29static LIST_HEAD(ti_sci_list);
     30/* Protection for the entire list */
     31static DEFINE_MUTEX(ti_sci_list_mutex);
     32
     33/**
     34 * struct ti_sci_xfer - Structure representing a message flow
     35 * @tx_message:	Transmit message
     36 * @rx_len:	Receive message length
     37 * @xfer_buf:	Preallocated buffer to store receive message
     38	 *		Since we work with a request-ACK protocol, we can
     39 *		reuse the same buffer for the rx path as we
     40 *		use for the tx path.
     41 * @done:	completion event
     42 */
     43struct ti_sci_xfer {
     44	struct ti_msgmgr_message tx_message;
     45	u8 rx_len;
     46	u8 *xfer_buf;
     47	struct completion done;
     48};
     49
     50/**
     51 * struct ti_sci_xfers_info - Structure to manage transfer information
     52	 * @sem_xfer_count:	Counting semaphore for managing the maximum
     53	 *			number of simultaneous messages.
     54 * @xfer_block:		Preallocated Message array
     55 * @xfer_alloc_table:	Bitmap table for allocated messages.
     56 *			Index of this bitmap table is also used for message
     57 *			sequence identifier.
     58 * @xfer_lock:		Protection for message allocation
     59 */
     60struct ti_sci_xfers_info {
     61	struct semaphore sem_xfer_count;
     62	struct ti_sci_xfer *xfer_block;
     63	unsigned long *xfer_alloc_table;
     64	/* protect transfer allocation */
     65	spinlock_t xfer_lock;
     66};
     67
     68/**
     69 * struct ti_sci_desc - Description of SoC integration
     70 * @default_host_id:	Host identifier representing the compute entity
     71 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
     72 * @max_msgs: Maximum number of messages that can be pending
     73 *		  simultaneously in the system
     74 * @max_msg_size: Maximum size of data per message that can be handled.
     75 */
     76struct ti_sci_desc {
     77	u8 default_host_id;
     78	int max_rx_timeout_ms;
     79	int max_msgs;
     80	int max_msg_size;
     81};
     82
     83/**
     84 * struct ti_sci_info - Structure representing a TI SCI instance
     85 * @dev:	Device pointer
     86 * @desc:	SoC description for this instance
     87 * @nb:	Reboot Notifier block
     88 * @d:		Debugfs file entry
     89 * @debug_region: Memory region where the debug message are available
     90 * @debug_region_size: Debug region size
     91 * @debug_buffer: Buffer allocated to copy debug messages.
     92 * @handle:	Instance of TI SCI handle to send to clients.
     93 * @cl:		Mailbox Client
     94 * @chan_tx:	Transmit mailbox channel
     95 * @chan_rx:	Receive mailbox channel
     96 * @minfo:	Message info
     97 * @node:	list head
     98 * @host_id:	Host ID
     99 * @users:	Number of users of this instance
    100 * @is_suspending: Flag set to indicate that we are in the suspend path.
    101 */
    102struct ti_sci_info {
    103	struct device *dev;
    104	struct notifier_block nb;
    105	const struct ti_sci_desc *desc;
    106	struct dentry *d;
    107	void __iomem *debug_region;
    108	char *debug_buffer;
    109	size_t debug_region_size;
    110	struct ti_sci_handle handle;
    111	struct mbox_client cl;
    112	struct mbox_chan *chan_tx;
    113	struct mbox_chan *chan_rx;
    114	struct ti_sci_xfers_info minfo;
    115	struct list_head node;
    116	u8 host_id;
    117	/* protected by ti_sci_list_mutex */
    118	int users;
    119	bool is_suspending;
    120};
    121
    122#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
    123#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
    124#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
    125
    126#ifdef CONFIG_DEBUG_FS
    127
    128/**
    129 * ti_sci_debug_show() - Helper to dump the debug log
    130 * @s:	sequence file pointer
    131 * @unused:	unused.
    132 *
    133 * Return: 0
    134 */
    135static int ti_sci_debug_show(struct seq_file *s, void *unused)
    136{
    137	struct ti_sci_info *info = s->private;
    138
    139	memcpy_fromio(info->debug_buffer, info->debug_region,
    140		      info->debug_region_size);
    141	/*
    142	 * We don't trust firmware to leave a NUL-terminated last byte (hence
    143	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
    144	 * specific data format for debug messages, we just present the data
    145	 * in the buffer as is - we expect the messages to be self-explanatory.
    146	 */
    147	seq_puts(s, info->debug_buffer);
    148	return 0;
    149}
    150
    151/* Provide the log file operations interface */
    152DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
    153
    154/**
    155 * ti_sci_debugfs_create() - Create log debug file
    156 * @pdev:	platform device pointer
    157 * @info:	Pointer to SCI entity information
    158 *
    159 * Return: 0 if all went fine, else corresponding error.
    160 */
    161static int ti_sci_debugfs_create(struct platform_device *pdev,
    162				 struct ti_sci_info *info)
    163{
    164	struct device *dev = &pdev->dev;
    165	struct resource *res;
    166	char debug_name[50] = "ti_sci_debug@";
    167
    168	/* Debug region is optional */
    169	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
    170					   "debug_messages");
    171	info->debug_region = devm_ioremap_resource(dev, res);
    172	if (IS_ERR(info->debug_region))
    173		return 0;
    174	info->debug_region_size = resource_size(res);
    175
    176	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
    177					  sizeof(char), GFP_KERNEL);
    178	if (!info->debug_buffer)
    179		return -ENOMEM;
    180	/* Set up NUL termination */
    181	info->debug_buffer[info->debug_region_size] = 0;
    182
    183	info->d = debugfs_create_file(strncat(debug_name, dev_name(dev),
    184					      sizeof(debug_name) -
    185					      sizeof("ti_sci_debug@")),
    186				      0444, NULL, info, &ti_sci_debug_fops);
    187	if (IS_ERR(info->d))
    188		return PTR_ERR(info->d);
    189
    190	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
    191		info->debug_region, info->debug_region_size, res);
    192	return 0;
    193}
    194
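/*
 * Usage sketch (the device name below is a made-up example): with
 * CONFIG_DEBUG_FS enabled, the file created above lands at the debugfs
 * root, so the firmware log can be read from userspace with e.g.:
 *
 *	cat /sys/kernel/debug/ti_sci_debug@44083000.system-controller
 */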
    195/**
    196 * ti_sci_debugfs_destroy() - clean up log debug file
    197 * @pdev:	platform device pointer
    198 * @info:	Pointer to SCI entity information
    199 */
    200static void ti_sci_debugfs_destroy(struct platform_device *pdev,
    201				   struct ti_sci_info *info)
    202{
    203	if (IS_ERR(info->debug_region))
    204		return;
    205
    206	debugfs_remove(info->d);
    207}
    208#else /* CONFIG_DEBUG_FS */
    209static inline int ti_sci_debugfs_create(struct platform_device *dev,
    210					struct ti_sci_info *info)
    211{
    212	return 0;
    213}
    214
    215static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
    216					  struct ti_sci_info *info)
    217{
    218}
    219#endif /* CONFIG_DEBUG_FS */
    220
    221/**
    222 * ti_sci_dump_header_dbg() - Helper to dump a message header.
    223 * @dev:	Device pointer corresponding to the SCI entity
    224 * @hdr:	pointer to header.
    225 */
    226static inline void ti_sci_dump_header_dbg(struct device *dev,
    227					  struct ti_sci_msg_hdr *hdr)
    228{
    229	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
    230		hdr->type, hdr->host, hdr->seq, hdr->flags);
    231}
    232
    233/**
    234 * ti_sci_rx_callback() - mailbox client callback for receive messages
    235 * @cl:	client pointer
    236 * @m:	mailbox message
    237 *
    238 * Processes one received message to appropriate transfer information and
    239 * signals completion of the transfer.
    240 *
    241 * NOTE: This function will be invoked in IRQ context, hence it should
    242 * do as little work as possible.
    243 */
    244static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
    245{
    246	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
    247	struct device *dev = info->dev;
    248	struct ti_sci_xfers_info *minfo = &info->minfo;
    249	struct ti_msgmgr_message *mbox_msg = m;
    250	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
    251	struct ti_sci_xfer *xfer;
    252	u8 xfer_id;
    253
    254	xfer_id = hdr->seq;
    255
    256	/*
    257	 * Are we even expecting this?
    258	 * NOTE: barriers were implicit in locks used for modifying the bitmap
    259	 */
    260	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
    261		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
    262		return;
    263	}
    264
    265	xfer = &minfo->xfer_block[xfer_id];
    266
    267	/* Is the message of valid length? */
    268	if (mbox_msg->len > info->desc->max_msg_size) {
    269		dev_err(dev, "Unable to handle %zu xfer(max %d)\n",
    270			mbox_msg->len, info->desc->max_msg_size);
    271		ti_sci_dump_header_dbg(dev, hdr);
    272		return;
    273	}
    274	if (mbox_msg->len < xfer->rx_len) {
    275		dev_err(dev, "Recv xfer %zu < expected %d length\n",
    276			mbox_msg->len, xfer->rx_len);
    277		ti_sci_dump_header_dbg(dev, hdr);
    278		return;
    279	}
    280
    281	ti_sci_dump_header_dbg(dev, hdr);
    282	/* Take a copy into the rx buffer. */
    283	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
    284	complete(&xfer->done);
    285}
    286
    287/**
    288 * ti_sci_get_one_xfer() - Allocate one message
    289 * @info:	Pointer to SCI entity information
    290 * @msg_type:	Message type
    291 * @msg_flags:	Flag to set for the message
    292 * @tx_message_size: transmit message size
    293 * @rx_message_size: receive message size
    294 *
    295 * Helper function used by the various command functions exposed to
    296 * clients of this driver to allocate a message.
    297 *
    298 * This function can sleep depending on pending requests already in the system
    299 * for the SCI entity. Further, this also holds a spinlock to maintain integrity
    300 * of internal data structures.
    301 *
    302 * Return: Valid xfer pointer on success, else an ERR_PTR-encoded error.
    303 */
    304static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
    305					       u16 msg_type, u32 msg_flags,
    306					       size_t tx_message_size,
    307					       size_t rx_message_size)
    308{
    309	struct ti_sci_xfers_info *minfo = &info->minfo;
    310	struct ti_sci_xfer *xfer;
    311	struct ti_sci_msg_hdr *hdr;
    312	unsigned long flags;
    313	unsigned long bit_pos;
    314	u8 xfer_id;
    315	int ret;
    316	int timeout;
    317
    318	/* Ensure we have sane transfer sizes */
    319	if (rx_message_size > info->desc->max_msg_size ||
    320	    tx_message_size > info->desc->max_msg_size ||
    321	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
    322		return ERR_PTR(-ERANGE);
    323
    324	/*
    325	 * Ensure we have only a controlled number of pending messages.
    326	 * Ideally, we might just have to wait for a single message; be
    327	 * conservative and wait 5 times that.
    328	 */
    329	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
    330	ret = down_timeout(&minfo->sem_xfer_count, timeout);
    331	if (ret < 0)
    332		return ERR_PTR(ret);
    333
    334	/* Keep the locked section as small as possible */
    335	spin_lock_irqsave(&minfo->xfer_lock, flags);
    336	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
    337				      info->desc->max_msgs);
    338	set_bit(bit_pos, minfo->xfer_alloc_table);
    339	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
    340
    341	/*
    342	 * We already ensured in probe that the max number of messages can
    343	 * fit in hdr.seq - NOTE: this improves access latencies
    344	 * to predictable O(1) access, BUT, it opens us to risk if the
    345	 * remote misbehaves with corrupted message sequence responses.
    346	 * If that happens, we are going to be messed up anyway.
    347	 */
    348	xfer_id = (u8)bit_pos;
    349
    350	xfer = &minfo->xfer_block[xfer_id];
    351
    352	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
    353	xfer->tx_message.len = tx_message_size;
    354	xfer->tx_message.chan_rx = info->chan_rx;
    355	xfer->tx_message.timeout_rx_ms = info->desc->max_rx_timeout_ms;
    356	xfer->rx_len = (u8)rx_message_size;
    357
    358	reinit_completion(&xfer->done);
    359
    360	hdr->seq = xfer_id;
    361	hdr->type = msg_type;
    362	hdr->host = info->host_id;
    363	hdr->flags = msg_flags;
    364
    365	return xfer;
    366}
    367
    368/**
    369 * ti_sci_put_one_xfer() - Release a message
    370 * @minfo:	transfer info pointer
    371 * @xfer:	message that was reserved by ti_sci_get_one_xfer
    372 *
    373 * This holds a spinlock to maintain integrity of internal data structures.
    374 */
    375static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
    376				struct ti_sci_xfer *xfer)
    377{
    378	unsigned long flags;
    379	struct ti_sci_msg_hdr *hdr;
    380	u8 xfer_id;
    381
    382	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
    383	xfer_id = hdr->seq;
    384
    385	/*
    386	 * Keep the locked section as small as possible
    387	 * NOTE: we might get away with an smp_mb() and no lock here,
    388	 * but just be conservative and symmetric.
    389	 */
    390	spin_lock_irqsave(&minfo->xfer_lock, flags);
    391	clear_bit(xfer_id, minfo->xfer_alloc_table);
    392	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
    393
    394	/* Increment the count for the next user to get through */
    395	up(&minfo->sem_xfer_count);
    396}
    397
    398/**
    399 * ti_sci_do_xfer() - Do one transfer
    400 * @info:	Pointer to SCI entity information
    401 * @xfer:	Transfer to initiate and wait for response
    402 *
    403 * Return: -ETIMEDOUT in case of no response; in case of a transmit
    404 *	   error, the corresponding error code; else, if all goes
    405 *	   well, 0.
    406 */
    407static inline int ti_sci_do_xfer(struct ti_sci_info *info,
    408				 struct ti_sci_xfer *xfer)
    409{
    410	int ret;
    411	int timeout;
    412	struct device *dev = info->dev;
    413	bool done_state = true;
    414
    415	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
    416	if (ret < 0)
    417		return ret;
    418
    419	ret = 0;
    420
    421	if (!info->is_suspending) {
    422		/* And we wait for the response. */
    423		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
    424		if (!wait_for_completion_timeout(&xfer->done, timeout))
    425			ret = -ETIMEDOUT;
    426	} else {
    427		/*
    428		 * If we are suspending, we cannot use wait_for_completion_timeout
    429		 * during noirq phase, so we must manually poll the completion.
    430		 */
    431		ret = read_poll_timeout_atomic(try_wait_for_completion, done_state,
    432					       true, 1,
    433					       info->desc->max_rx_timeout_ms * 1000,
    434					       false, &xfer->done);
    435	}
    436
    437	if (ret == -ETIMEDOUT || !done_state) {
    438		dev_err(dev, "Mbox timed out in resp (caller: %pS)\n",
    439			(void *)_RET_IP_);
    440	}
    441
    442	/*
    443	 * NOTE: we might prefer not to need the mailbox ticker to manage the
    444	 * transfer queueing since the protocol layer queues things by itself.
    445	 * Unfortunately, we have to kick the mailbox framework after we have
    446	 * received our message.
    447	 */
    448	mbox_client_txdone(info->chan_tx, ret);
    449
    450	return ret;
    451}
    452
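/*
 * For reference, the command implementations below all follow the same
 * skeleton built on the helpers above; a minimal distilled sketch, where
 * the message type and the request/response structs are placeholders:
 *
 *	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_XXX,
 *				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
 *				   sizeof(*req), sizeof(*resp));
 *	if (IS_ERR(xfer))
 *		return PTR_ERR(xfer);
 *	req = (struct ti_sci_msg_req_xxx *)xfer->xfer_buf;
 *	//... fill request fields ...
 *	ret = ti_sci_do_xfer(info, xfer);
 *	if (!ret) {
 *		resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
 *		ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
 *	}
 *	ti_sci_put_one_xfer(&info->minfo, xfer);
 *	return ret;
 */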
    453/**
    454 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
    455 * @info:	Pointer to SCI entity information
    456 *
    457 * Updates the SCI information in the internal data structure.
    458 *
    459 * Return: 0 if all went fine, else return appropriate error.
    460 */
    461static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
    462{
    463	struct device *dev = info->dev;
    464	struct ti_sci_handle *handle = &info->handle;
    465	struct ti_sci_version_info *ver = &handle->version;
    466	struct ti_sci_msg_resp_version *rev_info;
    467	struct ti_sci_xfer *xfer;
    468	int ret;
    469
    470	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
    471				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
    472				   sizeof(struct ti_sci_msg_hdr),
    473				   sizeof(*rev_info));
    474	if (IS_ERR(xfer)) {
    475		ret = PTR_ERR(xfer);
    476		dev_err(dev, "Message alloc failed(%d)\n", ret);
    477		return ret;
    478	}
    479
    480	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
    481
    482	ret = ti_sci_do_xfer(info, xfer);
    483	if (ret) {
    484		dev_err(dev, "Mbox send fail %d\n", ret);
    485		goto fail;
    486	}
    487
    488	ver->abi_major = rev_info->abi_major;
    489	ver->abi_minor = rev_info->abi_minor;
    490	ver->firmware_revision = rev_info->firmware_revision;
    491	strncpy(ver->firmware_description, rev_info->firmware_description,
    492		sizeof(ver->firmware_description));
    493
    494fail:
    495	ti_sci_put_one_xfer(&info->minfo, xfer);
    496	return ret;
    497}
    498
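/*
 * Client-side sketch, assuming the handle accessors declared in
 * include/linux/soc/ti/ti_sci_protocol.h: once a handle is obtained,
 * the version information filled in above can be read directly.
 */
#if 0	/* illustrative only, not part of this driver */
static int example_print_ti_sci_version(struct device *dev)
{
	const struct ti_sci_handle *h = devm_ti_sci_get_handle(dev);

	if (IS_ERR(h))
		return PTR_ERR(h);

	dev_info(dev, "TI SCI ABI %u.%u, firmware rev 0x%04x '%s'\n",
		 h->version.abi_major, h->version.abi_minor,
		 h->version.firmware_revision,
		 h->version.firmware_description);
	return 0;
}
#endif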
    499/**
    500 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
    501 * @r:	pointer to response buffer
    502 *
    503 * Return: true if the response was an ACK, else returns false.
    504 */
    505static inline bool ti_sci_is_response_ack(void *r)
    506{
    507	struct ti_sci_msg_hdr *hdr = r;
    508
    509	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
    510}
    511
    512/**
    513 * ti_sci_set_device_state() - Set device state helper
    514 * @handle:	pointer to TI SCI handle
    515 * @id:		Device identifier
    516 * @flags:	flags to setup for the device
    517 * @state:	State to move the device to
    518 *
    519 * Return: 0 if all went well, else returns appropriate error value.
    520 */
    521static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
    522				   u32 id, u32 flags, u8 state)
    523{
    524	struct ti_sci_info *info;
    525	struct ti_sci_msg_req_set_device_state *req;
    526	struct ti_sci_msg_hdr *resp;
    527	struct ti_sci_xfer *xfer;
    528	struct device *dev;
    529	int ret = 0;
    530
    531	if (IS_ERR(handle))
    532		return PTR_ERR(handle);
    533	if (!handle)
    534		return -EINVAL;
    535
    536	info = handle_to_ti_sci_info(handle);
    537	dev = info->dev;
    538
    539	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
    540				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
    541				   sizeof(*req), sizeof(*resp));
    542	if (IS_ERR(xfer)) {
    543		ret = PTR_ERR(xfer);
    544		dev_err(dev, "Message alloc failed(%d)\n", ret);
    545		return ret;
    546	}
    547	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
    548	req->id = id;
    549	req->state = state;
    550
    551	ret = ti_sci_do_xfer(info, xfer);
    552	if (ret) {
    553		dev_err(dev, "Mbox send fail %d\n", ret);
    554		goto fail;
    555	}
    556
    557	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
    558
    559	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
    560
    561fail:
    562	ti_sci_put_one_xfer(&info->minfo, xfer);
    563
    564	return ret;
    565}
    566
    567/**
    568 * ti_sci_get_device_state() - Get device state helper
    569 * @handle:	Handle to the device
    570 * @id:		Device Identifier
    571 * @clcnt:	Pointer to Context Loss Count
    572 * @resets:	pointer to resets
    573 * @p_state:	pointer to p_state
    574 * @c_state:	pointer to c_state
    575 *
    576 * Return: 0 if all went fine, else return appropriate error.
    577 */
    578static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
    579				   u32 id,  u32 *clcnt,  u32 *resets,
    580				    u8 *p_state,  u8 *c_state)
    581{
    582	struct ti_sci_info *info;
    583	struct ti_sci_msg_req_get_device_state *req;
    584	struct ti_sci_msg_resp_get_device_state *resp;
    585	struct ti_sci_xfer *xfer;
    586	struct device *dev;
    587	int ret = 0;
    588
    589	if (IS_ERR(handle))
    590		return PTR_ERR(handle);
    591	if (!handle)
    592		return -EINVAL;
    593
    594	if (!clcnt && !resets && !p_state && !c_state)
    595		return -EINVAL;
    596
    597	info = handle_to_ti_sci_info(handle);
    598	dev = info->dev;
    599
    600	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
    601				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
    602				   sizeof(*req), sizeof(*resp));
    603	if (IS_ERR(xfer)) {
    604		ret = PTR_ERR(xfer);
    605		dev_err(dev, "Message alloc failed(%d)\n", ret);
    606		return ret;
    607	}
    608	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
    609	req->id = id;
    610
    611	ret = ti_sci_do_xfer(info, xfer);
    612	if (ret) {
    613		dev_err(dev, "Mbox send fail %d\n", ret);
    614		goto fail;
    615	}
    616
    617	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
    618	if (!ti_sci_is_response_ack(resp)) {
    619		ret = -ENODEV;
    620		goto fail;
    621	}
    622
    623	if (clcnt)
    624		*clcnt = resp->context_loss_count;
    625	if (resets)
    626		*resets = resp->resets;
    627	if (p_state)
    628		*p_state = resp->programmed_state;
    629	if (c_state)
    630		*c_state = resp->current_state;
    631fail:
    632	ti_sci_put_one_xfer(&info->minfo, xfer);
    633
    634	return ret;
    635}
    636
    637/**
    638 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
    639 *			     that can be shared with other hosts.
    640 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
    641 * @id:		Device Identifier
    642 *
    643 * Request for the device - NOTE: the client MUST maintain integrity of
    644 * usage count by balancing get_device with put_device. No refcounting is
    645 * managed by driver for that purpose.
    646 *
    647 * Return: 0 if all went fine, else return appropriate error.
    648 */
    649static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
    650{
    651	return ti_sci_set_device_state(handle, id, 0,
    652				       MSG_DEVICE_SW_STATE_ON);
    653}
    654
    655/**
    656 * ti_sci_cmd_get_device_exclusive() - command to request a device managed by
    657 *				       TISCI that is exclusively owned by the
    658 *				       requesting host.
    659 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
    660 * @id:		Device Identifier
    661 *
    662 * Request for the device - NOTE: the client MUST maintain integrity of
    663 * usage count by balancing get_device with put_device. No refcounting is
    664 * managed by driver for that purpose.
    665 *
    666 * Return: 0 if all went fine, else return appropriate error.
    667 */
    668static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
    669					   u32 id)
    670{
    671	return ti_sci_set_device_state(handle, id,
    672				       MSG_FLAG_DEVICE_EXCLUSIVE,
    673				       MSG_DEVICE_SW_STATE_ON);
    674}
    675
    676/**
    677 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
    678 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
    679 * @id:		Device Identifier
    680 *
    681 * Request for the device - NOTE: the client MUST maintain integrity of
    682 * usage count by balancing get_device with put_device. No refcounting is
    683 * managed by driver for that purpose.
    684 *
    685 * Return: 0 if all went fine, else return appropriate error.
    686 */
    687static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
    688{
    689	return ti_sci_set_device_state(handle, id, 0,
    690				       MSG_DEVICE_SW_STATE_RETENTION);
    691}
    692
    693/**
    694 * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
    695 *					TISCI that is exclusively owned by
    696 *					requesting host.
    697 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
    698 * @id:		Device Identifier
    699 *
    700 * Request for the device - NOTE: the client MUST maintain integrity of
    701 * usage count by balancing get_device with put_device. No refcounting is
    702 * managed by driver for that purpose.
    703 *
    704 * Return: 0 if all went fine, else return appropriate error.
    705 */
    706static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
    707					    u32 id)
    708{
    709	return ti_sci_set_device_state(handle, id,
    710				       MSG_FLAG_DEVICE_EXCLUSIVE,
    711				       MSG_DEVICE_SW_STATE_RETENTION);
    712}
    713
    714/**
    715 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
    716 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
    717 * @id:		Device Identifier
    718 *
    719 * Request for the device - NOTE: the client MUST maintain integrity of
    720 * usage count by balancing get_device with put_device. No refcounting is
    721 * managed by driver for that purpose.
    722 *
    723 * Return: 0 if all went fine, else return appropriate error.
    724 */
    725static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
    726{
    727	return ti_sci_set_device_state(handle, id,
    728				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
    729}
    730
    731/**
    732 * ti_sci_cmd_dev_is_valid() - Is the device valid
    733 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
    734 * @id:		Device Identifier
    735 *
    736 * Return: 0 if all went fine and the device ID is valid, else return
    737 * appropriate error.
    738 */
    739static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
    740{
    741	u8 unused;
    742
    743	/* check the device state which will also tell us if the ID is valid */
    744	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
    745}
    746
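/*
 * Client-side sketch of the balanced get/put contract stressed in the
 * comments above (ops layout as declared in ti_sci_protocol.h; the
 * device ID is a made-up example):
 */
#if 0	/* illustrative only, not part of this driver */
static int example_use_device(const struct ti_sci_handle *h, u32 dev_id)
{
	int ret;

	/* Power the device up; must be balanced with put_device() */
	ret = h->ops.dev_ops.get_device(h, dev_id);
	if (ret)
		return ret;

	/* ... use the device ... */

	/* Release our vote so firmware may power it down */
	return h->ops.dev_ops.put_device(h, dev_id);
}
#endif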
    747/**
    748 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
    749 * @handle:	Pointer to TISCI handle
    750 * @id:		Device Identifier
    751 * @count:	Pointer to Context Loss counter to populate
    752 *
    753 * Return: 0 if all went fine, else return appropriate error.
    754 */
    755static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
    756				    u32 *count)
    757{
    758	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
    759}
    760
    761/**
    762 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
    763 * @handle:	Pointer to TISCI handle
    764 * @id:		Device Identifier
    765 * @r_state:	true if requested to be idle
    766 *
    767 * Return: 0 if all went fine, else return appropriate error.
    768 */
    769static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
    770				  bool *r_state)
    771{
    772	int ret;
    773	u8 state;
    774
    775	if (!r_state)
    776		return -EINVAL;
    777
    778	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
    779	if (ret)
    780		return ret;
    781
    782	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
    783
    784	return 0;
    785}
    786
    787/**
    788 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
    789 * @handle:	Pointer to TISCI handle
    790 * @id:		Device Identifier
    791 * @r_state:	true if requested to be stopped
    792 * @curr_state:	true if currently stopped.
    793 *
    794 * Return: 0 if all went fine, else return appropriate error.
    795 */
    796static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
    797				  bool *r_state,  bool *curr_state)
    798{
    799	int ret;
    800	u8 p_state, c_state;
    801
    802	if (!r_state && !curr_state)
    803		return -EINVAL;
    804
    805	ret =
    806	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
    807	if (ret)
    808		return ret;
    809
    810	if (r_state)
    811		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
    812	if (curr_state)
    813		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
    814
    815	return 0;
    816}
    817
    818/**
    819 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
    820 * @handle:	Pointer to TISCI handle
    821 * @id:		Device Identifier
    822 * @r_state:	true if requested to be ON
    823 * @curr_state:	true if currently ON and active
    824 *
    825 * Return: 0 if all went fine, else return appropriate error.
    826 */
    827static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
    828				bool *r_state,  bool *curr_state)
    829{
    830	int ret;
    831	u8 p_state, c_state;
    832
    833	if (!r_state && !curr_state)
    834		return -EINVAL;
    835
    836	ret =
    837	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
    838	if (ret)
    839		return ret;
    840
    841	if (r_state)
    842		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
    843	if (curr_state)
    844		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
    845
    846	return 0;
    847}
    848
    849/**
    850 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
    851 * @handle:	Pointer to TISCI handle
    852 * @id:		Device Identifier
    853 * @curr_state:	true if currently transitioning.
    854 *
    855 * Return: 0 if all went fine, else return appropriate error.
    856 */
    857static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
    858				   bool *curr_state)
    859{
    860	int ret;
    861	u8 state;
    862
    863	if (!curr_state)
    864		return -EINVAL;
    865
    866	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
    867	if (ret)
    868		return ret;
    869
    870	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
    871
    872	return 0;
    873}
    874
    875/**
    876 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
    877 *				    by TISCI
    878 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
    879 * @id:		Device Identifier
    880 * @reset_state: Device specific reset bit field
    881 *
    882 * Return: 0 if all went fine, else return appropriate error.
    883 */
    884static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
    885					u32 id, u32 reset_state)
    886{
    887	struct ti_sci_info *info;
    888	struct ti_sci_msg_req_set_device_resets *req;
    889	struct ti_sci_msg_hdr *resp;
    890	struct ti_sci_xfer *xfer;
    891	struct device *dev;
    892	int ret = 0;
    893
    894	if (IS_ERR(handle))
    895		return PTR_ERR(handle);
    896	if (!handle)
    897		return -EINVAL;
    898
    899	info = handle_to_ti_sci_info(handle);
    900	dev = info->dev;
    901
    902	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
    903				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
    904				   sizeof(*req), sizeof(*resp));
    905	if (IS_ERR(xfer)) {
    906		ret = PTR_ERR(xfer);
    907		dev_err(dev, "Message alloc failed(%d)\n", ret);
    908		return ret;
    909	}
    910	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
    911	req->id = id;
    912	req->resets = reset_state;
    913
    914	ret = ti_sci_do_xfer(info, xfer);
    915	if (ret) {
    916		dev_err(dev, "Mbox send fail %d\n", ret);
    917		goto fail;
    918	}
    919
    920	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
    921
    922	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
    923
    924fail:
    925	ti_sci_put_one_xfer(&info->minfo, xfer);
    926
    927	return ret;
    928}
    929
    930/**
    931 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
    932 *				    by TISCI
    933 * @handle:		Pointer to TISCI handle
    934 * @id:			Device Identifier
    935 * @reset_state:	Pointer to reset state to populate
    936 *
    937 * Return: 0 if all went fine, else return appropriate error.
    938 */
    939static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
    940					u32 id, u32 *reset_state)
    941{
    942	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
    943				       NULL);
    944}
    945
    946/**
    947 * ti_sci_set_clock_state() - Set clock state helper
    948 * @handle:	pointer to TI SCI handle
    949 * @dev_id:	Device identifier this request is for
    950 * @clk_id:	Clock identifier for the device for this request.
    951 *		Each device has its own set of clock inputs. This indexes
    952 *		which clock input to modify.
    953 * @flags:	Header flags as needed
    954 * @state:	State to request for the clock.
    955 *
    956 * Return: 0 if all went well, else returns appropriate error value.
    957 */
    958static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
    959				  u32 dev_id, u32 clk_id,
    960				  u32 flags, u8 state)
    961{
    962	struct ti_sci_info *info;
    963	struct ti_sci_msg_req_set_clock_state *req;
    964	struct ti_sci_msg_hdr *resp;
    965	struct ti_sci_xfer *xfer;
    966	struct device *dev;
    967	int ret = 0;
    968
    969	if (IS_ERR(handle))
    970		return PTR_ERR(handle);
    971	if (!handle)
    972		return -EINVAL;
    973
    974	info = handle_to_ti_sci_info(handle);
    975	dev = info->dev;
    976
    977	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
    978				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
    979				   sizeof(*req), sizeof(*resp));
    980	if (IS_ERR(xfer)) {
    981		ret = PTR_ERR(xfer);
    982		dev_err(dev, "Message alloc failed(%d)\n", ret);
    983		return ret;
    984	}
    985	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
    986	req->dev_id = dev_id;
    987	if (clk_id < 255) {
    988		req->clk_id = clk_id;
    989	} else {
    990		req->clk_id = 255;
    991		req->clk_id_32 = clk_id;
    992	}
    993	req->request_state = state;
    994
    995	ret = ti_sci_do_xfer(info, xfer);
    996	if (ret) {
    997		dev_err(dev, "Mbox send fail %d\n", ret);
    998		goto fail;
    999	}
   1000
   1001	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
   1002
   1003	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
   1004
   1005fail:
   1006	ti_sci_put_one_xfer(&info->minfo, xfer);
   1007
   1008	return ret;
   1009}
   1010
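/*
 * The 8-bit/32-bit clock identifier encoding above is repeated verbatim
 * in every clock request below; a hypothetical helper that factors out
 * the pattern (not part of the original driver) could look like this:
 */
#if 0	/* illustrative only */
static void ti_sci_msg_fill_clk_id(u8 *clk_id, u32 *clk_id_32, u32 id)
{
	if (id < 255) {
		*clk_id = id;
	} else {
		/* 255 in the legacy u8 field flags use of the 32-bit field */
		*clk_id = 255;
		*clk_id_32 = id;
	}
}
#endif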
   1011/**
   1012 * ti_sci_cmd_get_clock_state() - Get clock state helper
   1013 * @handle:	pointer to TI SCI handle
   1014 * @dev_id:	Device identifier this request is for
   1015 * @clk_id:	Clock identifier for the device for this request.
   1016 *		Each device has its own set of clock inputs. This indexes
   1017 *		which clock input to modify.
   1018 * @programmed_state:	State requested for clock to move to
   1019 * @current_state:	State that the clock is currently in
   1020 *
   1021 * Return: 0 if all went well, else returns appropriate error value.
   1022 */
   1023static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
   1024				      u32 dev_id, u32 clk_id,
   1025				      u8 *programmed_state, u8 *current_state)
   1026{
   1027	struct ti_sci_info *info;
   1028	struct ti_sci_msg_req_get_clock_state *req;
   1029	struct ti_sci_msg_resp_get_clock_state *resp;
   1030	struct ti_sci_xfer *xfer;
   1031	struct device *dev;
   1032	int ret = 0;
   1033
   1034	if (IS_ERR(handle))
   1035		return PTR_ERR(handle);
   1036	if (!handle)
   1037		return -EINVAL;
   1038
   1039	if (!programmed_state && !current_state)
   1040		return -EINVAL;
   1041
   1042	info = handle_to_ti_sci_info(handle);
   1043	dev = info->dev;
   1044
   1045	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
   1046				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   1047				   sizeof(*req), sizeof(*resp));
   1048	if (IS_ERR(xfer)) {
   1049		ret = PTR_ERR(xfer);
   1050		dev_err(dev, "Message alloc failed(%d)\n", ret);
   1051		return ret;
   1052	}
   1053	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
   1054	req->dev_id = dev_id;
   1055	if (clk_id < 255) {
   1056		req->clk_id = clk_id;
   1057	} else {
   1058		req->clk_id = 255;
   1059		req->clk_id_32 = clk_id;
   1060	}
   1061
   1062	ret = ti_sci_do_xfer(info, xfer);
   1063	if (ret) {
   1064		dev_err(dev, "Mbox send fail %d\n", ret);
   1065		goto fail;
   1066	}
   1067
   1068	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
   1069
   1070	if (!ti_sci_is_response_ack(resp)) {
   1071		ret = -ENODEV;
   1072		goto fail;
   1073	}
   1074
   1075	if (programmed_state)
   1076		*programmed_state = resp->programmed_state;
   1077	if (current_state)
   1078		*current_state = resp->current_state;
   1079
   1080fail:
   1081	ti_sci_put_one_xfer(&info->minfo, xfer);
   1082
   1083	return ret;
   1084}
   1085
   1086/**
   1087 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
   1088 * @handle:	pointer to TI SCI handle
   1089 * @dev_id:	Device identifier this request is for
   1090 * @clk_id:	Clock identifier for the device for this request.
   1091 *		Each device has its own set of clock inputs. This indexes
   1092 *		which clock input to modify.
   1093 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
   1094 * @can_change_freq: 'true' if frequency change is desired, else 'false'
   1095 * @enable_input_term: 'true' if input termination is desired, else 'false'
   1096 *
   1097 * Return: 0 if all went well, else returns appropriate error value.
   1098 */
   1099static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
   1100				u32 clk_id, bool needs_ssc,
   1101				bool can_change_freq, bool enable_input_term)
   1102{
   1103	u32 flags = 0;
   1104
   1105	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
   1106	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
   1107	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
   1108
   1109	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
   1110				      MSG_CLOCK_SW_STATE_REQ);
   1111}
   1112
   1113/**
   1114 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
   1115 * @handle:	pointer to TI SCI handle
   1116 * @dev_id:	Device identifier this request is for
   1117 * @clk_id:	Clock identifier for the device for this request.
   1118 *		Each device has its own set of clock inputs. This indexes
   1119 *		which clock input to modify.
   1120 *
   1121 * NOTE: This clock must have been requested by get_clock previously.
   1122 *
   1123 * Return: 0 if all went well, else returns appropriate error value.
   1124 */
   1125static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
   1126				 u32 dev_id, u32 clk_id)
   1127{
   1128	return ti_sci_set_clock_state(handle, dev_id, clk_id,
   1129				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
   1130				      MSG_CLOCK_SW_STATE_UNREQ);
   1131}
   1132
   1133/**
   1134 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
   1135 * @handle:	pointer to TI SCI handle
   1136 * @dev_id:	Device identifier this request is for
   1137 * @clk_id:	Clock identifier for the device for this request.
   1138 *		Each device has its own set of clock inputs. This indexes
   1139 *		which clock input to modify.
   1140 *
   1141 * NOTE: This clock must have been requested by get_clock previously.
   1142 *
   1143 * Return: 0 if all went well, else returns appropriate error value.
   1144 */
   1145static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
   1146				u32 dev_id, u32 clk_id)
   1147{
   1148	return ti_sci_set_clock_state(handle, dev_id, clk_id,
   1149				      MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
   1150				      MSG_CLOCK_SW_STATE_AUTO);
   1151}
   1152
   1153/**
   1154 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
   1155 * @handle:	pointer to TI SCI handle
   1156 * @dev_id:	Device identifier this request is for
   1157 * @clk_id:	Clock identifier for the device for this request.
   1158 *		Each device has its own set of clock inputs. This indexes
   1159 *		which clock input to modify.
   1160 * @req_state: state indicating if the clock is auto managed
   1161 *
   1162 * Return: 0 if all went well, else returns appropriate error value.
   1163 */
   1164static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
   1165				  u32 dev_id, u32 clk_id, bool *req_state)
   1166{
   1167	u8 state = 0;
   1168	int ret;
   1169
   1170	if (!req_state)
   1171		return -EINVAL;
   1172
   1173	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
   1174	if (ret)
   1175		return ret;
   1176
   1177	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
   1178	return 0;
   1179}
   1180
   1181/**
   1182 * ti_sci_cmd_clk_is_on() - Is the clock ON
   1183 * @handle:	pointer to TI SCI handle
   1184 * @dev_id:	Device identifier this request is for
   1185 * @clk_id:	Clock identifier for the device for this request.
   1186 *		Each device has its own set of clock inputs. This indexes
   1187 *		which clock input to modify.
   1188 * @req_state: state indicating if the clock is managed by us and enabled
   1189 * @curr_state: state indicating if the clock is ready for operation
   1190 *
   1191 * Return: 0 if all went well, else returns appropriate error value.
   1192 */
   1193static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
   1194				u32 clk_id, bool *req_state, bool *curr_state)
   1195{
   1196	u8 c_state = 0, r_state = 0;
   1197	int ret;
   1198
   1199	if (!req_state && !curr_state)
   1200		return -EINVAL;
   1201
   1202	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
   1203					 &r_state, &c_state);
   1204	if (ret)
   1205		return ret;
   1206
   1207	if (req_state)
   1208		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
   1209	if (curr_state)
   1210		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
   1211	return 0;
   1212}
   1213
   1214/**
   1215 * ti_sci_cmd_clk_is_off() - Is the clock OFF
   1216 * @handle:	pointer to TI SCI handle
   1217 * @dev_id:	Device identifier this request is for
   1218 * @clk_id:	Clock identifier for the device for this request.
   1219 *		Each device has its own set of clock inputs. This indexes
   1220 *		which clock input to modify.
   1221 * @req_state: state indicating if the clock is managed by us and disabled
   1222 * @curr_state: state indicating if the clock is NOT ready for operation
   1223 *
   1224 * Return: 0 if all went well, else returns appropriate error value.
   1225 */
   1226static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
   1227				 u32 clk_id, bool *req_state, bool *curr_state)
   1228{
   1229	u8 c_state = 0, r_state = 0;
   1230	int ret;
   1231
   1232	if (!req_state && !curr_state)
   1233		return -EINVAL;
   1234
   1235	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
   1236					 &r_state, &c_state);
   1237	if (ret)
   1238		return ret;
   1239
   1240	if (req_state)
   1241		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
   1242	if (curr_state)
   1243		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
   1244	return 0;
   1245}
   1246
   1247/**
   1248 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
   1249 * @handle:	pointer to TI SCI handle
   1250 * @dev_id:	Device identifier this request is for
   1251 * @clk_id:	Clock identifier for the device for this request.
   1252 *		Each device has its own set of clock inputs. This indexes
   1253 *		which clock input to modify.
   1254 * @parent_id:	Parent clock identifier to set
   1255 *
   1256 * Return: 0 if all went well, else returns appropriate error value.
   1257 */
   1258static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
   1259				     u32 dev_id, u32 clk_id, u32 parent_id)
   1260{
   1261	struct ti_sci_info *info;
   1262	struct ti_sci_msg_req_set_clock_parent *req;
   1263	struct ti_sci_msg_hdr *resp;
   1264	struct ti_sci_xfer *xfer;
   1265	struct device *dev;
   1266	int ret = 0;
   1267
   1268	if (IS_ERR(handle))
   1269		return PTR_ERR(handle);
   1270	if (!handle)
   1271		return -EINVAL;
   1272
   1273	info = handle_to_ti_sci_info(handle);
   1274	dev = info->dev;
   1275
   1276	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
   1277				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   1278				   sizeof(*req), sizeof(*resp));
   1279	if (IS_ERR(xfer)) {
   1280		ret = PTR_ERR(xfer);
   1281		dev_err(dev, "Message alloc failed(%d)\n", ret);
   1282		return ret;
   1283	}
   1284	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
   1285	req->dev_id = dev_id;
   1286	if (clk_id < 255) {
   1287		req->clk_id = clk_id;
   1288	} else {
   1289		req->clk_id = 255;
   1290		req->clk_id_32 = clk_id;
   1291	}
   1292	if (parent_id < 255) {
   1293		req->parent_id = parent_id;
   1294	} else {
   1295		req->parent_id = 255;
   1296		req->parent_id_32 = parent_id;
   1297	}
   1298
   1299	ret = ti_sci_do_xfer(info, xfer);
   1300	if (ret) {
   1301		dev_err(dev, "Mbox send fail %d\n", ret);
   1302		goto fail;
   1303	}
   1304
   1305	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
   1306
   1307	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
   1308
   1309fail:
   1310	ti_sci_put_one_xfer(&info->minfo, xfer);
   1311
   1312	return ret;
   1313}
   1314
   1315/**
   1316 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
   1317 * @handle:	pointer to TI SCI handle
   1318 * @dev_id:	Device identifier this request is for
   1319 * @clk_id:	Clock identifier for the device for this request.
   1320 *		Each device has its own set of clock inputs. This indexes
   1321 *		which clock input to modify.
   1322 * @parent_id:	Current clock parent
   1323 *
   1324 * Return: 0 if all went well, else returns appropriate error value.
   1325 */
   1326static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
   1327				     u32 dev_id, u32 clk_id, u32 *parent_id)
   1328{
   1329	struct ti_sci_info *info;
   1330	struct ti_sci_msg_req_get_clock_parent *req;
   1331	struct ti_sci_msg_resp_get_clock_parent *resp;
   1332	struct ti_sci_xfer *xfer;
   1333	struct device *dev;
   1334	int ret = 0;
   1335
   1336	if (IS_ERR(handle))
   1337		return PTR_ERR(handle);
   1338	if (!handle || !parent_id)
   1339		return -EINVAL;
   1340
   1341	info = handle_to_ti_sci_info(handle);
   1342	dev = info->dev;
   1343
   1344	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
   1345				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   1346				   sizeof(*req), sizeof(*resp));
   1347	if (IS_ERR(xfer)) {
   1348		ret = PTR_ERR(xfer);
   1349		dev_err(dev, "Message alloc failed(%d)\n", ret);
   1350		return ret;
   1351	}
   1352	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
   1353	req->dev_id = dev_id;
   1354	if (clk_id < 255) {
   1355		req->clk_id = clk_id;
   1356	} else {
   1357		req->clk_id = 255;
   1358		req->clk_id_32 = clk_id;
   1359	}
   1360
   1361	ret = ti_sci_do_xfer(info, xfer);
   1362	if (ret) {
   1363		dev_err(dev, "Mbox send fail %d\n", ret);
   1364		goto fail;
   1365	}
   1366
   1367	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
   1368
   1369	if (!ti_sci_is_response_ack(resp)) {
   1370		ret = -ENODEV;
   1371	} else {
   1372		if (resp->parent_id < 255)
   1373			*parent_id = resp->parent_id;
   1374		else
   1375			*parent_id = resp->parent_id_32;
   1376	}
   1377
   1378fail:
   1379	ti_sci_put_one_xfer(&info->minfo, xfer);
   1380
   1381	return ret;
   1382}
   1383
   1384/**
   1385 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
   1386 * @handle:	pointer to TI SCI handle
   1387 * @dev_id:	Device identifier this request is for
   1388 * @clk_id:	Clock identifier for the device for this request.
   1389 *		Each device has its own set of clock inputs. This indexes
   1390 *		which clock input to modify.
   1391 * @num_parents: Returns the number of parents of the current clock.
   1392 *
   1393 * Return: 0 if all went well, else returns appropriate error value.
   1394 */
   1395static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
   1396					  u32 dev_id, u32 clk_id,
   1397					  u32 *num_parents)
   1398{
   1399	struct ti_sci_info *info;
   1400	struct ti_sci_msg_req_get_clock_num_parents *req;
   1401	struct ti_sci_msg_resp_get_clock_num_parents *resp;
   1402	struct ti_sci_xfer *xfer;
   1403	struct device *dev;
   1404	int ret = 0;
   1405
   1406	if (IS_ERR(handle))
   1407		return PTR_ERR(handle);
   1408	if (!handle || !num_parents)
   1409		return -EINVAL;
   1410
   1411	info = handle_to_ti_sci_info(handle);
   1412	dev = info->dev;
   1413
   1414	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
   1415				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   1416				   sizeof(*req), sizeof(*resp));
   1417	if (IS_ERR(xfer)) {
   1418		ret = PTR_ERR(xfer);
   1419		dev_err(dev, "Message alloc failed(%d)\n", ret);
   1420		return ret;
   1421	}
   1422	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
   1423	req->dev_id = dev_id;
   1424	if (clk_id < 255) {
   1425		req->clk_id = clk_id;
   1426	} else {
   1427		req->clk_id = 255;
   1428		req->clk_id_32 = clk_id;
   1429	}
   1430
   1431	ret = ti_sci_do_xfer(info, xfer);
   1432	if (ret) {
   1433		dev_err(dev, "Mbox send fail %d\n", ret);
   1434		goto fail;
   1435	}
   1436
   1437	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
   1438
   1439	if (!ti_sci_is_response_ack(resp)) {
   1440		ret = -ENODEV;
   1441	} else {
   1442		if (resp->num_parents < 255)
   1443			*num_parents = resp->num_parents;
   1444		else
   1445			*num_parents = resp->num_parents_32;
   1446	}
   1447
   1448fail:
   1449	ti_sci_put_one_xfer(&info->minfo, xfer);
   1450
   1451	return ret;
   1452}
   1453
   1454/**
   1455 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
   1456 * @handle:	pointer to TI SCI handle
   1457 * @dev_id:	Device identifier this request is for
   1458 * @clk_id:	Clock identifier for the device for this request.
   1459 *		Each device has its own set of clock inputs. This indexes
   1460 *		which clock input to modify.
   1461 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
   1462 *		allowable programmed frequency and does not account for clock
   1463 *		tolerances and jitter.
   1464 * @target_freq: The target clock frequency in Hz. A frequency will be
   1465 *		processed as close to this target frequency as possible.
   1466 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
   1467 *		allowable programmed frequency and does not account for clock
   1468 *		tolerances and jitter.
   1469 * @match_freq:	Frequency match in Hz response.
   1470 *
   1471 * Return: 0 if all went well, else returns appropriate error value.
   1472 */
   1473static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
   1474					 u32 dev_id, u32 clk_id, u64 min_freq,
   1475					 u64 target_freq, u64 max_freq,
   1476					 u64 *match_freq)
   1477{
   1478	struct ti_sci_info *info;
   1479	struct ti_sci_msg_req_query_clock_freq *req;
   1480	struct ti_sci_msg_resp_query_clock_freq *resp;
   1481	struct ti_sci_xfer *xfer;
   1482	struct device *dev;
   1483	int ret = 0;
   1484
   1485	if (IS_ERR(handle))
   1486		return PTR_ERR(handle);
   1487	if (!handle || !match_freq)
   1488		return -EINVAL;
   1489
   1490	info = handle_to_ti_sci_info(handle);
   1491	dev = info->dev;
   1492
   1493	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
   1494				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   1495				   sizeof(*req), sizeof(*resp));
   1496	if (IS_ERR(xfer)) {
   1497		ret = PTR_ERR(xfer);
   1498		dev_err(dev, "Message alloc failed(%d)\n", ret);
   1499		return ret;
   1500	}
   1501	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
   1502	req->dev_id = dev_id;
   1503	if (clk_id < 255) {
   1504		req->clk_id = clk_id;
   1505	} else {
   1506		req->clk_id = 255;
   1507		req->clk_id_32 = clk_id;
   1508	}
   1509	req->min_freq_hz = min_freq;
   1510	req->target_freq_hz = target_freq;
   1511	req->max_freq_hz = max_freq;
   1512
   1513	ret = ti_sci_do_xfer(info, xfer);
   1514	if (ret) {
   1515		dev_err(dev, "Mbox send fail %d\n", ret);
   1516		goto fail;
   1517	}
   1518
   1519	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
   1520
   1521	if (!ti_sci_is_response_ack(resp))
   1522		ret = -ENODEV;
   1523	else
   1524		*match_freq = resp->freq_hz;
   1525
   1526fail:
   1527	ti_sci_put_one_xfer(&info->minfo, xfer);
   1528
   1529	return ret;
   1530}
   1531
   1532/**
   1533 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
   1534 * @handle:	pointer to TI SCI handle
   1535 * @dev_id:	Device identifier this request is for
   1536 * @clk_id:	Clock identifier for the device for this request.
   1537 *		Each device has its own set of clock inputs. This indexes
   1538 *		which clock input to modify.
   1539 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
   1540 *		allowable programmed frequency and does not account for clock
   1541 *		tolerances and jitter.
   1542 * @target_freq: The target clock frequency in Hz. A frequency will be
   1543 *		processed as close to this target frequency as possible.
   1544 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
   1545 *		allowable programmed frequency and does not account for clock
   1546 *		tolerances and jitter.
   1547 *
   1548 * Return: 0 if all went well, else returns appropriate error value.
   1549 */
   1550static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
   1551				   u32 dev_id, u32 clk_id, u64 min_freq,
   1552				   u64 target_freq, u64 max_freq)
   1553{
   1554	struct ti_sci_info *info;
   1555	struct ti_sci_msg_req_set_clock_freq *req;
   1556	struct ti_sci_msg_hdr *resp;
   1557	struct ti_sci_xfer *xfer;
   1558	struct device *dev;
   1559	int ret = 0;
   1560
   1561	if (IS_ERR(handle))
   1562		return PTR_ERR(handle);
   1563	if (!handle)
   1564		return -EINVAL;
   1565
   1566	info = handle_to_ti_sci_info(handle);
   1567	dev = info->dev;
   1568
   1569	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
   1570				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   1571				   sizeof(*req), sizeof(*resp));
   1572	if (IS_ERR(xfer)) {
   1573		ret = PTR_ERR(xfer);
   1574		dev_err(dev, "Message alloc failed(%d)\n", ret);
   1575		return ret;
   1576	}
   1577	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
   1578	req->dev_id = dev_id;
   1579	if (clk_id < 255) {
   1580		req->clk_id = clk_id;
   1581	} else {
   1582		req->clk_id = 255;
   1583		req->clk_id_32 = clk_id;
   1584	}
   1585	req->min_freq_hz = min_freq;
   1586	req->target_freq_hz = target_freq;
   1587	req->max_freq_hz = max_freq;
   1588
   1589	ret = ti_sci_do_xfer(info, xfer);
   1590	if (ret) {
   1591		dev_err(dev, "Mbox send fail %d\n", ret);
   1592		goto fail;
   1593	}
   1594
   1595	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
   1596
   1597	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
   1598
   1599fail:
   1600	ti_sci_put_one_xfer(&info->minfo, xfer);
   1601
   1602	return ret;
   1603}
   1604
   1605/**
   1606 * ti_sci_cmd_clk_get_freq() - Get current frequency
   1607 * @handle:	pointer to TI SCI handle
   1608 * @dev_id:	Device identifier this request is for
   1609 * @clk_id:	Clock identifier for the device for this request.
    1610	 *		Each device has its own set of clock inputs. This indexes
    1611	 *		which clock input to query.
    1612	 * @freq:	Current frequency in Hz
   1613 *
   1614 * Return: 0 if all went well, else returns appropriate error value.
   1615 */
   1616static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
   1617				   u32 dev_id, u32 clk_id, u64 *freq)
   1618{
   1619	struct ti_sci_info *info;
   1620	struct ti_sci_msg_req_get_clock_freq *req;
   1621	struct ti_sci_msg_resp_get_clock_freq *resp;
   1622	struct ti_sci_xfer *xfer;
   1623	struct device *dev;
   1624	int ret = 0;
   1625
   1626	if (IS_ERR(handle))
   1627		return PTR_ERR(handle);
   1628	if (!handle || !freq)
   1629		return -EINVAL;
   1630
   1631	info = handle_to_ti_sci_info(handle);
   1632	dev = info->dev;
   1633
   1634	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
   1635				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   1636				   sizeof(*req), sizeof(*resp));
   1637	if (IS_ERR(xfer)) {
   1638		ret = PTR_ERR(xfer);
   1639		dev_err(dev, "Message alloc failed(%d)\n", ret);
   1640		return ret;
   1641	}
   1642	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
   1643	req->dev_id = dev_id;
   1644	if (clk_id < 255) {
   1645		req->clk_id = clk_id;
   1646	} else {
   1647		req->clk_id = 255;
   1648		req->clk_id_32 = clk_id;
   1649	}
   1650
   1651	ret = ti_sci_do_xfer(info, xfer);
   1652	if (ret) {
   1653		dev_err(dev, "Mbox send fail %d\n", ret);
   1654		goto fail;
   1655	}
   1656
   1657	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
   1658
   1659	if (!ti_sci_is_response_ack(resp))
   1660		ret = -ENODEV;
   1661	else
   1662		*freq = resp->freq_hz;
   1663
   1664fail:
   1665	ti_sci_put_one_xfer(&info->minfo, xfer);
   1666
   1667	return ret;
   1668}
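/*
 * Example (editor's illustrative sketch, not part of this driver): a client
 * holding a TI SCI handle reaches the frequency helpers above through the
 * clk_ops installed by ti_sci_setup_ops() further below. The device id (47),
 * clock id (2) and the 100 MHz +/- 5% window are hypothetical values.
 *
 *	static int example_clk_freq(const struct ti_sci_handle *sci)
 *	{
 *		const struct ti_sci_clk_ops *cops = &sci->ops.clk_ops;
 *		u64 target = 100000000, freq;
 *		int ret;
 *
 *		ret = cops->set_freq(sci, 47, 2, target - 5000000, target,
 *				     target + 5000000);
 *		if (ret)
 *			return ret;
 *
 *		ret = cops->get_freq(sci, 47, 2, &freq);
 *		if (!ret)
 *			pr_info("clock runs at %llu Hz\n", freq);
 *		return ret;
 *	}
 */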
   1669
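/**
 * ti_sci_cmd_core_reboot() - Command to request a system reset
 * @handle:	Pointer to TI SCI handle
 *
 * Sends a TI_SCI_MSG_SYS_RESET request to the firmware.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */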
   1670static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
   1671{
   1672	struct ti_sci_info *info;
   1673	struct ti_sci_msg_req_reboot *req;
   1674	struct ti_sci_msg_hdr *resp;
   1675	struct ti_sci_xfer *xfer;
   1676	struct device *dev;
   1677	int ret = 0;
   1678
   1679	if (IS_ERR(handle))
   1680		return PTR_ERR(handle);
   1681	if (!handle)
   1682		return -EINVAL;
   1683
   1684	info = handle_to_ti_sci_info(handle);
   1685	dev = info->dev;
   1686
   1687	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
   1688				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   1689				   sizeof(*req), sizeof(*resp));
   1690	if (IS_ERR(xfer)) {
   1691		ret = PTR_ERR(xfer);
   1692		dev_err(dev, "Message alloc failed(%d)\n", ret);
   1693		return ret;
   1694	}
   1695	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
   1696
   1697	ret = ti_sci_do_xfer(info, xfer);
   1698	if (ret) {
   1699		dev_err(dev, "Mbox send fail %d\n", ret);
   1700		goto fail;
   1701	}
   1702
   1703	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
   1704
   1705	if (!ti_sci_is_response_ack(resp))
   1706		ret = -ENODEV;
   1707	else
   1708		ret = 0;
   1709
   1710fail:
   1711	ti_sci_put_one_xfer(&info->minfo, xfer);
   1712
   1713	return ret;
   1714}
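/*
 * Example (illustrative sketch): the reset request above is exposed to
 * clients as core_ops.reboot_device, e.g. from a reboot notifier:
 *
 *	ret = handle->ops.core_ops.reboot_device(handle);
 */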
   1715
   1716/**
   1717 * ti_sci_get_resource_range - Helper to get a range of resources assigned
   1718 *			       to a host. Resource is uniquely identified by
   1719 *			       type and subtype.
   1720 * @handle:		Pointer to TISCI handle.
   1721 * @dev_id:		TISCI device ID.
   1722 * @subtype:		Resource assignment subtype that is being requested
   1723 *			from the given device.
   1724 * @s_host:		Host processor ID to which the resources are allocated
   1725 * @desc:		Pointer to ti_sci_resource_desc to be updated with the
   1726 *			resource range start index and number of resources
   1727 *
   1728 * Return: 0 if all went fine, else return appropriate error.
   1729 */
   1730static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
   1731				     u32 dev_id, u8 subtype, u8 s_host,
   1732				     struct ti_sci_resource_desc *desc)
   1733{
   1734	struct ti_sci_msg_resp_get_resource_range *resp;
   1735	struct ti_sci_msg_req_get_resource_range *req;
   1736	struct ti_sci_xfer *xfer;
   1737	struct ti_sci_info *info;
   1738	struct device *dev;
   1739	int ret = 0;
   1740
   1741	if (IS_ERR(handle))
   1742		return PTR_ERR(handle);
   1743	if (!handle || !desc)
   1744		return -EINVAL;
   1745
   1746	info = handle_to_ti_sci_info(handle);
   1747	dev = info->dev;
   1748
   1749	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
   1750				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   1751				   sizeof(*req), sizeof(*resp));
   1752	if (IS_ERR(xfer)) {
   1753		ret = PTR_ERR(xfer);
   1754		dev_err(dev, "Message alloc failed(%d)\n", ret);
   1755		return ret;
   1756	}
   1757
   1758	req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
   1759	req->secondary_host = s_host;
   1760	req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
   1761	req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
   1762
   1763	ret = ti_sci_do_xfer(info, xfer);
   1764	if (ret) {
   1765		dev_err(dev, "Mbox send fail %d\n", ret);
   1766		goto fail;
   1767	}
   1768
   1769	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
   1770
   1771	if (!ti_sci_is_response_ack(resp)) {
   1772		ret = -ENODEV;
   1773	} else if (!resp->range_num && !resp->range_num_sec) {
    1774		/* Neither of the two resource ranges is valid */
   1775		ret = -ENODEV;
   1776	} else {
   1777		desc->start = resp->range_start;
   1778		desc->num = resp->range_num;
   1779		desc->start_sec = resp->range_start_sec;
   1780		desc->num_sec = resp->range_num_sec;
   1781	}
   1782
   1783fail:
   1784	ti_sci_put_one_xfer(&info->minfo, xfer);
   1785
   1786	return ret;
   1787}
   1788
   1789/**
    1790	 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
    1791	 *				   host that is the same as the TI SCI interface host.
   1792 * @handle:		Pointer to TISCI handle.
   1793 * @dev_id:		TISCI device ID.
   1794 * @subtype:		Resource assignment subtype that is being requested
   1795 *			from the given device.
   1796 * @desc:		Pointer to ti_sci_resource_desc to be updated with the
   1797 *			resource range start index and number of resources
   1798 *
   1799 * Return: 0 if all went fine, else return appropriate error.
   1800 */
   1801static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
   1802					 u32 dev_id, u8 subtype,
   1803					 struct ti_sci_resource_desc *desc)
   1804{
   1805	return ti_sci_get_resource_range(handle, dev_id, subtype,
   1806					 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
   1807					 desc);
   1808}
   1809
   1810/**
   1811 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
   1812 *					      assigned to a specified host.
   1813 * @handle:		Pointer to TISCI handle.
   1814 * @dev_id:		TISCI device ID.
   1815 * @subtype:		Resource assignment subtype that is being requested
   1816 *			from the given device.
   1817 * @s_host:		Host processor ID to which the resources are allocated
   1818 * @desc:		Pointer to ti_sci_resource_desc to be updated with the
   1819 *			resource range start index and number of resources
   1820 *
   1821 * Return: 0 if all went fine, else return appropriate error.
   1822 */
   1823static
   1824int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
   1825					     u32 dev_id, u8 subtype, u8 s_host,
   1826					     struct ti_sci_resource_desc *desc)
   1827{
   1828	return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
   1829}
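/*
 * Example (illustrative sketch): querying the resource range a device has
 * assigned to this host through rm_core_ops. The device id (131) and
 * subtype (0) below are hypothetical placeholders.
 *
 *	struct ti_sci_resource_desc desc;
 *	int ret;
 *
 *	ret = handle->ops.rm_core_ops.get_range(handle, 131, 0, &desc);
 *	if (!ret)
 *		pr_info("range %u..%u\n", desc.start,
 *			desc.start + desc.num - 1);
 */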
   1830
   1831/**
   1832 * ti_sci_manage_irq() - Helper api to configure/release the irq route between
   1833 *			 the requested source and destination
   1834 * @handle:		Pointer to TISCI handle.
   1835 * @valid_params:	Bit fields defining the validity of certain params
   1836 * @src_id:		Device ID of the IRQ source
   1837 * @src_index:		IRQ source index within the source device
   1838 * @dst_id:		Device ID of the IRQ destination
   1839 * @dst_host_irq:	IRQ number of the destination device
   1840 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
   1841 * @vint:		Virtual interrupt to be used within the IA
   1842 * @global_event:	Global event number to be used for the requesting event
   1843 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
    1844	 * @s_host:		Secondary host ID for which the irq/event is being
    1845	 *			requested.
   1846 * @type:		Request type irq set or release.
   1847 *
   1848 * Return: 0 if all went fine, else return appropriate error.
   1849 */
   1850static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
   1851			     u32 valid_params, u16 src_id, u16 src_index,
   1852			     u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
   1853			     u16 global_event, u8 vint_status_bit, u8 s_host,
   1854			     u16 type)
   1855{
   1856	struct ti_sci_msg_req_manage_irq *req;
   1857	struct ti_sci_msg_hdr *resp;
   1858	struct ti_sci_xfer *xfer;
   1859	struct ti_sci_info *info;
   1860	struct device *dev;
   1861	int ret = 0;
   1862
   1863	if (IS_ERR(handle))
   1864		return PTR_ERR(handle);
   1865	if (!handle)
   1866		return -EINVAL;
   1867
   1868	info = handle_to_ti_sci_info(handle);
   1869	dev = info->dev;
   1870
   1871	xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   1872				   sizeof(*req), sizeof(*resp));
   1873	if (IS_ERR(xfer)) {
   1874		ret = PTR_ERR(xfer);
   1875		dev_err(dev, "Message alloc failed(%d)\n", ret);
   1876		return ret;
   1877	}
   1878	req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
   1879	req->valid_params = valid_params;
   1880	req->src_id = src_id;
   1881	req->src_index = src_index;
   1882	req->dst_id = dst_id;
   1883	req->dst_host_irq = dst_host_irq;
   1884	req->ia_id = ia_id;
   1885	req->vint = vint;
   1886	req->global_event = global_event;
   1887	req->vint_status_bit = vint_status_bit;
   1888	req->secondary_host = s_host;
   1889
   1890	ret = ti_sci_do_xfer(info, xfer);
   1891	if (ret) {
   1892		dev_err(dev, "Mbox send fail %d\n", ret);
   1893		goto fail;
   1894	}
   1895
   1896	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
   1897
   1898	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
   1899
   1900fail:
   1901	ti_sci_put_one_xfer(&info->minfo, xfer);
   1902
   1903	return ret;
   1904}
   1905
   1906/**
   1907 * ti_sci_set_irq() - Helper api to configure the irq route between the
   1908 *		      requested source and destination
   1909 * @handle:		Pointer to TISCI handle.
   1910 * @valid_params:	Bit fields defining the validity of certain params
   1911 * @src_id:		Device ID of the IRQ source
   1912 * @src_index:		IRQ source index within the source device
   1913 * @dst_id:		Device ID of the IRQ destination
   1914 * @dst_host_irq:	IRQ number of the destination device
   1915 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
   1916 * @vint:		Virtual interrupt to be used within the IA
   1917 * @global_event:	Global event number to be used for the requesting event
   1918 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
    1919	 * @s_host:		Secondary host ID for which the irq/event is being
    1920	 *			requested.
   1921 *
   1922 * Return: 0 if all went fine, else return appropriate error.
   1923 */
   1924static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
   1925			  u16 src_id, u16 src_index, u16 dst_id,
   1926			  u16 dst_host_irq, u16 ia_id, u16 vint,
   1927			  u16 global_event, u8 vint_status_bit, u8 s_host)
   1928{
    1929		pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
   1930		 __func__, valid_params, src_id, src_index,
   1931		 dst_id, dst_host_irq, ia_id, vint, global_event,
   1932		 vint_status_bit);
   1933
   1934	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
   1935				 dst_id, dst_host_irq, ia_id, vint,
   1936				 global_event, vint_status_bit, s_host,
   1937				 TI_SCI_MSG_SET_IRQ);
   1938}
   1939
   1940/**
   1941 * ti_sci_free_irq() - Helper api to free the irq route between the
   1942 *			   requested source and destination
   1943 * @handle:		Pointer to TISCI handle.
   1944 * @valid_params:	Bit fields defining the validity of certain params
   1945 * @src_id:		Device ID of the IRQ source
   1946 * @src_index:		IRQ source index within the source device
   1947 * @dst_id:		Device ID of the IRQ destination
   1948 * @dst_host_irq:	IRQ number of the destination device
   1949 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
   1950 * @vint:		Virtual interrupt to be used within the IA
   1951 * @global_event:	Global event number to be used for the requesting event
   1952 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
    1953	 * @s_host:		Secondary host ID for which the irq/event is being
    1954	 *			requested.
   1955 *
   1956 * Return: 0 if all went fine, else return appropriate error.
   1957 */
   1958static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
   1959			   u16 src_id, u16 src_index, u16 dst_id,
   1960			   u16 dst_host_irq, u16 ia_id, u16 vint,
   1961			   u16 global_event, u8 vint_status_bit, u8 s_host)
   1962{
    1963		pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d, via ia_id = %d, vint = %d, global event = %d, status_bit = %d\n",
   1964		 __func__, valid_params, src_id, src_index,
   1965		 dst_id, dst_host_irq, ia_id, vint, global_event,
   1966		 vint_status_bit);
   1967
   1968	return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
   1969				 dst_id, dst_host_irq, ia_id, vint,
   1970				 global_event, vint_status_bit, s_host,
   1971				 TI_SCI_MSG_FREE_IRQ);
   1972}
   1973
   1974/**
   1975 * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
   1976 *			  source and destination.
   1977 * @handle:		Pointer to TISCI handle.
   1978 * @src_id:		Device ID of the IRQ source
   1979 * @src_index:		IRQ source index within the source device
   1980 * @dst_id:		Device ID of the IRQ destination
   1981 * @dst_host_irq:	IRQ number of the destination device
   1984 *
   1985 * Return: 0 if all went fine, else return appropriate error.
   1986 */
   1987static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
   1988			      u16 src_index, u16 dst_id, u16 dst_host_irq)
   1989{
   1990	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
   1991
   1992	return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
   1993			      dst_host_irq, 0, 0, 0, 0, 0);
   1994}
   1995
   1996/**
   1997 * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
   1998 *				requested source and Interrupt Aggregator.
   1999 * @handle:		Pointer to TISCI handle.
   2000 * @src_id:		Device ID of the IRQ source
   2001 * @src_index:		IRQ source index within the source device
   2002 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
   2003 * @vint:		Virtual interrupt to be used within the IA
   2004 * @global_event:	Global event number to be used for the requesting event
   2005 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
   2006 *
   2007 * Return: 0 if all went fine, else return appropriate error.
   2008 */
   2009static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
   2010				    u16 src_id, u16 src_index, u16 ia_id,
   2011				    u16 vint, u16 global_event,
   2012				    u8 vint_status_bit)
   2013{
   2014	u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
   2015			   MSG_FLAG_GLB_EVNT_VALID |
   2016			   MSG_FLAG_VINT_STS_BIT_VALID;
   2017
   2018	return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
   2019			      ia_id, vint, global_event, vint_status_bit, 0);
   2020}
   2021
   2022/**
    2023	 * ti_sci_cmd_free_irq() - Free a host irq route between the requested
    2024	 *			   source and destination.
   2025 * @handle:		Pointer to TISCI handle.
   2026 * @src_id:		Device ID of the IRQ source
   2027 * @src_index:		IRQ source index within the source device
   2028 * @dst_id:		Device ID of the IRQ destination
   2029 * @dst_host_irq:	IRQ number of the destination device
   2032 *
   2033 * Return: 0 if all went fine, else return appropriate error.
   2034 */
   2035static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
   2036			       u16 src_index, u16 dst_id, u16 dst_host_irq)
   2037{
   2038	u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
   2039
   2040	return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
   2041			       dst_host_irq, 0, 0, 0, 0, 0);
   2042}
   2043
   2044/**
   2045 * ti_sci_cmd_free_event_map() - Free an event map between the requested source
   2046 *				 and Interrupt Aggregator.
   2047 * @handle:		Pointer to TISCI handle.
   2048 * @src_id:		Device ID of the IRQ source
   2049 * @src_index:		IRQ source index within the source device
   2050 * @ia_id:		Device ID of the IA, if the IRQ flows through this IA
   2051 * @vint:		Virtual interrupt to be used within the IA
   2052 * @global_event:	Global event number to be used for the requesting event
   2053 * @vint_status_bit:	Virtual interrupt status bit to be used for the event
   2054 *
   2055 * Return: 0 if all went fine, else return appropriate error.
   2056 */
   2057static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
   2058				     u16 src_id, u16 src_index, u16 ia_id,
   2059				     u16 vint, u16 global_event,
   2060				     u8 vint_status_bit)
   2061{
   2062	u32 valid_params = MSG_FLAG_IA_ID_VALID |
   2063			   MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
   2064			   MSG_FLAG_VINT_STS_BIT_VALID;
   2065
   2066	return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
   2067			       ia_id, vint, global_event, vint_status_bit, 0);
   2068}
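/*
 * Example (illustrative sketch): a direct interrupt route is set up and
 * torn down with the same quadruple of identifiers. The numeric IDs below
 * (src dev 4, src index 0, dst dev 56, dst host irq 32) are hypothetical.
 * An event route through an Interrupt Aggregator uses set_event_map() /
 * free_event_map() with the IA id, vint and global event instead.
 *
 *	const struct ti_sci_rm_irq_ops *iops = &handle->ops.rm_irq_ops;
 *	int ret;
 *
 *	ret = iops->set_irq(handle, 4, 0, 56, 32);
 *	if (ret)
 *		return ret;
 *	... use the interrupt ...
 *	ret = iops->free_irq(handle, 4, 0, 56, 32);
 */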
   2069
   2070/**
   2071 * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
   2072 * @handle:	Pointer to TI SCI handle.
   2073 * @params:	Pointer to ti_sci_msg_rm_ring_cfg ring config structure
   2074 *
   2075 * Return: 0 if all went well, else returns appropriate error value.
   2076 *
   2077 * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
   2078 * more info.
   2079 */
   2080static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
   2081				  const struct ti_sci_msg_rm_ring_cfg *params)
   2082{
   2083	struct ti_sci_msg_rm_ring_cfg_req *req;
   2084	struct ti_sci_msg_hdr *resp;
   2085	struct ti_sci_xfer *xfer;
   2086	struct ti_sci_info *info;
   2087	struct device *dev;
   2088	int ret = 0;
   2089
   2090	if (IS_ERR_OR_NULL(handle))
   2091		return -EINVAL;
   2092
   2093	info = handle_to_ti_sci_info(handle);
   2094	dev = info->dev;
   2095
   2096	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
   2097				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   2098				   sizeof(*req), sizeof(*resp));
   2099	if (IS_ERR(xfer)) {
   2100		ret = PTR_ERR(xfer);
    2101		dev_err(dev, "RM_RA:Message alloc failed(%d)\n", ret);
   2102		return ret;
   2103	}
   2104	req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
   2105	req->valid_params = params->valid_params;
   2106	req->nav_id = params->nav_id;
   2107	req->index = params->index;
   2108	req->addr_lo = params->addr_lo;
   2109	req->addr_hi = params->addr_hi;
   2110	req->count = params->count;
   2111	req->mode = params->mode;
   2112	req->size = params->size;
   2113	req->order_id = params->order_id;
   2114	req->virtid = params->virtid;
   2115	req->asel = params->asel;
   2116
   2117	ret = ti_sci_do_xfer(info, xfer);
   2118	if (ret) {
   2119		dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
   2120		goto fail;
   2121	}
   2122
   2123	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
   2124	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
   2125
   2126fail:
   2127	ti_sci_put_one_xfer(&info->minfo, xfer);
   2128	dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
   2129	return ret;
   2130}
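/*
 * Example (illustrative sketch): only fields flagged in valid_params are
 * consumed by the firmware, so a caller zero-initializes the config and
 * fills in what it needs. All values below are hypothetical, and the
 * per-field VALID flag macros are omitted here.
 *
 *	struct ti_sci_msg_rm_ring_cfg cfg = { 0 };
 *	int ret;
 *
 *	cfg.nav_id = 272;
 *	cfg.index = 10;
 *	cfg.count = 256;
 *	cfg.mode = 1;
 *	... OR the per-field VALID flags into cfg.valid_params ...
 *
 *	ret = handle->ops.rm_ring_ops.set_cfg(handle, &cfg);
 */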
   2131
   2132/**
   2133 * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
   2134 * @handle:	Pointer to TI SCI handle.
   2135 * @nav_id:	Device ID of Navigator Subsystem which should be used for
   2136 *		pairing
   2137 * @src_thread:	Source PSI-L thread ID
   2138 * @dst_thread: Destination PSI-L thread ID
   2139 *
   2140 * Return: 0 if all went well, else returns appropriate error value.
   2141 */
   2142static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
   2143				   u32 nav_id, u32 src_thread, u32 dst_thread)
   2144{
   2145	struct ti_sci_msg_psil_pair *req;
   2146	struct ti_sci_msg_hdr *resp;
   2147	struct ti_sci_xfer *xfer;
   2148	struct ti_sci_info *info;
   2149	struct device *dev;
   2150	int ret = 0;
   2151
   2152	if (IS_ERR(handle))
   2153		return PTR_ERR(handle);
   2154	if (!handle)
   2155		return -EINVAL;
   2156
   2157	info = handle_to_ti_sci_info(handle);
   2158	dev = info->dev;
   2159
   2160	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
   2161				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   2162				   sizeof(*req), sizeof(*resp));
   2163	if (IS_ERR(xfer)) {
   2164		ret = PTR_ERR(xfer);
    2165		dev_err(dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
   2166		return ret;
   2167	}
   2168	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
   2169	req->nav_id = nav_id;
   2170	req->src_thread = src_thread;
   2171	req->dst_thread = dst_thread;
   2172
   2173	ret = ti_sci_do_xfer(info, xfer);
   2174	if (ret) {
   2175		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
   2176		goto fail;
   2177	}
   2178
   2179	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
   2180	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
   2181
   2182fail:
   2183	ti_sci_put_one_xfer(&info->minfo, xfer);
   2184
   2185	return ret;
   2186}
   2187
   2188/**
   2189 * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
   2190 * @handle:	Pointer to TI SCI handle.
   2191 * @nav_id:	Device ID of Navigator Subsystem which should be used for
   2192 *		unpairing
   2193 * @src_thread:	Source PSI-L thread ID
   2194 * @dst_thread:	Destination PSI-L thread ID
   2195 *
   2196 * Return: 0 if all went well, else returns appropriate error value.
   2197 */
   2198static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
   2199				     u32 nav_id, u32 src_thread, u32 dst_thread)
   2200{
   2201	struct ti_sci_msg_psil_unpair *req;
   2202	struct ti_sci_msg_hdr *resp;
   2203	struct ti_sci_xfer *xfer;
   2204	struct ti_sci_info *info;
   2205	struct device *dev;
   2206	int ret = 0;
   2207
   2208	if (IS_ERR(handle))
   2209		return PTR_ERR(handle);
   2210	if (!handle)
   2211		return -EINVAL;
   2212
   2213	info = handle_to_ti_sci_info(handle);
   2214	dev = info->dev;
   2215
   2216	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
   2217				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   2218				   sizeof(*req), sizeof(*resp));
   2219	if (IS_ERR(xfer)) {
   2220		ret = PTR_ERR(xfer);
    2221		dev_err(dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
   2222		return ret;
   2223	}
   2224	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
   2225	req->nav_id = nav_id;
   2226	req->src_thread = src_thread;
   2227	req->dst_thread = dst_thread;
   2228
   2229	ret = ti_sci_do_xfer(info, xfer);
   2230	if (ret) {
   2231		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
   2232		goto fail;
   2233	}
   2234
   2235	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
   2236	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
   2237
   2238fail:
   2239	ti_sci_put_one_xfer(&info->minfo, xfer);
   2240
   2241	return ret;
   2242}
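/*
 * Example (illustrative sketch): PSI-L threads are paired before a DMA
 * channel is used and unpaired with the same triple afterwards. The
 * nav_id (272) and thread IDs (0x4a00, 0xca00) are hypothetical.
 *
 *	const struct ti_sci_rm_psil_ops *psilops = &handle->ops.rm_psil_ops;
 *	int ret;
 *
 *	ret = psilops->pair(handle, 272, 0x4a00, 0xca00);
 *	if (ret)
 *		return ret;
 *	... run the transfer ...
 *	ret = psilops->unpair(handle, 272, 0x4a00, 0xca00);
 */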
   2243
   2244/**
   2245 * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
   2246 * @handle:	Pointer to TI SCI handle.
   2247 * @params:	Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
   2248 *		structure
   2249 *
   2250 * Return: 0 if all went well, else returns appropriate error value.
   2251 *
   2252 * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
   2253 * more info.
   2254 */
   2255static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
   2256			const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
   2257{
   2258	struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
   2259	struct ti_sci_msg_hdr *resp;
   2260	struct ti_sci_xfer *xfer;
   2261	struct ti_sci_info *info;
   2262	struct device *dev;
   2263	int ret = 0;
   2264
   2265	if (IS_ERR_OR_NULL(handle))
   2266		return -EINVAL;
   2267
   2268	info = handle_to_ti_sci_info(handle);
   2269	dev = info->dev;
   2270
   2271	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
   2272				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   2273				   sizeof(*req), sizeof(*resp));
   2274	if (IS_ERR(xfer)) {
   2275		ret = PTR_ERR(xfer);
   2276		dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
   2277		return ret;
   2278	}
   2279	req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
   2280	req->valid_params = params->valid_params;
   2281	req->nav_id = params->nav_id;
   2282	req->index = params->index;
   2283	req->tx_pause_on_err = params->tx_pause_on_err;
   2284	req->tx_filt_einfo = params->tx_filt_einfo;
   2285	req->tx_filt_pswords = params->tx_filt_pswords;
   2286	req->tx_atype = params->tx_atype;
   2287	req->tx_chan_type = params->tx_chan_type;
   2288	req->tx_supr_tdpkt = params->tx_supr_tdpkt;
   2289	req->tx_fetch_size = params->tx_fetch_size;
   2290	req->tx_credit_count = params->tx_credit_count;
   2291	req->txcq_qnum = params->txcq_qnum;
   2292	req->tx_priority = params->tx_priority;
   2293	req->tx_qos = params->tx_qos;
   2294	req->tx_orderid = params->tx_orderid;
   2295	req->fdepth = params->fdepth;
   2296	req->tx_sched_priority = params->tx_sched_priority;
   2297	req->tx_burst_size = params->tx_burst_size;
   2298	req->tx_tdtype = params->tx_tdtype;
   2299	req->extended_ch_type = params->extended_ch_type;
   2300
   2301	ret = ti_sci_do_xfer(info, xfer);
   2302	if (ret) {
   2303		dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
   2304		goto fail;
   2305	}
   2306
   2307	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
   2308	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
   2309
   2310fail:
   2311	ti_sci_put_one_xfer(&info->minfo, xfer);
    2312		dev_dbg(dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
   2313	return ret;
   2314}
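/*
 * Example (illustrative sketch): as with ring configuration, only the
 * fields flagged in valid_params are applied. The channel values below
 * are hypothetical and the per-field VALID flag macros are omitted.
 *
 *	struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg = { 0 };
 *	int ret;
 *
 *	cfg.nav_id = 272;
 *	cfg.index = 5;
 *	cfg.tx_fetch_size = 16;
 *	cfg.txcq_qnum = 10;
 *	... OR the per-field VALID flags into cfg.valid_params ...
 *
 *	ret = handle->ops.rm_udmap_ops.tx_ch_cfg(handle, &cfg);
 */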
   2315
   2316/**
   2317 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
   2318 * @handle:	Pointer to TI SCI handle.
   2319 * @params:	Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
   2320 *		structure
   2321 *
   2322 * Return: 0 if all went well, else returns appropriate error value.
   2323 *
   2324 * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
   2325 * more info.
   2326 */
   2327static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
   2328			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
   2329{
   2330	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
   2331	struct ti_sci_msg_hdr *resp;
   2332	struct ti_sci_xfer *xfer;
   2333	struct ti_sci_info *info;
   2334	struct device *dev;
   2335	int ret = 0;
   2336
   2337	if (IS_ERR_OR_NULL(handle))
   2338		return -EINVAL;
   2339
   2340	info = handle_to_ti_sci_info(handle);
   2341	dev = info->dev;
   2342
   2343	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
   2344				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   2345				   sizeof(*req), sizeof(*resp));
   2346	if (IS_ERR(xfer)) {
   2347		ret = PTR_ERR(xfer);
   2348		dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
   2349		return ret;
   2350	}
   2351	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
   2352	req->valid_params = params->valid_params;
   2353	req->nav_id = params->nav_id;
   2354	req->index = params->index;
   2355	req->rx_fetch_size = params->rx_fetch_size;
   2356	req->rxcq_qnum = params->rxcq_qnum;
   2357	req->rx_priority = params->rx_priority;
   2358	req->rx_qos = params->rx_qos;
   2359	req->rx_orderid = params->rx_orderid;
   2360	req->rx_sched_priority = params->rx_sched_priority;
   2361	req->flowid_start = params->flowid_start;
   2362	req->flowid_cnt = params->flowid_cnt;
   2363	req->rx_pause_on_err = params->rx_pause_on_err;
   2364	req->rx_atype = params->rx_atype;
   2365	req->rx_chan_type = params->rx_chan_type;
   2366	req->rx_ignore_short = params->rx_ignore_short;
   2367	req->rx_ignore_long = params->rx_ignore_long;
   2368	req->rx_burst_size = params->rx_burst_size;
   2369
   2370	ret = ti_sci_do_xfer(info, xfer);
   2371	if (ret) {
   2372		dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
   2373		goto fail;
   2374	}
   2375
   2376	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
   2377	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
   2378
   2379fail:
   2380	ti_sci_put_one_xfer(&info->minfo, xfer);
   2381	dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
   2382	return ret;
   2383}
   2384
   2385/**
   2386 * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
   2387 * @handle:	Pointer to TI SCI handle.
   2388 * @params:	Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
   2389 *		structure
   2390 *
   2391 * Return: 0 if all went well, else returns appropriate error value.
   2392 *
   2393 * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
   2394 * more info.
   2395 */
   2396static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
   2397			const struct ti_sci_msg_rm_udmap_flow_cfg *params)
   2398{
   2399	struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
   2400	struct ti_sci_msg_hdr *resp;
   2401	struct ti_sci_xfer *xfer;
   2402	struct ti_sci_info *info;
   2403	struct device *dev;
   2404	int ret = 0;
   2405
   2406	if (IS_ERR_OR_NULL(handle))
   2407		return -EINVAL;
   2408
   2409	info = handle_to_ti_sci_info(handle);
   2410	dev = info->dev;
   2411
   2412	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
   2413				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   2414				   sizeof(*req), sizeof(*resp));
   2415	if (IS_ERR(xfer)) {
   2416		ret = PTR_ERR(xfer);
   2417		dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
   2418		return ret;
   2419	}
   2420	req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
   2421	req->valid_params = params->valid_params;
   2422	req->nav_id = params->nav_id;
   2423	req->flow_index = params->flow_index;
   2424	req->rx_einfo_present = params->rx_einfo_present;
   2425	req->rx_psinfo_present = params->rx_psinfo_present;
   2426	req->rx_error_handling = params->rx_error_handling;
   2427	req->rx_desc_type = params->rx_desc_type;
   2428	req->rx_sop_offset = params->rx_sop_offset;
   2429	req->rx_dest_qnum = params->rx_dest_qnum;
   2430	req->rx_src_tag_hi = params->rx_src_tag_hi;
   2431	req->rx_src_tag_lo = params->rx_src_tag_lo;
   2432	req->rx_dest_tag_hi = params->rx_dest_tag_hi;
   2433	req->rx_dest_tag_lo = params->rx_dest_tag_lo;
   2434	req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
   2435	req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
   2436	req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
   2437	req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
   2438	req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
   2439	req->rx_fdq1_qnum = params->rx_fdq1_qnum;
   2440	req->rx_fdq2_qnum = params->rx_fdq2_qnum;
   2441	req->rx_fdq3_qnum = params->rx_fdq3_qnum;
   2442	req->rx_ps_location = params->rx_ps_location;
   2443
   2444	ret = ti_sci_do_xfer(info, xfer);
   2445	if (ret) {
   2446		dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
   2447		goto fail;
   2448	}
   2449
   2450	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
   2451	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
   2452
   2453fail:
   2454	ti_sci_put_one_xfer(&info->minfo, xfer);
    2455		dev_dbg(dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
   2456	return ret;
   2457}
   2458
   2459/**
   2460 * ti_sci_cmd_proc_request() - Command to request a physical processor control
   2461 * @handle:	Pointer to TI SCI handle
   2462 * @proc_id:	Processor ID this request is for
   2463 *
   2464 * Return: 0 if all went well, else returns appropriate error value.
   2465 */
   2466static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
   2467				   u8 proc_id)
   2468{
   2469	struct ti_sci_msg_req_proc_request *req;
   2470	struct ti_sci_msg_hdr *resp;
   2471	struct ti_sci_info *info;
   2472	struct ti_sci_xfer *xfer;
   2473	struct device *dev;
   2474	int ret = 0;
   2475
   2476	if (!handle)
   2477		return -EINVAL;
   2478	if (IS_ERR(handle))
   2479		return PTR_ERR(handle);
   2480
   2481	info = handle_to_ti_sci_info(handle);
   2482	dev = info->dev;
   2483
   2484	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
   2485				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   2486				   sizeof(*req), sizeof(*resp));
   2487	if (IS_ERR(xfer)) {
   2488		ret = PTR_ERR(xfer);
   2489		dev_err(dev, "Message alloc failed(%d)\n", ret);
   2490		return ret;
   2491	}
   2492	req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
   2493	req->processor_id = proc_id;
   2494
   2495	ret = ti_sci_do_xfer(info, xfer);
   2496	if (ret) {
   2497		dev_err(dev, "Mbox send fail %d\n", ret);
   2498		goto fail;
   2499	}
   2500
   2501	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
   2502
   2503	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
   2504
   2505fail:
   2506	ti_sci_put_one_xfer(&info->minfo, xfer);
   2507
   2508	return ret;
   2509}
   2510
   2511/**
   2512 * ti_sci_cmd_proc_release() - Command to release a physical processor control
   2513 * @handle:	Pointer to TI SCI handle
   2514 * @proc_id:	Processor ID this request is for
   2515 *
   2516 * Return: 0 if all went well, else returns appropriate error value.
   2517 */
   2518static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
   2519				   u8 proc_id)
   2520{
   2521	struct ti_sci_msg_req_proc_release *req;
   2522	struct ti_sci_msg_hdr *resp;
   2523	struct ti_sci_info *info;
   2524	struct ti_sci_xfer *xfer;
   2525	struct device *dev;
   2526	int ret = 0;
   2527
   2528	if (!handle)
   2529		return -EINVAL;
   2530	if (IS_ERR(handle))
   2531		return PTR_ERR(handle);
   2532
   2533	info = handle_to_ti_sci_info(handle);
   2534	dev = info->dev;
   2535
   2536	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
   2537				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   2538				   sizeof(*req), sizeof(*resp));
   2539	if (IS_ERR(xfer)) {
   2540		ret = PTR_ERR(xfer);
   2541		dev_err(dev, "Message alloc failed(%d)\n", ret);
   2542		return ret;
   2543	}
   2544	req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
   2545	req->processor_id = proc_id;
   2546
   2547	ret = ti_sci_do_xfer(info, xfer);
   2548	if (ret) {
   2549		dev_err(dev, "Mbox send fail %d\n", ret);
   2550		goto fail;
   2551	}
   2552
   2553	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
   2554
   2555	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
   2556
   2557fail:
   2558	ti_sci_put_one_xfer(&info->minfo, xfer);
   2559
   2560	return ret;
   2561}
   2562
   2563/**
   2564 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
   2565 *				control to a host in the processor's access
   2566 *				control list.
   2567 * @handle:	Pointer to TI SCI handle
   2568 * @proc_id:	Processor ID this request is for
   2569 * @host_id:	Host ID to get the control of the processor
   2570 *
   2571 * Return: 0 if all went well, else returns appropriate error value.
   2572 */
   2573static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
   2574				    u8 proc_id, u8 host_id)
   2575{
   2576	struct ti_sci_msg_req_proc_handover *req;
   2577	struct ti_sci_msg_hdr *resp;
   2578	struct ti_sci_info *info;
   2579	struct ti_sci_xfer *xfer;
   2580	struct device *dev;
   2581	int ret = 0;
   2582
   2583	if (!handle)
   2584		return -EINVAL;
   2585	if (IS_ERR(handle))
   2586		return PTR_ERR(handle);
   2587
   2588	info = handle_to_ti_sci_info(handle);
   2589	dev = info->dev;
   2590
   2591	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
   2592				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   2593				   sizeof(*req), sizeof(*resp));
   2594	if (IS_ERR(xfer)) {
   2595		ret = PTR_ERR(xfer);
   2596		dev_err(dev, "Message alloc failed(%d)\n", ret);
   2597		return ret;
   2598	}
   2599	req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
   2600	req->processor_id = proc_id;
   2601	req->host_id = host_id;
   2602
   2603	ret = ti_sci_do_xfer(info, xfer);
   2604	if (ret) {
   2605		dev_err(dev, "Mbox send fail %d\n", ret);
   2606		goto fail;
   2607	}
   2608
   2609	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
   2610
   2611	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
   2612
   2613fail:
   2614	ti_sci_put_one_xfer(&info->minfo, xfer);
   2615
   2616	return ret;
   2617}
   2618
   2619/**
   2620 * ti_sci_cmd_proc_set_config() - Command to set the processor boot
   2621 *				    configuration flags
   2622 * @handle:		Pointer to TI SCI handle
    2623	 * @proc_id:		Processor ID this request is for
 * @bootvector:		Processor boot vector (start address)
    2624	 * @config_flags_set:	Configuration flags to be set
   2625 * @config_flags_clear:	Configuration flags to be cleared.
   2626 *
   2627 * Return: 0 if all went well, else returns appropriate error value.
   2628 */
   2629static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
   2630				      u8 proc_id, u64 bootvector,
   2631				      u32 config_flags_set,
   2632				      u32 config_flags_clear)
   2633{
   2634	struct ti_sci_msg_req_set_config *req;
   2635	struct ti_sci_msg_hdr *resp;
   2636	struct ti_sci_info *info;
   2637	struct ti_sci_xfer *xfer;
   2638	struct device *dev;
   2639	int ret = 0;
   2640
   2641	if (!handle)
   2642		return -EINVAL;
   2643	if (IS_ERR(handle))
   2644		return PTR_ERR(handle);
   2645
   2646	info = handle_to_ti_sci_info(handle);
   2647	dev = info->dev;
   2648
   2649	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
   2650				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   2651				   sizeof(*req), sizeof(*resp));
   2652	if (IS_ERR(xfer)) {
   2653		ret = PTR_ERR(xfer);
   2654		dev_err(dev, "Message alloc failed(%d)\n", ret);
   2655		return ret;
   2656	}
   2657	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
   2658	req->processor_id = proc_id;
   2659	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
   2660	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
   2661				TI_SCI_ADDR_HIGH_SHIFT;
   2662	req->config_flags_set = config_flags_set;
   2663	req->config_flags_clear = config_flags_clear;
   2664
   2665	ret = ti_sci_do_xfer(info, xfer);
   2666	if (ret) {
   2667		dev_err(dev, "Mbox send fail %d\n", ret);
   2668		goto fail;
   2669	}
   2670
   2671	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
   2672
   2673	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
   2674
   2675fail:
   2676	ti_sci_put_one_xfer(&info->minfo, xfer);
   2677
   2678	return ret;
   2679}
   2680
   2681/**
   2682 * ti_sci_cmd_proc_set_control() - Command to set the processor boot
   2683 *				     control flags
   2684 * @handle:			Pointer to TI SCI handle
   2685 * @proc_id:			Processor ID this request is for
   2686 * @control_flags_set:		Control flags to be set
   2687 * @control_flags_clear:	Control flags to be cleared
   2688 *
   2689 * Return: 0 if all went well, else returns appropriate error value.
   2690 */
   2691static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
   2692				       u8 proc_id, u32 control_flags_set,
   2693				       u32 control_flags_clear)
   2694{
   2695	struct ti_sci_msg_req_set_ctrl *req;
   2696	struct ti_sci_msg_hdr *resp;
   2697	struct ti_sci_info *info;
   2698	struct ti_sci_xfer *xfer;
   2699	struct device *dev;
   2700	int ret = 0;
   2701
   2702	if (!handle)
   2703		return -EINVAL;
   2704	if (IS_ERR(handle))
   2705		return PTR_ERR(handle);
   2706
   2707	info = handle_to_ti_sci_info(handle);
   2708	dev = info->dev;
   2709
   2710	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
   2711				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   2712				   sizeof(*req), sizeof(*resp));
   2713	if (IS_ERR(xfer)) {
   2714		ret = PTR_ERR(xfer);
   2715		dev_err(dev, "Message alloc failed(%d)\n", ret);
   2716		return ret;
   2717	}
   2718	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
   2719	req->processor_id = proc_id;
   2720	req->control_flags_set = control_flags_set;
   2721	req->control_flags_clear = control_flags_clear;
   2722
   2723	ret = ti_sci_do_xfer(info, xfer);
   2724	if (ret) {
   2725		dev_err(dev, "Mbox send fail %d\n", ret);
   2726		goto fail;
   2727	}
   2728
   2729	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
   2730
   2731	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
   2732
   2733fail:
   2734	ti_sci_put_one_xfer(&info->minfo, xfer);
   2735
   2736	return ret;
   2737}
   2738
   2739/**
    2740	 * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
    2741	 * @handle:	Pointer to TI SCI handle
    2742	 * @proc_id:	Processor ID this request is for
 * @bv:		Processor Boot vector (start address)
 * @cfg_flags:	Processor specific configuration flags
 * @ctrl_flags:	Processor specific control flags
 * @sts_flags:	Processor specific status flags
   2743 *
   2744 * Return: 0 if all went well, else returns appropriate error value.
   2745 */
   2746static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
   2747				      u8 proc_id, u64 *bv, u32 *cfg_flags,
   2748				      u32 *ctrl_flags, u32 *sts_flags)
   2749{
   2750	struct ti_sci_msg_resp_get_status *resp;
   2751	struct ti_sci_msg_req_get_status *req;
   2752	struct ti_sci_info *info;
   2753	struct ti_sci_xfer *xfer;
   2754	struct device *dev;
   2755	int ret = 0;
   2756
   2757	if (!handle)
   2758		return -EINVAL;
   2759	if (IS_ERR(handle))
   2760		return PTR_ERR(handle);
   2761
   2762	info = handle_to_ti_sci_info(handle);
   2763	dev = info->dev;
   2764
   2765	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
   2766				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
   2767				   sizeof(*req), sizeof(*resp));
   2768	if (IS_ERR(xfer)) {
   2769		ret = PTR_ERR(xfer);
   2770		dev_err(dev, "Message alloc failed(%d)\n", ret);
   2771		return ret;
   2772	}
   2773	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
   2774	req->processor_id = proc_id;
   2775
   2776	ret = ti_sci_do_xfer(info, xfer);
   2777	if (ret) {
   2778		dev_err(dev, "Mbox send fail %d\n", ret);
   2779		goto fail;
   2780	}
   2781
   2782	resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;
   2783
   2784	if (!ti_sci_is_response_ack(resp)) {
   2785		ret = -ENODEV;
   2786	} else {
   2787		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
   2788		      (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
   2789		       TI_SCI_ADDR_HIGH_MASK);
   2790		*cfg_flags = resp->config_flags;
   2791		*ctrl_flags = resp->control_flags;
   2792		*sts_flags = resp->status_flags;
   2793	}
   2794
   2795fail:
   2796	ti_sci_put_one_xfer(&info->minfo, xfer);
   2797
   2798	return ret;
   2799}
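/*
 * Example (illustrative sketch): a typical remote-core boot sequence built
 * from the proc_ops above: request the processor, program its boot vector,
 * read back the status, then hand control over (or release it). Processor
 * id 1, boot address 0x9d000000 and host id 10 are hypothetical.
 *
 *	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
 *	u64 bv;
 *	u32 cfg, ctrl, sts;
 *	int ret;
 *
 *	ret = pops->request(handle, 1);
 *	ret = ret ?: pops->set_config(handle, 1, 0x9d000000, 0, 0);
 *	ret = ret ?: pops->get_status(handle, 1, &bv, &cfg, &ctrl, &sts);
 *	ret = ret ?: pops->handover(handle, 1, 10);
 */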
   2800
   2801/*
   2802 * ti_sci_setup_ops() - Setup the operations structures
   2803 * @info:	pointer to TISCI pointer
   2804 */
   2805static void ti_sci_setup_ops(struct ti_sci_info *info)
   2806{
   2807	struct ti_sci_ops *ops = &info->handle.ops;
   2808	struct ti_sci_core_ops *core_ops = &ops->core_ops;
   2809	struct ti_sci_dev_ops *dops = &ops->dev_ops;
   2810	struct ti_sci_clk_ops *cops = &ops->clk_ops;
   2811	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
   2812	struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
   2813	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
   2814	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
   2815	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
   2816	struct ti_sci_proc_ops *pops = &ops->proc_ops;
   2817
   2818	core_ops->reboot_device = ti_sci_cmd_core_reboot;
   2819
   2820	dops->get_device = ti_sci_cmd_get_device;
   2821	dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
   2822	dops->idle_device = ti_sci_cmd_idle_device;
   2823	dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
   2824	dops->put_device = ti_sci_cmd_put_device;
   2825
   2826	dops->is_valid = ti_sci_cmd_dev_is_valid;
   2827	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
   2828	dops->is_idle = ti_sci_cmd_dev_is_idle;
   2829	dops->is_stop = ti_sci_cmd_dev_is_stop;
   2830	dops->is_on = ti_sci_cmd_dev_is_on;
   2831	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
   2832	dops->set_device_resets = ti_sci_cmd_set_device_resets;
   2833	dops->get_device_resets = ti_sci_cmd_get_device_resets;
   2834
   2835	cops->get_clock = ti_sci_cmd_get_clock;
   2836	cops->idle_clock = ti_sci_cmd_idle_clock;
   2837	cops->put_clock = ti_sci_cmd_put_clock;
   2838	cops->is_auto = ti_sci_cmd_clk_is_auto;
   2839	cops->is_on = ti_sci_cmd_clk_is_on;
   2840	cops->is_off = ti_sci_cmd_clk_is_off;
   2841
   2842	cops->set_parent = ti_sci_cmd_clk_set_parent;
   2843	cops->get_parent = ti_sci_cmd_clk_get_parent;
   2844	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
   2845
   2846	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
   2847	cops->set_freq = ti_sci_cmd_clk_set_freq;
   2848	cops->get_freq = ti_sci_cmd_clk_get_freq;
   2849
   2850	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
   2851	rm_core_ops->get_range_from_shost =
   2852				ti_sci_cmd_get_resource_range_from_shost;
   2853
   2854	iops->set_irq = ti_sci_cmd_set_irq;
   2855	iops->set_event_map = ti_sci_cmd_set_event_map;
   2856	iops->free_irq = ti_sci_cmd_free_irq;
   2857	iops->free_event_map = ti_sci_cmd_free_event_map;
   2858
   2859	rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
   2860
   2861	psilops->pair = ti_sci_cmd_rm_psil_pair;
   2862	psilops->unpair = ti_sci_cmd_rm_psil_unpair;
   2863
   2864	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
   2865	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
   2866	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
   2867
   2868	pops->request = ti_sci_cmd_proc_request;
   2869	pops->release = ti_sci_cmd_proc_release;
   2870	pops->handover = ti_sci_cmd_proc_handover;
   2871	pops->set_config = ti_sci_cmd_proc_set_config;
   2872	pops->set_control = ti_sci_cmd_proc_set_control;
   2873	pops->get_status = ti_sci_cmd_proc_get_status;
   2874}
   2875
   2876/**
   2877 * ti_sci_get_handle() - Get the TI SCI handle for a device
   2878 * @dev:	Pointer to device for which we want SCI handle
   2879 *
   2880 * NOTE: The function does not track individual clients of the framework
    2881	 * and is expected to be maintained by the caller of the TI SCI protocol library.
    2882	 * ti_sci_put_handle must be balanced with a successful ti_sci_get_handle.
   2883 * Return: pointer to handle if successful, else:
   2884 * -EPROBE_DEFER if the instance is not ready
   2885 * -ENODEV if the required node handler is missing
   2886 * -EINVAL if invalid conditions are encountered.
   2887 */
   2888const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
   2889{
   2890	struct device_node *ti_sci_np;
   2891	struct list_head *p;
   2892	struct ti_sci_handle *handle = NULL;
   2893	struct ti_sci_info *info;
   2894
   2895	if (!dev) {
   2896		pr_err("I need a device pointer\n");
   2897		return ERR_PTR(-EINVAL);
   2898	}
   2899	ti_sci_np = of_get_parent(dev->of_node);
   2900	if (!ti_sci_np) {
   2901		dev_err(dev, "No OF information\n");
   2902		return ERR_PTR(-EINVAL);
   2903	}
   2904
   2905	mutex_lock(&ti_sci_list_mutex);
   2906	list_for_each(p, &ti_sci_list) {
   2907		info = list_entry(p, struct ti_sci_info, node);
   2908		if (ti_sci_np == info->dev->of_node) {
   2909			handle = &info->handle;
   2910			info->users++;
   2911			break;
   2912		}
   2913	}
   2914	mutex_unlock(&ti_sci_list_mutex);
   2915	of_node_put(ti_sci_np);
   2916
   2917	if (!handle)
   2918		return ERR_PTR(-EPROBE_DEFER);
   2919
   2920	return handle;
   2921}
   2922EXPORT_SYMBOL_GPL(ti_sci_get_handle);
   2923
   2924/**
   2925 * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
   2926 * @handle:	Handle acquired by ti_sci_get_handle
   2927 *
   2928 * NOTE: The function does not track individual clients of the framework
    2929	 * and is expected to be maintained by the caller of the TI SCI protocol library.
    2930	 * ti_sci_put_handle must be balanced with a successful ti_sci_get_handle.
    2931	 *
    2932	 * Return: 0 if successfully released;
    2933	 * if an error pointer was passed, the error value is returned;
    2934	 * if NULL was passed, -EINVAL is returned.
   2935 */
   2936int ti_sci_put_handle(const struct ti_sci_handle *handle)
   2937{
   2938	struct ti_sci_info *info;
   2939
   2940	if (IS_ERR(handle))
   2941		return PTR_ERR(handle);
   2942	if (!handle)
   2943		return -EINVAL;
   2944
   2945	info = handle_to_ti_sci_info(handle);
   2946	mutex_lock(&ti_sci_list_mutex);
   2947	if (!WARN_ON(!info->users))
   2948		info->users--;
   2949	mutex_unlock(&ti_sci_list_mutex);
   2950
   2951	return 0;
   2952}
   2953EXPORT_SYMBOL_GPL(ti_sci_put_handle);
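/*
 * Example (illustrative sketch): manual handle management in a client
 * driver; every successful ti_sci_get_handle() must eventually be paired
 * with ti_sci_put_handle() (or use the devm_ variants below instead).
 *
 *	const struct ti_sci_handle *sci = ti_sci_get_handle(dev);
 *
 *	if (IS_ERR(sci))
 *		return PTR_ERR(sci);
 *	... use sci->ops ...
 *	ti_sci_put_handle(sci);
 */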
   2954
   2955static void devm_ti_sci_release(struct device *dev, void *res)
   2956{
   2957	const struct ti_sci_handle **ptr = res;
   2958	const struct ti_sci_handle *handle = *ptr;
   2959	int ret;
   2960
   2961	ret = ti_sci_put_handle(handle);
   2962	if (ret)
   2963		dev_err(dev, "failed to put handle %d\n", ret);
   2964}
   2965
   2966/**
   2967 * devm_ti_sci_get_handle() - Managed get handle
    2968	 * @dev:	device for which we want the SCI handle
    2969	 *
    2970	 * NOTE: This releases the handle once the device resources are
    2971	 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
    2972	 * The function does not track individual clients of the framework
    2973	 * and is expected to be maintained by the caller of the TI SCI protocol library.
    2974	 *
    2975	 * Return: pointer to handle if successful, else corresponding error pointer.
   2976 */
   2977const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
   2978{
   2979	const struct ti_sci_handle **ptr;
   2980	const struct ti_sci_handle *handle;
   2981
   2982	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
   2983	if (!ptr)
   2984		return ERR_PTR(-ENOMEM);
   2985	handle = ti_sci_get_handle(dev);
   2986
   2987	if (!IS_ERR(handle)) {
   2988		*ptr = handle;
   2989		devres_add(dev, ptr);
   2990	} else {
   2991		devres_free(ptr);
   2992	}
   2993
   2994	return handle;
   2995}
   2996EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
   2997
   2998/**
   2999 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
   3000 * @np:		device node
   3001 * @property:	property name containing phandle on TISCI node
   3002 *
   3003 * NOTE: The function does not track individual clients of the framework
    3004	 * and is expected to be maintained by the caller of the TI SCI protocol library.
    3005	 * ti_sci_put_handle must be balanced with a successful ti_sci_get_by_phandle.
   3006 * Return: pointer to handle if successful, else:
   3007 * -EPROBE_DEFER if the instance is not ready
   3008 * -ENODEV if the required node handler is missing
   3009 * -EINVAL if invalid conditions are encountered.
   3010 */
   3011const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
   3012						  const char *property)
   3013{
   3014	struct ti_sci_handle *handle = NULL;
   3015	struct device_node *ti_sci_np;
   3016	struct ti_sci_info *info;
   3017	struct list_head *p;
   3018
   3019	if (!np) {
    3020		pr_err("I need a device node pointer\n");
   3021		return ERR_PTR(-EINVAL);
   3022	}
   3023
   3024	ti_sci_np = of_parse_phandle(np, property, 0);
   3025	if (!ti_sci_np)
   3026		return ERR_PTR(-ENODEV);
   3027
   3028	mutex_lock(&ti_sci_list_mutex);
   3029	list_for_each(p, &ti_sci_list) {
   3030		info = list_entry(p, struct ti_sci_info, node);
   3031		if (ti_sci_np == info->dev->of_node) {
   3032			handle = &info->handle;
   3033			info->users++;
   3034			break;
   3035		}
   3036	}
   3037	mutex_unlock(&ti_sci_list_mutex);
   3038	of_node_put(ti_sci_np);
   3039
   3040	if (!handle)
   3041		return ERR_PTR(-EPROBE_DEFER);
   3042
   3043	return handle;
   3044}
   3045EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
   3046
   3047/**
   3048 * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
   3049 * @dev:	Device pointer requesting TISCI handle
   3050 * @property:	property name containing phandle on TISCI node
   3051 *
   3052 * NOTE: This releases the handle once the device resources are
   3053 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
   3054 * The function does not track individual clients of the framework
    3055	 * and is expected to be maintained by the caller of the TI SCI protocol library.
    3056	 *
    3057	 * Return: pointer to handle if successful, else corresponding error pointer.
   3058 */
   3059const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
   3060						       const char *property)
   3061{
   3062	const struct ti_sci_handle *handle;
   3063	const struct ti_sci_handle **ptr;
   3064
   3065	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
   3066	if (!ptr)
   3067		return ERR_PTR(-ENOMEM);
   3068	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
   3069
   3070	if (!IS_ERR(handle)) {
   3071		*ptr = handle;
   3072		devres_add(dev, ptr);
   3073	} else {
   3074		devres_free(ptr);
   3075	}
   3076
   3077	return handle;
   3078}
   3079EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
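/*
 * Example (illustrative sketch): managed acquisition from probe() through a
 * DT phandle; the "ti,sci" property name follows existing TISCI bindings
 * but is shown here only as an illustration.
 *
 *	const struct ti_sci_handle *sci;
 *
 *	sci = devm_ti_sci_get_by_phandle(&pdev->dev, "ti,sci");
 *	if (IS_ERR(sci))
 *		return dev_err_probe(&pdev->dev, PTR_ERR(sci),
 *				     "failed to get TISCI handle\n");
 */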
   3080
   3081/**
   3082 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
   3083 * @res:	Pointer to the TISCI resource
   3084 *
   3085 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
   3086 */
   3087u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
   3088{
   3089	unsigned long flags;
   3090	u16 set, free_bit;
   3091
   3092	raw_spin_lock_irqsave(&res->lock, flags);
   3093	for (set = 0; set < res->sets; set++) {
   3094		struct ti_sci_resource_desc *desc = &res->desc[set];
   3095		int res_count = desc->num + desc->num_sec;
   3096
   3097		free_bit = find_first_zero_bit(desc->res_map, res_count);
   3098		if (free_bit != res_count) {
   3099			set_bit(free_bit, desc->res_map);
   3100			raw_spin_unlock_irqrestore(&res->lock, flags);
   3101
   3102			if (desc->num && free_bit < desc->num)
   3103				return desc->start + free_bit;
   3104			else
   3105				return desc->start_sec + free_bit;
   3106		}
   3107	}
   3108	raw_spin_unlock_irqrestore(&res->lock, flags);
   3109
   3110	return TI_SCI_RESOURCE_NULL;
   3111}
   3112EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
   3113
   3114/**
   3115 * ti_sci_release_resource() - Release a resource from TISCI resource.
   3116 * @res:	Pointer to the TISCI resource
   3117 * @id:		Resource id to be released.
   3118 */
   3119void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
   3120{
   3121	unsigned long flags;
   3122	u16 set;
   3123
   3124	raw_spin_lock_irqsave(&res->lock, flags);
   3125	for (set = 0; set < res->sets; set++) {
   3126		struct ti_sci_resource_desc *desc = &res->desc[set];
   3127
   3128		if (desc->num && desc->start <= id &&
   3129		    (desc->start + desc->num) > id)
   3130			clear_bit(id - desc->start, desc->res_map);
   3131		else if (desc->num_sec && desc->start_sec <= id &&
   3132			 (desc->start_sec + desc->num_sec) > id)
   3133			clear_bit(id - desc->start_sec, desc->res_map);
   3134	}
   3135	raw_spin_unlock_irqrestore(&res->lock, flags);
   3136}
   3137EXPORT_SYMBOL_GPL(ti_sci_release_resource);
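/*
 * Example (illustrative sketch): clients allocate individual resources out
 * of a pool built by devm_ti_sci_get_of_resource() below and hand them back
 * when done.
 *
 *	u16 id = ti_sci_get_free_resource(res);
 *
 *	if (id == TI_SCI_RESOURCE_NULL)
 *		return -ENOSPC;
 *	... use the resource ...
 *	ti_sci_release_resource(res, id);
 */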
   3138
   3139/**
   3140 * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
   3141 * @res:	Pointer to the TISCI resource
   3142 *
   3143 * Return: Total number of available resources.
   3144 */
   3145u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
   3146{
   3147	u32 set, count = 0;
   3148
   3149	for (set = 0; set < res->sets; set++)
   3150		count += res->desc[set].num + res->desc[set].num_sec;
   3151
   3152	return count;
   3153}
   3154EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
   3155
   3156/**
    3157 * devm_ti_sci_get_resource_sets() - Get TISCI resources assigned to a device
   3158 * @handle:	TISCI handle
   3159 * @dev:	Device pointer to which the resource is assigned
   3160 * @dev_id:	TISCI device id to which the resource is assigned
    3161 * @sub_types:	Array of sub_types assigned to the device
   3162 * @sets:	Number of sub_types
   3163 *
   3164 * Return: Pointer to ti_sci_resource if all went well else appropriate
   3165 *	   error pointer.
   3166 */
   3167static struct ti_sci_resource *
   3168devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
   3169			      struct device *dev, u32 dev_id, u32 *sub_types,
   3170			      u32 sets)
   3171{
   3172	struct ti_sci_resource *res;
   3173	bool valid_set = false;
   3174	int i, ret, res_count;
   3175
   3176	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
   3177	if (!res)
   3178		return ERR_PTR(-ENOMEM);
   3179
   3180	res->sets = sets;
   3181	res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
   3182				 GFP_KERNEL);
   3183	if (!res->desc)
   3184		return ERR_PTR(-ENOMEM);
   3185
   3186	for (i = 0; i < res->sets; i++) {
   3187		ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
   3188							sub_types[i],
   3189							&res->desc[i]);
   3190		if (ret) {
   3191			dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
   3192				dev_id, sub_types[i]);
   3193			memset(&res->desc[i], 0, sizeof(res->desc[i]));
   3194			continue;
   3195		}
   3196
   3197		dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
   3198			dev_id, sub_types[i], res->desc[i].start,
   3199			res->desc[i].num, res->desc[i].start_sec,
   3200			res->desc[i].num_sec);
   3201
   3202		valid_set = true;
   3203		res_count = res->desc[i].num + res->desc[i].num_sec;
   3204		res->desc[i].res_map =
   3205			devm_kzalloc(dev, BITS_TO_LONGS(res_count) *
   3206				     sizeof(*res->desc[i].res_map), GFP_KERNEL);
   3207		if (!res->desc[i].res_map)
   3208			return ERR_PTR(-ENOMEM);
   3209	}
   3210	raw_spin_lock_init(&res->lock);
   3211
   3212	if (valid_set)
   3213		return res;
   3214
   3215	return ERR_PTR(-EINVAL);
   3216}
   3217
   3218/**
   3219 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
   3220 * @handle:	TISCI handle
   3221 * @dev:	Device pointer to which the resource is assigned
   3222 * @dev_id:	TISCI device id to which the resource is assigned
    3223 * @of_prop:	property name by which the resources are represented
   3224 *
   3225 * Return: Pointer to ti_sci_resource if all went well else appropriate
   3226 *	   error pointer.
   3227 */
   3228struct ti_sci_resource *
   3229devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
   3230			    struct device *dev, u32 dev_id, char *of_prop)
   3231{
   3232	struct ti_sci_resource *res;
   3233	u32 *sub_types;
   3234	int sets;
   3235
   3236	sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
   3237					       sizeof(u32));
   3238	if (sets < 0) {
   3239		dev_err(dev, "%s resource type ids not available\n", of_prop);
   3240		return ERR_PTR(sets);
   3241	}
   3242
   3243	sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
   3244	if (!sub_types)
   3245		return ERR_PTR(-ENOMEM);
   3246
   3247	of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
   3248	res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
   3249					    sets);
   3250
   3251	kfree(sub_types);
   3252	return res;
   3253}
   3254EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
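
        /*
         * Devicetree sketch (illustrative; the property name below follows
         * existing ti,sci-rm users but is a placeholder here, as is "tisci"):
         *
         *	// in the client node:  ti,sci-rm-range-vint = <0x0a>;
         *
         *	res = devm_ti_sci_get_of_resource(tisci, dev, dev_id,
         *					  "ti,sci-rm-range-vint");
         *	if (IS_ERR(res))
         *		return PTR_ERR(res);
         *
         * Each u32 in the property is one sub_type; a range is queried for
         * every entry, and the call fails only if no set turns out valid.
         */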
   3255
   3256/**
   3257 * devm_ti_sci_get_resource() - Get a resource range assigned to the device
   3258 * @handle:	TISCI handle
   3259 * @dev:	Device pointer to which the resource is assigned
   3260 * @dev_id:	TISCI device id to which the resource is assigned
    3261 * @sub_type:	TISCI resource subtype representing the resource.
   3262 *
   3263 * Return: Pointer to ti_sci_resource if all went well else appropriate
   3264 *	   error pointer.
   3265 */
   3266struct ti_sci_resource *
   3267devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
   3268			 u32 dev_id, u32 sub_type)
   3269{
   3270	return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
   3271}
   3272EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
   3273
   3274static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
   3275				void *cmd)
   3276{
   3277	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
   3278	const struct ti_sci_handle *handle = &info->handle;
   3279
   3280	ti_sci_cmd_core_reboot(handle);
   3281
    3282	/* Whether the call fails or passes, we should never reach here */
   3283	return NOTIFY_BAD;
   3284}
   3285
   3286static void ti_sci_set_is_suspending(struct ti_sci_info *info, bool is_suspending)
   3287{
   3288	info->is_suspending = is_suspending;
   3289}
   3290
   3291static int ti_sci_suspend(struct device *dev)
   3292{
   3293	struct ti_sci_info *info = dev_get_drvdata(dev);
   3294	/*
   3295	 * We must switch operation to polled mode now as drivers and the genpd
   3296	 * layer may make late TI SCI calls to change clock and device states
   3297	 * from the noirq phase of suspend.
   3298	 */
   3299	ti_sci_set_is_suspending(info, true);
   3300
   3301	return 0;
   3302}
   3303
   3304static int ti_sci_resume(struct device *dev)
   3305{
   3306	struct ti_sci_info *info = dev_get_drvdata(dev);
   3307
   3308	ti_sci_set_is_suspending(info, false);
   3309
   3310	return 0;
   3311}
   3312
   3313static DEFINE_SIMPLE_DEV_PM_OPS(ti_sci_pm_ops, ti_sci_suspend, ti_sci_resume);
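
        /*
         * While is_suspending is set, the transfer path earlier in this file
         * is expected to poll for responses rather than wait on mailbox
         * interrupts (see the comment in ti_sci_suspend() above).
         */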
   3314
   3315/* Description for K2G */
   3316static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
   3317	.default_host_id = 2,
   3318	/* Conservative duration */
   3319	.max_rx_timeout_ms = 1000,
    3320	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
   3321	.max_msgs = 20,
   3322	.max_msg_size = 64,
   3323};
   3324
   3325/* Description for AM654 */
   3326static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
   3327	.default_host_id = 12,
   3328	/* Conservative duration */
   3329	.max_rx_timeout_ms = 10000,
    3330	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
   3331	.max_msgs = 20,
   3332	.max_msg_size = 60,
   3333};
   3334
   3335static const struct of_device_id ti_sci_of_match[] = {
   3336	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
   3337	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
   3338	{ /* Sentinel */ },
   3339};
   3340MODULE_DEVICE_TABLE(of, ti_sci_of_match);
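
        /*
         * Illustrative node shape for the compatibles above (sketch only;
         * the unit address and mbox specifiers are placeholders, see the
         * ti,k2g-sci devicetree binding for the authoritative layout):
         *
         *	dmsc: system-controller@2921800 {
         *		compatible = "ti,k2g-sci";
         *		ti,host-id = <2>;
         *		ti,system-reboot-controller;
         *		mbox-names = "rx", "tx";
         *		mboxes = <&msgmgr &proxy_rx>, <&msgmgr &proxy_tx>;
         *	};
         */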
   3341
   3342static int ti_sci_probe(struct platform_device *pdev)
   3343{
   3344	struct device *dev = &pdev->dev;
   3345	const struct of_device_id *of_id;
   3346	const struct ti_sci_desc *desc;
   3347	struct ti_sci_xfer *xfer;
   3348	struct ti_sci_info *info = NULL;
   3349	struct ti_sci_xfers_info *minfo;
   3350	struct mbox_client *cl;
   3351	int ret = -EINVAL;
   3352	int i;
   3353	int reboot = 0;
   3354	u32 h_id;
   3355
   3356	of_id = of_match_device(ti_sci_of_match, dev);
   3357	if (!of_id) {
   3358		dev_err(dev, "OF data missing\n");
   3359		return -EINVAL;
   3360	}
   3361	desc = of_id->data;
   3362
   3363	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
   3364	if (!info)
   3365		return -ENOMEM;
   3366
   3367	info->dev = dev;
   3368	info->desc = desc;
   3369	ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
   3370	/* if the property is not present in DT, use a default from desc */
   3371	if (ret < 0) {
   3372		info->host_id = info->desc->default_host_id;
   3373	} else {
   3374		if (!h_id) {
   3375			dev_warn(dev, "Host ID 0 is reserved for firmware\n");
   3376			info->host_id = info->desc->default_host_id;
   3377		} else {
   3378			info->host_id = h_id;
   3379		}
   3380	}
   3381
   3382	reboot = of_property_read_bool(dev->of_node,
   3383				       "ti,system-reboot-controller");
   3384	INIT_LIST_HEAD(&info->node);
   3385	minfo = &info->minfo;
   3386
    3387	/*
    3388	 * Pre-allocate messages. Never allocate more than can be indexed
    3389	 * by hdr.seq: the sequence number doubles as the index into
    3390	 * xfer_alloc_table, so a buggy SoC description must be caught here.
    3391	 */
   3392	if (WARN_ON(desc->max_msgs >=
   3393		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
   3394		return -EINVAL;
   3395
   3396	minfo->xfer_block = devm_kcalloc(dev,
   3397					 desc->max_msgs,
   3398					 sizeof(*minfo->xfer_block),
   3399					 GFP_KERNEL);
   3400	if (!minfo->xfer_block)
   3401		return -ENOMEM;
   3402
   3403	minfo->xfer_alloc_table = devm_kcalloc(dev,
   3404					       BITS_TO_LONGS(desc->max_msgs),
   3405					       sizeof(unsigned long),
   3406					       GFP_KERNEL);
   3407	if (!minfo->xfer_alloc_table)
   3408		return -ENOMEM;
   3409	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
   3410
   3411	/* Pre-initialize the buffer pointer to pre-allocated buffers */
   3412	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
   3413		xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
   3414					      GFP_KERNEL);
   3415		if (!xfer->xfer_buf)
   3416			return -ENOMEM;
   3417
   3418		xfer->tx_message.buf = xfer->xfer_buf;
   3419		init_completion(&xfer->done);
   3420	}
   3421
   3422	ret = ti_sci_debugfs_create(pdev, info);
   3423	if (ret)
   3424		dev_warn(dev, "Failed to create debug file\n");
   3425
   3426	platform_set_drvdata(pdev, info);
   3427
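        	/*
        	 * Mailbox client setup: TX is non-blocking, responses arrive
        	 * through ti_sci_rx_callback(), and knows_txdone tells the
        	 * mailbox core that this client signals TX completion itself.
        	 */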
   3428	cl = &info->cl;
   3429	cl->dev = dev;
   3430	cl->tx_block = false;
   3431	cl->rx_callback = ti_sci_rx_callback;
   3432	cl->knows_txdone = true;
   3433
   3434	spin_lock_init(&minfo->xfer_lock);
   3435	sema_init(&minfo->sem_xfer_count, desc->max_msgs);
   3436
   3437	info->chan_rx = mbox_request_channel_byname(cl, "rx");
   3438	if (IS_ERR(info->chan_rx)) {
   3439		ret = PTR_ERR(info->chan_rx);
   3440		goto out;
   3441	}
   3442
   3443	info->chan_tx = mbox_request_channel_byname(cl, "tx");
   3444	if (IS_ERR(info->chan_tx)) {
   3445		ret = PTR_ERR(info->chan_tx);
   3446		goto out;
   3447	}
   3448	ret = ti_sci_cmd_get_revision(info);
   3449	if (ret) {
   3450		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
   3451		goto out;
   3452	}
   3453
   3454	ti_sci_setup_ops(info);
   3455
   3456	if (reboot) {
   3457		info->nb.notifier_call = tisci_reboot_handler;
   3458		info->nb.priority = 128;
   3459
   3460		ret = register_restart_handler(&info->nb);
   3461		if (ret) {
   3462			dev_err(dev, "reboot registration fail(%d)\n", ret);
   3463			goto out;
   3464		}
   3465	}
   3466
   3467	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
   3468		 info->handle.version.abi_major, info->handle.version.abi_minor,
   3469		 info->handle.version.firmware_revision,
   3470		 info->handle.version.firmware_description);
   3471
   3472	mutex_lock(&ti_sci_list_mutex);
   3473	list_add_tail(&info->node, &ti_sci_list);
   3474	mutex_unlock(&ti_sci_list_mutex);
   3475
   3476	return of_platform_populate(dev->of_node, NULL, NULL, dev);
   3477out:
   3478	if (!IS_ERR(info->chan_tx))
   3479		mbox_free_channel(info->chan_tx);
   3480	if (!IS_ERR(info->chan_rx))
   3481		mbox_free_channel(info->chan_rx);
   3482	debugfs_remove(info->d);
   3483	return ret;
   3484}
   3485
   3486static int ti_sci_remove(struct platform_device *pdev)
   3487{
   3488	struct ti_sci_info *info;
   3489	struct device *dev = &pdev->dev;
   3490	int ret = 0;
   3491
   3492	of_platform_depopulate(dev);
   3493
   3494	info = platform_get_drvdata(pdev);
   3495
   3496	if (info->nb.notifier_call)
   3497		unregister_restart_handler(&info->nb);
   3498
   3499	mutex_lock(&ti_sci_list_mutex);
   3500	if (info->users)
   3501		ret = -EBUSY;
   3502	else
   3503		list_del(&info->node);
   3504	mutex_unlock(&ti_sci_list_mutex);
   3505
   3506	if (!ret) {
   3507		ti_sci_debugfs_destroy(pdev, info);
   3508
   3509		/* Safe to free channels since no more users */
   3510		mbox_free_channel(info->chan_tx);
   3511		mbox_free_channel(info->chan_rx);
   3512	}
   3513
   3514	return ret;
   3515}
   3516
   3517static struct platform_driver ti_sci_driver = {
   3518	.probe = ti_sci_probe,
   3519	.remove = ti_sci_remove,
   3520	.driver = {
   3521		   .name = "ti-sci",
   3522		   .of_match_table = of_match_ptr(ti_sci_of_match),
   3523		   .pm = &ti_sci_pm_ops,
   3524	},
   3525};
   3526module_platform_driver(ti_sci_driver);
   3527
   3528MODULE_LICENSE("GPL v2");
    3529MODULE_DESCRIPTION("TI System Control Interface (SCI) driver");
   3530MODULE_AUTHOR("Nishanth Menon");
   3531MODULE_ALIAS("platform:ti-sci");