cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qcom_glink_native.c (45598B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2016-2017, Linaro Ltd
      4 */
      5
      6#include <linux/idr.h>
      7#include <linux/interrupt.h>
      8#include <linux/io.h>
      9#include <linux/list.h>
     10#include <linux/mfd/syscon.h>
     11#include <linux/module.h>
     12#include <linux/of.h>
     13#include <linux/of_address.h>
     14#include <linux/of_irq.h>
     15#include <linux/platform_device.h>
     16#include <linux/regmap.h>
     17#include <linux/rpmsg.h>
     18#include <linux/sizes.h>
     19#include <linux/slab.h>
     20#include <linux/workqueue.h>
     21#include <linux/mailbox_client.h>
     22
     23#include "rpmsg_internal.h"
     24#include "qcom_glink_native.h"
     25
     26#define GLINK_NAME_SIZE		32
     27#define GLINK_VERSION_1		1
     28
     29#define RPM_GLINK_CID_MIN	1
     30#define RPM_GLINK_CID_MAX	65536
     31
     32struct glink_msg {
     33	__le16 cmd;
     34	__le16 param1;
     35	__le32 param2;
     36	u8 data[];
     37} __packed;
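/*
 * Every control command on the FIFO starts with this 8-byte cmd/param1/param2
 * header and may be followed by a payload. As a concrete example, the
 * RPM_CMD_VERSION message built by qcom_glink_send_version() below is laid
 * out as:
 *
 *   bytes 0-1: cmd    = RPM_CMD_VERSION          (__le16)
 *   bytes 2-3: param1 = GLINK_VERSION_1          (__le16)
 *   bytes 4-7: param2 = advertised feature bits  (__le32)
 */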
     38
     39/**
     40 * struct glink_defer_cmd - deferred incoming control message
     41 * @node:	list node
     42 * @msg:	message header
     43 * @data:	payload of the message
     44 *
     45 * Copy of a received control message, to be added to @rx_queue and processed
     46 * by @rx_work of @qcom_glink.
     47 */
     48struct glink_defer_cmd {
     49	struct list_head node;
     50
     51	struct glink_msg msg;
     52	u8 data[];
     53};
     54
     55/**
     56 * struct glink_core_rx_intent - RX intent
      57 * A receive buffer advertised to the remote side ahead of transmission
     58 *
     59 * @data: pointer to the data (may be NULL for zero-copy)
     60 * @id: remote or local intent ID
     61 * @size: size of the original intent (do not modify)
      62 * @reuse: whether the intent can be reused after first use
      63 * @in_use: whether the intent is currently in use on the channel
     64 * @offset: next write offset (initially 0)
     65 * @node:	list node
     66 */
     67struct glink_core_rx_intent {
     68	void *data;
     69	u32 id;
     70	size_t size;
     71	bool reuse;
     72	bool in_use;
     73	u32 offset;
     74
     75	struct list_head node;
     76};
     77
     78/**
     79 * struct qcom_glink - driver context, relates to one remote subsystem
     80 * @dev:	reference to the associated struct device
     81 * @mbox_client: mailbox client
     82 * @mbox_chan:  mailbox channel
     83 * @rx_pipe:	pipe object for receive FIFO
     84 * @tx_pipe:	pipe object for transmit FIFO
     85 * @irq:	IRQ for signaling incoming events
     86 * @rx_work:	worker for handling received control messages
     87 * @rx_lock:	protects the @rx_queue
     88 * @rx_queue:	queue of received control messages to be processed in @rx_work
     89 * @tx_lock:	synchronizes operations on the tx fifo
     90 * @idr_lock:	synchronizes @lcids and @rcids modifications
     91 * @lcids:	idr of all channels with a known local channel id
     92 * @rcids:	idr of all channels with a known remote channel id
     93 * @features:	remote features
      94 * @intentless:	flag to indicate that the transport does not use rx intents
     95 * @tx_avail_notify: Waitqueue for pending tx tasks
      96 * @sent_read_notify: flag to track that a READ_NOTIF has been sent while awaiting tx space
     97 */
     98struct qcom_glink {
     99	struct device *dev;
    100
    101	const char *name;
    102
    103	struct mbox_client mbox_client;
    104	struct mbox_chan *mbox_chan;
    105
    106	struct qcom_glink_pipe *rx_pipe;
    107	struct qcom_glink_pipe *tx_pipe;
    108
    109	int irq;
    110
    111	struct work_struct rx_work;
    112	spinlock_t rx_lock;
    113	struct list_head rx_queue;
    114
    115	spinlock_t tx_lock;
    116
    117	spinlock_t idr_lock;
    118	struct idr lcids;
    119	struct idr rcids;
    120	unsigned long features;
    121
    122	bool intentless;
    123	wait_queue_head_t tx_avail_notify;
    124	bool sent_read_notify;
    125};
    126
    127enum {
    128	GLINK_STATE_CLOSED,
    129	GLINK_STATE_OPENING,
    130	GLINK_STATE_OPEN,
    131	GLINK_STATE_CLOSING,
    132};
    133
    134/**
    135 * struct glink_channel - internal representation of a channel
    136 * @rpdev:	rpdev reference, only used for primary endpoints
    137 * @ept:	rpmsg endpoint this channel is associated with
    138 * @glink:	qcom_glink context handle
    139 * @refcount:	refcount for the channel object
    140 * @recv_lock:	guard for @ept.cb
    141 * @name:	unique channel name/identifier
    142 * @lcid:	channel id, in local space
    143 * @rcid:	channel id, in remote space
    144 * @intent_lock: lock for protection of @liids, @riids
    145 * @liids:	idr of all local intents
    146 * @riids:	idr of all remote intents
    147 * @intent_work: worker responsible for transmitting rx_done packets
     148 * @done_intents: list of intents that need to be announced rx_done
    149 * @buf:	receive buffer, for gathering fragments
    150 * @buf_offset:	write offset in @buf
    151 * @buf_size:	size of current @buf
    152 * @open_ack:	completed once remote has acked the open-request
    153 * @open_req:	completed once open-request has been received
    154 * @intent_req_lock: Synchronises multiple intent requests
    155 * @intent_req_result: Result of intent request
    156 * @intent_req_comp: Completion for intent_req signalling
    157 */
    158struct glink_channel {
    159	struct rpmsg_endpoint ept;
    160
    161	struct rpmsg_device *rpdev;
    162	struct qcom_glink *glink;
    163
    164	struct kref refcount;
    165
    166	spinlock_t recv_lock;
    167
    168	char *name;
    169	unsigned int lcid;
    170	unsigned int rcid;
    171
    172	spinlock_t intent_lock;
    173	struct idr liids;
    174	struct idr riids;
    175	struct work_struct intent_work;
    176	struct list_head done_intents;
    177
    178	struct glink_core_rx_intent *buf;
    179	int buf_offset;
    180	int buf_size;
    181
    182	struct completion open_ack;
    183	struct completion open_req;
    184
    185	struct mutex intent_req_lock;
    186	bool intent_req_result;
    187	struct completion intent_req_comp;
    188};
    189
    190#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)
    191
    192static const struct rpmsg_endpoint_ops glink_endpoint_ops;
    193
    194#define RPM_CMD_VERSION			0
    195#define RPM_CMD_VERSION_ACK		1
    196#define RPM_CMD_OPEN			2
    197#define RPM_CMD_CLOSE			3
    198#define RPM_CMD_OPEN_ACK		4
    199#define RPM_CMD_INTENT			5
    200#define RPM_CMD_RX_DONE			6
    201#define RPM_CMD_RX_INTENT_REQ		7
    202#define RPM_CMD_RX_INTENT_REQ_ACK	8
    203#define RPM_CMD_TX_DATA			9
    204#define RPM_CMD_CLOSE_ACK		11
    205#define RPM_CMD_TX_DATA_CONT		12
    206#define RPM_CMD_READ_NOTIF		13
    207#define RPM_CMD_RX_DONE_W_REUSE		14
    208
    209#define GLINK_FEATURE_INTENTLESS	BIT(1)
    210
    211static void qcom_glink_rx_done_work(struct work_struct *work);
    212
    213static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
    214						      const char *name)
    215{
    216	struct glink_channel *channel;
    217
    218	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
    219	if (!channel)
    220		return ERR_PTR(-ENOMEM);
    221
     222	/* Set up the glink_channel internal data */
    223	spin_lock_init(&channel->recv_lock);
    224	spin_lock_init(&channel->intent_lock);
    225	mutex_init(&channel->intent_req_lock);
    226
    227	channel->glink = glink;
    228	channel->name = kstrdup(name, GFP_KERNEL);
    229
    230	init_completion(&channel->open_req);
    231	init_completion(&channel->open_ack);
    232	init_completion(&channel->intent_req_comp);
    233
    234	INIT_LIST_HEAD(&channel->done_intents);
    235	INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work);
    236
    237	idr_init(&channel->liids);
    238	idr_init(&channel->riids);
    239	kref_init(&channel->refcount);
    240
    241	return channel;
    242}
    243
    244static void qcom_glink_channel_release(struct kref *ref)
    245{
    246	struct glink_channel *channel = container_of(ref, struct glink_channel,
    247						     refcount);
    248	struct glink_core_rx_intent *intent;
    249	struct glink_core_rx_intent *tmp;
    250	unsigned long flags;
    251	int iid;
    252
    253	/* cancel pending rx_done work */
    254	cancel_work_sync(&channel->intent_work);
    255
    256	spin_lock_irqsave(&channel->intent_lock, flags);
    257	/* Free all non-reuse intents pending rx_done work */
    258	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
    259		if (!intent->reuse) {
    260			kfree(intent->data);
    261			kfree(intent);
    262		}
    263	}
    264
    265	idr_for_each_entry(&channel->liids, tmp, iid) {
    266		kfree(tmp->data);
    267		kfree(tmp);
    268	}
    269	idr_destroy(&channel->liids);
    270
    271	idr_for_each_entry(&channel->riids, tmp, iid)
    272		kfree(tmp);
    273	idr_destroy(&channel->riids);
    274	spin_unlock_irqrestore(&channel->intent_lock, flags);
    275
    276	kfree(channel->name);
    277	kfree(channel);
    278}
    279
    280static size_t qcom_glink_rx_avail(struct qcom_glink *glink)
    281{
    282	return glink->rx_pipe->avail(glink->rx_pipe);
    283}
    284
    285static void qcom_glink_rx_peak(struct qcom_glink *glink,
    286			       void *data, unsigned int offset, size_t count)
    287{
    288	glink->rx_pipe->peak(glink->rx_pipe, data, offset, count);
    289}
    290
    291static void qcom_glink_rx_advance(struct qcom_glink *glink, size_t count)
    292{
    293	glink->rx_pipe->advance(glink->rx_pipe, count);
    294}
    295
    296static size_t qcom_glink_tx_avail(struct qcom_glink *glink)
    297{
    298	return glink->tx_pipe->avail(glink->tx_pipe);
    299}
    300
    301static void qcom_glink_tx_write(struct qcom_glink *glink,
    302				const void *hdr, size_t hlen,
    303				const void *data, size_t dlen)
    304{
    305	glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen);
    306}
    307
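/*
 * Flow control: when a blocking write finds the tx FIFO full, qcom_glink_tx()
 * sends a single RPM_CMD_READ_NOTIF and sleeps on tx_avail_notify; the peer
 * rings the doorbell once it has drained data, and the resulting interrupt
 * (qcom_glink_native_intr()) wakes the writer. The RPM_CMD_READ_NOTIF case in
 * the interrupt handler below is the receiving half of the same handshake.
 */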
    308static void qcom_glink_send_read_notify(struct qcom_glink *glink)
    309{
    310	struct glink_msg msg;
    311
    312	msg.cmd = cpu_to_le16(RPM_CMD_READ_NOTIF);
    313	msg.param1 = 0;
    314	msg.param2 = 0;
    315
    316	qcom_glink_tx_write(glink, &msg, sizeof(msg), NULL, 0);
    317
    318	mbox_send_message(glink->mbox_chan, NULL);
    319	mbox_client_txdone(glink->mbox_chan, 0);
    320}
    321
    322static int qcom_glink_tx(struct qcom_glink *glink,
    323			 const void *hdr, size_t hlen,
    324			 const void *data, size_t dlen, bool wait)
    325{
    326	unsigned int tlen = hlen + dlen;
    327	unsigned long flags;
    328	int ret = 0;
    329
    330	/* Reject packets that are too big */
    331	if (tlen >= glink->tx_pipe->length)
    332		return -EINVAL;
    333
    334	spin_lock_irqsave(&glink->tx_lock, flags);
    335
    336	while (qcom_glink_tx_avail(glink) < tlen) {
    337		if (!wait) {
    338			ret = -EAGAIN;
    339			goto out;
    340		}
    341
    342		if (!glink->sent_read_notify) {
    343			glink->sent_read_notify = true;
    344			qcom_glink_send_read_notify(glink);
    345		}
    346
    347		/* Wait without holding the tx_lock */
    348		spin_unlock_irqrestore(&glink->tx_lock, flags);
    349
    350		wait_event_timeout(glink->tx_avail_notify,
    351				   qcom_glink_tx_avail(glink) >= tlen, 10 * HZ);
    352
    353		spin_lock_irqsave(&glink->tx_lock, flags);
    354
    355		if (qcom_glink_tx_avail(glink) >= tlen)
    356			glink->sent_read_notify = false;
    357	}
    358
    359	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);
    360
    361	mbox_send_message(glink->mbox_chan, NULL);
    362	mbox_client_txdone(glink->mbox_chan, 0);
    363
    364out:
    365	spin_unlock_irqrestore(&glink->tx_lock, flags);
    366
    367	return ret;
    368}
    369
    370static int qcom_glink_send_version(struct qcom_glink *glink)
    371{
    372	struct glink_msg msg;
    373
    374	msg.cmd = cpu_to_le16(RPM_CMD_VERSION);
    375	msg.param1 = cpu_to_le16(GLINK_VERSION_1);
    376	msg.param2 = cpu_to_le32(glink->features);
    377
    378	return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
    379}
    380
    381static void qcom_glink_send_version_ack(struct qcom_glink *glink)
    382{
    383	struct glink_msg msg;
    384
    385	msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK);
    386	msg.param1 = cpu_to_le16(GLINK_VERSION_1);
    387	msg.param2 = cpu_to_le32(glink->features);
    388
    389	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
    390}
    391
    392static void qcom_glink_send_open_ack(struct qcom_glink *glink,
    393				     struct glink_channel *channel)
    394{
    395	struct glink_msg msg;
    396
    397	msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK);
    398	msg.param1 = cpu_to_le16(channel->rcid);
    399	msg.param2 = cpu_to_le32(0);
    400
    401	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
    402}
    403
    404static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
    405					     unsigned int cid, bool granted)
    406{
    407	struct glink_channel *channel;
    408	unsigned long flags;
    409
    410	spin_lock_irqsave(&glink->idr_lock, flags);
    411	channel = idr_find(&glink->rcids, cid);
    412	spin_unlock_irqrestore(&glink->idr_lock, flags);
    413	if (!channel) {
    414		dev_err(glink->dev, "unable to find channel\n");
    415		return;
    416	}
    417
    418	channel->intent_req_result = granted;
    419	complete(&channel->intent_req_comp);
    420}
    421
    422/**
    423 * qcom_glink_send_open_req() - send a RPM_CMD_OPEN request to the remote
    424 * @glink: Ptr to the glink edge
     425 * @channel: Ptr to the channel for which the open request is sent
    426 *
    427 * Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote.
    428 * Will return with refcount held, regardless of outcome.
    429 *
    430 * Return: 0 on success, negative errno otherwise.
    431 */
    432static int qcom_glink_send_open_req(struct qcom_glink *glink,
    433				    struct glink_channel *channel)
    434{
    435	struct {
    436		struct glink_msg msg;
    437		u8 name[GLINK_NAME_SIZE];
    438	} __packed req;
    439	int name_len = strlen(channel->name) + 1;
    440	int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
    441	int ret;
    442	unsigned long flags;
    443
    444	kref_get(&channel->refcount);
    445
    446	spin_lock_irqsave(&glink->idr_lock, flags);
    447	ret = idr_alloc_cyclic(&glink->lcids, channel,
    448			       RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX,
    449			       GFP_ATOMIC);
    450	spin_unlock_irqrestore(&glink->idr_lock, flags);
    451	if (ret < 0)
    452		return ret;
    453
    454	channel->lcid = ret;
    455
    456	req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN);
    457	req.msg.param1 = cpu_to_le16(channel->lcid);
    458	req.msg.param2 = cpu_to_le32(name_len);
    459	strcpy(req.name, channel->name);
    460
    461	ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true);
    462	if (ret)
    463		goto remove_idr;
    464
    465	return 0;
    466
    467remove_idr:
    468	spin_lock_irqsave(&glink->idr_lock, flags);
    469	idr_remove(&glink->lcids, channel->lcid);
    470	channel->lcid = 0;
    471	spin_unlock_irqrestore(&glink->idr_lock, flags);
    472
    473	return ret;
    474}
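/*
 * Worked example for the sizing above: opening a channel named "rpmsg_chrdev"
 * sends param2 = name_len = 13 (12 characters plus the NUL terminator), and
 * the command is padded to req_len = ALIGN(8 + 13, 8) = 24 bytes on the FIFO.
 */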
    475
    476static void qcom_glink_send_close_req(struct qcom_glink *glink,
    477				      struct glink_channel *channel)
    478{
    479	struct glink_msg req;
    480
    481	req.cmd = cpu_to_le16(RPM_CMD_CLOSE);
    482	req.param1 = cpu_to_le16(channel->lcid);
    483	req.param2 = 0;
    484
    485	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
    486}
    487
    488static void qcom_glink_send_close_ack(struct qcom_glink *glink,
    489				      unsigned int rcid)
    490{
    491	struct glink_msg req;
    492
    493	req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK);
    494	req.param1 = cpu_to_le16(rcid);
    495	req.param2 = 0;
    496
    497	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
    498}
    499
    500static void qcom_glink_rx_done_work(struct work_struct *work)
    501{
    502	struct glink_channel *channel = container_of(work, struct glink_channel,
    503						     intent_work);
    504	struct qcom_glink *glink = channel->glink;
    505	struct glink_core_rx_intent *intent, *tmp;
    506	struct {
    507		u16 id;
    508		u16 lcid;
    509		u32 liid;
    510	} __packed cmd;
    511
    512	unsigned int cid = channel->lcid;
    513	unsigned int iid;
    514	bool reuse;
    515	unsigned long flags;
    516
    517	spin_lock_irqsave(&channel->intent_lock, flags);
    518	list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
    519		list_del(&intent->node);
    520		spin_unlock_irqrestore(&channel->intent_lock, flags);
    521		iid = intent->id;
    522		reuse = intent->reuse;
    523
    524		cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE;
    525		cmd.lcid = cid;
    526		cmd.liid = iid;
    527
    528		qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
    529		if (!reuse) {
    530			kfree(intent->data);
    531			kfree(intent);
    532		}
    533		spin_lock_irqsave(&channel->intent_lock, flags);
    534	}
    535	spin_unlock_irqrestore(&channel->intent_lock, flags);
    536}
    537
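/*
 * Note that the RX_DONE command is not transmitted directly below: that path
 * runs from the receive interrupt handler, while qcom_glink_tx() may block
 * waiting for tx space. The intent is therefore queued on done_intents and
 * sent from qcom_glink_rx_done_work() in process context.
 */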
    538static void qcom_glink_rx_done(struct qcom_glink *glink,
    539			       struct glink_channel *channel,
    540			       struct glink_core_rx_intent *intent)
    541{
    542	/* We don't send RX_DONE to intentless systems */
    543	if (glink->intentless) {
    544		kfree(intent->data);
    545		kfree(intent);
    546		return;
    547	}
    548
    549	/* Take it off the tree of receive intents */
    550	if (!intent->reuse) {
    551		spin_lock(&channel->intent_lock);
    552		idr_remove(&channel->liids, intent->id);
    553		spin_unlock(&channel->intent_lock);
    554	}
    555
    556	/* Schedule the sending of a rx_done indication */
    557	spin_lock(&channel->intent_lock);
    558	list_add_tail(&intent->node, &channel->done_intents);
    559	spin_unlock(&channel->intent_lock);
    560
    561	schedule_work(&channel->intent_work);
    562}
    563
    564/**
    565 * qcom_glink_receive_version() - receive version/features from remote system
    566 *
    567 * @glink:	pointer to transport interface
    568 * @version:	remote version
    569 * @features:	remote features
    570 *
    571 * This function is called in response to a remote-initiated version/feature
    572 * negotiation sequence.
    573 */
    574static void qcom_glink_receive_version(struct qcom_glink *glink,
    575				       u32 version,
    576				       u32 features)
    577{
    578	switch (version) {
    579	case 0:
    580		break;
    581	case GLINK_VERSION_1:
    582		glink->features &= features;
    583		fallthrough;
    584	default:
    585		qcom_glink_send_version_ack(glink);
    586		break;
    587	}
    588}
    589
    590/**
    591 * qcom_glink_receive_version_ack() - receive negotiation ack from remote system
    592 *
    593 * @glink:	pointer to transport interface
    594 * @version:	remote version response
    595 * @features:	remote features response
    596 *
    597 * This function is called in response to a local-initiated version/feature
    598 * negotiation sequence and is the counter-offer from the remote side based
    599 * upon the initial version and feature set requested.
    600 */
    601static void qcom_glink_receive_version_ack(struct qcom_glink *glink,
    602					   u32 version,
    603					   u32 features)
    604{
    605	switch (version) {
    606	case 0:
    607		/* Version negotiation failed */
    608		break;
    609	case GLINK_VERSION_1:
    610		if (features == glink->features)
    611			break;
    612
    613		glink->features &= features;
    614		fallthrough;
    615	default:
    616		qcom_glink_send_version(glink);
    617		break;
    618	}
    619}
    620
    621/**
    622 * qcom_glink_send_intent_req_ack() - convert an rx intent request ack cmd to
    623 * 	wire format and transmit
    624 * @glink:	The transport to transmit on.
    625 * @channel:	The glink channel
    626 * @granted:	The request response to encode.
    627 *
    628 * Return: 0 on success or standard Linux error code.
    629 */
    630static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
    631					  struct glink_channel *channel,
    632					  bool granted)
    633{
    634	struct glink_msg msg;
    635
    636	msg.cmd = cpu_to_le16(RPM_CMD_RX_INTENT_REQ_ACK);
    637	msg.param1 = cpu_to_le16(channel->lcid);
    638	msg.param2 = cpu_to_le32(granted);
    639
    640	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
    641
    642	return 0;
    643}
    644
    645/**
    646 * qcom_glink_advertise_intent - convert an rx intent cmd to wire format and
    647 *			   transmit
    648 * @glink:	The transport to transmit on.
    649 * @channel:	The local channel
    650 * @intent:	The intent to pass on to remote.
    651 *
    652 * Return: 0 on success or standard Linux error code.
    653 */
    654static int qcom_glink_advertise_intent(struct qcom_glink *glink,
    655				       struct glink_channel *channel,
    656				       struct glink_core_rx_intent *intent)
    657{
    658	struct command {
    659		__le16 id;
    660		__le16 lcid;
    661		__le32 count;
    662		__le32 size;
    663		__le32 liid;
    664	} __packed;
    665	struct command cmd;
    666
    667	cmd.id = cpu_to_le16(RPM_CMD_INTENT);
    668	cmd.lcid = cpu_to_le16(channel->lcid);
    669	cmd.count = cpu_to_le32(1);
    670	cmd.size = cpu_to_le32(intent->size);
    671	cmd.liid = cpu_to_le32(intent->id);
    672
    673	qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
    674
    675	return 0;
    676}
    677
    678static struct glink_core_rx_intent *
    679qcom_glink_alloc_intent(struct qcom_glink *glink,
    680			struct glink_channel *channel,
    681			size_t size,
    682			bool reuseable)
    683{
    684	struct glink_core_rx_intent *intent;
    685	int ret;
    686	unsigned long flags;
    687
    688	intent = kzalloc(sizeof(*intent), GFP_KERNEL);
    689	if (!intent)
    690		return NULL;
    691
    692	intent->data = kzalloc(size, GFP_KERNEL);
    693	if (!intent->data)
    694		goto free_intent;
    695
    696	spin_lock_irqsave(&channel->intent_lock, flags);
    697	ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
    698	if (ret < 0) {
    699		spin_unlock_irqrestore(&channel->intent_lock, flags);
    700		goto free_data;
    701	}
    702	spin_unlock_irqrestore(&channel->intent_lock, flags);
    703
    704	intent->id = ret;
    705	intent->size = size;
    706	intent->reuse = reuseable;
    707
    708	return intent;
    709
    710free_data:
    711	kfree(intent->data);
    712free_intent:
    713	kfree(intent);
    714	return NULL;
    715}
    716
    717static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
    718				      u32 cid, uint32_t iid,
    719				      bool reuse)
    720{
    721	struct glink_core_rx_intent *intent;
    722	struct glink_channel *channel;
    723	unsigned long flags;
    724
    725	spin_lock_irqsave(&glink->idr_lock, flags);
    726	channel = idr_find(&glink->rcids, cid);
    727	spin_unlock_irqrestore(&glink->idr_lock, flags);
    728	if (!channel) {
    729		dev_err(glink->dev, "invalid channel id received\n");
    730		return;
    731	}
    732
    733	spin_lock_irqsave(&channel->intent_lock, flags);
    734	intent = idr_find(&channel->riids, iid);
    735
    736	if (!intent) {
    737		spin_unlock_irqrestore(&channel->intent_lock, flags);
    738		dev_err(glink->dev, "invalid intent id received\n");
    739		return;
    740	}
    741
    742	intent->in_use = false;
    743
    744	if (!reuse) {
    745		idr_remove(&channel->riids, intent->id);
    746		kfree(intent);
    747	}
    748	spin_unlock_irqrestore(&channel->intent_lock, flags);
    749}
    750
    751/**
    752 * qcom_glink_handle_intent_req() - Receive a request for rx_intent
    753 *					    from remote side
    754 * @glink:      Pointer to the transport interface
    755 * @cid:	Remote channel ID
    756 * @size:	size of the intent
    757 *
     758 * The function searches for the local channel on which the rx_intent request
     759 * arrived, allocates an intent, advertises it and acks the request to the remote
    760 */
    761static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
    762					 u32 cid, size_t size)
    763{
    764	struct glink_core_rx_intent *intent;
    765	struct glink_channel *channel;
    766	unsigned long flags;
    767
    768	spin_lock_irqsave(&glink->idr_lock, flags);
    769	channel = idr_find(&glink->rcids, cid);
    770	spin_unlock_irqrestore(&glink->idr_lock, flags);
    771
    772	if (!channel) {
    773		pr_err("%s channel not found for cid %d\n", __func__, cid);
    774		return;
    775	}
    776
    777	intent = qcom_glink_alloc_intent(glink, channel, size, false);
    778	if (intent)
    779		qcom_glink_advertise_intent(glink, channel, intent);
    780
    781	qcom_glink_send_intent_req_ack(glink, channel, !!intent);
    782}
    783
    784static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
    785{
    786	struct glink_defer_cmd *dcmd;
    787
    788	extra = ALIGN(extra, 8);
    789
    790	if (qcom_glink_rx_avail(glink) < sizeof(struct glink_msg) + extra) {
    791		dev_dbg(glink->dev, "Insufficient data in rx fifo");
    792		return -ENXIO;
    793	}
    794
    795	dcmd = kzalloc(struct_size(dcmd, data, extra), GFP_ATOMIC);
    796	if (!dcmd)
    797		return -ENOMEM;
    798
    799	INIT_LIST_HEAD(&dcmd->node);
    800
    801	qcom_glink_rx_peak(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra);
    802
    803	spin_lock(&glink->rx_lock);
    804	list_add_tail(&dcmd->node, &glink->rx_queue);
    805	spin_unlock(&glink->rx_lock);
    806
    807	schedule_work(&glink->rx_work);
    808	qcom_glink_rx_advance(glink, sizeof(dcmd->msg) + extra);
    809
    810	return 0;
    811}
    812
    813static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
    814{
    815	struct glink_core_rx_intent *intent;
    816	struct glink_channel *channel;
    817	struct {
    818		struct glink_msg msg;
    819		__le32 chunk_size;
    820		__le32 left_size;
    821	} __packed hdr;
    822	unsigned int chunk_size;
    823	unsigned int left_size;
    824	unsigned int rcid;
    825	unsigned int liid;
    826	int ret = 0;
    827	unsigned long flags;
    828
    829	if (avail < sizeof(hdr)) {
    830		dev_dbg(glink->dev, "Not enough data in fifo\n");
    831		return -EAGAIN;
    832	}
    833
    834	qcom_glink_rx_peak(glink, &hdr, 0, sizeof(hdr));
    835	chunk_size = le32_to_cpu(hdr.chunk_size);
    836	left_size = le32_to_cpu(hdr.left_size);
    837
    838	if (avail < sizeof(hdr) + chunk_size) {
    839		dev_dbg(glink->dev, "Payload not yet in fifo\n");
    840		return -EAGAIN;
    841	}
    842
    843	rcid = le16_to_cpu(hdr.msg.param1);
    844	spin_lock_irqsave(&glink->idr_lock, flags);
    845	channel = idr_find(&glink->rcids, rcid);
    846	spin_unlock_irqrestore(&glink->idr_lock, flags);
    847	if (!channel) {
    848		dev_dbg(glink->dev, "Data on non-existing channel\n");
    849
    850		/* Drop the message */
    851		goto advance_rx;
    852	}
    853
    854	if (glink->intentless) {
     855		/* Might have an ongoing, fragmented message to append */
    856		if (!channel->buf) {
    857			intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
    858			if (!intent)
    859				return -ENOMEM;
    860
    861			intent->data = kmalloc(chunk_size + left_size,
    862					       GFP_ATOMIC);
    863			if (!intent->data) {
    864				kfree(intent);
    865				return -ENOMEM;
    866			}
    867
    868			intent->id = 0xbabababa;
    869			intent->size = chunk_size + left_size;
    870			intent->offset = 0;
    871
    872			channel->buf = intent;
    873		} else {
    874			intent = channel->buf;
    875		}
    876	} else {
    877		liid = le32_to_cpu(hdr.msg.param2);
    878
    879		spin_lock_irqsave(&channel->intent_lock, flags);
    880		intent = idr_find(&channel->liids, liid);
    881		spin_unlock_irqrestore(&channel->intent_lock, flags);
    882
    883		if (!intent) {
    884			dev_err(glink->dev,
    885				"no intent found for channel %s intent %d",
    886				channel->name, liid);
    887			ret = -ENOENT;
    888			goto advance_rx;
    889		}
    890	}
    891
    892	if (intent->size - intent->offset < chunk_size) {
    893		dev_err(glink->dev, "Insufficient space in intent\n");
    894
    895		/* The packet header lied, drop payload */
    896		goto advance_rx;
    897	}
    898
    899	qcom_glink_rx_peak(glink, intent->data + intent->offset,
    900			   sizeof(hdr), chunk_size);
    901	intent->offset += chunk_size;
    902
    903	/* Handle message when no fragments remain to be received */
    904	if (!left_size) {
    905		spin_lock(&channel->recv_lock);
    906		if (channel->ept.cb) {
    907			channel->ept.cb(channel->ept.rpdev,
    908					intent->data,
    909					intent->offset,
    910					channel->ept.priv,
    911					RPMSG_ADDR_ANY);
    912		}
    913		spin_unlock(&channel->recv_lock);
    914
    915		intent->offset = 0;
    916		channel->buf = NULL;
    917
    918		qcom_glink_rx_done(glink, channel, intent);
    919	}
    920
    921advance_rx:
    922	qcom_glink_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8));
    923
    924	return ret;
    925}
    926
    927static void qcom_glink_handle_intent(struct qcom_glink *glink,
    928				     unsigned int cid,
    929				     unsigned int count,
    930				     size_t avail)
    931{
    932	struct glink_core_rx_intent *intent;
    933	struct glink_channel *channel;
    934	struct intent_pair {
    935		__le32 size;
    936		__le32 iid;
    937	};
    938
    939	struct {
    940		struct glink_msg msg;
    941		struct intent_pair intents[];
    942	} __packed * msg;
    943
    944	const size_t msglen = struct_size(msg, intents, count);
    945	int ret;
    946	int i;
    947	unsigned long flags;
    948
    949	if (avail < msglen) {
    950		dev_dbg(glink->dev, "Not enough data in fifo\n");
    951		return;
    952	}
    953
    954	spin_lock_irqsave(&glink->idr_lock, flags);
    955	channel = idr_find(&glink->rcids, cid);
    956	spin_unlock_irqrestore(&glink->idr_lock, flags);
    957	if (!channel) {
    958		dev_err(glink->dev, "intents for non-existing channel\n");
    959		return;
    960	}
    961
    962	msg = kmalloc(msglen, GFP_ATOMIC);
    963	if (!msg)
    964		return;
    965
    966	qcom_glink_rx_peak(glink, msg, 0, msglen);
    967
    968	for (i = 0; i < count; ++i) {
    969		intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
    970		if (!intent)
    971			break;
    972
    973		intent->id = le32_to_cpu(msg->intents[i].iid);
    974		intent->size = le32_to_cpu(msg->intents[i].size);
    975
    976		spin_lock_irqsave(&channel->intent_lock, flags);
    977		ret = idr_alloc(&channel->riids, intent,
    978				intent->id, intent->id + 1, GFP_ATOMIC);
    979		spin_unlock_irqrestore(&channel->intent_lock, flags);
    980
    981		if (ret < 0)
    982			dev_err(glink->dev, "failed to store remote intent\n");
    983	}
    984
    985	kfree(msg);
    986	qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
    987}
    988
    989static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
    990{
    991	struct glink_channel *channel;
    992
    993	spin_lock(&glink->idr_lock);
    994	channel = idr_find(&glink->lcids, lcid);
    995	spin_unlock(&glink->idr_lock);
    996	if (!channel) {
    997		dev_err(glink->dev, "Invalid open ack packet\n");
    998		return -EINVAL;
    999	}
   1000
   1001	complete_all(&channel->open_ack);
   1002
   1003	return 0;
   1004}
   1005
   1006static irqreturn_t qcom_glink_native_intr(int irq, void *data)
   1007{
   1008	struct qcom_glink *glink = data;
   1009	struct glink_msg msg;
   1010	unsigned int param1;
   1011	unsigned int param2;
   1012	unsigned int avail;
   1013	unsigned int cmd;
   1014	int ret = 0;
   1015
    1016	/* To wake up any blocked writers */
   1017	wake_up_all(&glink->tx_avail_notify);
   1018
   1019	for (;;) {
   1020		avail = qcom_glink_rx_avail(glink);
   1021		if (avail < sizeof(msg))
   1022			break;
   1023
   1024		qcom_glink_rx_peak(glink, &msg, 0, sizeof(msg));
   1025
   1026		cmd = le16_to_cpu(msg.cmd);
   1027		param1 = le16_to_cpu(msg.param1);
   1028		param2 = le32_to_cpu(msg.param2);
   1029
   1030		switch (cmd) {
   1031		case RPM_CMD_VERSION:
   1032		case RPM_CMD_VERSION_ACK:
   1033		case RPM_CMD_CLOSE:
   1034		case RPM_CMD_CLOSE_ACK:
   1035		case RPM_CMD_RX_INTENT_REQ:
   1036			ret = qcom_glink_rx_defer(glink, 0);
   1037			break;
   1038		case RPM_CMD_OPEN_ACK:
   1039			ret = qcom_glink_rx_open_ack(glink, param1);
   1040			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
   1041			break;
   1042		case RPM_CMD_OPEN:
   1043			ret = qcom_glink_rx_defer(glink, param2);
   1044			break;
   1045		case RPM_CMD_TX_DATA:
   1046		case RPM_CMD_TX_DATA_CONT:
   1047			ret = qcom_glink_rx_data(glink, avail);
   1048			break;
   1049		case RPM_CMD_READ_NOTIF:
   1050			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
   1051
   1052			mbox_send_message(glink->mbox_chan, NULL);
   1053			mbox_client_txdone(glink->mbox_chan, 0);
   1054			break;
   1055		case RPM_CMD_INTENT:
   1056			qcom_glink_handle_intent(glink, param1, param2, avail);
   1057			break;
   1058		case RPM_CMD_RX_DONE:
   1059			qcom_glink_handle_rx_done(glink, param1, param2, false);
   1060			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
   1061			break;
   1062		case RPM_CMD_RX_DONE_W_REUSE:
   1063			qcom_glink_handle_rx_done(glink, param1, param2, true);
   1064			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
   1065			break;
   1066		case RPM_CMD_RX_INTENT_REQ_ACK:
   1067			qcom_glink_handle_intent_req_ack(glink, param1, param2);
   1068			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
   1069			break;
   1070		default:
   1071			dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
   1072			ret = -EINVAL;
   1073			break;
   1074		}
   1075
   1076		if (ret)
   1077			break;
   1078	}
   1079
   1080	return IRQ_HANDLED;
   1081}
   1082
   1083/* Locally initiated rpmsg_create_ept */
   1084static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink,
   1085						     const char *name)
   1086{
   1087	struct glink_channel *channel;
   1088	int ret;
   1089	unsigned long flags;
   1090
   1091	channel = qcom_glink_alloc_channel(glink, name);
   1092	if (IS_ERR(channel))
   1093		return ERR_CAST(channel);
   1094
   1095	ret = qcom_glink_send_open_req(glink, channel);
   1096	if (ret)
   1097		goto release_channel;
   1098
   1099	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
   1100	if (!ret)
   1101		goto err_timeout;
   1102
   1103	ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ);
   1104	if (!ret)
   1105		goto err_timeout;
   1106
   1107	qcom_glink_send_open_ack(glink, channel);
   1108
   1109	return channel;
   1110
   1111err_timeout:
    1112	/* qcom_glink_send_open_req() registered the channel in lcids */
   1113	spin_lock_irqsave(&glink->idr_lock, flags);
   1114	idr_remove(&glink->lcids, channel->lcid);
   1115	spin_unlock_irqrestore(&glink->idr_lock, flags);
   1116
   1117release_channel:
   1118	/* Release qcom_glink_send_open_req() reference */
   1119	kref_put(&channel->refcount, qcom_glink_channel_release);
   1120	/* Release qcom_glink_alloc_channel() reference */
   1121	kref_put(&channel->refcount, qcom_glink_channel_release);
   1122
   1123	return ERR_PTR(-ETIMEDOUT);
   1124}
   1125
   1126/* Remote initiated rpmsg_create_ept */
   1127static int qcom_glink_create_remote(struct qcom_glink *glink,
   1128				    struct glink_channel *channel)
   1129{
   1130	int ret;
   1131
   1132	qcom_glink_send_open_ack(glink, channel);
   1133
   1134	ret = qcom_glink_send_open_req(glink, channel);
   1135	if (ret)
   1136		goto close_link;
   1137
   1138	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
   1139	if (!ret) {
   1140		ret = -ETIMEDOUT;
   1141		goto close_link;
   1142	}
   1143
   1144	return 0;
   1145
   1146close_link:
   1147	/*
   1148	 * Send a close request to "undo" our open-ack. The close-ack will
    1149	 * release the qcom_glink_send_open_req() reference and the last reference
    1150	 * will be released upon receiving the remote close or when the transport is
    1151	 * unregistered via qcom_glink_native_remove().
   1152	 */
   1153	qcom_glink_send_close_req(glink, channel);
   1154
   1155	return ret;
   1156}
   1157
   1158static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
   1159						    rpmsg_rx_cb_t cb,
   1160						    void *priv,
   1161						    struct rpmsg_channel_info
   1162									chinfo)
   1163{
   1164	struct glink_channel *parent = to_glink_channel(rpdev->ept);
   1165	struct glink_channel *channel;
   1166	struct qcom_glink *glink = parent->glink;
   1167	struct rpmsg_endpoint *ept;
   1168	const char *name = chinfo.name;
   1169	int cid;
   1170	int ret;
   1171	unsigned long flags;
   1172
   1173	spin_lock_irqsave(&glink->idr_lock, flags);
   1174	idr_for_each_entry(&glink->rcids, channel, cid) {
   1175		if (!strcmp(channel->name, name))
   1176			break;
   1177	}
   1178	spin_unlock_irqrestore(&glink->idr_lock, flags);
   1179
   1180	if (!channel) {
   1181		channel = qcom_glink_create_local(glink, name);
   1182		if (IS_ERR(channel))
   1183			return NULL;
   1184	} else {
   1185		ret = qcom_glink_create_remote(glink, channel);
   1186		if (ret)
   1187			return NULL;
   1188	}
   1189
   1190	ept = &channel->ept;
   1191	ept->rpdev = rpdev;
   1192	ept->cb = cb;
   1193	ept->priv = priv;
   1194	ept->ops = &glink_endpoint_ops;
   1195
   1196	return ept;
   1197}
   1198
   1199static int qcom_glink_announce_create(struct rpmsg_device *rpdev)
   1200{
   1201	struct glink_channel *channel = to_glink_channel(rpdev->ept);
   1202	struct device_node *np = rpdev->dev.of_node;
   1203	struct qcom_glink *glink = channel->glink;
   1204	struct glink_core_rx_intent *intent;
   1205	const struct property *prop = NULL;
   1206	__be32 defaults[] = { cpu_to_be32(SZ_1K), cpu_to_be32(5) };
   1207	int num_intents;
   1208	int num_groups = 1;
   1209	__be32 *val = defaults;
   1210	int size;
   1211
   1212	if (glink->intentless || !completion_done(&channel->open_ack))
   1213		return 0;
   1214
   1215	prop = of_find_property(np, "qcom,intents", NULL);
   1216	if (prop) {
   1217		val = prop->value;
   1218		num_groups = prop->length / sizeof(u32) / 2;
   1219	}
   1220
   1221	/* Channel is now open, advertise base set of intents */
   1222	while (num_groups--) {
   1223		size = be32_to_cpup(val++);
   1224		num_intents = be32_to_cpup(val++);
   1225		while (num_intents--) {
   1226			intent = qcom_glink_alloc_intent(glink, channel, size,
   1227							 true);
   1228			if (!intent)
   1229				break;
   1230
   1231			qcom_glink_advertise_intent(glink, channel, intent);
   1232		}
   1233	}
   1234	return 0;
   1235}
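/*
 * For illustration, a hypothetical channel node could override the defaults
 * above with pairs of <size count> cells, e.g. five 1 KiB intents plus two
 * 8 KiB intents:
 *
 *	qcom,intents = <0x400 5>, <0x2000 2>;
 *
 * Without the property, five intents of SZ_1K are advertised.
 */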
   1236
   1237static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
   1238{
   1239	struct glink_channel *channel = to_glink_channel(ept);
   1240	struct qcom_glink *glink = channel->glink;
   1241	unsigned long flags;
   1242
   1243	spin_lock_irqsave(&channel->recv_lock, flags);
   1244	channel->ept.cb = NULL;
   1245	spin_unlock_irqrestore(&channel->recv_lock, flags);
   1246
   1247	/* Decouple the potential rpdev from the channel */
   1248	channel->rpdev = NULL;
   1249
   1250	qcom_glink_send_close_req(glink, channel);
   1251}
   1252
   1253static int qcom_glink_request_intent(struct qcom_glink *glink,
   1254				     struct glink_channel *channel,
   1255				     size_t size)
   1256{
   1257	struct {
   1258		u16 id;
   1259		u16 cid;
   1260		u32 size;
   1261	} __packed cmd;
   1262
   1263	int ret;
   1264
   1265	mutex_lock(&channel->intent_req_lock);
   1266
   1267	reinit_completion(&channel->intent_req_comp);
   1268
   1269	cmd.id = RPM_CMD_RX_INTENT_REQ;
   1270	cmd.cid = channel->lcid;
   1271	cmd.size = size;
   1272
   1273	ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
   1274	if (ret)
   1275		goto unlock;
   1276
   1277	ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ);
   1278	if (!ret) {
   1279		dev_err(glink->dev, "intent request timed out\n");
   1280		ret = -ETIMEDOUT;
   1281	} else {
   1282		ret = channel->intent_req_result ? 0 : -ECANCELED;
   1283	}
   1284
   1285unlock:
   1286	mutex_unlock(&channel->intent_req_lock);
   1287	return ret;
   1288}
   1289
   1290static int __qcom_glink_send(struct glink_channel *channel,
   1291			     void *data, int len, bool wait)
   1292{
   1293	struct qcom_glink *glink = channel->glink;
   1294	struct glink_core_rx_intent *intent = NULL;
   1295	struct glink_core_rx_intent *tmp;
   1296	int iid = 0;
   1297	struct {
   1298		struct glink_msg msg;
   1299		__le32 chunk_size;
   1300		__le32 left_size;
   1301	} __packed req;
   1302	int ret;
   1303	unsigned long flags;
   1304	int chunk_size = len;
   1305	int left_size = 0;
   1306
   1307	if (!glink->intentless) {
   1308		while (!intent) {
   1309			spin_lock_irqsave(&channel->intent_lock, flags);
   1310			idr_for_each_entry(&channel->riids, tmp, iid) {
   1311				if (tmp->size >= len && !tmp->in_use) {
   1312					if (!intent)
   1313						intent = tmp;
   1314					else if (intent->size > tmp->size)
   1315						intent = tmp;
   1316					if (intent->size == len)
   1317						break;
   1318				}
   1319			}
   1320			if (intent)
   1321				intent->in_use = true;
   1322			spin_unlock_irqrestore(&channel->intent_lock, flags);
   1323
   1324			/* We found an available intent */
   1325			if (intent)
   1326				break;
   1327
   1328			if (!wait)
   1329				return -EBUSY;
   1330
   1331			ret = qcom_glink_request_intent(glink, channel, len);
   1332			if (ret < 0)
   1333				return ret;
   1334		}
   1335
   1336		iid = intent->id;
   1337	}
   1338
   1339	if (wait && chunk_size > SZ_8K) {
   1340		chunk_size = SZ_8K;
   1341		left_size = len - chunk_size;
   1342	}
   1343	req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA);
   1344	req.msg.param1 = cpu_to_le16(channel->lcid);
   1345	req.msg.param2 = cpu_to_le32(iid);
   1346	req.chunk_size = cpu_to_le32(chunk_size);
   1347	req.left_size = cpu_to_le32(left_size);
   1348
   1349	ret = qcom_glink_tx(glink, &req, sizeof(req), data, chunk_size, wait);
   1350
   1351	/* Mark intent available if we failed */
   1352	if (ret && intent) {
   1353		intent->in_use = false;
   1354		return ret;
   1355	}
   1356
   1357	while (left_size > 0) {
   1358		data = (void *)((char *)data + chunk_size);
   1359		chunk_size = left_size;
   1360		if (chunk_size > SZ_8K)
   1361			chunk_size = SZ_8K;
   1362		left_size -= chunk_size;
   1363
   1364		req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA_CONT);
   1365		req.msg.param1 = cpu_to_le16(channel->lcid);
   1366		req.msg.param2 = cpu_to_le32(iid);
   1367		req.chunk_size = cpu_to_le32(chunk_size);
   1368		req.left_size = cpu_to_le32(left_size);
   1369
   1370		ret = qcom_glink_tx(glink, &req, sizeof(req), data,
   1371				    chunk_size, wait);
   1372
   1373		/* Mark intent available if we failed */
   1374		if (ret && intent) {
   1375			intent->in_use = false;
   1376			break;
   1377		}
   1378	}
   1379	return ret;
   1380}
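/*
 * Worked example of the fragmentation above: a blocking 20 KiB send goes out
 * as RPM_CMD_TX_DATA with chunk_size = 8 KiB and left_size = 12 KiB, followed
 * by RPM_CMD_TX_DATA_CONT chunks of 8 KiB (left 4 KiB) and 4 KiB (left 0),
 * all against the same remote intent id. Non-blocking sends are never split.
 */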
   1381
   1382static int qcom_glink_send(struct rpmsg_endpoint *ept, void *data, int len)
   1383{
   1384	struct glink_channel *channel = to_glink_channel(ept);
   1385
   1386	return __qcom_glink_send(channel, data, len, true);
   1387}
   1388
   1389static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
   1390{
   1391	struct glink_channel *channel = to_glink_channel(ept);
   1392
   1393	return __qcom_glink_send(channel, data, len, false);
   1394}
   1395
   1396static int qcom_glink_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
   1397{
   1398	struct glink_channel *channel = to_glink_channel(ept);
   1399
   1400	return __qcom_glink_send(channel, data, len, true);
   1401}
   1402
   1403static int qcom_glink_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
   1404{
   1405	struct glink_channel *channel = to_glink_channel(ept);
   1406
   1407	return __qcom_glink_send(channel, data, len, false);
   1408}
   1409
   1410/*
   1411 * Finds the device_node for the glink child interested in this channel.
   1412 */
   1413static struct device_node *qcom_glink_match_channel(struct device_node *node,
   1414						    const char *channel)
   1415{
   1416	struct device_node *child;
   1417	const char *name;
   1418	const char *key;
   1419	int ret;
   1420
   1421	for_each_available_child_of_node(node, child) {
   1422		key = "qcom,glink-channels";
   1423		ret = of_property_read_string(child, key, &name);
   1424		if (ret)
   1425			continue;
   1426
   1427		if (strcmp(name, channel) == 0)
   1428			return child;
   1429	}
   1430
   1431	return NULL;
   1432}
   1433
   1434static const struct rpmsg_device_ops glink_device_ops = {
   1435	.create_ept = qcom_glink_create_ept,
   1436	.announce_create = qcom_glink_announce_create,
   1437};
   1438
   1439static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
   1440	.destroy_ept = qcom_glink_destroy_ept,
   1441	.send = qcom_glink_send,
   1442	.sendto = qcom_glink_sendto,
   1443	.trysend = qcom_glink_trysend,
   1444	.trysendto = qcom_glink_trysendto,
   1445};
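/*
 * For reference, a minimal rpmsg client that would bind to a channel exported
 * through the endpoint ops above might look like the sketch below; the
 * channel and driver names are purely illustrative and the block is kept out
 * of the build.
 */
#if 0	/* illustrative sketch only, not part of the driver */
static int glink_demo_cb(struct rpmsg_device *rpdev, void *data, int len,
			 void *priv, u32 src)
{
	dev_info(&rpdev->dev, "received %d bytes\n", len);
	return 0;
}

static int glink_demo_probe(struct rpmsg_device *rpdev)
{
	static const char ping[] = "ping";

	/* Routed to __qcom_glink_send() via glink_endpoint_ops above */
	return rpmsg_send(rpdev->ept, (void *)ping, sizeof(ping));
}

static const struct rpmsg_device_id glink_demo_id_table[] = {
	{ .name = "glink-demo" },	/* hypothetical channel name */
	{}
};

static struct rpmsg_driver glink_demo_driver = {
	.probe = glink_demo_probe,
	.callback = glink_demo_cb,
	.id_table = glink_demo_id_table,
	.drv.name = "glink_demo",
};
module_rpmsg_driver(glink_demo_driver);
#endif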
   1446
   1447static void qcom_glink_rpdev_release(struct device *dev)
   1448{
   1449	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
   1450
   1451	kfree(rpdev);
   1452}
   1453
   1454static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
   1455			      char *name)
   1456{
   1457	struct glink_channel *channel;
   1458	struct rpmsg_device *rpdev;
   1459	bool create_device = false;
   1460	struct device_node *node;
   1461	int lcid;
   1462	int ret;
   1463	unsigned long flags;
   1464
   1465	spin_lock_irqsave(&glink->idr_lock, flags);
   1466	idr_for_each_entry(&glink->lcids, channel, lcid) {
   1467		if (!strcmp(channel->name, name))
   1468			break;
   1469	}
   1470	spin_unlock_irqrestore(&glink->idr_lock, flags);
   1471
   1472	if (!channel) {
   1473		channel = qcom_glink_alloc_channel(glink, name);
   1474		if (IS_ERR(channel))
   1475			return PTR_ERR(channel);
   1476
   1477		/* The opening dance was initiated by the remote */
   1478		create_device = true;
   1479	}
   1480
   1481	spin_lock_irqsave(&glink->idr_lock, flags);
   1482	ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC);
   1483	if (ret < 0) {
   1484		dev_err(glink->dev, "Unable to insert channel into rcid list\n");
   1485		spin_unlock_irqrestore(&glink->idr_lock, flags);
   1486		goto free_channel;
   1487	}
   1488	channel->rcid = ret;
   1489	spin_unlock_irqrestore(&glink->idr_lock, flags);
   1490
   1491	complete_all(&channel->open_req);
   1492
   1493	if (create_device) {
   1494		rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
   1495		if (!rpdev) {
   1496			ret = -ENOMEM;
   1497			goto rcid_remove;
   1498		}
   1499
   1500		rpdev->ept = &channel->ept;
   1501		strscpy_pad(rpdev->id.name, name, RPMSG_NAME_SIZE);
   1502		rpdev->src = RPMSG_ADDR_ANY;
   1503		rpdev->dst = RPMSG_ADDR_ANY;
   1504		rpdev->ops = &glink_device_ops;
   1505
   1506		node = qcom_glink_match_channel(glink->dev->of_node, name);
   1507		rpdev->dev.of_node = node;
   1508		rpdev->dev.parent = glink->dev;
   1509		rpdev->dev.release = qcom_glink_rpdev_release;
   1510
   1511		ret = rpmsg_register_device(rpdev);
   1512		if (ret)
   1513			goto rcid_remove;
   1514
   1515		channel->rpdev = rpdev;
   1516	}
   1517
   1518	return 0;
   1519
   1520rcid_remove:
   1521	spin_lock_irqsave(&glink->idr_lock, flags);
   1522	idr_remove(&glink->rcids, channel->rcid);
   1523	channel->rcid = 0;
   1524	spin_unlock_irqrestore(&glink->idr_lock, flags);
   1525free_channel:
   1526	/* Release the reference, iff we took it */
   1527	if (create_device)
   1528		kref_put(&channel->refcount, qcom_glink_channel_release);
   1529
   1530	return ret;
   1531}
   1532
   1533static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
   1534{
   1535	struct rpmsg_channel_info chinfo;
   1536	struct glink_channel *channel;
   1537	unsigned long flags;
   1538
   1539	spin_lock_irqsave(&glink->idr_lock, flags);
   1540	channel = idr_find(&glink->rcids, rcid);
   1541	spin_unlock_irqrestore(&glink->idr_lock, flags);
   1542	if (WARN(!channel, "close request on unknown channel\n"))
   1543		return;
   1544
   1545	/* cancel pending rx_done work */
   1546	cancel_work_sync(&channel->intent_work);
   1547
   1548	if (channel->rpdev) {
   1549		strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
   1550		chinfo.src = RPMSG_ADDR_ANY;
   1551		chinfo.dst = RPMSG_ADDR_ANY;
   1552
   1553		rpmsg_unregister_device(glink->dev, &chinfo);
   1554	}
   1555	channel->rpdev = NULL;
   1556
   1557	qcom_glink_send_close_ack(glink, channel->rcid);
   1558
   1559	spin_lock_irqsave(&glink->idr_lock, flags);
   1560	idr_remove(&glink->rcids, channel->rcid);
   1561	channel->rcid = 0;
   1562	spin_unlock_irqrestore(&glink->idr_lock, flags);
   1563
   1564	kref_put(&channel->refcount, qcom_glink_channel_release);
   1565}
   1566
   1567static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
   1568{
   1569	struct rpmsg_channel_info chinfo;
   1570	struct glink_channel *channel;
   1571	unsigned long flags;
   1572
   1573	/* To wakeup any blocking writers */
   1574	wake_up_all(&glink->tx_avail_notify);
   1575
   1576	spin_lock_irqsave(&glink->idr_lock, flags);
   1577	channel = idr_find(&glink->lcids, lcid);
   1578	if (WARN(!channel, "close ack on unknown channel\n")) {
   1579		spin_unlock_irqrestore(&glink->idr_lock, flags);
   1580		return;
   1581	}
   1582
   1583	idr_remove(&glink->lcids, channel->lcid);
   1584	channel->lcid = 0;
   1585	spin_unlock_irqrestore(&glink->idr_lock, flags);
   1586
   1587	/* Decouple the potential rpdev from the channel */
   1588	if (channel->rpdev) {
   1589		strscpy(chinfo.name, channel->name, sizeof(chinfo.name));
   1590		chinfo.src = RPMSG_ADDR_ANY;
   1591		chinfo.dst = RPMSG_ADDR_ANY;
   1592
   1593		rpmsg_unregister_device(glink->dev, &chinfo);
   1594	}
   1595	channel->rpdev = NULL;
   1596
   1597	kref_put(&channel->refcount, qcom_glink_channel_release);
   1598}
   1599
   1600static void qcom_glink_work(struct work_struct *work)
   1601{
   1602	struct qcom_glink *glink = container_of(work, struct qcom_glink,
   1603						rx_work);
   1604	struct glink_defer_cmd *dcmd;
   1605	struct glink_msg *msg;
   1606	unsigned long flags;
   1607	unsigned int param1;
   1608	unsigned int param2;
   1609	unsigned int cmd;
   1610
   1611	for (;;) {
   1612		spin_lock_irqsave(&glink->rx_lock, flags);
   1613		if (list_empty(&glink->rx_queue)) {
   1614			spin_unlock_irqrestore(&glink->rx_lock, flags);
   1615			break;
   1616		}
   1617		dcmd = list_first_entry(&glink->rx_queue,
   1618					struct glink_defer_cmd, node);
   1619		list_del(&dcmd->node);
   1620		spin_unlock_irqrestore(&glink->rx_lock, flags);
   1621
   1622		msg = &dcmd->msg;
   1623		cmd = le16_to_cpu(msg->cmd);
   1624		param1 = le16_to_cpu(msg->param1);
   1625		param2 = le32_to_cpu(msg->param2);
   1626
   1627		switch (cmd) {
   1628		case RPM_CMD_VERSION:
   1629			qcom_glink_receive_version(glink, param1, param2);
   1630			break;
   1631		case RPM_CMD_VERSION_ACK:
   1632			qcom_glink_receive_version_ack(glink, param1, param2);
   1633			break;
   1634		case RPM_CMD_OPEN:
   1635			qcom_glink_rx_open(glink, param1, msg->data);
   1636			break;
   1637		case RPM_CMD_CLOSE:
   1638			qcom_glink_rx_close(glink, param1);
   1639			break;
   1640		case RPM_CMD_CLOSE_ACK:
   1641			qcom_glink_rx_close_ack(glink, param1);
   1642			break;
   1643		case RPM_CMD_RX_INTENT_REQ:
   1644			qcom_glink_handle_intent_req(glink, param1, param2);
   1645			break;
   1646		default:
   1647			WARN(1, "Unknown defer object %d\n", cmd);
   1648			break;
   1649		}
   1650
   1651		kfree(dcmd);
   1652	}
   1653}
   1654
   1655static void qcom_glink_cancel_rx_work(struct qcom_glink *glink)
   1656{
   1657	struct glink_defer_cmd *dcmd;
   1658	struct glink_defer_cmd *tmp;
   1659
   1660	/* cancel any pending deferred rx_work */
   1661	cancel_work_sync(&glink->rx_work);
   1662
   1663	list_for_each_entry_safe(dcmd, tmp, &glink->rx_queue, node)
   1664		kfree(dcmd);
   1665}
   1666
   1667static ssize_t rpmsg_name_show(struct device *dev,
   1668			       struct device_attribute *attr, char *buf)
   1669{
   1670	int ret = 0;
   1671	const char *name;
   1672
   1673	ret = of_property_read_string(dev->of_node, "label", &name);
   1674	if (ret < 0)
   1675		name = dev->of_node->name;
   1676
   1677	return snprintf(buf, RPMSG_NAME_SIZE, "%s\n", name);
   1678}
   1679static DEVICE_ATTR_RO(rpmsg_name);
   1680
   1681static struct attribute *qcom_glink_attrs[] = {
   1682	&dev_attr_rpmsg_name.attr,
   1683	NULL
   1684};
   1685ATTRIBUTE_GROUPS(qcom_glink);
   1686
   1687static void qcom_glink_device_release(struct device *dev)
   1688{
   1689	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
   1690	struct glink_channel *channel = to_glink_channel(rpdev->ept);
   1691
   1692	/* Release qcom_glink_alloc_channel() reference */
   1693	kref_put(&channel->refcount, qcom_glink_channel_release);
   1694	kfree(rpdev);
   1695}
   1696
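/*
 * The "rpmsg_chrdev" channel registered below is picked up by the rpmsg
 * character-device driver, which exposes a control node to userspace for
 * creating raw rpmsg endpoints on this edge.
 */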
   1697static int qcom_glink_create_chrdev(struct qcom_glink *glink)
   1698{
   1699	struct rpmsg_device *rpdev;
   1700	struct glink_channel *channel;
   1701
   1702	rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
   1703	if (!rpdev)
   1704		return -ENOMEM;
   1705
   1706	channel = qcom_glink_alloc_channel(glink, "rpmsg_chrdev");
   1707	if (IS_ERR(channel)) {
   1708		kfree(rpdev);
   1709		return PTR_ERR(channel);
   1710	}
   1711	channel->rpdev = rpdev;
   1712
   1713	rpdev->ept = &channel->ept;
   1714	rpdev->ops = &glink_device_ops;
   1715	rpdev->dev.parent = glink->dev;
   1716	rpdev->dev.release = qcom_glink_device_release;
   1717
   1718	return rpmsg_ctrldev_register_device(rpdev);
   1719}
   1720
   1721struct qcom_glink *qcom_glink_native_probe(struct device *dev,
   1722					   unsigned long features,
   1723					   struct qcom_glink_pipe *rx,
   1724					   struct qcom_glink_pipe *tx,
   1725					   bool intentless)
   1726{
   1727	int irq;
   1728	int ret;
   1729	struct qcom_glink *glink;
   1730
   1731	glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
   1732	if (!glink)
   1733		return ERR_PTR(-ENOMEM);
   1734
   1735	glink->dev = dev;
   1736	glink->tx_pipe = tx;
   1737	glink->rx_pipe = rx;
   1738
   1739	glink->features = features;
   1740	glink->intentless = intentless;
   1741
   1742	spin_lock_init(&glink->tx_lock);
   1743	spin_lock_init(&glink->rx_lock);
   1744	INIT_LIST_HEAD(&glink->rx_queue);
   1745	INIT_WORK(&glink->rx_work, qcom_glink_work);
   1746	init_waitqueue_head(&glink->tx_avail_notify);
   1747
   1748	spin_lock_init(&glink->idr_lock);
   1749	idr_init(&glink->lcids);
   1750	idr_init(&glink->rcids);
   1751
   1752	glink->dev->groups = qcom_glink_groups;
   1753
   1754	ret = device_add_groups(dev, qcom_glink_groups);
   1755	if (ret)
   1756		dev_err(dev, "failed to add groups\n");
   1757
   1758	ret = of_property_read_string(dev->of_node, "label", &glink->name);
   1759	if (ret < 0)
   1760		glink->name = dev->of_node->name;
   1761
   1762	glink->mbox_client.dev = dev;
   1763	glink->mbox_client.knows_txdone = true;
   1764	glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
   1765	if (IS_ERR(glink->mbox_chan)) {
   1766		if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER)
   1767			dev_err(dev, "failed to acquire IPC channel\n");
   1768		return ERR_CAST(glink->mbox_chan);
   1769	}
   1770
   1771	irq = of_irq_get(dev->of_node, 0);
   1772	ret = devm_request_irq(dev, irq,
   1773			       qcom_glink_native_intr,
   1774			       IRQF_NO_SUSPEND | IRQF_SHARED,
   1775			       "glink-native", glink);
   1776	if (ret) {
   1777		dev_err(dev, "failed to request IRQ\n");
   1778		return ERR_PTR(ret);
   1779	}
   1780
   1781	glink->irq = irq;
   1782
   1783	ret = qcom_glink_send_version(glink);
   1784	if (ret)
   1785		return ERR_PTR(ret);
   1786
   1787	ret = qcom_glink_create_chrdev(glink);
   1788	if (ret)
   1789		dev_err(glink->dev, "failed to register chrdev\n");
   1790
   1791	return glink;
   1792}
   1793EXPORT_SYMBOL_GPL(qcom_glink_native_probe);
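/*
 * A transport glue driver is expected to implement a pair of
 * struct qcom_glink_pipe objects on top of its shared-memory FIFOs and hand
 * them to qcom_glink_native_probe(). A condensed, hypothetical sketch follows
 * (compiled out; the demo_* names are illustrative):
 */
#if 0	/* illustrative sketch only, not part of the driver */
static size_t demo_pipe_avail(struct qcom_glink_pipe *pipe)
{
	return 0;	/* report the number of bytes available in the FIFO */
}

static int demo_glink_probe(struct platform_device *pdev)
{
	struct qcom_glink_pipe *rx, *tx;
	struct qcom_glink *glink;

	rx = devm_kzalloc(&pdev->dev, sizeof(*rx), GFP_KERNEL);
	tx = devm_kzalloc(&pdev->dev, sizeof(*tx), GFP_KERNEL);
	if (!rx || !tx)
		return -ENOMEM;

	rx->avail = demo_pipe_avail;
	/*
	 * ...the remaining avail/peak/advance/write callbacks and the tx
	 * length would have to be filled in the same way before probing...
	 */

	glink = qcom_glink_native_probe(&pdev->dev, 0, rx, tx,
					true /* intentless */);
	return PTR_ERR_OR_ZERO(glink);
}
#endif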
   1794
   1795static int qcom_glink_remove_device(struct device *dev, void *data)
   1796{
   1797	device_unregister(dev);
   1798
   1799	return 0;
   1800}
   1801
   1802void qcom_glink_native_remove(struct qcom_glink *glink)
   1803{
   1804	struct glink_channel *channel;
   1805	int cid;
   1806	int ret;
   1807
   1808	disable_irq(glink->irq);
   1809	qcom_glink_cancel_rx_work(glink);
   1810
   1811	ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
   1812	if (ret)
   1813		dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
   1814
   1815	/* Release any defunct local channels, waiting for close-ack */
   1816	idr_for_each_entry(&glink->lcids, channel, cid)
   1817		kref_put(&channel->refcount, qcom_glink_channel_release);
   1818
   1819	/* Release any defunct local channels, waiting for close-req */
   1820	idr_for_each_entry(&glink->rcids, channel, cid)
   1821		kref_put(&channel->refcount, qcom_glink_channel_release);
   1822
   1823	idr_destroy(&glink->lcids);
   1824	idr_destroy(&glink->rcids);
   1825	mbox_free_channel(glink->mbox_chan);
   1826}
   1827EXPORT_SYMBOL_GPL(qcom_glink_native_remove);
   1828
   1829void qcom_glink_native_unregister(struct qcom_glink *glink)
   1830{
   1831	device_unregister(glink->dev);
   1832}
   1833EXPORT_SYMBOL_GPL(qcom_glink_native_unregister);
   1834
   1835MODULE_DESCRIPTION("Qualcomm GLINK driver");
   1836MODULE_LICENSE("GPL v2");