cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

client.c (51650B)
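
For orientation, a minimal usage sketch of the connect/disconnect helpers defined in this file (mei_cl_alloc_linked, mei_me_cl_by_uuid, mei_cl_connect, mei_cl_disconnect). It is illustrative only: dev, fp and uuid are assumed to be in scope, error handling is abbreviated, and the caller is assumed to hold dev->device_lock as the kernel-doc comments below require.

	/* sketch, not part of client.c: connect a host client to an ME client by UUID */
	struct mei_me_client *me_cl;
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_alloc_linked(dev);		/* allocate and link a host client */
	if (IS_ERR(cl))
		return PTR_ERR(cl);

	me_cl = mei_me_cl_by_uuid(dev, &uuid);	/* takes a reference; NULL if not found */
	if (!me_cl) {
		ret = -ENOTTY;
		goto out_unlink;
	}

	ret = mei_cl_connect(cl, me_cl, fp);	/* sends HBM connect and waits for the reply */
	mei_me_cl_put(me_cl);			/* mei_cl_connect takes its own reference */
	if (ret)
		goto out_unlink;

	/* ... I/O via mei_cl_write() and mei_cl_read_start() ... */

	mei_cl_disconnect(cl);
out_unlink:
	mei_cl_unlink(cl);
	kfree(cl);
	return ret;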


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
      4 * Intel Management Engine Interface (Intel MEI) Linux driver
      5 */
      6
      7#include <linux/sched/signal.h>
      8#include <linux/wait.h>
      9#include <linux/delay.h>
     10#include <linux/slab.h>
     11#include <linux/pm_runtime.h>
     12#include <linux/dma-mapping.h>
     13
     14#include <linux/mei.h>
     15
     16#include "mei_dev.h"
     17#include "hbm.h"
     18#include "client.h"
     19
     20/**
     21 * mei_me_cl_init - initialize me client
     22 *
     23 * @me_cl: me client
     24 */
     25void mei_me_cl_init(struct mei_me_client *me_cl)
     26{
     27	INIT_LIST_HEAD(&me_cl->list);
     28	kref_init(&me_cl->refcnt);
     29}
     30
     31/**
     32 * mei_me_cl_get - increases me client refcount
     33 *
     34 * @me_cl: me client
     35 *
     36 * Locking: called under "dev->device_lock" lock
     37 *
     38 * Return: me client or NULL
     39 */
     40struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
     41{
     42	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
     43		return me_cl;
     44
     45	return NULL;
     46}
     47
     48/**
     49 * mei_me_cl_release - free me client
     50 *
     51 * Locking: called under "dev->device_lock" lock
     52 *
     53 * @ref: me_client refcount
     54 */
     55static void mei_me_cl_release(struct kref *ref)
     56{
     57	struct mei_me_client *me_cl =
     58		container_of(ref, struct mei_me_client, refcnt);
     59
     60	kfree(me_cl);
     61}
     62
     63/**
     64 * mei_me_cl_put - decrease me client refcount and free client if necessary
     65 *
     66 * Locking: called under "dev->device_lock" lock
     67 *
     68 * @me_cl: me client
     69 */
     70void mei_me_cl_put(struct mei_me_client *me_cl)
     71{
     72	if (me_cl)
     73		kref_put(&me_cl->refcnt, mei_me_cl_release);
     74}
     75
     76/**
     77 * __mei_me_cl_del  - delete me client from the list and decrease
     78 *     reference counter
     79 *
     80 * @dev: mei device
     81 * @me_cl: me client
     82 *
     83 * Locking: dev->me_clients_rwsem
     84 */
     85static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
     86{
     87	if (!me_cl)
     88		return;
     89
     90	list_del_init(&me_cl->list);
     91	mei_me_cl_put(me_cl);
     92}
     93
     94/**
     95 * mei_me_cl_del - delete me client from the list and decrease
     96 *     reference counter
     97 *
     98 * @dev: mei device
     99 * @me_cl: me client
    100 */
    101void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
    102{
    103	down_write(&dev->me_clients_rwsem);
    104	__mei_me_cl_del(dev, me_cl);
    105	up_write(&dev->me_clients_rwsem);
    106}
    107
    108/**
    109 * mei_me_cl_add - add me client to the list
    110 *
    111 * @dev: mei device
    112 * @me_cl: me client
    113 */
    114void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
    115{
    116	down_write(&dev->me_clients_rwsem);
    117	list_add(&me_cl->list, &dev->me_clients);
    118	up_write(&dev->me_clients_rwsem);
    119}
    120
    121/**
    122 * __mei_me_cl_by_uuid - locate me client by uuid
    123 *	increases ref count
    124 *
    125 * @dev: mei device
    126 * @uuid: me client uuid
    127 *
    128 * Return: me client or NULL if not found
    129 *
    130 * Locking: dev->me_clients_rwsem
    131 */
    132static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
    133					const uuid_le *uuid)
    134{
    135	struct mei_me_client *me_cl;
    136	const uuid_le *pn;
    137
    138	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
    139
    140	list_for_each_entry(me_cl, &dev->me_clients, list) {
    141		pn = &me_cl->props.protocol_name;
    142		if (uuid_le_cmp(*uuid, *pn) == 0)
    143			return mei_me_cl_get(me_cl);
    144	}
    145
    146	return NULL;
    147}
    148
    149/**
    150 * mei_me_cl_by_uuid - locate me client by uuid
    151 *	increases ref count
    152 *
    153 * @dev: mei device
    154 * @uuid: me client uuid
    155 *
    156 * Return: me client or NULL if not found
    157 *
    158 * Locking: dev->me_clients_rwsem
    159 */
    160struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
    161					const uuid_le *uuid)
    162{
    163	struct mei_me_client *me_cl;
    164
    165	down_read(&dev->me_clients_rwsem);
    166	me_cl = __mei_me_cl_by_uuid(dev, uuid);
    167	up_read(&dev->me_clients_rwsem);
    168
    169	return me_cl;
    170}
    171
    172/**
    173 * mei_me_cl_by_id - locate me client by client id
    174 *	increases ref count
    175 *
    176 * @dev: the device structure
    177 * @client_id: me client id
    178 *
    179 * Return: me client or NULL if not found
    180 *
    181 * Locking: dev->me_clients_rwsem
    182 */
    183struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
    184{
    185
    186	struct mei_me_client *__me_cl, *me_cl = NULL;
    187
    188	down_read(&dev->me_clients_rwsem);
    189	list_for_each_entry(__me_cl, &dev->me_clients, list) {
    190		if (__me_cl->client_id == client_id) {
    191			me_cl = mei_me_cl_get(__me_cl);
    192			break;
    193		}
    194	}
    195	up_read(&dev->me_clients_rwsem);
    196
    197	return me_cl;
    198}
    199
    200/**
    201 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
    202 *	increases ref count
    203 *
    204 * @dev: the device structure
    205 * @uuid: me client uuid
    206 * @client_id: me client id
    207 *
    208 * Return: me client or null if not found
    209 *
    210 * Locking: dev->me_clients_rwsem
    211 */
    212static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
    213					   const uuid_le *uuid, u8 client_id)
    214{
    215	struct mei_me_client *me_cl;
    216	const uuid_le *pn;
    217
    218	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
    219
    220	list_for_each_entry(me_cl, &dev->me_clients, list) {
    221		pn = &me_cl->props.protocol_name;
    222		if (uuid_le_cmp(*uuid, *pn) == 0 &&
    223		    me_cl->client_id == client_id)
    224			return mei_me_cl_get(me_cl);
    225	}
    226
    227	return NULL;
    228}
    229
    230
    231/**
    232 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
    233 *	increases ref count
    234 *
    235 * @dev: the device structure
    236 * @uuid: me client uuid
    237 * @client_id: me client id
    238 *
    239 * Return: me client or null if not found
    240 */
    241struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
    242					   const uuid_le *uuid, u8 client_id)
    243{
    244	struct mei_me_client *me_cl;
    245
    246	down_read(&dev->me_clients_rwsem);
    247	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
    248	up_read(&dev->me_clients_rwsem);
    249
    250	return me_cl;
    251}
    252
    253/**
    254 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
    255 *
    256 * @dev: the device structure
    257 * @uuid: me client uuid
    258 *
    259 * Locking: called under "dev->device_lock" lock
    260 */
    261void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
    262{
    263	struct mei_me_client *me_cl;
    264
    265	dev_dbg(dev->dev, "remove %pUl\n", uuid);
    266
    267	down_write(&dev->me_clients_rwsem);
    268	me_cl = __mei_me_cl_by_uuid(dev, uuid);
    269	__mei_me_cl_del(dev, me_cl);
    270	mei_me_cl_put(me_cl);
    271	up_write(&dev->me_clients_rwsem);
    272}
    273
    274/**
    275 * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
    276 *
    277 * @dev: the device structure
    278 * @uuid: me client uuid
    279 * @id: me client id
    280 *
    281 * Locking: called under "dev->device_lock" lock
    282 */
    283void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
    284{
    285	struct mei_me_client *me_cl;
    286
    287	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
    288
    289	down_write(&dev->me_clients_rwsem);
    290	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
    291	__mei_me_cl_del(dev, me_cl);
    292	mei_me_cl_put(me_cl);
    293	up_write(&dev->me_clients_rwsem);
    294}
    295
    296/**
    297 * mei_me_cl_rm_all - remove all me clients
    298 *
    299 * @dev: the device structure
    300 *
    301 * Locking: called under "dev->device_lock" lock
    302 */
    303void mei_me_cl_rm_all(struct mei_device *dev)
    304{
    305	struct mei_me_client *me_cl, *next;
    306
    307	down_write(&dev->me_clients_rwsem);
    308	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
    309		__mei_me_cl_del(dev, me_cl);
    310	up_write(&dev->me_clients_rwsem);
    311}
    312
    313/**
    314 * mei_io_cb_free - free mei_cb_private related memory
    315 *
    316 * @cb: mei callback struct
    317 */
    318void mei_io_cb_free(struct mei_cl_cb *cb)
    319{
    320	if (cb == NULL)
    321		return;
    322
    323	list_del(&cb->list);
    324	kfree(cb->buf.data);
    325	kfree(cb);
    326}
    327
    328/**
    329 * mei_tx_cb_enqueue - queue tx callback
    330 *
    331 * Locking: called under "dev->device_lock" lock
    332 *
    333 * @cb: mei callback struct
    334 * @head: an instance of list to queue on
    335 */
    336static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
    337				     struct list_head *head)
    338{
    339	list_add_tail(&cb->list, head);
    340	cb->cl->tx_cb_queued++;
    341}
    342
    343/**
    344 * mei_tx_cb_dequeue - dequeue tx callback
    345 *
    346 * Locking: called under "dev->device_lock" lock
    347 *
    348 * @cb: mei callback struct to dequeue and free
    349 */
    350static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
    351{
    352	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
    353		cb->cl->tx_cb_queued--;
    354
    355	mei_io_cb_free(cb);
    356}
    357
    358/**
     359 * mei_cl_set_read_by_fp - set the pending_read flag in the vtag struct for the given fp
    360 *
    361 * Locking: called under "dev->device_lock" lock
    362 *
    363 * @cl: mei client
    364 * @fp: pointer to file structure
    365 */
    366static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
    367				  const struct file *fp)
    368{
    369	struct mei_cl_vtag *cl_vtag;
    370
    371	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
    372		if (cl_vtag->fp == fp) {
    373			cl_vtag->pending_read = true;
    374			return;
    375		}
    376	}
    377}
    378
    379/**
    380 * mei_io_cb_init - allocate and initialize io callback
    381 *
    382 * @cl: mei client
    383 * @type: operation type
    384 * @fp: pointer to file structure
    385 *
    386 * Return: mei_cl_cb pointer or NULL;
    387 */
    388static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
    389					enum mei_cb_file_ops type,
    390					const struct file *fp)
    391{
    392	struct mei_cl_cb *cb;
    393
    394	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
    395	if (!cb)
    396		return NULL;
    397
    398	INIT_LIST_HEAD(&cb->list);
    399	cb->fp = fp;
    400	cb->cl = cl;
    401	cb->buf_idx = 0;
    402	cb->fop_type = type;
    403	cb->vtag = 0;
    404
    405	return cb;
    406}
    407
    408/**
    409 * mei_io_list_flush_cl - removes cbs belonging to the cl.
    410 *
    411 * @head:  an instance of our list structure
    412 * @cl:    host client
    413 */
    414static void mei_io_list_flush_cl(struct list_head *head,
    415				 const struct mei_cl *cl)
    416{
    417	struct mei_cl_cb *cb, *next;
    418
    419	list_for_each_entry_safe(cb, next, head, list) {
    420		if (cl == cb->cl) {
    421			list_del_init(&cb->list);
    422			if (cb->fop_type == MEI_FOP_READ)
    423				mei_io_cb_free(cb);
    424		}
    425	}
    426}
    427
    428/**
     429 * mei_io_tx_list_free_cl - removes cbs belonging to the cl and frees them
    430 *
    431 * @head: An instance of our list structure
    432 * @cl: host client
    433 * @fp: file pointer (matching cb file object), may be NULL
    434 */
    435static void mei_io_tx_list_free_cl(struct list_head *head,
    436				   const struct mei_cl *cl,
    437				   const struct file *fp)
    438{
    439	struct mei_cl_cb *cb, *next;
    440
    441	list_for_each_entry_safe(cb, next, head, list) {
    442		if (cl == cb->cl && (!fp || fp == cb->fp))
    443			mei_tx_cb_dequeue(cb);
    444	}
    445}
    446
    447/**
    448 * mei_io_list_free_fp - free cb from a list that matches file pointer
    449 *
    450 * @head: io list
    451 * @fp: file pointer (matching cb file object), may be NULL
    452 */
    453static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
    454{
    455	struct mei_cl_cb *cb, *next;
    456
    457	list_for_each_entry_safe(cb, next, head, list)
    458		if (!fp || fp == cb->fp)
    459			mei_io_cb_free(cb);
    460}
    461
    462/**
    463 * mei_cl_free_pending - free pending cb
    464 *
    465 * @cl: host client
    466 */
    467static void mei_cl_free_pending(struct mei_cl *cl)
    468{
    469	struct mei_cl_cb *cb;
    470
    471	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
    472	mei_io_cb_free(cb);
    473}
    474
    475/**
    476 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
    477 *
    478 * @cl: host client
    479 * @length: size of the buffer
    480 * @fop_type: operation type
    481 * @fp: associated file pointer (might be NULL)
    482 *
    483 * Return: cb on success and NULL on failure
    484 */
    485struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
    486				  enum mei_cb_file_ops fop_type,
    487				  const struct file *fp)
    488{
    489	struct mei_cl_cb *cb;
    490
    491	cb = mei_io_cb_init(cl, fop_type, fp);
    492	if (!cb)
    493		return NULL;
    494
    495	if (length == 0)
    496		return cb;
    497
    498	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
    499	if (!cb->buf.data) {
    500		mei_io_cb_free(cb);
    501		return NULL;
    502	}
    503	cb->buf.size = length;
    504
    505	return cb;
    506}
    507
    508/**
    509 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
    510 *     and enqueuing of the control commands cb
    511 *
    512 * @cl: host client
    513 * @length: size of the buffer
    514 * @fop_type: operation type
    515 * @fp: associated file pointer (might be NULL)
    516 *
    517 * Return: cb on success and NULL on failure
    518 * Locking: called under "dev->device_lock" lock
    519 */
    520struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
    521					    enum mei_cb_file_ops fop_type,
    522					    const struct file *fp)
    523{
    524	struct mei_cl_cb *cb;
    525
    526	/* for RX always allocate at least client's mtu */
    527	if (length)
    528		length = max_t(size_t, length, mei_cl_mtu(cl));
    529
    530	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
    531	if (!cb)
    532		return NULL;
    533
    534	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
    535	return cb;
    536}
    537
    538/**
    539 * mei_cl_read_cb - find this cl's callback in the read list
    540 *     for a specific file
    541 *
    542 * @cl: host client
    543 * @fp: file pointer (matching cb file object), may be NULL
    544 *
    545 * Return: cb on success, NULL if cb is not found
    546 */
    547struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
    548{
    549	struct mei_cl_cb *cb;
    550	struct mei_cl_cb *ret_cb = NULL;
    551
    552	spin_lock(&cl->rd_completed_lock);
    553	list_for_each_entry(cb, &cl->rd_completed, list)
    554		if (!fp || fp == cb->fp) {
    555			ret_cb = cb;
    556			break;
    557		}
    558	spin_unlock(&cl->rd_completed_lock);
    559	return ret_cb;
    560}
    561
    562/**
    563 * mei_cl_flush_queues - flushes queue lists belonging to cl.
    564 *
    565 * @cl: host client
    566 * @fp: file pointer (matching cb file object), may be NULL
    567 *
    568 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
    569 */
    570int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
    571{
    572	struct mei_device *dev;
    573
    574	if (WARN_ON(!cl || !cl->dev))
    575		return -EINVAL;
    576
    577	dev = cl->dev;
    578
    579	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
    580	mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
    581	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
    582	/* free pending and control cb only in final flush */
    583	if (!fp) {
    584		mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
    585		mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
    586		mei_cl_free_pending(cl);
    587	}
    588	spin_lock(&cl->rd_completed_lock);
    589	mei_io_list_free_fp(&cl->rd_completed, fp);
    590	spin_unlock(&cl->rd_completed_lock);
    591
    592	return 0;
    593}
    594
    595/**
    596 * mei_cl_init - initializes cl.
    597 *
    598 * @cl: host client to be initialized
    599 * @dev: mei device
    600 */
    601static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
    602{
    603	memset(cl, 0, sizeof(*cl));
    604	init_waitqueue_head(&cl->wait);
    605	init_waitqueue_head(&cl->rx_wait);
    606	init_waitqueue_head(&cl->tx_wait);
    607	init_waitqueue_head(&cl->ev_wait);
    608	INIT_LIST_HEAD(&cl->vtag_map);
    609	spin_lock_init(&cl->rd_completed_lock);
    610	INIT_LIST_HEAD(&cl->rd_completed);
    611	INIT_LIST_HEAD(&cl->rd_pending);
    612	INIT_LIST_HEAD(&cl->link);
    613	cl->writing_state = MEI_IDLE;
    614	cl->state = MEI_FILE_UNINITIALIZED;
    615	cl->dev = dev;
    616}
    617
    618/**
     619 * mei_cl_allocate - allocates cl structure and sets it up.
     620 *
     621 * @dev: mei device
     622 * Return: the allocated host client structure or NULL on failure
    623 */
    624struct mei_cl *mei_cl_allocate(struct mei_device *dev)
    625{
    626	struct mei_cl *cl;
    627
    628	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
    629	if (!cl)
    630		return NULL;
    631
    632	mei_cl_init(cl, dev);
    633
    634	return cl;
    635}
    636
    637/**
    638 * mei_cl_link - allocate host id in the host map
    639 *
    640 * @cl: host client
    641 *
    642 * Return: 0 on success
    643 *	-EINVAL on incorrect values
    644 *	-EMFILE if open count exceeded.
    645 */
    646int mei_cl_link(struct mei_cl *cl)
    647{
    648	struct mei_device *dev;
    649	int id;
    650
    651	if (WARN_ON(!cl || !cl->dev))
    652		return -EINVAL;
    653
    654	dev = cl->dev;
    655
    656	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
    657	if (id >= MEI_CLIENTS_MAX) {
    658		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
    659		return -EMFILE;
    660	}
    661
    662	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
    663		dev_err(dev->dev, "open_handle_count exceeded %d",
    664			MEI_MAX_OPEN_HANDLE_COUNT);
    665		return -EMFILE;
    666	}
    667
    668	dev->open_handle_count++;
    669
    670	cl->host_client_id = id;
    671	list_add_tail(&cl->link, &dev->file_list);
    672
    673	set_bit(id, dev->host_clients_map);
    674
    675	cl->state = MEI_FILE_INITIALIZING;
    676
    677	cl_dbg(dev, cl, "link cl\n");
    678	return 0;
    679}
    680
    681/**
    682 * mei_cl_unlink - remove host client from the list
    683 *
    684 * @cl: host client
    685 *
    686 * Return: always 0
    687 */
    688int mei_cl_unlink(struct mei_cl *cl)
    689{
    690	struct mei_device *dev;
    691
    692	/* don't shout on error exit path */
    693	if (!cl)
    694		return 0;
    695
    696	if (WARN_ON(!cl->dev))
    697		return 0;
    698
    699	dev = cl->dev;
    700
    701	cl_dbg(dev, cl, "unlink client");
    702
    703	if (cl->state == MEI_FILE_UNINITIALIZED)
    704		return 0;
    705
    706	if (dev->open_handle_count > 0)
    707		dev->open_handle_count--;
    708
    709	/* never clear the 0 bit */
    710	if (cl->host_client_id)
    711		clear_bit(cl->host_client_id, dev->host_clients_map);
    712
    713	list_del_init(&cl->link);
    714
    715	cl->state = MEI_FILE_UNINITIALIZED;
    716	cl->writing_state = MEI_IDLE;
    717
    718	WARN_ON(!list_empty(&cl->rd_completed) ||
    719		!list_empty(&cl->rd_pending) ||
    720		!list_empty(&cl->link));
    721
    722	return 0;
    723}
    724
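/**
 * mei_host_client_init - mark the device enabled, schedule a bus rescan
 *     and request runtime autosuspend
 *
 * @dev: the device structure
 */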
    725void mei_host_client_init(struct mei_device *dev)
    726{
    727	mei_set_devstate(dev, MEI_DEV_ENABLED);
    728	dev->reset_count = 0;
    729
    730	schedule_work(&dev->bus_rescan_work);
    731
    732	pm_runtime_mark_last_busy(dev->dev);
    733	dev_dbg(dev->dev, "rpm: autosuspend\n");
    734	pm_request_autosuspend(dev->dev);
    735}
    736
    737/**
    738 * mei_hbuf_acquire - try to acquire host buffer
    739 *
    740 * @dev: the device structure
    741 * Return: true if host buffer was acquired
    742 */
    743bool mei_hbuf_acquire(struct mei_device *dev)
    744{
    745	if (mei_pg_state(dev) == MEI_PG_ON ||
    746	    mei_pg_in_transition(dev)) {
    747		dev_dbg(dev->dev, "device is in pg\n");
    748		return false;
    749	}
    750
    751	if (!dev->hbuf_is_ready) {
    752		dev_dbg(dev->dev, "hbuf is not ready\n");
    753		return false;
    754	}
    755
    756	dev->hbuf_is_ready = false;
    757
    758	return true;
    759}
    760
    761/**
    762 * mei_cl_wake_all - wake up readers, writers and event waiters so
    763 *                 they can be interrupted
    764 *
    765 * @cl: host client
    766 */
    767static void mei_cl_wake_all(struct mei_cl *cl)
    768{
    769	struct mei_device *dev = cl->dev;
    770
    771	/* synchronized under device mutex */
    772	if (waitqueue_active(&cl->rx_wait)) {
    773		cl_dbg(dev, cl, "Waking up reading client!\n");
    774		wake_up_interruptible(&cl->rx_wait);
    775	}
    776	/* synchronized under device mutex */
    777	if (waitqueue_active(&cl->tx_wait)) {
    778		cl_dbg(dev, cl, "Waking up writing client!\n");
    779		wake_up_interruptible(&cl->tx_wait);
    780	}
    781	/* synchronized under device mutex */
    782	if (waitqueue_active(&cl->ev_wait)) {
    783		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
    784		wake_up_interruptible(&cl->ev_wait);
    785	}
    786	/* synchronized under device mutex */
    787	if (waitqueue_active(&cl->wait)) {
    788		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
    789		wake_up(&cl->wait);
    790	}
    791}
    792
    793/**
    794 * mei_cl_set_disconnected - set disconnected state and clear
    795 *   associated states and resources
    796 *
    797 * @cl: host client
    798 */
    799static void mei_cl_set_disconnected(struct mei_cl *cl)
    800{
    801	struct mei_device *dev = cl->dev;
    802
    803	if (cl->state == MEI_FILE_DISCONNECTED ||
    804	    cl->state <= MEI_FILE_INITIALIZING)
    805		return;
    806
    807	cl->state = MEI_FILE_DISCONNECTED;
    808	mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
    809	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
    810	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
    811	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
    812	mei_cl_wake_all(cl);
    813	cl->rx_flow_ctrl_creds = 0;
    814	cl->tx_flow_ctrl_creds = 0;
    815	cl->timer_count = 0;
    816
    817	if (!cl->me_cl)
    818		return;
    819
    820	if (!WARN_ON(cl->me_cl->connect_count == 0))
    821		cl->me_cl->connect_count--;
    822
    823	if (cl->me_cl->connect_count == 0)
    824		cl->me_cl->tx_flow_ctrl_creds = 0;
    825
    826	mei_me_cl_put(cl->me_cl);
    827	cl->me_cl = NULL;
    828}
    829
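/**
 * mei_cl_set_connecting - bind a host client to an me client and mark
 *     the host client as connecting
 *
 * @cl: host client
 * @me_cl: me client
 *
 * Return: 0 on success, -ENOENT if the me client is not active,
 *	-EBUSY if the fixed address me client is already connected
 */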
    830static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
    831{
    832	if (!mei_me_cl_get(me_cl))
    833		return -ENOENT;
    834
    835	/* only one connection is allowed for fixed address clients */
    836	if (me_cl->props.fixed_address) {
    837		if (me_cl->connect_count) {
    838			mei_me_cl_put(me_cl);
    839			return -EBUSY;
    840		}
    841	}
    842
    843	cl->me_cl = me_cl;
    844	cl->state = MEI_FILE_CONNECTING;
    845	cl->me_cl->connect_count++;
    846
    847	return 0;
    848}
    849
     850/**
    851 * mei_cl_send_disconnect - send disconnect request
    852 *
    853 * @cl: host client
    854 * @cb: callback block
    855 *
    856 * Return: 0, OK; otherwise, error.
    857 */
    858static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
    859{
    860	struct mei_device *dev;
    861	int ret;
    862
    863	dev = cl->dev;
    864
    865	ret = mei_hbm_cl_disconnect_req(dev, cl);
    866	cl->status = ret;
    867	if (ret) {
    868		cl->state = MEI_FILE_DISCONNECT_REPLY;
    869		return ret;
    870	}
    871
    872	list_move_tail(&cb->list, &dev->ctrl_rd_list);
    873	cl->timer_count = MEI_CONNECT_TIMEOUT;
    874	mei_schedule_stall_timer(dev);
    875
    876	return 0;
    877}
    878
    879/**
    880 * mei_cl_irq_disconnect - processes close related operation from
    881 *	interrupt thread context - send disconnect request
    882 *
    883 * @cl: client
    884 * @cb: callback block.
    885 * @cmpl_list: complete list.
    886 *
    887 * Return: 0, OK; otherwise, error.
    888 */
    889int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
    890			  struct list_head *cmpl_list)
    891{
    892	struct mei_device *dev = cl->dev;
    893	u32 msg_slots;
    894	int slots;
    895	int ret;
    896
    897	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
    898	slots = mei_hbuf_empty_slots(dev);
    899	if (slots < 0)
    900		return -EOVERFLOW;
    901
    902	if ((u32)slots < msg_slots)
    903		return -EMSGSIZE;
    904
    905	ret = mei_cl_send_disconnect(cl, cb);
    906	if (ret)
    907		list_move_tail(&cb->list, cmpl_list);
    908
    909	return ret;
    910}
    911
    912/**
    913 * __mei_cl_disconnect - disconnect host client from the me one
     914 *     internal function; runtime pm has to be acquired by the caller
    915 *
    916 * @cl: host client
    917 *
    918 * Return: 0 on success, <0 on failure.
    919 */
    920static int __mei_cl_disconnect(struct mei_cl *cl)
    921{
    922	struct mei_device *dev;
    923	struct mei_cl_cb *cb;
    924	int rets;
    925
    926	dev = cl->dev;
    927
    928	cl->state = MEI_FILE_DISCONNECTING;
    929
    930	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
    931	if (!cb) {
    932		rets = -ENOMEM;
    933		goto out;
    934	}
    935
    936	if (mei_hbuf_acquire(dev)) {
    937		rets = mei_cl_send_disconnect(cl, cb);
    938		if (rets) {
    939			cl_err(dev, cl, "failed to disconnect.\n");
    940			goto out;
    941		}
    942	}
    943
    944	mutex_unlock(&dev->device_lock);
    945	wait_event_timeout(cl->wait,
    946			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
    947			   cl->state == MEI_FILE_DISCONNECTED,
    948			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
    949	mutex_lock(&dev->device_lock);
    950
    951	rets = cl->status;
    952	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
    953	    cl->state != MEI_FILE_DISCONNECTED) {
    954		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
    955		rets = -ETIME;
    956	}
    957
    958out:
    959	/* we disconnect also on error */
    960	mei_cl_set_disconnected(cl);
    961	if (!rets)
    962		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
    963
    964	mei_io_cb_free(cb);
    965	return rets;
    966}
    967
    968/**
    969 * mei_cl_disconnect - disconnect host client from the me one
    970 *
    971 * @cl: host client
    972 *
    973 * Locking: called under "dev->device_lock" lock
    974 *
    975 * Return: 0 on success, <0 on failure.
    976 */
    977int mei_cl_disconnect(struct mei_cl *cl)
    978{
    979	struct mei_device *dev;
    980	int rets;
    981
    982	if (WARN_ON(!cl || !cl->dev))
    983		return -ENODEV;
    984
    985	dev = cl->dev;
    986
    987	cl_dbg(dev, cl, "disconnecting");
    988
    989	if (!mei_cl_is_connected(cl))
    990		return 0;
    991
    992	if (mei_cl_is_fixed_address(cl)) {
    993		mei_cl_set_disconnected(cl);
    994		return 0;
    995	}
    996
    997	if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
    998	    dev->dev_state == MEI_DEV_POWER_DOWN) {
    999		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
   1000		mei_cl_set_disconnected(cl);
   1001		return 0;
   1002	}
   1003
   1004	rets = pm_runtime_get(dev->dev);
   1005	if (rets < 0 && rets != -EINPROGRESS) {
   1006		pm_runtime_put_noidle(dev->dev);
   1007		cl_err(dev, cl, "rpm: get failed %d\n", rets);
   1008		return rets;
   1009	}
   1010
   1011	rets = __mei_cl_disconnect(cl);
   1012
   1013	cl_dbg(dev, cl, "rpm: autosuspend\n");
   1014	pm_runtime_mark_last_busy(dev->dev);
   1015	pm_runtime_put_autosuspend(dev->dev);
   1016
   1017	return rets;
   1018}
   1019
   1020
   1021/**
   1022 * mei_cl_is_other_connecting - checks if other
   1023 *    client with the same me client id is connecting
   1024 *
   1025 * @cl: private data of the file object
   1026 *
    1027 * Return: true if another client with the same me client id is connecting, false otherwise.
   1028 */
   1029static bool mei_cl_is_other_connecting(struct mei_cl *cl)
   1030{
   1031	struct mei_device *dev;
   1032	struct mei_cl_cb *cb;
   1033
   1034	dev = cl->dev;
   1035
   1036	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
   1037		if (cb->fop_type == MEI_FOP_CONNECT &&
   1038		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
   1039			return true;
   1040	}
   1041
   1042	return false;
   1043}
   1044
   1045/**
   1046 * mei_cl_send_connect - send connect request
   1047 *
   1048 * @cl: host client
   1049 * @cb: callback block
   1050 *
   1051 * Return: 0, OK; otherwise, error.
   1052 */
   1053static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
   1054{
   1055	struct mei_device *dev;
   1056	int ret;
   1057
   1058	dev = cl->dev;
   1059
   1060	ret = mei_hbm_cl_connect_req(dev, cl);
   1061	cl->status = ret;
   1062	if (ret) {
   1063		cl->state = MEI_FILE_DISCONNECT_REPLY;
   1064		return ret;
   1065	}
   1066
   1067	list_move_tail(&cb->list, &dev->ctrl_rd_list);
   1068	cl->timer_count = MEI_CONNECT_TIMEOUT;
   1069	mei_schedule_stall_timer(dev);
   1070	return 0;
   1071}
   1072
   1073/**
   1074 * mei_cl_irq_connect - send connect request in irq_thread context
   1075 *
   1076 * @cl: host client
   1077 * @cb: callback block
   1078 * @cmpl_list: complete list
   1079 *
   1080 * Return: 0, OK; otherwise, error.
   1081 */
   1082int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
   1083		       struct list_head *cmpl_list)
   1084{
   1085	struct mei_device *dev = cl->dev;
   1086	u32 msg_slots;
   1087	int slots;
   1088	int rets;
   1089
   1090	if (mei_cl_is_other_connecting(cl))
   1091		return 0;
   1092
   1093	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
   1094	slots = mei_hbuf_empty_slots(dev);
   1095	if (slots < 0)
   1096		return -EOVERFLOW;
   1097
   1098	if ((u32)slots < msg_slots)
   1099		return -EMSGSIZE;
   1100
   1101	rets = mei_cl_send_connect(cl, cb);
   1102	if (rets)
   1103		list_move_tail(&cb->list, cmpl_list);
   1104
   1105	return rets;
   1106}
   1107
   1108/**
   1109 * mei_cl_connect - connect host client to the me one
   1110 *
   1111 * @cl: host client
   1112 * @me_cl: me client
   1113 * @fp: pointer to file structure
   1114 *
   1115 * Locking: called under "dev->device_lock" lock
   1116 *
   1117 * Return: 0 on success, <0 on failure.
   1118 */
   1119int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
   1120		   const struct file *fp)
   1121{
   1122	struct mei_device *dev;
   1123	struct mei_cl_cb *cb;
   1124	int rets;
   1125
   1126	if (WARN_ON(!cl || !cl->dev || !me_cl))
   1127		return -ENODEV;
   1128
   1129	dev = cl->dev;
   1130
   1131	rets = mei_cl_set_connecting(cl, me_cl);
   1132	if (rets)
   1133		goto nortpm;
   1134
   1135	if (mei_cl_is_fixed_address(cl)) {
   1136		cl->state = MEI_FILE_CONNECTED;
   1137		rets = 0;
   1138		goto nortpm;
   1139	}
   1140
   1141	rets = pm_runtime_get(dev->dev);
   1142	if (rets < 0 && rets != -EINPROGRESS) {
   1143		pm_runtime_put_noidle(dev->dev);
   1144		cl_err(dev, cl, "rpm: get failed %d\n", rets);
   1145		goto nortpm;
   1146	}
   1147
   1148	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
   1149	if (!cb) {
   1150		rets = -ENOMEM;
   1151		goto out;
   1152	}
   1153
   1154	/* run hbuf acquire last so we don't have to undo */
   1155	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
   1156		rets = mei_cl_send_connect(cl, cb);
   1157		if (rets)
   1158			goto out;
   1159	}
   1160
   1161	mutex_unlock(&dev->device_lock);
   1162	wait_event_timeout(cl->wait,
   1163			(cl->state == MEI_FILE_CONNECTED ||
   1164			 cl->state == MEI_FILE_DISCONNECTED ||
   1165			 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
   1166			 cl->state == MEI_FILE_DISCONNECT_REPLY),
   1167			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
   1168	mutex_lock(&dev->device_lock);
   1169
   1170	if (!mei_cl_is_connected(cl)) {
   1171		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
   1172			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
   1173			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
    1174			 /* ignore disconnect return value;
    1175			  * in case of failure, reset will be invoked
   1176			  */
   1177			__mei_cl_disconnect(cl);
   1178			rets = -EFAULT;
   1179			goto out;
   1180		}
   1181
   1182		/* timeout or something went really wrong */
   1183		if (!cl->status)
   1184			cl->status = -EFAULT;
   1185	}
   1186
   1187	rets = cl->status;
   1188out:
   1189	cl_dbg(dev, cl, "rpm: autosuspend\n");
   1190	pm_runtime_mark_last_busy(dev->dev);
   1191	pm_runtime_put_autosuspend(dev->dev);
   1192
   1193	mei_io_cb_free(cb);
   1194
   1195nortpm:
   1196	if (!mei_cl_is_connected(cl))
   1197		mei_cl_set_disconnected(cl);
   1198
   1199	return rets;
   1200}
   1201
   1202/**
   1203 * mei_cl_alloc_linked - allocate and link host client
   1204 *
   1205 * @dev: the device structure
   1206 *
    1207 * Return: cl on success, ERR_PTR on failure
   1208 */
   1209struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
   1210{
   1211	struct mei_cl *cl;
   1212	int ret;
   1213
   1214	cl = mei_cl_allocate(dev);
   1215	if (!cl) {
   1216		ret = -ENOMEM;
   1217		goto err;
   1218	}
   1219
   1220	ret = mei_cl_link(cl);
   1221	if (ret)
   1222		goto err;
   1223
   1224	return cl;
   1225err:
   1226	kfree(cl);
   1227	return ERR_PTR(ret);
   1228}
   1229
   1230/**
   1231 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
   1232 *
   1233 * @cl: host client
   1234 *
   1235 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
   1236 */
   1237static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
   1238{
   1239	if (WARN_ON(!cl || !cl->me_cl))
   1240		return -EINVAL;
   1241
   1242	if (cl->tx_flow_ctrl_creds > 0)
   1243		return 1;
   1244
   1245	if (mei_cl_is_fixed_address(cl))
   1246		return 1;
   1247
   1248	if (mei_cl_is_single_recv_buf(cl)) {
   1249		if (cl->me_cl->tx_flow_ctrl_creds > 0)
   1250			return 1;
   1251	}
   1252	return 0;
   1253}
   1254
   1255/**
   1256 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
   1257 *   for a client
   1258 *
   1259 * @cl: host client
   1260 *
   1261 * Return:
   1262 *	0 on success
   1263 *	-EINVAL when ctrl credits are <= 0
   1264 */
   1265static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
   1266{
   1267	if (WARN_ON(!cl || !cl->me_cl))
   1268		return -EINVAL;
   1269
   1270	if (mei_cl_is_fixed_address(cl))
   1271		return 0;
   1272
   1273	if (mei_cl_is_single_recv_buf(cl)) {
   1274		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
   1275			return -EINVAL;
   1276		cl->me_cl->tx_flow_ctrl_creds--;
   1277	} else {
   1278		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
   1279			return -EINVAL;
   1280		cl->tx_flow_ctrl_creds--;
   1281	}
   1282	return 0;
   1283}
   1284
   1285/**
   1286 * mei_cl_vtag_alloc - allocate and fill the vtag structure
   1287 *
   1288 * @fp: pointer to file structure
   1289 * @vtag: vm tag
   1290 *
   1291 * Return:
   1292 * * Pointer to allocated struct - on success
   1293 * * ERR_PTR(-ENOMEM) on memory allocation failure
   1294 */
   1295struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
   1296{
   1297	struct mei_cl_vtag *cl_vtag;
   1298
   1299	cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
   1300	if (!cl_vtag)
   1301		return ERR_PTR(-ENOMEM);
   1302
   1303	INIT_LIST_HEAD(&cl_vtag->list);
   1304	cl_vtag->vtag = vtag;
   1305	cl_vtag->fp = fp;
   1306
   1307	return cl_vtag;
   1308}
   1309
   1310/**
   1311 * mei_cl_fp_by_vtag - obtain the file pointer by vtag
   1312 *
   1313 * @cl: host client
   1314 * @vtag: virtual tag
   1315 *
   1316 * Return:
   1317 * * A file pointer - on success
   1318 * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
   1319 */
   1320const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
   1321{
   1322	struct mei_cl_vtag *vtag_l;
   1323
   1324	list_for_each_entry(vtag_l, &cl->vtag_map, list)
   1325		/* The client on bus has one fixed fp */
   1326		if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
   1327		    vtag_l->vtag == vtag)
   1328			return vtag_l->fp;
   1329
   1330	return ERR_PTR(-ENOENT);
   1331}
   1332
   1333/**
   1334 * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
   1335 *
   1336 * @cl: host client
   1337 * @vtag: vm tag
   1338 */
   1339static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
   1340{
   1341	struct mei_cl_vtag *vtag_l;
   1342
   1343	list_for_each_entry(vtag_l, &cl->vtag_map, list) {
   1344		if (vtag_l->vtag == vtag) {
   1345			vtag_l->pending_read = false;
   1346			break;
   1347		}
   1348	}
   1349}
   1350
   1351/**
   1352 * mei_cl_read_vtag_add_fc - add flow control for next pending reader
   1353 *                           in the vtag list
   1354 *
   1355 * @cl: host client
   1356 */
   1357static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
   1358{
   1359	struct mei_cl_vtag *cl_vtag;
   1360
   1361	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
   1362		if (cl_vtag->pending_read) {
   1363			if (mei_cl_enqueue_ctrl_wr_cb(cl,
   1364						      mei_cl_mtu(cl),
   1365						      MEI_FOP_READ,
   1366						      cl_vtag->fp))
   1367				cl->rx_flow_ctrl_creds++;
   1368			break;
   1369		}
   1370	}
   1371}
   1372
   1373/**
    1374 * mei_cl_vt_support_check - check if client supports vtags
   1375 *
   1376 * @cl: host client
   1377 *
   1378 * Return:
   1379 * * 0 - supported, or not connected at all
   1380 * * -EOPNOTSUPP - vtags are not supported by client
   1381 */
   1382int mei_cl_vt_support_check(const struct mei_cl *cl)
   1383{
   1384	struct mei_device *dev = cl->dev;
   1385
   1386	if (!dev->hbm_f_vt_supported)
   1387		return -EOPNOTSUPP;
   1388
   1389	if (!cl->me_cl)
   1390		return 0;
   1391
   1392	return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
   1393}
   1394
   1395/**
   1396 * mei_cl_add_rd_completed - add read completed callback to list with lock
   1397 *                           and vtag check
   1398 *
   1399 * @cl: host client
   1400 * @cb: callback block
   1401 *
   1402 */
   1403void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
   1404{
   1405	const struct file *fp;
   1406
   1407	if (!mei_cl_vt_support_check(cl)) {
   1408		fp = mei_cl_fp_by_vtag(cl, cb->vtag);
   1409		if (IS_ERR(fp)) {
   1410			/* client already disconnected, discarding */
   1411			mei_io_cb_free(cb);
   1412			return;
   1413		}
   1414		cb->fp = fp;
   1415		mei_cl_reset_read_by_vtag(cl, cb->vtag);
   1416		mei_cl_read_vtag_add_fc(cl);
   1417	}
   1418
   1419	spin_lock(&cl->rd_completed_lock);
   1420	list_add_tail(&cb->list, &cl->rd_completed);
   1421	spin_unlock(&cl->rd_completed_lock);
   1422}
   1423
   1424/**
   1425 * mei_cl_del_rd_completed - free read completed callback with lock
   1426 *
   1427 * @cl: host client
   1428 * @cb: callback block
   1429 *
   1430 */
   1431void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
   1432{
   1433	spin_lock(&cl->rd_completed_lock);
   1434	mei_io_cb_free(cb);
   1435	spin_unlock(&cl->rd_completed_lock);
   1436}
   1437
   1438/**
   1439 *  mei_cl_notify_fop2req - convert fop to proper request
   1440 *
    1441 * @fop: client notification start/stop file operation type
   1442 *
   1443 * Return:  MEI_HBM_NOTIFICATION_START/STOP
   1444 */
   1445u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
   1446{
   1447	if (fop == MEI_FOP_NOTIFY_START)
   1448		return MEI_HBM_NOTIFICATION_START;
   1449	else
   1450		return MEI_HBM_NOTIFICATION_STOP;
   1451}
   1452
   1453/**
    1454 *  mei_cl_notify_req2fop - convert notification request to file operation type
   1455 *
   1456 * @req: hbm notification request type
   1457 *
   1458 * Return:  MEI_FOP_NOTIFY_START/STOP
   1459 */
   1460enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
   1461{
   1462	if (req == MEI_HBM_NOTIFICATION_START)
   1463		return MEI_FOP_NOTIFY_START;
   1464	else
   1465		return MEI_FOP_NOTIFY_STOP;
   1466}
   1467
   1468/**
   1469 * mei_cl_irq_notify - send notification request in irq_thread context
   1470 *
   1471 * @cl: client
   1472 * @cb: callback block.
   1473 * @cmpl_list: complete list.
   1474 *
    1475 * Return: 0 on success and error otherwise.
   1476 */
   1477int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
   1478		      struct list_head *cmpl_list)
   1479{
   1480	struct mei_device *dev = cl->dev;
   1481	u32 msg_slots;
   1482	int slots;
   1483	int ret;
   1484	bool request;
   1485
   1486	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
   1487	slots = mei_hbuf_empty_slots(dev);
   1488	if (slots < 0)
   1489		return -EOVERFLOW;
   1490
   1491	if ((u32)slots < msg_slots)
   1492		return -EMSGSIZE;
   1493
   1494	request = mei_cl_notify_fop2req(cb->fop_type);
   1495	ret = mei_hbm_cl_notify_req(dev, cl, request);
   1496	if (ret) {
   1497		cl->status = ret;
   1498		list_move_tail(&cb->list, cmpl_list);
   1499		return ret;
   1500	}
   1501
   1502	list_move_tail(&cb->list, &dev->ctrl_rd_list);
   1503	return 0;
   1504}
   1505
   1506/**
   1507 * mei_cl_notify_request - send notification stop/start request
   1508 *
   1509 * @cl: host client
   1510 * @fp: associate request with file
   1511 * @request: 1 for start or 0 for stop
   1512 *
   1513 * Locking: called under "dev->device_lock" lock
   1514 *
    1515 * Return: 0 on success and error otherwise.
   1516 */
   1517int mei_cl_notify_request(struct mei_cl *cl,
   1518			  const struct file *fp, u8 request)
   1519{
   1520	struct mei_device *dev;
   1521	struct mei_cl_cb *cb;
   1522	enum mei_cb_file_ops fop_type;
   1523	int rets;
   1524
   1525	if (WARN_ON(!cl || !cl->dev))
   1526		return -ENODEV;
   1527
   1528	dev = cl->dev;
   1529
   1530	if (!dev->hbm_f_ev_supported) {
   1531		cl_dbg(dev, cl, "notifications not supported\n");
   1532		return -EOPNOTSUPP;
   1533	}
   1534
   1535	if (!mei_cl_is_connected(cl))
   1536		return -ENODEV;
   1537
   1538	rets = pm_runtime_get(dev->dev);
   1539	if (rets < 0 && rets != -EINPROGRESS) {
   1540		pm_runtime_put_noidle(dev->dev);
   1541		cl_err(dev, cl, "rpm: get failed %d\n", rets);
   1542		return rets;
   1543	}
   1544
   1545	fop_type = mei_cl_notify_req2fop(request);
   1546	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
   1547	if (!cb) {
   1548		rets = -ENOMEM;
   1549		goto out;
   1550	}
   1551
   1552	if (mei_hbuf_acquire(dev)) {
   1553		if (mei_hbm_cl_notify_req(dev, cl, request)) {
   1554			rets = -ENODEV;
   1555			goto out;
   1556		}
   1557		list_move_tail(&cb->list, &dev->ctrl_rd_list);
   1558	}
   1559
   1560	mutex_unlock(&dev->device_lock);
   1561	wait_event_timeout(cl->wait,
   1562			   cl->notify_en == request ||
   1563			   cl->status ||
   1564			   !mei_cl_is_connected(cl),
   1565			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
   1566	mutex_lock(&dev->device_lock);
   1567
   1568	if (cl->notify_en != request && !cl->status)
   1569		cl->status = -EFAULT;
   1570
   1571	rets = cl->status;
   1572
   1573out:
   1574	cl_dbg(dev, cl, "rpm: autosuspend\n");
   1575	pm_runtime_mark_last_busy(dev->dev);
   1576	pm_runtime_put_autosuspend(dev->dev);
   1577
   1578	mei_io_cb_free(cb);
   1579	return rets;
   1580}
   1581
   1582/**
   1583 * mei_cl_notify - raise notification
   1584 *
   1585 * @cl: host client
   1586 *
   1587 * Locking: called under "dev->device_lock" lock
   1588 */
   1589void mei_cl_notify(struct mei_cl *cl)
   1590{
   1591	struct mei_device *dev;
   1592
   1593	if (!cl || !cl->dev)
   1594		return;
   1595
   1596	dev = cl->dev;
   1597
   1598	if (!cl->notify_en)
   1599		return;
   1600
   1601	cl_dbg(dev, cl, "notify event");
   1602	cl->notify_ev = true;
   1603	if (!mei_cl_bus_notify_event(cl))
   1604		wake_up_interruptible(&cl->ev_wait);
   1605
   1606	if (cl->ev_async)
   1607		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
   1608
   1609}
   1610
   1611/**
   1612 * mei_cl_notify_get - get or wait for notification event
   1613 *
   1614 * @cl: host client
   1615 * @block: this request is blocking
   1616 * @notify_ev: true if notification event was received
   1617 *
   1618 * Locking: called under "dev->device_lock" lock
   1619 *
    1620 * Return: 0 on success and error otherwise.
   1621 */
   1622int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
   1623{
   1624	struct mei_device *dev;
   1625	int rets;
   1626
   1627	*notify_ev = false;
   1628
   1629	if (WARN_ON(!cl || !cl->dev))
   1630		return -ENODEV;
   1631
   1632	dev = cl->dev;
   1633
   1634	if (!dev->hbm_f_ev_supported) {
   1635		cl_dbg(dev, cl, "notifications not supported\n");
   1636		return -EOPNOTSUPP;
   1637	}
   1638
   1639	if (!mei_cl_is_connected(cl))
   1640		return -ENODEV;
   1641
   1642	if (cl->notify_ev)
   1643		goto out;
   1644
   1645	if (!block)
   1646		return -EAGAIN;
   1647
   1648	mutex_unlock(&dev->device_lock);
   1649	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
   1650	mutex_lock(&dev->device_lock);
   1651
   1652	if (rets < 0)
   1653		return rets;
   1654
   1655out:
   1656	*notify_ev = cl->notify_ev;
   1657	cl->notify_ev = false;
   1658	return 0;
   1659}
   1660
   1661/**
    1662 * mei_cl_read_start - start a read of a client message.
   1663 *
   1664 * @cl: host client
   1665 * @length: number of bytes to read
   1666 * @fp: pointer to file structure
   1667 *
   1668 * Return: 0 on success, <0 on failure.
   1669 */
   1670int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
   1671{
   1672	struct mei_device *dev;
   1673	struct mei_cl_cb *cb;
   1674	int rets;
   1675
   1676	if (WARN_ON(!cl || !cl->dev))
   1677		return -ENODEV;
   1678
   1679	dev = cl->dev;
   1680
   1681	if (!mei_cl_is_connected(cl))
   1682		return -ENODEV;
   1683
   1684	if (!mei_me_cl_is_active(cl->me_cl)) {
   1685		cl_err(dev, cl, "no such me client\n");
   1686		return  -ENOTTY;
   1687	}
   1688
   1689	if (mei_cl_is_fixed_address(cl))
   1690		return 0;
   1691
   1692	/* HW currently supports only one pending read */
   1693	if (cl->rx_flow_ctrl_creds) {
   1694		mei_cl_set_read_by_fp(cl, fp);
   1695		return -EBUSY;
   1696	}
   1697
   1698	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
   1699	if (!cb)
   1700		return -ENOMEM;
   1701
   1702	mei_cl_set_read_by_fp(cl, fp);
   1703
   1704	rets = pm_runtime_get(dev->dev);
   1705	if (rets < 0 && rets != -EINPROGRESS) {
   1706		pm_runtime_put_noidle(dev->dev);
   1707		cl_err(dev, cl, "rpm: get failed %d\n", rets);
   1708		goto nortpm;
   1709	}
   1710
   1711	rets = 0;
   1712	if (mei_hbuf_acquire(dev)) {
   1713		rets = mei_hbm_cl_flow_control_req(dev, cl);
   1714		if (rets < 0)
   1715			goto out;
   1716
   1717		list_move_tail(&cb->list, &cl->rd_pending);
   1718	}
   1719	cl->rx_flow_ctrl_creds++;
   1720
   1721out:
   1722	cl_dbg(dev, cl, "rpm: autosuspend\n");
   1723	pm_runtime_mark_last_busy(dev->dev);
   1724	pm_runtime_put_autosuspend(dev->dev);
   1725nortpm:
   1726	if (rets)
   1727		mei_io_cb_free(cb);
   1728
   1729	return rets;
   1730}
   1731
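/**
 * mei_ext_hdr_set_vtag - fill in a vtag extended header
 *
 * @ext: extended header buffer to fill
 * @vtag: vm tag to set
 *
 * Return: length of the vtag extended header in slots
 */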
   1732static inline u8 mei_ext_hdr_set_vtag(void *ext, u8 vtag)
   1733{
   1734	struct mei_ext_hdr_vtag *vtag_hdr = ext;
   1735
   1736	vtag_hdr->hdr.type = MEI_EXT_HDR_VTAG;
   1737	vtag_hdr->hdr.length = mei_data2slots(sizeof(*vtag_hdr));
   1738	vtag_hdr->vtag = vtag;
   1739	vtag_hdr->reserved = 0;
   1740	return vtag_hdr->hdr.length;
   1741}
   1742
   1743/**
   1744 * mei_msg_hdr_init - allocate and initialize mei message header
   1745 *
   1746 * @cb: message callback structure
   1747 *
   1748 * Return: a pointer to initialized header or ERR_PTR on failure
   1749 */
   1750static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
   1751{
   1752	size_t hdr_len;
   1753	struct mei_ext_meta_hdr *meta;
   1754	struct mei_msg_hdr *mei_hdr;
   1755	bool is_ext, is_vtag;
   1756
   1757	if (!cb)
   1758		return ERR_PTR(-EINVAL);
   1759
   1760	/* Extended header for vtag is attached only on the first fragment */
   1761	is_vtag = (cb->vtag && cb->buf_idx == 0);
   1762	is_ext = is_vtag;
   1763
   1764	/* Compute extended header size */
   1765	hdr_len = sizeof(*mei_hdr);
   1766
   1767	if (!is_ext)
   1768		goto setup_hdr;
   1769
   1770	hdr_len += sizeof(*meta);
   1771	if (is_vtag)
   1772		hdr_len += sizeof(struct mei_ext_hdr_vtag);
   1773
   1774setup_hdr:
   1775	mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
   1776	if (!mei_hdr)
   1777		return ERR_PTR(-ENOMEM);
   1778
   1779	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
   1780	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
   1781	mei_hdr->internal = cb->internal;
   1782	mei_hdr->extended = is_ext;
   1783
   1784	if (!is_ext)
   1785		goto out;
   1786
   1787	meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
   1788	if (is_vtag) {
   1789		meta->count++;
   1790		meta->size += mei_ext_hdr_set_vtag(meta->hdrs, cb->vtag);
   1791	}
   1792out:
   1793	mei_hdr->length = hdr_len - sizeof(*mei_hdr);
   1794	return mei_hdr;
   1795}
   1796
   1797/**
   1798 * mei_cl_irq_write - write a message to device
   1799 *	from the interrupt thread context
   1800 *
   1801 * @cl: client
   1802 * @cb: callback block.
   1803 * @cmpl_list: complete list.
   1804 *
   1805 * Return: 0, OK; otherwise error.
   1806 */
   1807int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
   1808		     struct list_head *cmpl_list)
   1809{
   1810	struct mei_device *dev;
   1811	struct mei_msg_data *buf;
   1812	struct mei_msg_hdr *mei_hdr = NULL;
   1813	size_t hdr_len;
   1814	size_t hbuf_len, dr_len;
   1815	size_t buf_len;
   1816	size_t data_len;
   1817	int hbuf_slots;
   1818	u32 dr_slots;
   1819	u32 dma_len;
   1820	int rets;
   1821	bool first_chunk;
   1822	const void *data;
   1823
   1824	if (WARN_ON(!cl || !cl->dev))
   1825		return -ENODEV;
   1826
   1827	dev = cl->dev;
   1828
   1829	buf = &cb->buf;
   1830
   1831	first_chunk = cb->buf_idx == 0;
   1832
   1833	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
   1834	if (rets < 0)
   1835		goto err;
   1836
   1837	if (rets == 0) {
   1838		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
   1839		return 0;
   1840	}
   1841
   1842	buf_len = buf->size - cb->buf_idx;
   1843	data = buf->data + cb->buf_idx;
   1844	hbuf_slots = mei_hbuf_empty_slots(dev);
   1845	if (hbuf_slots < 0) {
   1846		rets = -EOVERFLOW;
   1847		goto err;
   1848	}
   1849
   1850	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
   1851	dr_slots = mei_dma_ring_empty_slots(dev);
   1852	dr_len = mei_slots2data(dr_slots);
   1853
   1854	mei_hdr = mei_msg_hdr_init(cb);
   1855	if (IS_ERR(mei_hdr)) {
   1856		rets = PTR_ERR(mei_hdr);
   1857		mei_hdr = NULL;
   1858		goto err;
   1859	}
   1860
   1861	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
   1862	       mei_hdr->extended, cb->vtag);
   1863
   1864	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
   1865
   1866	/**
   1867	 * Split the message only if we can write the whole host buffer
   1868	 * otherwise wait for next time the host buffer is empty.
   1869	 */
   1870	if (hdr_len + buf_len <= hbuf_len) {
   1871		data_len = buf_len;
   1872		mei_hdr->msg_complete = 1;
   1873	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
   1874		mei_hdr->dma_ring = 1;
   1875		if (buf_len > dr_len)
   1876			buf_len = dr_len;
   1877		else
   1878			mei_hdr->msg_complete = 1;
   1879
   1880		data_len = sizeof(dma_len);
   1881		dma_len = buf_len;
   1882		data = &dma_len;
   1883	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
   1884		buf_len = hbuf_len - hdr_len;
   1885		data_len = buf_len;
   1886	} else {
   1887		kfree(mei_hdr);
   1888		return 0;
   1889	}
   1890	mei_hdr->length += data_len;
   1891
   1892	if (mei_hdr->dma_ring)
   1893		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
   1894	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
   1895
   1896	if (rets)
   1897		goto err;
   1898
   1899	cl->status = 0;
   1900	cl->writing_state = MEI_WRITING;
   1901	cb->buf_idx += buf_len;
   1902
   1903	if (first_chunk) {
   1904		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
   1905			rets = -EIO;
   1906			goto err;
   1907		}
   1908	}
   1909
   1910	if (mei_hdr->msg_complete)
   1911		list_move_tail(&cb->list, &dev->write_waiting_list);
   1912
   1913	kfree(mei_hdr);
   1914	return 0;
   1915
   1916err:
   1917	kfree(mei_hdr);
   1918	cl->status = rets;
   1919	list_move_tail(&cb->list, cmpl_list);
   1920	return rets;
   1921}
   1922
   1923/**
   1924 * mei_cl_write - submit a write cb to mei device
   1925 *	assumes device_lock is locked
   1926 *
   1927 * @cl: host client
   1928 * @cb: write callback with filled data
   1929 *
   1930 * Return: number of bytes sent on success, <0 on failure.
   1931 */
   1932ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
   1933{
   1934	struct mei_device *dev;
   1935	struct mei_msg_data *buf;
   1936	struct mei_msg_hdr *mei_hdr = NULL;
   1937	size_t hdr_len;
   1938	size_t hbuf_len, dr_len;
   1939	size_t buf_len;
   1940	size_t data_len;
   1941	int hbuf_slots;
   1942	u32 dr_slots;
   1943	u32 dma_len;
   1944	ssize_t rets;
   1945	bool blocking;
   1946	const void *data;
   1947
   1948	if (WARN_ON(!cl || !cl->dev))
   1949		return -ENODEV;
   1950
   1951	if (WARN_ON(!cb))
   1952		return -EINVAL;
   1953
   1954	dev = cl->dev;
   1955
   1956	buf = &cb->buf;
   1957	buf_len = buf->size;
   1958
   1959	cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
   1960
   1961	blocking = cb->blocking;
   1962	data = buf->data;
   1963
   1964	rets = pm_runtime_get(dev->dev);
   1965	if (rets < 0 && rets != -EINPROGRESS) {
   1966		pm_runtime_put_noidle(dev->dev);
   1967		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
   1968		goto free;
   1969	}
   1970
   1971	cb->buf_idx = 0;
   1972	cl->writing_state = MEI_IDLE;
   1973
   1974
   1975	rets = mei_cl_tx_flow_ctrl_creds(cl);
   1976	if (rets < 0)
   1977		goto err;
   1978
   1979	mei_hdr = mei_msg_hdr_init(cb);
   1980	if (IS_ERR(mei_hdr)) {
    1981		rets = PTR_ERR(mei_hdr);
   1982		mei_hdr = NULL;
   1983		goto err;
   1984	}
   1985
   1986	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
   1987	       mei_hdr->extended, cb->vtag);
   1988
   1989	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
   1990
   1991	if (rets == 0) {
   1992		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
   1993		rets = buf_len;
   1994		goto out;
   1995	}
   1996
   1997	if (!mei_hbuf_acquire(dev)) {
   1998		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
   1999		rets = buf_len;
   2000		goto out;
   2001	}
   2002
   2003	hbuf_slots = mei_hbuf_empty_slots(dev);
   2004	if (hbuf_slots < 0) {
   2005		rets = -EOVERFLOW;
   2006		goto out;
   2007	}
   2008
   2009	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
   2010	dr_slots = mei_dma_ring_empty_slots(dev);
   2011	dr_len =  mei_slots2data(dr_slots);
   2012
   2013	if (hdr_len + buf_len <= hbuf_len) {
   2014		data_len = buf_len;
   2015		mei_hdr->msg_complete = 1;
   2016	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
   2017		mei_hdr->dma_ring = 1;
   2018		if (buf_len > dr_len)
   2019			buf_len = dr_len;
   2020		else
   2021			mei_hdr->msg_complete = 1;
   2022
   2023		data_len = sizeof(dma_len);
   2024		dma_len = buf_len;
   2025		data = &dma_len;
   2026	} else {
   2027		buf_len = hbuf_len - hdr_len;
   2028		data_len = buf_len;
   2029	}
   2030
   2031	mei_hdr->length += data_len;
   2032
   2033	if (mei_hdr->dma_ring)
   2034		mei_dma_ring_write(dev, buf->data, buf_len);
   2035	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
   2036
   2037	if (rets)
   2038		goto err;
   2039
   2040	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
   2041	if (rets)
   2042		goto err;
   2043
   2044	cl->writing_state = MEI_WRITING;
   2045	cb->buf_idx = buf_len;
   2046	/* restore return value */
   2047	buf_len = buf->size;
   2048
   2049out:
   2050	if (mei_hdr->msg_complete)
   2051		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
   2052	else
   2053		mei_tx_cb_enqueue(cb, &dev->write_list);
   2054
   2055	cb = NULL;
   2056	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
   2057
   2058		mutex_unlock(&dev->device_lock);
   2059		rets = wait_event_interruptible(cl->tx_wait,
   2060				cl->writing_state == MEI_WRITE_COMPLETE ||
   2061				(!mei_cl_is_connected(cl)));
   2062		mutex_lock(&dev->device_lock);
   2063		/* wait_event_interruptible returns -ERESTARTSYS */
   2064		if (rets) {
   2065			if (signal_pending(current))
   2066				rets = -EINTR;
   2067			goto err;
   2068		}
   2069		if (cl->writing_state != MEI_WRITE_COMPLETE) {
   2070			rets = -EFAULT;
   2071			goto err;
   2072		}
   2073	}
   2074
   2075	rets = buf_len;
   2076err:
   2077	cl_dbg(dev, cl, "rpm: autosuspend\n");
   2078	pm_runtime_mark_last_busy(dev->dev);
   2079	pm_runtime_put_autosuspend(dev->dev);
   2080free:
   2081	mei_io_cb_free(cb);
   2082
   2083	kfree(mei_hdr);
   2084
   2085	return rets;
   2086}
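
/*
 * Illustrative sketch, not part of the original file: roughly how a
 * character-device write path could drive mei_cl_write().  The name
 * example_cl_write_buf() and the kernel-buffer parameter are hypothetical;
 * the real entry points live in main.c and bus.c and additionally handle
 * vtags and O_NONBLOCK.
 */
static ssize_t __maybe_unused example_cl_write_buf(struct mei_cl *cl,
						   const void *buf, size_t length,
						   const struct file *fp)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;
	ssize_t rets;

	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	/* allocate a MEI_FOP_WRITE callback with a buffer of the requested size */
	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}
	cb->blocking = 1; /* wait for MEI_WRITE_COMPLETE inside mei_cl_write() */

	memcpy(cb->buf.data, buf, length);

	/* mei_cl_write() queues or frees cb; the caller must not free it */
	rets = mei_cl_write(cl, cb);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}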
   2087
   2088/**
   2089 * mei_cl_complete - processes completed operation for a client
   2090 *
   2091 * @cl: private data of the file object.
   2092 * @cb: callback block.
   2093 */
   2094void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
   2095{
   2096	struct mei_device *dev = cl->dev;
   2097
   2098	switch (cb->fop_type) {
   2099	case MEI_FOP_WRITE:
   2100		mei_tx_cb_dequeue(cb);
   2101		cl->writing_state = MEI_WRITE_COMPLETE;
   2102		if (waitqueue_active(&cl->tx_wait)) {
   2103			wake_up_interruptible(&cl->tx_wait);
   2104		} else {
   2105			pm_runtime_mark_last_busy(dev->dev);
   2106			pm_request_autosuspend(dev->dev);
   2107		}
   2108		break;
   2109
   2110	case MEI_FOP_READ:
   2111		mei_cl_add_rd_completed(cl, cb);
   2112		if (!mei_cl_is_fixed_address(cl) &&
   2113		    !WARN_ON(!cl->rx_flow_ctrl_creds))
   2114			cl->rx_flow_ctrl_creds--;
   2115		if (!mei_cl_bus_rx_event(cl))
   2116			wake_up_interruptible(&cl->rx_wait);
   2117		break;
   2118
   2119	case MEI_FOP_CONNECT:
   2120	case MEI_FOP_DISCONNECT:
   2121	case MEI_FOP_NOTIFY_STOP:
   2122	case MEI_FOP_NOTIFY_START:
   2123	case MEI_FOP_DMA_MAP:
   2124	case MEI_FOP_DMA_UNMAP:
   2125		if (waitqueue_active(&cl->wait))
   2126			wake_up(&cl->wait);
   2127
   2128		break;
   2129	case MEI_FOP_DISCONNECT_RSP:
   2130		mei_io_cb_free(cb);
   2131		mei_cl_set_disconnected(cl);
   2132		break;
   2133	default:
   2134		BUG_ON(0);
   2135	}
   2136}
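
/*
 * Illustrative sketch, not part of the original file: finished callbacks are
 * collected on a local completion list by the interrupt thread and handed one
 * by one to mei_cl_complete() above.  example_compl_walk() is a hypothetical
 * name; the real walker is mei_irq_compl_handler() in interrupt.c.
 */
static void __maybe_unused example_compl_walk(struct mei_device *dev,
					      struct list_head *cmpl_list)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, cmpl_list, list) {
		struct mei_cl *cl = cb->cl;

		/* detach before completing; mei_cl_complete() may re-queue or free cb */
		list_del_init(&cb->list);
		mei_cl_complete(cl, cb);
	}
}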
   2137
   2138
   2139/**
    2140 * mei_cl_all_disconnect - forcefully disconnect all connected clients
   2141 *
   2142 * @dev: mei device
   2143 */
   2144void mei_cl_all_disconnect(struct mei_device *dev)
   2145{
   2146	struct mei_cl *cl;
   2147
   2148	list_for_each_entry(cl, &dev->file_list, link)
   2149		mei_cl_set_disconnected(cl);
   2150}
   2151EXPORT_SYMBOL_GPL(mei_cl_all_disconnect);
   2152
   2153static struct mei_cl *mei_cl_dma_map_find(struct mei_device *dev, u8 buffer_id)
   2154{
   2155	struct mei_cl *cl;
   2156
   2157	list_for_each_entry(cl, &dev->file_list, link)
   2158		if (cl->dma.buffer_id == buffer_id)
   2159			return cl;
   2160	return NULL;
   2161}
   2162
   2163/**
   2164 * mei_cl_irq_dma_map - send client dma map request in irq_thread context
   2165 *
   2166 * @cl: client
   2167 * @cb: callback block.
   2168 * @cmpl_list: complete list.
   2169 *
    2170 * Return: 0 on success and error otherwise.
   2171 */
   2172int mei_cl_irq_dma_map(struct mei_cl *cl, struct mei_cl_cb *cb,
   2173		       struct list_head *cmpl_list)
   2174{
   2175	struct mei_device *dev = cl->dev;
   2176	u32 msg_slots;
   2177	int slots;
   2178	int ret;
   2179
   2180	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_map_request));
   2181	slots = mei_hbuf_empty_slots(dev);
   2182	if (slots < 0)
   2183		return -EOVERFLOW;
   2184
   2185	if ((u32)slots < msg_slots)
   2186		return -EMSGSIZE;
   2187
   2188	ret = mei_hbm_cl_dma_map_req(dev, cl);
   2189	if (ret) {
   2190		cl->status = ret;
   2191		list_move_tail(&cb->list, cmpl_list);
   2192		return ret;
   2193	}
   2194
   2195	list_move_tail(&cb->list, &dev->ctrl_rd_list);
   2196	return 0;
   2197}
   2198
   2199/**
   2200 * mei_cl_irq_dma_unmap - send client dma unmap request in irq_thread context
   2201 *
   2202 * @cl: client
   2203 * @cb: callback block.
   2204 * @cmpl_list: complete list.
   2205 *
    2206 * Return: 0 on success and error otherwise.
   2207 */
   2208int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
   2209			 struct list_head *cmpl_list)
   2210{
   2211	struct mei_device *dev = cl->dev;
   2212	u32 msg_slots;
   2213	int slots;
   2214	int ret;
   2215
   2216	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_dma_unmap_request));
   2217	slots = mei_hbuf_empty_slots(dev);
   2218	if (slots < 0)
   2219		return -EOVERFLOW;
   2220
   2221	if ((u32)slots < msg_slots)
   2222		return -EMSGSIZE;
   2223
   2224	ret = mei_hbm_cl_dma_unmap_req(dev, cl);
   2225	if (ret) {
   2226		cl->status = ret;
   2227		list_move_tail(&cb->list, cmpl_list);
   2228		return ret;
   2229	}
   2230
   2231	list_move_tail(&cb->list, &dev->ctrl_rd_list);
   2232	return 0;
   2233}
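
/*
 * Illustrative sketch, not part of the original file: the two irq-context
 * senders above are normally invoked from the interrupt thread while it
 * drains dev->ctrl_wr_list, dispatching on the callback's fop_type.
 * example_ctrl_wr_dispatch() is a hypothetical name; the real dispatch sits
 * in mei_irq_write_handler() in interrupt.c.
 */
static int __maybe_unused example_ctrl_wr_dispatch(struct mei_cl *cl,
						   struct mei_cl_cb *cb,
						   struct list_head *cmpl_list)
{
	switch (cb->fop_type) {
	case MEI_FOP_DMA_MAP:
		/* may return -EMSGSIZE when the host buffer is too full; retried later */
		return mei_cl_irq_dma_map(cl, cb, cmpl_list);
	case MEI_FOP_DMA_UNMAP:
		return mei_cl_irq_dma_unmap(cl, cb, cmpl_list);
	default:
		return -EOPNOTSUPP;
	}
}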
   2234
   2235static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
   2236{
   2237	cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
   2238					    &cl->dma.daddr, GFP_KERNEL);
   2239	if (!cl->dma.vaddr)
   2240		return -ENOMEM;
   2241
   2242	cl->dma.buffer_id = buf_id;
   2243	cl->dma.size = size;
   2244
   2245	return 0;
   2246}
   2247
   2248static void mei_cl_dma_free(struct mei_cl *cl)
   2249{
   2250	cl->dma.buffer_id = 0;
   2251	dmam_free_coherent(cl->dev->dev,
   2252			   cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
   2253	cl->dma.size = 0;
   2254	cl->dma.vaddr = NULL;
   2255	cl->dma.daddr = 0;
   2256}
   2257
   2258/**
    2259 * mei_cl_dma_alloc_and_map - allocate a DMA buffer and send a client dma map request
   2260 *
   2261 * @cl: host client
   2262 * @fp: pointer to file structure
   2263 * @buffer_id: id of the mapped buffer
   2264 * @size: size of the buffer
   2265 *
   2266 * Locking: called under "dev->device_lock" lock
   2267 *
   2268 * Return:
   2269 * * -ENODEV
   2270 * * -EINVAL
   2271 * * -EOPNOTSUPP
   2272 * * -EPROTO
    2273 * * -ENOMEM
   2274 */
   2275int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
   2276			     u8 buffer_id, size_t size)
   2277{
   2278	struct mei_device *dev;
   2279	struct mei_cl_cb *cb;
   2280	int rets;
   2281
   2282	if (WARN_ON(!cl || !cl->dev))
   2283		return -ENODEV;
   2284
   2285	dev = cl->dev;
   2286
   2287	if (!dev->hbm_f_cd_supported) {
   2288		cl_dbg(dev, cl, "client dma is not supported\n");
   2289		return -EOPNOTSUPP;
   2290	}
   2291
   2292	if (buffer_id == 0)
   2293		return -EINVAL;
   2294
   2295	if (mei_cl_is_connected(cl))
   2296		return -EPROTO;
   2297
   2298	if (cl->dma_mapped)
   2299		return -EPROTO;
   2300
   2301	if (mei_cl_dma_map_find(dev, buffer_id)) {
   2302		cl_dbg(dev, cl, "client dma with id %d is already allocated\n",
    2303		       buffer_id);
   2304		return -EPROTO;
   2305	}
   2306
   2307	rets = pm_runtime_get(dev->dev);
   2308	if (rets < 0 && rets != -EINPROGRESS) {
   2309		pm_runtime_put_noidle(dev->dev);
   2310		cl_err(dev, cl, "rpm: get failed %d\n", rets);
   2311		return rets;
   2312	}
   2313
   2314	rets = mei_cl_dma_alloc(cl, buffer_id, size);
   2315	if (rets) {
   2316		pm_runtime_put_noidle(dev->dev);
   2317		return rets;
   2318	}
   2319
   2320	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_MAP, fp);
   2321	if (!cb) {
   2322		rets = -ENOMEM;
   2323		goto out;
   2324	}
   2325
   2326	if (mei_hbuf_acquire(dev)) {
   2327		if (mei_hbm_cl_dma_map_req(dev, cl)) {
   2328			rets = -ENODEV;
   2329			goto out;
   2330		}
   2331		list_move_tail(&cb->list, &dev->ctrl_rd_list);
   2332	}
   2333
   2334	cl->status = 0;
   2335
   2336	mutex_unlock(&dev->device_lock);
   2337	wait_event_timeout(cl->wait,
   2338			   cl->dma_mapped || cl->status,
   2339			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
   2340	mutex_lock(&dev->device_lock);
   2341
   2342	if (!cl->dma_mapped && !cl->status)
   2343		cl->status = -EFAULT;
   2344
   2345	rets = cl->status;
   2346
   2347out:
   2348	if (rets)
   2349		mei_cl_dma_free(cl);
   2350
   2351	cl_dbg(dev, cl, "rpm: autosuspend\n");
   2352	pm_runtime_mark_last_busy(dev->dev);
   2353	pm_runtime_put_autosuspend(dev->dev);
   2354
   2355	mei_io_cb_free(cb);
   2356	return rets;
   2357}
   2358
   2359/**
   2360 * mei_cl_dma_unmap - send client dma unmap request
   2361 *
   2362 * @cl: host client
   2363 * @fp: pointer to file structure
   2364 *
   2365 * Locking: called under "dev->device_lock" lock
   2366 *
    2367 * Return: 0 on success and error otherwise.
   2368 */
   2369int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
   2370{
   2371	struct mei_device *dev;
   2372	struct mei_cl_cb *cb;
   2373	int rets;
   2374
   2375	if (WARN_ON(!cl || !cl->dev))
   2376		return -ENODEV;
   2377
   2378	dev = cl->dev;
   2379
   2380	if (!dev->hbm_f_cd_supported) {
   2381		cl_dbg(dev, cl, "client dma is not supported\n");
   2382		return -EOPNOTSUPP;
   2383	}
   2384
   2385	/* do not allow unmap for connected client */
   2386	if (mei_cl_is_connected(cl))
   2387		return -EPROTO;
   2388
   2389	if (!cl->dma_mapped)
   2390		return -EPROTO;
   2391
   2392	rets = pm_runtime_get(dev->dev);
   2393	if (rets < 0 && rets != -EINPROGRESS) {
   2394		pm_runtime_put_noidle(dev->dev);
   2395		cl_err(dev, cl, "rpm: get failed %d\n", rets);
   2396		return rets;
   2397	}
   2398
   2399	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DMA_UNMAP, fp);
   2400	if (!cb) {
   2401		rets = -ENOMEM;
   2402		goto out;
   2403	}
   2404
   2405	if (mei_hbuf_acquire(dev)) {
   2406		if (mei_hbm_cl_dma_unmap_req(dev, cl)) {
   2407			rets = -ENODEV;
   2408			goto out;
   2409		}
   2410		list_move_tail(&cb->list, &dev->ctrl_rd_list);
   2411	}
   2412
   2413	cl->status = 0;
   2414
   2415	mutex_unlock(&dev->device_lock);
   2416	wait_event_timeout(cl->wait,
   2417			   !cl->dma_mapped || cl->status,
   2418			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
   2419	mutex_lock(&dev->device_lock);
   2420
   2421	if (cl->dma_mapped && !cl->status)
   2422		cl->status = -EFAULT;
   2423
   2424	rets = cl->status;
   2425
   2426	if (!rets)
   2427		mei_cl_dma_free(cl);
   2428out:
   2429	cl_dbg(dev, cl, "rpm: autosuspend\n");
   2430	pm_runtime_mark_last_busy(dev->dev);
   2431	pm_runtime_put_autosuspend(dev->dev);
   2432
   2433	mei_io_cb_free(cb);
   2434	return rets;
   2435}
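
/*
 * Illustrative sketch, not part of the original file: a client DMA buffer is
 * set up while the client is still disconnected and torn down again after it
 * has disconnected, both under dev->device_lock.  The function names, the
 * buffer_id value 1 and the 4096-byte size are hypothetical; the real callers
 * are the DMA ioctl/bus helpers outside this file.
 */
static int __maybe_unused example_client_dma_setup(struct mei_cl *cl,
						   const struct file *fp)
{
	struct mei_device *dev = cl->dev;
	int ret;

	mutex_lock(&dev->device_lock);
	/* must run before the client connects; fails with -EPROTO once connected */
	ret = mei_cl_dma_alloc_and_map(cl, fp, 1, 4096);
	mutex_unlock(&dev->device_lock);
	return ret;
}

static int __maybe_unused example_client_dma_teardown(struct mei_cl *cl,
						      const struct file *fp)
{
	struct mei_device *dev = cl->dev;
	int ret;

	mutex_lock(&dev->device_lock);
	/* only valid after disconnect; fails with -EPROTO while still connected */
	ret = mei_cl_dma_unmap(cl, fp);
	mutex_unlock(&dev->device_lock);
	return ret;
}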