cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

knav_qmss_queue.c (47809B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Keystone Queue Manager subsystem driver
      4 *
      5 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
      6 * Authors:	Sandeep Nair <sandeep_n@ti.com>
      7 *		Cyril Chemparathy <cyril@ti.com>
      8 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
      9 */
     10
     11#include <linux/debugfs.h>
     12#include <linux/dma-mapping.h>
     13#include <linux/firmware.h>
     14#include <linux/interrupt.h>
     15#include <linux/io.h>
     16#include <linux/module.h>
     17#include <linux/of_address.h>
     18#include <linux/of_device.h>
     19#include <linux/of_irq.h>
     20#include <linux/pm_runtime.h>
     21#include <linux/slab.h>
     22#include <linux/soc/ti/knav_qmss.h>
     23
     24#include "knav_qmss.h"
     25
     26static struct knav_device *kdev;
     27static DEFINE_MUTEX(knav_dev_lock);
     28#define knav_dev_lock_held() \
     29	lockdep_is_held(&knav_dev_lock)
     30
     31/* Queue manager register indices in DTS */
     32#define KNAV_QUEUE_PEEK_REG_INDEX	0
     33#define KNAV_QUEUE_STATUS_REG_INDEX	1
     34#define KNAV_QUEUE_CONFIG_REG_INDEX	2
     35#define KNAV_QUEUE_REGION_REG_INDEX	3
     36#define KNAV_QUEUE_PUSH_REG_INDEX	4
     37#define KNAV_QUEUE_POP_REG_INDEX	5
     38
      39/* Queue manager register indices in DTS for QMSS in K2G NAVSS.
      40 * There are no status and vbusm push registers on this version
      41 * of QMSS. Push registers are the same as pop registers, so all
      42 * indices above 1 are redefined.
      43 */
     44#define KNAV_L_QUEUE_CONFIG_REG_INDEX	1
     45#define KNAV_L_QUEUE_REGION_REG_INDEX	2
     46#define KNAV_L_QUEUE_PUSH_REG_INDEX	3
     47
     48/* PDSP register indices in DTS */
     49#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
     50#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
     51#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
     52#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3
     53
     54#define knav_queue_idx_to_inst(kdev, idx)			\
     55	(kdev->instances + (idx << kdev->inst_shift))
     56
     57#define for_each_handle_rcu(qh, inst)				\
     58	list_for_each_entry_rcu(qh, &inst->handles, list,	\
     59				knav_dev_lock_held())
     60
     61#define for_each_instance(idx, inst, kdev)		\
     62	for (idx = 0, inst = kdev->instances;		\
     63	     idx < (kdev)->num_queues_in_use;			\
     64	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))
     65
      66/* All firmware file names are listed here, newest first, followed
      67 * by older ones. The search proceeds from the start of the array
      68 * until a firmware file is found.
      69 */
     70const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"};
     71
     72static bool device_ready;
     73bool knav_qmss_device_ready(void)
     74{
     75	return device_ready;
     76}
     77EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
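
/*
 * Illustrative sketch (usage hypothetical, API as exported above):
 * dependent drivers can defer their own probe until the QMSS is up.
 *
 *	if (!knav_qmss_device_ready())
 *		return -EPROBE_DEFER;
 */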
     78
     79/**
      80 * knav_queue_notify()	- qmss queue notifier call
     81 *
     82 * @inst:		- qmss queue instance like accumulator
     83 */
     84void knav_queue_notify(struct knav_queue_inst *inst)
     85{
     86	struct knav_queue *qh;
     87
     88	if (!inst)
     89		return;
     90
     91	rcu_read_lock();
     92	for_each_handle_rcu(qh, inst) {
     93		if (atomic_read(&qh->notifier_enabled) <= 0)
     94			continue;
     95		if (WARN_ON(!qh->notifier_fn))
     96			continue;
     97		this_cpu_inc(qh->stats->notifies);
     98		qh->notifier_fn(qh->notifier_fn_arg);
     99	}
    100	rcu_read_unlock();
    101}
    102EXPORT_SYMBOL_GPL(knav_queue_notify);
    103
    104static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
    105{
    106	struct knav_queue_inst *inst = _instdata;
    107
    108	knav_queue_notify(inst);
    109	return IRQ_HANDLED;
    110}
    111
    112static int knav_queue_setup_irq(struct knav_range_info *range,
    113			  struct knav_queue_inst *inst)
    114{
    115	unsigned queue = inst->id - range->queue_base;
    116	int ret = 0, irq;
    117
    118	if (range->flags & RANGE_HAS_IRQ) {
    119		irq = range->irqs[queue].irq;
    120		ret = request_irq(irq, knav_queue_int_handler, 0,
    121					inst->irq_name, inst);
    122		if (ret)
    123			return ret;
    124		disable_irq(irq);
    125		if (range->irqs[queue].cpu_mask) {
    126			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
    127			if (ret) {
    128				dev_warn(range->kdev->dev,
    129					 "Failed to set IRQ affinity\n");
    130				return ret;
    131			}
    132		}
    133	}
    134	return ret;
    135}
    136
    137static void knav_queue_free_irq(struct knav_queue_inst *inst)
    138{
    139	struct knav_range_info *range = inst->range;
    140	unsigned queue = inst->id - inst->range->queue_base;
    141	int irq;
    142
    143	if (range->flags & RANGE_HAS_IRQ) {
    144		irq = range->irqs[queue].irq;
    145		irq_set_affinity_hint(irq, NULL);
    146		free_irq(irq, inst);
    147	}
    148}
    149
    150static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
    151{
    152	return !list_empty(&inst->handles);
    153}
    154
    155static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
    156{
    157	return inst->range->flags & RANGE_RESERVED;
    158}
    159
    160static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
    161{
    162	struct knav_queue *tmp;
    163
    164	rcu_read_lock();
    165	for_each_handle_rcu(tmp, inst) {
    166		if (tmp->flags & KNAV_QUEUE_SHARED) {
    167			rcu_read_unlock();
    168			return true;
    169		}
    170	}
    171	rcu_read_unlock();
    172	return false;
    173}
    174
    175static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
    176						unsigned type)
    177{
    178	if ((type == KNAV_QUEUE_QPEND) &&
    179	    (inst->range->flags & RANGE_HAS_IRQ)) {
    180		return true;
    181	} else if ((type == KNAV_QUEUE_ACC) &&
    182		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
    183		return true;
    184	} else if ((type == KNAV_QUEUE_GP) &&
    185		!(inst->range->flags &
    186			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
    187		return true;
    188	}
    189	return false;
    190}
    191
    192static inline struct knav_queue_inst *
    193knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
    194{
    195	struct knav_queue_inst *inst;
    196	int idx;
    197
    198	for_each_instance(idx, inst, kdev) {
    199		if (inst->id == id)
    200			return inst;
    201	}
    202	return NULL;
    203}
    204
    205static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
    206{
    207	if (kdev->base_id <= id &&
    208	    kdev->base_id + kdev->num_queues > id) {
    209		id -= kdev->base_id;
    210		return knav_queue_match_id_to_inst(kdev, id);
    211	}
    212	return NULL;
    213}
    214
    215static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
    216				      const char *name, unsigned flags)
    217{
    218	struct knav_queue *qh;
    219	unsigned id;
    220	int ret = 0;
    221
    222	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
    223	if (!qh)
    224		return ERR_PTR(-ENOMEM);
    225
    226	qh->stats = alloc_percpu(struct knav_queue_stats);
    227	if (!qh->stats) {
    228		ret = -ENOMEM;
    229		goto err;
    230	}
    231
    232	qh->flags = flags;
    233	qh->inst = inst;
    234	id = inst->id - inst->qmgr->start_queue;
    235	qh->reg_push = &inst->qmgr->reg_push[id];
    236	qh->reg_pop = &inst->qmgr->reg_pop[id];
    237	qh->reg_peek = &inst->qmgr->reg_peek[id];
    238
    239	/* first opener? */
    240	if (!knav_queue_is_busy(inst)) {
    241		struct knav_range_info *range = inst->range;
    242
    243		inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
    244		if (range->ops && range->ops->open_queue)
    245			ret = range->ops->open_queue(range, inst, flags);
    246
    247		if (ret)
    248			goto err;
    249	}
    250	list_add_tail_rcu(&qh->list, &inst->handles);
    251	return qh;
    252
    253err:
    254	if (qh->stats)
    255		free_percpu(qh->stats);
    256	devm_kfree(inst->kdev->dev, qh);
    257	return ERR_PTR(ret);
    258}
    259
    260static struct knav_queue *
    261knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
    262{
    263	struct knav_queue_inst *inst;
    264	struct knav_queue *qh;
    265
    266	mutex_lock(&knav_dev_lock);
    267
    268	qh = ERR_PTR(-ENODEV);
    269	inst = knav_queue_find_by_id(id);
    270	if (!inst)
    271		goto unlock_ret;
    272
    273	qh = ERR_PTR(-EEXIST);
    274	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
    275		goto unlock_ret;
    276
    277	qh = ERR_PTR(-EBUSY);
    278	if ((flags & KNAV_QUEUE_SHARED) &&
    279	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
    280		goto unlock_ret;
    281
    282	qh = __knav_queue_open(inst, name, flags);
    283
    284unlock_ret:
    285	mutex_unlock(&knav_dev_lock);
    286
    287	return qh;
    288}
    289
    290static struct knav_queue *knav_queue_open_by_type(const char *name,
    291						unsigned type, unsigned flags)
    292{
    293	struct knav_queue_inst *inst;
    294	struct knav_queue *qh = ERR_PTR(-EINVAL);
    295	int idx;
    296
    297	mutex_lock(&knav_dev_lock);
    298
    299	for_each_instance(idx, inst, kdev) {
    300		if (knav_queue_is_reserved(inst))
    301			continue;
    302		if (!knav_queue_match_type(inst, type))
    303			continue;
    304		if (knav_queue_is_busy(inst))
    305			continue;
    306		qh = __knav_queue_open(inst, name, flags);
    307		goto unlock_ret;
    308	}
    309
    310unlock_ret:
    311	mutex_unlock(&knav_dev_lock);
    312	return qh;
    313}
    314
    315static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
    316{
    317	struct knav_range_info *range = inst->range;
    318
    319	if (range->ops && range->ops->set_notify)
    320		range->ops->set_notify(range, inst, enabled);
    321}
    322
    323static int knav_queue_enable_notifier(struct knav_queue *qh)
    324{
    325	struct knav_queue_inst *inst = qh->inst;
    326	bool first;
    327
    328	if (WARN_ON(!qh->notifier_fn))
    329		return -EINVAL;
    330
    331	/* Adjust the per handle notifier count */
    332	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
    333	if (!first)
    334		return 0; /* nothing to do */
    335
    336	/* Now adjust the per instance notifier count */
    337	first = (atomic_inc_return(&inst->num_notifiers) == 1);
    338	if (first)
    339		knav_queue_set_notify(inst, true);
    340
    341	return 0;
    342}
    343
    344static int knav_queue_disable_notifier(struct knav_queue *qh)
    345{
    346	struct knav_queue_inst *inst = qh->inst;
    347	bool last;
    348
    349	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
    350	if (!last)
    351		return 0; /* nothing to do */
    352
    353	last = (atomic_dec_return(&inst->num_notifiers) == 0);
    354	if (last)
    355		knav_queue_set_notify(inst, false);
    356
    357	return 0;
    358}
    359
    360static int knav_queue_set_notifier(struct knav_queue *qh,
    361				struct knav_queue_notify_config *cfg)
    362{
    363	knav_queue_notify_fn old_fn = qh->notifier_fn;
    364
    365	if (!cfg)
    366		return -EINVAL;
    367
    368	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
    369		return -ENOTSUPP;
    370
    371	if (!cfg->fn && old_fn)
    372		knav_queue_disable_notifier(qh);
    373
    374	qh->notifier_fn = cfg->fn;
    375	qh->notifier_fn_arg = cfg->fn_arg;
    376
    377	if (cfg->fn && !old_fn)
    378		knav_queue_enable_notifier(qh);
    379
    380	return 0;
    381}
    382
    383static int knav_gp_set_notify(struct knav_range_info *range,
    384			       struct knav_queue_inst *inst,
    385			       bool enabled)
    386{
    387	unsigned queue;
    388
    389	if (range->flags & RANGE_HAS_IRQ) {
    390		queue = inst->id - range->queue_base;
    391		if (enabled)
    392			enable_irq(range->irqs[queue].irq);
    393		else
    394			disable_irq_nosync(range->irqs[queue].irq);
    395	}
    396	return 0;
    397}
    398
    399static int knav_gp_open_queue(struct knav_range_info *range,
    400				struct knav_queue_inst *inst, unsigned flags)
    401{
    402	return knav_queue_setup_irq(range, inst);
    403}
    404
    405static int knav_gp_close_queue(struct knav_range_info *range,
    406				struct knav_queue_inst *inst)
    407{
    408	knav_queue_free_irq(inst);
    409	return 0;
    410}
    411
    412static struct knav_range_ops knav_gp_range_ops = {
    413	.set_notify	= knav_gp_set_notify,
    414	.open_queue	= knav_gp_open_queue,
    415	.close_queue	= knav_gp_close_queue,
    416};
    417
    418
    419static int knav_queue_get_count(void *qhandle)
    420{
    421	struct knav_queue *qh = qhandle;
    422	struct knav_queue_inst *inst = qh->inst;
    423
    424	return readl_relaxed(&qh->reg_peek[0].entry_count) +
    425		atomic_read(&inst->desc_count);
    426}
    427
    428static void knav_queue_debug_show_instance(struct seq_file *s,
    429					struct knav_queue_inst *inst)
    430{
    431	struct knav_device *kdev = inst->kdev;
    432	struct knav_queue *qh;
    433	int cpu = 0;
    434	int pushes = 0;
    435	int pops = 0;
    436	int push_errors = 0;
    437	int pop_errors = 0;
    438	int notifies = 0;
    439
    440	if (!knav_queue_is_busy(inst))
    441		return;
    442
    443	seq_printf(s, "\tqueue id %d (%s)\n",
    444		   kdev->base_id + inst->id, inst->name);
    445	for_each_handle_rcu(qh, inst) {
    446		for_each_possible_cpu(cpu) {
    447			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
    448			pops += per_cpu_ptr(qh->stats, cpu)->pops;
    449			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
    450			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
    451			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
    452		}
    453
    454		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
    455				qh,
    456				pushes,
    457				pops,
    458				knav_queue_get_count(qh),
    459				notifies,
    460				push_errors,
    461				pop_errors);
    462	}
    463}
    464
    465static int knav_queue_debug_show(struct seq_file *s, void *v)
    466{
    467	struct knav_queue_inst *inst;
    468	int idx;
    469
    470	mutex_lock(&knav_dev_lock);
    471	seq_printf(s, "%s: %u-%u\n",
    472		   dev_name(kdev->dev), kdev->base_id,
    473		   kdev->base_id + kdev->num_queues - 1);
    474	for_each_instance(idx, inst, kdev)
    475		knav_queue_debug_show_instance(s, inst);
    476	mutex_unlock(&knav_dev_lock);
    477
    478	return 0;
    479}
    480
    481DEFINE_SHOW_ATTRIBUTE(knav_queue_debug);
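
/*
 * Usage sketch: the fops above back the "qmss" debugfs file created in
 * knav_queue_probe(); assuming debugfs is mounted at the usual location,
 * the per-handle statistics can be read with:
 *
 *	cat /sys/kernel/debug/qmss
 */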
    482
     483static inline int knav_queue_pdsp_wait(u32 __iomem *addr, unsigned timeout,
    484					u32 flags)
    485{
    486	unsigned long end;
    487	u32 val = 0;
    488
    489	end = jiffies + msecs_to_jiffies(timeout);
    490	while (time_after(end, jiffies)) {
    491		val = readl_relaxed(addr);
    492		if (flags)
    493			val &= flags;
    494		if (!val)
    495			break;
    496		cpu_relax();
    497	}
    498	return val ? -ETIMEDOUT : 0;
    499}
    500
    501
    502static int knav_queue_flush(struct knav_queue *qh)
    503{
    504	struct knav_queue_inst *inst = qh->inst;
    505	unsigned id = inst->id - inst->qmgr->start_queue;
    506
    507	atomic_set(&inst->desc_count, 0);
    508	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
    509	return 0;
    510}
    511
    512/**
    513 * knav_queue_open()	- open a hardware queue
    514 * @name:		- name to give the queue handle
     515 * @id:			- desired queue number, if any, or specifies the type
    516 *			  of queue
    517 * @flags:		- the following flags are applicable to queues:
    518 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
    519 *			     exclusive by default.
    520 *			     Subsequent attempts to open a shared queue should
    521 *			     also have this flag.
    522 *
    523 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
    524 * to check the returned value for error codes.
    525 */
    526void *knav_queue_open(const char *name, unsigned id,
    527					unsigned flags)
    528{
    529	struct knav_queue *qh = ERR_PTR(-EINVAL);
    530
    531	switch (id) {
    532	case KNAV_QUEUE_QPEND:
    533	case KNAV_QUEUE_ACC:
    534	case KNAV_QUEUE_GP:
    535		qh = knav_queue_open_by_type(name, id, flags);
    536		break;
    537
    538	default:
    539		qh = knav_queue_open_by_id(name, id, flags);
    540		break;
    541	}
    542	return qh;
    543}
    544EXPORT_SYMBOL_GPL(knav_queue_open);
    545
    546/**
    547 * knav_queue_close()	- close a hardware queue handle
    548 * @qhandle:		- handle to close
    549 */
    550void knav_queue_close(void *qhandle)
    551{
    552	struct knav_queue *qh = qhandle;
    553	struct knav_queue_inst *inst = qh->inst;
    554
    555	while (atomic_read(&qh->notifier_enabled) > 0)
    556		knav_queue_disable_notifier(qh);
    557
    558	mutex_lock(&knav_dev_lock);
    559	list_del_rcu(&qh->list);
    560	mutex_unlock(&knav_dev_lock);
    561	synchronize_rcu();
    562	if (!knav_queue_is_busy(inst)) {
    563		struct knav_range_info *range = inst->range;
    564
    565		if (range->ops && range->ops->close_queue)
    566			range->ops->close_queue(range, inst);
    567	}
    568	free_percpu(qh->stats);
    569	devm_kfree(inst->kdev->dev, qh);
    570}
    571EXPORT_SYMBOL_GPL(knav_queue_close);
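
/*
 * Illustrative open/close sketch; the queue name "example-txq" is
 * hypothetical. A client might grab a shared general-purpose queue:
 *
 *	void *qh;
 *
 *	qh = knav_queue_open("example-txq", KNAV_QUEUE_GP, KNAV_QUEUE_SHARED);
 *	if (IS_ERR(qh))
 *		return PTR_ERR(qh);
 *	...
 *	knav_queue_close(qh);
 */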
    572
    573/**
    574 * knav_queue_device_control()	- Perform control operations on a queue
    575 * @qhandle:			- queue handle
    576 * @cmd:			- control commands
    577 * @arg:			- command argument
    578 *
    579 * Returns 0 on success, errno otherwise.
    580 */
    581int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
    582				unsigned long arg)
    583{
    584	struct knav_queue *qh = qhandle;
    585	struct knav_queue_notify_config *cfg;
    586	int ret;
    587
    588	switch ((int)cmd) {
    589	case KNAV_QUEUE_GET_ID:
    590		ret = qh->inst->kdev->base_id + qh->inst->id;
    591		break;
    592
    593	case KNAV_QUEUE_FLUSH:
    594		ret = knav_queue_flush(qh);
    595		break;
    596
    597	case KNAV_QUEUE_SET_NOTIFIER:
    598		cfg = (void *)arg;
    599		ret = knav_queue_set_notifier(qh, cfg);
    600		break;
    601
    602	case KNAV_QUEUE_ENABLE_NOTIFY:
    603		ret = knav_queue_enable_notifier(qh);
    604		break;
    605
    606	case KNAV_QUEUE_DISABLE_NOTIFY:
    607		ret = knav_queue_disable_notifier(qh);
    608		break;
    609
    610	case KNAV_QUEUE_GET_COUNT:
    611		ret = knav_queue_get_count(qh);
    612		break;
    613
    614	default:
    615		ret = -ENOTSUPP;
    616		break;
    617	}
    618	return ret;
    619}
    620EXPORT_SYMBOL_GPL(knav_queue_device_control);
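
/*
 * Illustrative sketch of installing a notifier through the control
 * interface; the callback my_queue_notify() and its priv argument are
 * hypothetical.
 *
 *	static void my_queue_notify(void *arg)
 *	{
 *		... e.g. schedule NAPI or wake a worker ...
 *	}
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_queue_notify,
 *		.fn_arg	= priv,
 *	};
 *
 *	ret = knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
 *					(unsigned long)&cfg);
 */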
    621
    622
    623
    624/**
    625 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
    626 * @qhandle:		- hardware queue handle
    627 * @dma:		- DMA data to push
    628 * @size:		- size of data to push
    629 * @flags:		- can be used to pass additional information
    630 *
    631 * Returns 0 on success, errno otherwise.
    632 */
    633int knav_queue_push(void *qhandle, dma_addr_t dma,
    634					unsigned size, unsigned flags)
    635{
    636	struct knav_queue *qh = qhandle;
    637	u32 val;
    638
    639	val = (u32)dma | ((size / 16) - 1);
    640	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
    641
    642	this_cpu_inc(qh->stats->pushes);
    643	return 0;
    644}
    645EXPORT_SYMBOL_GPL(knav_queue_push);
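
/*
 * Illustrative sketch: pushing a descriptor whose dma/dma_sz pair came
 * from knav_pool_desc_map() further below.
 *
 *	ret = knav_queue_push(qh, dma, dma_sz, 0);
 */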
    646
    647/**
    648 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
    649 * @qhandle:		- hardware queue handle
     650 * @size:		- (optional) size of the data popped.
    651 *
    652 * Returns a DMA address on success, 0 on failure.
    653 */
    654dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
    655{
    656	struct knav_queue *qh = qhandle;
    657	struct knav_queue_inst *inst = qh->inst;
    658	dma_addr_t dma;
    659	u32 val, idx;
    660
    661	/* are we accumulated? */
    662	if (inst->descs) {
    663		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
    664			atomic_inc(&inst->desc_count);
    665			return 0;
    666		}
    667		idx  = atomic_inc_return(&inst->desc_head);
    668		idx &= ACC_DESCS_MASK;
    669		val = inst->descs[idx];
    670	} else {
    671		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
    672		if (unlikely(!val))
    673			return 0;
    674	}
    675
    676	dma = val & DESC_PTR_MASK;
    677	if (size)
    678		*size = ((val & DESC_SIZE_MASK) + 1) * 16;
    679
    680	this_cpu_inc(qh->stats->pops);
    681	return dma;
    682}
    683EXPORT_SYMBOL_GPL(knav_queue_pop);
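
/*
 * Illustrative sketch: draining a queue until empty; a return value of 0
 * means no descriptor was available. process_desc() is hypothetical.
 *
 *	dma_addr_t dma;
 *	unsigned size;
 *
 *	while ((dma = knav_queue_pop(qh, &size)))
 *		process_desc(dma, size);
 */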
    684
    685/* carve out descriptors and push into queue */
    686static void kdesc_fill_pool(struct knav_pool *pool)
    687{
    688	struct knav_region *region;
    689	int i;
    690
    691	region = pool->region;
    692	pool->desc_size = region->desc_size;
    693	for (i = 0; i < pool->num_desc; i++) {
    694		int index = pool->region_offset + i;
    695		dma_addr_t dma_addr;
    696		unsigned dma_size;
    697		dma_addr = region->dma_start + (region->desc_size * index);
    698		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
    699		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
    700					   DMA_TO_DEVICE);
    701		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
    702	}
    703}
    704
    705/* pop out descriptors and close the queue */
    706static void kdesc_empty_pool(struct knav_pool *pool)
    707{
    708	dma_addr_t dma;
    709	unsigned size;
    710	void *desc;
    711	int i;
    712
    713	if (!pool->queue)
    714		return;
    715
    716	for (i = 0;; i++) {
    717		dma = knav_queue_pop(pool->queue, &size);
    718		if (!dma)
    719			break;
    720		desc = knav_pool_desc_dma_to_virt(pool, dma);
    721		if (!desc) {
    722			dev_dbg(pool->kdev->dev,
    723				"couldn't unmap desc, continuing\n");
    724			continue;
    725		}
    726	}
    727	WARN_ON(i != pool->num_desc);
    728	knav_queue_close(pool->queue);
    729}
    730
    731
    732/* Get the DMA address of a descriptor */
    733dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
    734{
    735	struct knav_pool *pool = ph;
    736	return pool->region->dma_start + (virt - pool->region->virt_start);
    737}
    738EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
    739
    740void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
    741{
    742	struct knav_pool *pool = ph;
    743	return pool->region->virt_start + (dma - pool->region->dma_start);
    744}
    745EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
    746
    747/**
    748 * knav_pool_create()	- Create a pool of descriptors
    749 * @name:		- name to give the pool handle
    750 * @num_desc:		- numbers of descriptors in the pool
    751 * @region_id:		- QMSS region id from which the descriptors are to be
    752 *			  allocated.
    753 *
    754 * Returns a pool handle on success.
    755 * Use IS_ERR_OR_NULL() to identify error values on return.
    756 */
    757void *knav_pool_create(const char *name,
    758					int num_desc, int region_id)
    759{
    760	struct knav_region *reg_itr, *region = NULL;
    761	struct knav_pool *pool, *pi = NULL, *iter;
    762	struct list_head *node;
    763	unsigned last_offset;
    764	int ret;
    765
    766	if (!kdev)
    767		return ERR_PTR(-EPROBE_DEFER);
    768
    769	if (!kdev->dev)
    770		return ERR_PTR(-ENODEV);
    771
    772	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
    773	if (!pool) {
    774		dev_err(kdev->dev, "out of memory allocating pool\n");
    775		return ERR_PTR(-ENOMEM);
    776	}
    777
    778	for_each_region(kdev, reg_itr) {
    779		if (reg_itr->id != region_id)
    780			continue;
    781		region = reg_itr;
    782		break;
    783	}
    784
    785	if (!region) {
    786		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
    787		ret = -EINVAL;
    788		goto err;
    789	}
    790
    791	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
    792	if (IS_ERR(pool->queue)) {
    793		dev_err(kdev->dev,
    794			"failed to open queue for pool(%s), error %ld\n",
    795			name, PTR_ERR(pool->queue));
    796		ret = PTR_ERR(pool->queue);
    797		goto err;
    798	}
    799
    800	pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
    801	pool->kdev = kdev;
    802	pool->dev = kdev->dev;
    803
    804	mutex_lock(&knav_dev_lock);
    805
    806	if (num_desc > (region->num_desc - region->used_desc)) {
    807		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
    808			region_id, name);
    809		ret = -ENOMEM;
    810		goto err_unlock;
    811	}
    812
     813	/* The region maintains a list of pools sorted by region offset;
     814	 * use the first free slot that is large enough to accommodate
     815	 * the request.
     816	 */
    817	last_offset = 0;
    818	node = &region->pools;
    819	list_for_each_entry(iter, &region->pools, region_inst) {
    820		if ((iter->region_offset - last_offset) >= num_desc) {
    821			pi = iter;
    822			break;
    823		}
    824		last_offset = iter->region_offset + iter->num_desc;
    825	}
    826
    827	if (pi) {
    828		node = &pi->region_inst;
    829		pool->region = region;
    830		pool->num_desc = num_desc;
    831		pool->region_offset = last_offset;
    832		region->used_desc += num_desc;
    833		list_add_tail(&pool->list, &kdev->pools);
    834		list_add_tail(&pool->region_inst, node);
    835	} else {
    836		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
    837			name, region_id);
    838		ret = -ENOMEM;
    839		goto err_unlock;
    840	}
    841
    842	mutex_unlock(&knav_dev_lock);
    843	kdesc_fill_pool(pool);
    844	return pool;
    845
    846err_unlock:
    847	mutex_unlock(&knav_dev_lock);
    848err:
    849	kfree(pool->name);
    850	devm_kfree(kdev->dev, pool);
    851	return ERR_PTR(ret);
    852}
    853EXPORT_SYMBOL_GPL(knav_pool_create);
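
/*
 * Illustrative sketch: carving a pool of 1024 descriptors out of region
 * 12 (both numbers hypothetical) and releasing it again.
 *
 *	void *pool;
 *
 *	pool = knav_pool_create("example-pool", 1024, 12);
 *	if (IS_ERR_OR_NULL(pool))
 *		return pool ? PTR_ERR(pool) : -ENOMEM;
 *	...
 *	knav_pool_destroy(pool);
 */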
    854
    855/**
    856 * knav_pool_destroy()	- Free a pool of descriptors
    857 * @ph:		- pool handle
    858 */
    859void knav_pool_destroy(void *ph)
    860{
    861	struct knav_pool *pool = ph;
    862
    863	if (!pool)
    864		return;
    865
    866	if (!pool->region)
    867		return;
    868
    869	kdesc_empty_pool(pool);
    870	mutex_lock(&knav_dev_lock);
    871
    872	pool->region->used_desc -= pool->num_desc;
    873	list_del(&pool->region_inst);
    874	list_del(&pool->list);
    875
    876	mutex_unlock(&knav_dev_lock);
    877	kfree(pool->name);
    878	devm_kfree(kdev->dev, pool);
    879}
    880EXPORT_SYMBOL_GPL(knav_pool_destroy);
    881
    882
    883/**
    884 * knav_pool_desc_get()	- Get a descriptor from the pool
    885 * @ph:		- pool handle
    886 *
    887 * Returns descriptor from the pool.
    888 */
    889void *knav_pool_desc_get(void *ph)
    890{
    891	struct knav_pool *pool = ph;
    892	dma_addr_t dma;
    893	unsigned size;
    894	void *data;
    895
    896	dma = knav_queue_pop(pool->queue, &size);
    897	if (unlikely(!dma))
    898		return ERR_PTR(-ENOMEM);
    899	data = knav_pool_desc_dma_to_virt(pool, dma);
    900	return data;
    901}
    902EXPORT_SYMBOL_GPL(knav_pool_desc_get);
    903
    904/**
    905 * knav_pool_desc_put()	- return a descriptor to the pool
    906 * @ph:		- pool handle
    907 * @desc:	- virtual address
    908 */
    909void knav_pool_desc_put(void *ph, void *desc)
    910{
    911	struct knav_pool *pool = ph;
    912	dma_addr_t dma;
    913	dma = knav_pool_desc_virt_to_dma(pool, desc);
    914	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
    915}
    916EXPORT_SYMBOL_GPL(knav_pool_desc_put);
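
/*
 * Illustrative sketch of the get/put pair: a descriptor taken from the
 * pool is returned once its user is done with it.
 *
 *	void *desc;
 *
 *	desc = knav_pool_desc_get(pool);
 *	if (IS_ERR(desc))
 *		return -ENOMEM;
 *	... fill in the descriptor ...
 *	knav_pool_desc_put(pool, desc);
 */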
    917
    918/**
    919 * knav_pool_desc_map()	- Map descriptor for DMA transfer
    920 * @ph:				- pool handle
    921 * @desc:			- address of descriptor to map
    922 * @size:			- size of descriptor to map
    923 * @dma:			- DMA address return pointer
     924 * @dma_sz:			- adjusted (aligned) size return pointer
    925 *
    926 * Returns 0 on success, errno otherwise.
    927 */
    928int knav_pool_desc_map(void *ph, void *desc, unsigned size,
    929					dma_addr_t *dma, unsigned *dma_sz)
    930{
    931	struct knav_pool *pool = ph;
    932	*dma = knav_pool_desc_virt_to_dma(pool, desc);
    933	size = min(size, pool->region->desc_size);
    934	size = ALIGN(size, SMP_CACHE_BYTES);
    935	*dma_sz = size;
    936	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
    937
     938	/* Ensure the descriptor write reaches memory */
    939	__iowmb();
    940
    941	return 0;
    942}
    943EXPORT_SYMBOL_GPL(knav_pool_desc_map);
    944
    945/**
    946 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
    947 * @ph:				- pool handle
    948 * @dma:			- DMA address of descriptor to unmap
    949 * @dma_sz:			- size of descriptor to unmap
    950 *
    951 * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
    952 * error values on return.
    953 */
    954void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
    955{
    956	struct knav_pool *pool = ph;
    957	unsigned desc_sz;
    958	void *desc;
    959
    960	desc_sz = min(dma_sz, pool->region->desc_size);
    961	desc = knav_pool_desc_dma_to_virt(pool, dma);
    962	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
    963	prefetch(desc);
    964	return desc;
    965}
    966EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
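
/*
 * Illustrative sketch of the map/unmap pair around a hardware hand-off;
 * desc and desc_size are hypothetical caller state.
 *
 *	dma_addr_t dma;
 *	unsigned dma_sz;
 *
 *	ret = knav_pool_desc_map(pool, desc, desc_size, &dma, &dma_sz);
 *	if (!ret)
 *		knav_queue_push(qh, dma, dma_sz, 0);
 *
 *	... later, after the descriptor comes back via knav_queue_pop() ...
 *
 *	desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 */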
    967
    968/**
    969 * knav_pool_count()	- Get the number of descriptors in pool.
    970 * @ph:			- pool handle
    971 * Returns number of elements in the pool.
    972 */
    973int knav_pool_count(void *ph)
    974{
    975	struct knav_pool *pool = ph;
    976	return knav_queue_get_count(pool->queue);
    977}
    978EXPORT_SYMBOL_GPL(knav_pool_count);
    979
    980static void knav_queue_setup_region(struct knav_device *kdev,
    981					struct knav_region *region)
    982{
    983	unsigned hw_num_desc, hw_desc_size, size;
    984	struct knav_reg_region __iomem  *regs;
    985	struct knav_qmgr_info *qmgr;
    986	struct knav_pool *pool;
    987	int id = region->id;
    988	struct page *page;
    989
    990	/* unused region? */
    991	if (!region->num_desc) {
    992		dev_warn(kdev->dev, "unused region %s\n", region->name);
    993		return;
    994	}
    995
    996	/* get hardware descriptor value */
    997	hw_num_desc = ilog2(region->num_desc - 1) + 1;
    998
    999	/* did we force fit ourselves into nothingness? */
   1000	if (region->num_desc < 32) {
   1001		region->num_desc = 0;
   1002		dev_warn(kdev->dev, "too few descriptors in region %s\n",
   1003			 region->name);
   1004		return;
   1005	}
   1006
   1007	size = region->num_desc * region->desc_size;
   1008	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
   1009						GFP_DMA32);
   1010	if (!region->virt_start) {
   1011		region->num_desc = 0;
   1012		dev_err(kdev->dev, "memory alloc failed for region %s\n",
   1013			region->name);
   1014		return;
   1015	}
   1016	region->virt_end = region->virt_start + size;
   1017	page = virt_to_page(region->virt_start);
   1018
   1019	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
   1020					 DMA_BIDIRECTIONAL);
   1021	if (dma_mapping_error(kdev->dev, region->dma_start)) {
   1022		dev_err(kdev->dev, "dma map failed for region %s\n",
   1023			region->name);
   1024		goto fail;
   1025	}
   1026	region->dma_end = region->dma_start + size;
   1027
   1028	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
   1029	if (!pool) {
   1030		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
   1031		goto fail;
   1032	}
   1033	pool->num_desc = 0;
   1034	pool->region_offset = region->num_desc;
   1035	list_add(&pool->region_inst, &region->pools);
   1036
   1037	dev_dbg(kdev->dev,
   1038		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
   1039		region->name, id, region->desc_size, region->num_desc,
   1040		region->link_index, &region->dma_start, &region->dma_end,
   1041		region->virt_start, region->virt_end);
   1042
   1043	hw_desc_size = (region->desc_size / 16) - 1;
   1044	hw_num_desc -= 5;
   1045
   1046	for_each_qmgr(kdev, qmgr) {
   1047		regs = qmgr->reg_region + id;
   1048		writel_relaxed((u32)region->dma_start, &regs->base);
   1049		writel_relaxed(region->link_index, &regs->start_index);
   1050		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
   1051			       &regs->size_count);
   1052	}
   1053	return;
   1054
   1055fail:
   1056	if (region->dma_start)
   1057		dma_unmap_page(kdev->dev, region->dma_start, size,
   1058				DMA_BIDIRECTIONAL);
   1059	if (region->virt_start)
   1060		free_pages_exact(region->virt_start, size);
   1061	region->num_desc = 0;
   1062	return;
   1063}
   1064
   1065static const char *knav_queue_find_name(struct device_node *node)
   1066{
   1067	const char *name;
   1068
   1069	if (of_property_read_string(node, "label", &name) < 0)
   1070		name = node->name;
   1071	if (!name)
   1072		name = "unknown";
   1073	return name;
   1074}
   1075
   1076static int knav_queue_setup_regions(struct knav_device *kdev,
   1077					struct device_node *regions)
   1078{
   1079	struct device *dev = kdev->dev;
   1080	struct knav_region *region;
   1081	struct device_node *child;
   1082	u32 temp[2];
   1083	int ret;
   1084
   1085	for_each_child_of_node(regions, child) {
   1086		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
   1087		if (!region) {
   1088			of_node_put(child);
   1089			dev_err(dev, "out of memory allocating region\n");
   1090			return -ENOMEM;
   1091		}
   1092
   1093		region->name = knav_queue_find_name(child);
   1094		of_property_read_u32(child, "id", &region->id);
   1095		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
   1096		if (!ret) {
   1097			region->num_desc  = temp[0];
   1098			region->desc_size = temp[1];
   1099		} else {
   1100			dev_err(dev, "invalid region info %s\n", region->name);
   1101			devm_kfree(dev, region);
   1102			continue;
   1103		}
   1104
   1105		if (!of_get_property(child, "link-index", NULL)) {
   1106			dev_err(dev, "No link info for %s\n", region->name);
   1107			devm_kfree(dev, region);
   1108			continue;
   1109		}
   1110		ret = of_property_read_u32(child, "link-index",
   1111					   &region->link_index);
   1112		if (ret) {
   1113			dev_err(dev, "link index not found for %s\n",
   1114				region->name);
   1115			devm_kfree(dev, region);
   1116			continue;
   1117		}
   1118
   1119		INIT_LIST_HEAD(&region->pools);
   1120		list_add_tail(&region->list, &kdev->regions);
   1121	}
   1122	if (list_empty(&kdev->regions)) {
   1123		dev_err(dev, "no valid region information found\n");
   1124		return -ENODEV;
   1125	}
   1126
   1127	/* Next, we run through the regions and set things up */
   1128	for_each_region(kdev, region)
   1129		knav_queue_setup_region(kdev, region);
   1130
   1131	return 0;
   1132}
   1133
   1134static int knav_get_link_ram(struct knav_device *kdev,
   1135				       const char *name,
   1136				       struct knav_link_ram_block *block)
   1137{
   1138	struct platform_device *pdev = to_platform_device(kdev->dev);
   1139	struct device_node *node = pdev->dev.of_node;
   1140	u32 temp[2];
   1141
   1142	/*
   1143	 * Note: link ram resources are specified in "entry" sized units. In
    1144 * reality, although entries are ~40 bits in hardware, we treat them as
   1145	 * 64-bit entities here.
   1146	 *
   1147	 * For example, to specify the internal link ram for Keystone-I class
   1148	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
   1149	 *
   1150	 * This gets a bit weird when other link rams are used.  For example,
   1151	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
   1152	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
   1153	 * which accounts for 64-bits per entry, for 16K entries.
   1154	 */
    1155	if (!of_property_read_u32_array(node, name, temp, 2)) {
   1156		if (temp[0]) {
   1157			/*
   1158			 * queue_base specified => using internal or onchip
   1159			 * link ram WARNING - we do not "reserve" this block
   1160			 */
   1161			block->dma = (dma_addr_t)temp[0];
   1162			block->virt = NULL;
   1163			block->size = temp[1];
   1164		} else {
   1165			block->size = temp[1];
    1166			/* queue_base not specified => allocate requested size */
   1167			block->virt = dmam_alloc_coherent(kdev->dev,
   1168						  8 * block->size, &block->dma,
   1169						  GFP_KERNEL);
   1170			if (!block->virt) {
   1171				dev_err(kdev->dev, "failed to alloc linkram\n");
   1172				return -ENOMEM;
   1173			}
   1174		}
   1175	} else {
   1176		return -ENODEV;
   1177	}
   1178	return 0;
   1179}
   1180
   1181static int knav_queue_setup_link_ram(struct knav_device *kdev)
   1182{
   1183	struct knav_link_ram_block *block;
   1184	struct knav_qmgr_info *qmgr;
   1185
   1186	for_each_qmgr(kdev, qmgr) {
   1187		block = &kdev->link_rams[0];
   1188		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
   1189			&block->dma, block->virt, block->size);
   1190		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
   1191		if (kdev->version == QMSS_66AK2G)
   1192			writel_relaxed(block->size,
   1193				       &qmgr->reg_config->link_ram_size0);
   1194		else
   1195			writel_relaxed(block->size - 1,
   1196				       &qmgr->reg_config->link_ram_size0);
   1197		block++;
   1198		if (!block->size)
   1199			continue;
   1200
   1201		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
   1202			&block->dma, block->virt, block->size);
   1203		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
   1204	}
   1205
   1206	return 0;
   1207}
   1208
   1209static int knav_setup_queue_range(struct knav_device *kdev,
   1210					struct device_node *node)
   1211{
   1212	struct device *dev = kdev->dev;
   1213	struct knav_range_info *range;
   1214	struct knav_qmgr_info *qmgr;
   1215	u32 temp[2], start, end, id, index;
   1216	int ret, i;
   1217
   1218	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
   1219	if (!range) {
   1220		dev_err(dev, "out of memory allocating range\n");
   1221		return -ENOMEM;
   1222	}
   1223
   1224	range->kdev = kdev;
   1225	range->name = knav_queue_find_name(node);
   1226	ret = of_property_read_u32_array(node, "qrange", temp, 2);
   1227	if (!ret) {
   1228		range->queue_base = temp[0] - kdev->base_id;
   1229		range->num_queues = temp[1];
   1230	} else {
   1231		dev_err(dev, "invalid queue range %s\n", range->name);
   1232		devm_kfree(dev, range);
   1233		return -EINVAL;
   1234	}
   1235
   1236	for (i = 0; i < RANGE_MAX_IRQS; i++) {
   1237		struct of_phandle_args oirq;
   1238
   1239		if (of_irq_parse_one(node, i, &oirq))
   1240			break;
   1241
   1242		range->irqs[i].irq = irq_create_of_mapping(&oirq);
   1243		if (range->irqs[i].irq == IRQ_NONE)
   1244			break;
   1245
   1246		range->num_irqs++;
   1247
   1248		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
   1249			unsigned long mask;
   1250			int bit;
   1251
   1252			range->irqs[i].cpu_mask = devm_kzalloc(dev,
   1253							       cpumask_size(), GFP_KERNEL);
   1254			if (!range->irqs[i].cpu_mask)
   1255				return -ENOMEM;
   1256
   1257			mask = (oirq.args[2] & 0x0000ff00) >> 8;
   1258			for_each_set_bit(bit, &mask, BITS_PER_LONG)
   1259				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
   1260		}
   1261	}
   1262
   1263	range->num_irqs = min(range->num_irqs, range->num_queues);
   1264	if (range->num_irqs)
   1265		range->flags |= RANGE_HAS_IRQ;
   1266
   1267	if (of_get_property(node, "qalloc-by-id", NULL))
   1268		range->flags |= RANGE_RESERVED;
   1269
   1270	if (of_get_property(node, "accumulator", NULL)) {
   1271		ret = knav_init_acc_range(kdev, node, range);
   1272		if (ret < 0) {
   1273			devm_kfree(dev, range);
   1274			return ret;
   1275		}
   1276	} else {
   1277		range->ops = &knav_gp_range_ops;
   1278	}
   1279
   1280	/* set threshold to 1, and flush out the queues */
   1281	for_each_qmgr(kdev, qmgr) {
   1282		start = max(qmgr->start_queue, range->queue_base);
   1283		end   = min(qmgr->start_queue + qmgr->num_queues,
   1284			    range->queue_base + range->num_queues);
   1285		for (id = start; id < end; id++) {
   1286			index = id - qmgr->start_queue;
   1287			writel_relaxed(THRESH_GTE | 1,
   1288				       &qmgr->reg_peek[index].ptr_size_thresh);
   1289			writel_relaxed(0,
   1290				       &qmgr->reg_push[index].ptr_size_thresh);
   1291		}
   1292	}
   1293
   1294	list_add_tail(&range->list, &kdev->queue_ranges);
   1295	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
   1296		range->name, range->queue_base,
   1297		range->queue_base + range->num_queues - 1,
   1298		range->num_irqs,
   1299		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
   1300		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
   1301		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
   1302	kdev->num_queues_in_use += range->num_queues;
   1303	return 0;
   1304}
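
/*
 * Illustrative device-tree sketch for queue ranges, matching the
 * properties parsed above ("qrange", "qalloc-by-id", "accumulator");
 * node names and numbers are hypothetical:
 *
 *	general-purpose {
 *		qrange = <4000 64>;
 *	};
 *	reserved-by-id {
 *		qrange = <8000 16>;
 *		qalloc-by-id;
 *	};
 */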
   1305
   1306static int knav_setup_queue_pools(struct knav_device *kdev,
   1307				   struct device_node *queue_pools)
   1308{
   1309	struct device_node *type, *range;
   1310
   1311	for_each_child_of_node(queue_pools, type) {
   1312		for_each_child_of_node(type, range) {
   1313			/* return value ignored, we init the rest... */
   1314			knav_setup_queue_range(kdev, range);
   1315		}
   1316	}
   1317
   1318	/* ... and barf if they all failed! */
   1319	if (list_empty(&kdev->queue_ranges)) {
   1320		dev_err(kdev->dev, "no valid queue range found\n");
   1321		return -ENODEV;
   1322	}
   1323	return 0;
   1324}
   1325
   1326static void knav_free_queue_range(struct knav_device *kdev,
   1327				  struct knav_range_info *range)
   1328{
   1329	if (range->ops && range->ops->free_range)
   1330		range->ops->free_range(range);
   1331	list_del(&range->list);
   1332	devm_kfree(kdev->dev, range);
   1333}
   1334
   1335static void knav_free_queue_ranges(struct knav_device *kdev)
   1336{
   1337	struct knav_range_info *range;
   1338
   1339	for (;;) {
   1340		range = first_queue_range(kdev);
   1341		if (!range)
   1342			break;
   1343		knav_free_queue_range(kdev, range);
   1344	}
   1345}
   1346
   1347static void knav_queue_free_regions(struct knav_device *kdev)
   1348{
   1349	struct knav_region *region;
   1350	struct knav_pool *pool, *tmp;
   1351	unsigned size;
   1352
   1353	for (;;) {
   1354		region = first_region(kdev);
   1355		if (!region)
   1356			break;
   1357		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
   1358			knav_pool_destroy(pool);
   1359
   1360		size = region->virt_end - region->virt_start;
   1361		if (size)
   1362			free_pages_exact(region->virt_start, size);
   1363		list_del(&region->list);
   1364		devm_kfree(kdev->dev, region);
   1365	}
   1366}
   1367
   1368static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
   1369					struct device_node *node, int index)
   1370{
   1371	struct resource res;
   1372	void __iomem *regs;
   1373	int ret;
   1374
   1375	ret = of_address_to_resource(node, index, &res);
   1376	if (ret) {
   1377		dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
   1378			node, index);
   1379		return ERR_PTR(ret);
   1380	}
   1381
   1382	regs = devm_ioremap_resource(kdev->dev, &res);
   1383	if (IS_ERR(regs))
   1384		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
   1385			index, node);
   1386	return regs;
   1387}
   1388
   1389static int knav_queue_init_qmgrs(struct knav_device *kdev,
   1390					struct device_node *qmgrs)
   1391{
   1392	struct device *dev = kdev->dev;
   1393	struct knav_qmgr_info *qmgr;
   1394	struct device_node *child;
   1395	u32 temp[2];
   1396	int ret;
   1397
   1398	for_each_child_of_node(qmgrs, child) {
   1399		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
   1400		if (!qmgr) {
   1401			of_node_put(child);
   1402			dev_err(dev, "out of memory allocating qmgr\n");
   1403			return -ENOMEM;
   1404		}
   1405
   1406		ret = of_property_read_u32_array(child, "managed-queues",
   1407						 temp, 2);
   1408		if (!ret) {
   1409			qmgr->start_queue = temp[0];
   1410			qmgr->num_queues = temp[1];
   1411		} else {
   1412			dev_err(dev, "invalid qmgr queue range\n");
   1413			devm_kfree(dev, qmgr);
   1414			continue;
   1415		}
   1416
   1417		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
   1418			 qmgr->start_queue, qmgr->num_queues);
   1419
   1420		qmgr->reg_peek =
   1421			knav_queue_map_reg(kdev, child,
   1422					   KNAV_QUEUE_PEEK_REG_INDEX);
   1423
   1424		if (kdev->version == QMSS) {
   1425			qmgr->reg_status =
   1426				knav_queue_map_reg(kdev, child,
   1427						   KNAV_QUEUE_STATUS_REG_INDEX);
   1428		}
   1429
   1430		qmgr->reg_config =
   1431			knav_queue_map_reg(kdev, child,
   1432					   (kdev->version == QMSS_66AK2G) ?
   1433					   KNAV_L_QUEUE_CONFIG_REG_INDEX :
   1434					   KNAV_QUEUE_CONFIG_REG_INDEX);
   1435		qmgr->reg_region =
   1436			knav_queue_map_reg(kdev, child,
   1437					   (kdev->version == QMSS_66AK2G) ?
   1438					   KNAV_L_QUEUE_REGION_REG_INDEX :
   1439					   KNAV_QUEUE_REGION_REG_INDEX);
   1440
   1441		qmgr->reg_push =
   1442			knav_queue_map_reg(kdev, child,
   1443					   (kdev->version == QMSS_66AK2G) ?
   1444					    KNAV_L_QUEUE_PUSH_REG_INDEX :
   1445					    KNAV_QUEUE_PUSH_REG_INDEX);
   1446
   1447		if (kdev->version == QMSS) {
   1448			qmgr->reg_pop =
   1449				knav_queue_map_reg(kdev, child,
   1450						   KNAV_QUEUE_POP_REG_INDEX);
   1451		}
   1452
   1453		if (IS_ERR(qmgr->reg_peek) ||
   1454		    ((kdev->version == QMSS) &&
   1455		    (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) ||
   1456		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
   1457		    IS_ERR(qmgr->reg_push)) {
   1458			dev_err(dev, "failed to map qmgr regs\n");
   1459			if (kdev->version == QMSS) {
   1460				if (!IS_ERR(qmgr->reg_status))
   1461					devm_iounmap(dev, qmgr->reg_status);
   1462				if (!IS_ERR(qmgr->reg_pop))
   1463					devm_iounmap(dev, qmgr->reg_pop);
   1464			}
   1465			if (!IS_ERR(qmgr->reg_peek))
   1466				devm_iounmap(dev, qmgr->reg_peek);
   1467			if (!IS_ERR(qmgr->reg_config))
   1468				devm_iounmap(dev, qmgr->reg_config);
   1469			if (!IS_ERR(qmgr->reg_region))
   1470				devm_iounmap(dev, qmgr->reg_region);
   1471			if (!IS_ERR(qmgr->reg_push))
   1472				devm_iounmap(dev, qmgr->reg_push);
   1473			devm_kfree(dev, qmgr);
   1474			continue;
   1475		}
   1476
   1477		/* Use same push register for pop as well */
   1478		if (kdev->version == QMSS_66AK2G)
   1479			qmgr->reg_pop = qmgr->reg_push;
   1480
   1481		list_add_tail(&qmgr->list, &kdev->qmgrs);
   1482		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
   1483			 qmgr->start_queue, qmgr->num_queues,
   1484			 qmgr->reg_peek, qmgr->reg_status,
   1485			 qmgr->reg_config, qmgr->reg_region,
   1486			 qmgr->reg_push, qmgr->reg_pop);
   1487	}
   1488	return 0;
   1489}
   1490
   1491static int knav_queue_init_pdsps(struct knav_device *kdev,
   1492					struct device_node *pdsps)
   1493{
   1494	struct device *dev = kdev->dev;
   1495	struct knav_pdsp_info *pdsp;
   1496	struct device_node *child;
   1497
   1498	for_each_child_of_node(pdsps, child) {
   1499		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
   1500		if (!pdsp) {
   1501			of_node_put(child);
   1502			dev_err(dev, "out of memory allocating pdsp\n");
   1503			return -ENOMEM;
   1504		}
   1505		pdsp->name = knav_queue_find_name(child);
   1506		pdsp->iram =
   1507			knav_queue_map_reg(kdev, child,
   1508					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
   1509		pdsp->regs =
   1510			knav_queue_map_reg(kdev, child,
   1511					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
   1512		pdsp->intd =
   1513			knav_queue_map_reg(kdev, child,
   1514					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
   1515		pdsp->command =
   1516			knav_queue_map_reg(kdev, child,
   1517					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);
   1518
   1519		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
   1520		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
   1521			dev_err(dev, "failed to map pdsp %s regs\n",
   1522				pdsp->name);
   1523			if (!IS_ERR(pdsp->command))
   1524				devm_iounmap(dev, pdsp->command);
   1525			if (!IS_ERR(pdsp->iram))
   1526				devm_iounmap(dev, pdsp->iram);
   1527			if (!IS_ERR(pdsp->regs))
   1528				devm_iounmap(dev, pdsp->regs);
   1529			if (!IS_ERR(pdsp->intd))
   1530				devm_iounmap(dev, pdsp->intd);
   1531			devm_kfree(dev, pdsp);
   1532			continue;
   1533		}
   1534		of_property_read_u32(child, "id", &pdsp->id);
   1535		list_add_tail(&pdsp->list, &kdev->pdsps);
   1536		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
   1537			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
   1538			pdsp->intd);
   1539	}
   1540	return 0;
   1541}
   1542
   1543static int knav_queue_stop_pdsp(struct knav_device *kdev,
   1544			  struct knav_pdsp_info *pdsp)
   1545{
   1546	u32 val, timeout = 1000;
   1547	int ret;
   1548
   1549	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
   1550	writel_relaxed(val, &pdsp->regs->control);
   1551	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
   1552					PDSP_CTRL_RUNNING);
   1553	if (ret < 0) {
   1554		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
   1555		return ret;
   1556	}
   1557	pdsp->loaded = false;
   1558	pdsp->started = false;
   1559	return 0;
   1560}
   1561
   1562static int knav_queue_load_pdsp(struct knav_device *kdev,
   1563			  struct knav_pdsp_info *pdsp)
   1564{
   1565	int i, ret, fwlen;
   1566	const struct firmware *fw;
   1567	bool found = false;
   1568	u32 *fwdata;
   1569
   1570	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
   1571		if (knav_acc_firmwares[i]) {
   1572			ret = request_firmware_direct(&fw,
   1573						      knav_acc_firmwares[i],
   1574						      kdev->dev);
   1575			if (!ret) {
   1576				found = true;
   1577				break;
   1578			}
   1579		}
   1580	}
   1581
   1582	if (!found) {
   1583		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
   1584		return -ENODEV;
   1585	}
   1586
   1587	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
   1588		 knav_acc_firmwares[i]);
   1589
   1590	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
   1591	/* download the firmware */
   1592	fwdata = (u32 *)fw->data;
   1593	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
   1594	for (i = 0; i < fwlen; i++)
   1595		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);
   1596
   1597	release_firmware(fw);
   1598	return 0;
   1599}
   1600
   1601static int knav_queue_start_pdsp(struct knav_device *kdev,
   1602			   struct knav_pdsp_info *pdsp)
   1603{
   1604	u32 val, timeout = 1000;
   1605	int ret;
   1606
   1607	/* write a command for sync */
   1608	writel_relaxed(0xffffffff, pdsp->command);
   1609	while (readl_relaxed(pdsp->command) != 0xffffffff)
   1610		cpu_relax();
   1611
   1612	/* soft reset the PDSP */
   1613	val  = readl_relaxed(&pdsp->regs->control);
   1614	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
   1615	writel_relaxed(val, &pdsp->regs->control);
   1616
   1617	/* enable pdsp */
   1618	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
   1619	writel_relaxed(val, &pdsp->regs->control);
   1620
   1621	/* wait for command register to clear */
   1622	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
   1623	if (ret < 0) {
   1624		dev_err(kdev->dev,
   1625			"timed out on pdsp %s command register wait\n",
   1626			pdsp->name);
   1627		return ret;
   1628	}
   1629	return 0;
   1630}
   1631
   1632static void knav_queue_stop_pdsps(struct knav_device *kdev)
   1633{
   1634	struct knav_pdsp_info *pdsp;
   1635
   1636	/* disable all pdsps */
   1637	for_each_pdsp(kdev, pdsp)
   1638		knav_queue_stop_pdsp(kdev, pdsp);
   1639}
   1640
   1641static int knav_queue_start_pdsps(struct knav_device *kdev)
   1642{
   1643	struct knav_pdsp_info *pdsp;
   1644	int ret;
   1645
   1646	knav_queue_stop_pdsps(kdev);
    1647	/* Now load them all. We return success even if a pdsp is not
    1648	 * loaded, as acc channels are optional and depend on firmware
    1649	 * availability in the system. We set the loaded and started
    1650	 * flags and, when initializing the acc range, check them and
    1651	 * init the range only if the pdsp is started.
    1652	 */
   1653	for_each_pdsp(kdev, pdsp) {
   1654		ret = knav_queue_load_pdsp(kdev, pdsp);
   1655		if (!ret)
   1656			pdsp->loaded = true;
   1657	}
   1658
   1659	for_each_pdsp(kdev, pdsp) {
   1660		if (pdsp->loaded) {
   1661			ret = knav_queue_start_pdsp(kdev, pdsp);
   1662			if (!ret)
   1663				pdsp->started = true;
   1664		}
   1665	}
   1666	return 0;
   1667}
   1668
   1669static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
   1670{
   1671	struct knav_qmgr_info *qmgr;
   1672
   1673	for_each_qmgr(kdev, qmgr) {
   1674		if ((id >= qmgr->start_queue) &&
   1675		    (id < qmgr->start_queue + qmgr->num_queues))
   1676			return qmgr;
   1677	}
   1678	return NULL;
   1679}
   1680
   1681static int knav_queue_init_queue(struct knav_device *kdev,
   1682					struct knav_range_info *range,
   1683					struct knav_queue_inst *inst,
   1684					unsigned id)
   1685{
   1686	char irq_name[KNAV_NAME_SIZE];
   1687	inst->qmgr = knav_find_qmgr(id);
   1688	if (!inst->qmgr)
   1689		return -1;
   1690
   1691	INIT_LIST_HEAD(&inst->handles);
   1692	inst->kdev = kdev;
   1693	inst->range = range;
   1694	inst->irq_num = -1;
   1695	inst->id = id;
   1696	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
   1697	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
   1698
   1699	if (range->ops && range->ops->init_queue)
   1700		return range->ops->init_queue(range, inst);
   1701	else
   1702		return 0;
   1703}
   1704
   1705static int knav_queue_init_queues(struct knav_device *kdev)
   1706{
   1707	struct knav_range_info *range;
   1708	int size, id, base_idx;
   1709	int idx = 0, ret = 0;
   1710
   1711	/* how much do we need for instance data? */
   1712	size = sizeof(struct knav_queue_inst);
   1713
    1714	/* round this up to a power of 2 to keep the index-to-instance
    1715	 * arithmetic fast
    1716	 */
   1717	kdev->inst_shift = order_base_2(size);
   1718	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
   1719	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
   1720	if (!kdev->instances)
   1721		return -ENOMEM;
   1722
   1723	for_each_queue_range(kdev, range) {
   1724		if (range->ops && range->ops->init_range)
   1725			range->ops->init_range(range);
   1726		base_idx = idx;
   1727		for (id = range->queue_base;
   1728		     id < range->queue_base + range->num_queues; id++, idx++) {
   1729			ret = knav_queue_init_queue(kdev, range,
   1730					knav_queue_idx_to_inst(kdev, idx), id);
   1731			if (ret < 0)
   1732				return ret;
   1733		}
   1734		range->queue_base_inst =
   1735			knav_queue_idx_to_inst(kdev, base_idx);
   1736	}
   1737	return 0;
   1738}
   1739
   1740/* Match table for of_platform binding */
   1741static const struct of_device_id keystone_qmss_of_match[] = {
   1742	{
   1743		.compatible = "ti,keystone-navigator-qmss",
   1744	},
   1745	{
   1746		.compatible = "ti,66ak2g-navss-qm",
   1747		.data	= (void *)QMSS_66AK2G,
   1748	},
   1749	{},
   1750};
   1751MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);
   1752
   1753static int knav_queue_probe(struct platform_device *pdev)
   1754{
   1755	struct device_node *node = pdev->dev.of_node;
   1756	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
   1757	const struct of_device_id *match;
   1758	struct device *dev = &pdev->dev;
   1759	u32 temp[2];
   1760	int ret;
   1761
   1762	if (!node) {
   1763		dev_err(dev, "device tree info unavailable\n");
   1764		return -ENODEV;
   1765	}
   1766
   1767	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
   1768	if (!kdev) {
   1769		dev_err(dev, "memory allocation failed\n");
   1770		return -ENOMEM;
   1771	}
   1772
   1773	match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev);
   1774	if (match && match->data)
   1775		kdev->version = QMSS_66AK2G;
   1776
   1777	platform_set_drvdata(pdev, kdev);
   1778	kdev->dev = dev;
   1779	INIT_LIST_HEAD(&kdev->queue_ranges);
   1780	INIT_LIST_HEAD(&kdev->qmgrs);
   1781	INIT_LIST_HEAD(&kdev->pools);
   1782	INIT_LIST_HEAD(&kdev->regions);
   1783	INIT_LIST_HEAD(&kdev->pdsps);
   1784
   1785	pm_runtime_enable(&pdev->dev);
   1786	ret = pm_runtime_resume_and_get(&pdev->dev);
   1787	if (ret < 0) {
   1788		dev_err(dev, "Failed to enable QMSS\n");
   1789		return ret;
   1790	}
   1791
   1792	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
   1793		dev_err(dev, "queue-range not specified\n");
   1794		ret = -ENODEV;
   1795		goto err;
   1796	}
   1797	kdev->base_id    = temp[0];
   1798	kdev->num_queues = temp[1];
   1799
   1800	/* Initialize queue managers using device tree configuration */
   1801	qmgrs =  of_get_child_by_name(node, "qmgrs");
   1802	if (!qmgrs) {
   1803		dev_err(dev, "queue manager info not specified\n");
   1804		ret = -ENODEV;
   1805		goto err;
   1806	}
   1807	ret = knav_queue_init_qmgrs(kdev, qmgrs);
   1808	of_node_put(qmgrs);
   1809	if (ret)
   1810		goto err;
   1811
   1812	/* get pdsp configuration values from device tree */
   1813	pdsps =  of_get_child_by_name(node, "pdsps");
   1814	if (pdsps) {
   1815		ret = knav_queue_init_pdsps(kdev, pdsps);
   1816		if (ret)
   1817			goto err;
   1818
   1819		ret = knav_queue_start_pdsps(kdev);
   1820		if (ret)
   1821			goto err;
   1822	}
   1823	of_node_put(pdsps);
   1824
   1825	/* get usable queue range values from device tree */
   1826	queue_pools = of_get_child_by_name(node, "queue-pools");
   1827	if (!queue_pools) {
   1828		dev_err(dev, "queue-pools not specified\n");
   1829		ret = -ENODEV;
   1830		goto err;
   1831	}
   1832	ret = knav_setup_queue_pools(kdev, queue_pools);
   1833	of_node_put(queue_pools);
   1834	if (ret)
   1835		goto err;
   1836
   1837	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
   1838	if (ret) {
   1839		dev_err(kdev->dev, "could not setup linking ram\n");
   1840		goto err;
   1841	}
   1842
   1843	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
   1844	if (ret) {
   1845		/*
   1846		 * nothing really, we have one linking ram already, so we just
   1847		 * live within our means
   1848		 */
   1849	}
   1850
   1851	ret = knav_queue_setup_link_ram(kdev);
   1852	if (ret)
   1853		goto err;
   1854
   1855	regions = of_get_child_by_name(node, "descriptor-regions");
   1856	if (!regions) {
   1857		dev_err(dev, "descriptor-regions not specified\n");
   1858		ret = -ENODEV;
   1859		goto err;
   1860	}
   1861	ret = knav_queue_setup_regions(kdev, regions);
   1862	of_node_put(regions);
   1863	if (ret)
   1864		goto err;
   1865
   1866	ret = knav_queue_init_queues(kdev);
   1867	if (ret < 0) {
   1868		dev_err(dev, "hwqueue initialization failed\n");
   1869		goto err;
   1870	}
   1871
   1872	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
   1873			    &knav_queue_debug_fops);
   1874	device_ready = true;
   1875	return 0;
   1876
   1877err:
   1878	knav_queue_stop_pdsps(kdev);
   1879	knav_queue_free_regions(kdev);
   1880	knav_free_queue_ranges(kdev);
   1881	pm_runtime_put_sync(&pdev->dev);
   1882	pm_runtime_disable(&pdev->dev);
   1883	return ret;
   1884}
   1885
   1886static int knav_queue_remove(struct platform_device *pdev)
   1887{
   1888	/* TODO: Free resources */
   1889	pm_runtime_put_sync(&pdev->dev);
   1890	pm_runtime_disable(&pdev->dev);
   1891	return 0;
   1892}
   1893
   1894static struct platform_driver keystone_qmss_driver = {
   1895	.probe		= knav_queue_probe,
   1896	.remove		= knav_queue_remove,
   1897	.driver		= {
   1898		.name	= "keystone-navigator-qmss",
   1899		.of_match_table = keystone_qmss_of_match,
   1900	},
   1901};
   1902module_platform_driver(keystone_qmss_driver);
   1903
   1904MODULE_LICENSE("GPL v2");
    1905MODULE_DESCRIPTION("TI QMSS driver for Keystone SoCs");
   1906MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
   1907MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");