cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

core.c (27459B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Interconnect framework core driver
      4 *
      5 * Copyright (c) 2017-2019, Linaro Ltd.
      6 * Author: Georgi Djakov <georgi.djakov@linaro.org>
      7 */
      8
      9#include <linux/debugfs.h>
     10#include <linux/device.h>
     11#include <linux/idr.h>
     12#include <linux/init.h>
     13#include <linux/interconnect.h>
     14#include <linux/interconnect-provider.h>
     15#include <linux/list.h>
     16#include <linux/module.h>
     17#include <linux/mutex.h>
     18#include <linux/slab.h>
     19#include <linux/of.h>
     20#include <linux/overflow.h>
     21
     22#include "internal.h"
     23
     24#define CREATE_TRACE_POINTS
     25#include "trace.h"
     26
     27static DEFINE_IDR(icc_idr);
     28static LIST_HEAD(icc_providers);
     29static int providers_count;
     30static bool synced_state;
     31static DEFINE_MUTEX(icc_lock);
     32static struct dentry *icc_debugfs_dir;
     33
     34static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
     35{
     36	if (!n)
     37		return;
     38
     39	seq_printf(s, "%-42s %12u %12u\n",
     40		   n->name, n->avg_bw, n->peak_bw);
     41}
     42
     43static int icc_summary_show(struct seq_file *s, void *data)
     44{
     45	struct icc_provider *provider;
     46
     47	seq_puts(s, " node                                  tag          avg         peak\n");
     48	seq_puts(s, "--------------------------------------------------------------------\n");
     49
     50	mutex_lock(&icc_lock);
     51
     52	list_for_each_entry(provider, &icc_providers, provider_list) {
     53		struct icc_node *n;
     54
     55		list_for_each_entry(n, &provider->nodes, node_list) {
     56			struct icc_req *r;
     57
     58			icc_summary_show_one(s, n);
     59			hlist_for_each_entry(r, &n->req_list, req_node) {
     60				u32 avg_bw = 0, peak_bw = 0;
     61
     62				if (!r->dev)
     63					continue;
     64
     65				if (r->enabled) {
     66					avg_bw = r->avg_bw;
     67					peak_bw = r->peak_bw;
     68				}
     69
     70				seq_printf(s, "  %-27s %12u %12u %12u\n",
     71					   dev_name(r->dev), r->tag, avg_bw, peak_bw);
     72			}
     73		}
     74	}
     75
     76	mutex_unlock(&icc_lock);
     77
     78	return 0;
     79}
     80DEFINE_SHOW_ATTRIBUTE(icc_summary);
     81
     82static void icc_graph_show_link(struct seq_file *s, int level,
     83				struct icc_node *n, struct icc_node *m)
     84{
     85	seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
     86		   level == 2 ? "\t\t" : "\t",
     87		   n->id, n->name, m->id, m->name);
     88}
     89
     90static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
     91{
     92	seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
     93		   n->id, n->name, n->id, n->name);
     94	seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
     95	seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
     96	seq_puts(s, "\"]\n");
     97}
     98
     99static int icc_graph_show(struct seq_file *s, void *data)
    100{
    101	struct icc_provider *provider;
    102	struct icc_node *n;
    103	int cluster_index = 0;
    104	int i;
    105
    106	seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
    107	mutex_lock(&icc_lock);
    108
    109	/* draw providers as cluster subgraphs */
    110	cluster_index = 0;
    111	list_for_each_entry(provider, &icc_providers, provider_list) {
    112		seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
    113		if (provider->dev)
    114			seq_printf(s, "\t\tlabel = \"%s\"\n",
    115				   dev_name(provider->dev));
    116
    117		/* draw nodes */
    118		list_for_each_entry(n, &provider->nodes, node_list)
    119			icc_graph_show_node(s, n);
    120
    121		/* draw internal links */
    122		list_for_each_entry(n, &provider->nodes, node_list)
    123			for (i = 0; i < n->num_links; ++i)
    124				if (n->provider == n->links[i]->provider)
    125					icc_graph_show_link(s, 2, n,
    126							    n->links[i]);
    127
    128		seq_puts(s, "\t}\n");
    129	}
    130
    131	/* draw external links */
    132	list_for_each_entry(provider, &icc_providers, provider_list)
    133		list_for_each_entry(n, &provider->nodes, node_list)
    134			for (i = 0; i < n->num_links; ++i)
    135				if (n->provider != n->links[i]->provider)
    136					icc_graph_show_link(s, 1, n,
    137							    n->links[i]);
    138
    139	mutex_unlock(&icc_lock);
    140	seq_puts(s, "}");
    141
    142	return 0;
    143}
    144DEFINE_SHOW_ATTRIBUTE(icc_graph);
    145
    146static struct icc_node *node_find(const int id)
    147{
    148	return idr_find(&icc_idr, id);
    149}
    150
    151static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
    152				  ssize_t num_nodes)
    153{
    154	struct icc_node *node = dst;
    155	struct icc_path *path;
    156	int i;
    157
    158	path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
    159	if (!path)
    160		return ERR_PTR(-ENOMEM);
    161
    162	path->num_nodes = num_nodes;
    163
    164	for (i = num_nodes - 1; i >= 0; i--) {
    165		node->provider->users++;
    166		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
    167		path->reqs[i].node = node;
    168		path->reqs[i].dev = dev;
    169		path->reqs[i].enabled = true;
    170		/* reference to previous node was saved during path traversal */
    171		node = node->reverse;
    172	}
    173
    174	return path;
    175}
    176
    177static struct icc_path *path_find(struct device *dev, struct icc_node *src,
    178				  struct icc_node *dst)
    179{
    180	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
    181	struct icc_node *n, *node = NULL;
    182	struct list_head traverse_list;
    183	struct list_head edge_list;
    184	struct list_head visited_list;
    185	size_t i, depth = 1;
    186	bool found = false;
    187
    188	INIT_LIST_HEAD(&traverse_list);
    189	INIT_LIST_HEAD(&edge_list);
    190	INIT_LIST_HEAD(&visited_list);
    191
    192	list_add(&src->search_list, &traverse_list);
    193	src->reverse = NULL;
    194
    195	do {
    196		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
    197			if (node == dst) {
    198				found = true;
    199				list_splice_init(&edge_list, &visited_list);
    200				list_splice_init(&traverse_list, &visited_list);
    201				break;
    202			}
    203			for (i = 0; i < node->num_links; i++) {
    204				struct icc_node *tmp = node->links[i];
    205
    206				if (!tmp) {
    207					path = ERR_PTR(-ENOENT);
    208					goto out;
    209				}
    210
    211				if (tmp->is_traversed)
    212					continue;
    213
    214				tmp->is_traversed = true;
    215				tmp->reverse = node;
    216				list_add_tail(&tmp->search_list, &edge_list);
    217			}
    218		}
    219
    220		if (found)
    221			break;
    222
    223		list_splice_init(&traverse_list, &visited_list);
    224		list_splice_init(&edge_list, &traverse_list);
    225
    226		/* count the hops including the source */
    227		depth++;
    228
    229	} while (!list_empty(&traverse_list));
    230
    231out:
    232
    233	/* reset the traversed state */
    234	list_for_each_entry_reverse(n, &visited_list, search_list)
    235		n->is_traversed = false;
    236
    237	if (found)
    238		path = path_init(dev, dst, depth);
    239
    240	return path;
    241}
    242
    243/*
    244 * We want the path to honor all bandwidth requests, so the average and peak
    245 * bandwidth requirements from each consumer are aggregated at each node.
    246 * The aggregation is platform specific, so each platform can customize it by
    247 * implementing its own aggregate() function.
    248 */
    249
    250static int aggregate_requests(struct icc_node *node)
    251{
    252	struct icc_provider *p = node->provider;
    253	struct icc_req *r;
    254	u32 avg_bw, peak_bw;
    255
    256	node->avg_bw = 0;
    257	node->peak_bw = 0;
    258
    259	if (p->pre_aggregate)
    260		p->pre_aggregate(node);
    261
    262	hlist_for_each_entry(r, &node->req_list, req_node) {
    263		if (r->enabled) {
    264			avg_bw = r->avg_bw;
    265			peak_bw = r->peak_bw;
    266		} else {
    267			avg_bw = 0;
    268			peak_bw = 0;
    269		}
    270		p->aggregate(node, r->tag, avg_bw, peak_bw,
    271			     &node->avg_bw, &node->peak_bw);
    272
    273		/* during boot use the initial bandwidth as a floor value */
    274		if (!synced_state) {
    275			node->avg_bw = max(node->avg_bw, node->init_avg);
    276			node->peak_bw = max(node->peak_bw, node->init_peak);
    277		}
    278	}
    279
    280	return 0;
    281}
    282
    283static int apply_constraints(struct icc_path *path)
    284{
    285	struct icc_node *next, *prev = NULL;
    286	struct icc_provider *p;
    287	int ret = -EINVAL;
    288	int i;
    289
    290	for (i = 0; i < path->num_nodes; i++) {
    291		next = path->reqs[i].node;
    292		p = next->provider;
    293
    294		/* both endpoints should be valid master-slave pairs */
    295		if (!prev || (p != prev->provider && !p->inter_set)) {
    296			prev = next;
    297			continue;
    298		}
    299
    300		/* set the constraints */
    301		ret = p->set(prev, next);
    302		if (ret)
    303			goto out;
    304
    305		prev = next;
    306	}
    307out:
    308	return ret;
    309}
    310
    311int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
    312		      u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
    313{
    314	*agg_avg += avg_bw;
    315	*agg_peak = max(*agg_peak, peak_bw);
    316
    317	return 0;
    318}
    319EXPORT_SYMBOL_GPL(icc_std_aggregate);
    320
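/*
 * Illustrative sketch, not part of the original file: a provider-specific
 * aggregate() callback with the same prototype as icc_std_aggregate() above.
 * The foo_* name and the tag policy are hypothetical and only show where a
 * platform would plug in its own aggregation rules.
 */
#if 0	/* illustrative only */
static int foo_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
			 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	/* Sum the average bandwidth of all requests on this node. */
	*agg_avg += avg_bw;

	/* Hypothetical policy: tagged requests get extra peak headroom. */
	if (tag)
		peak_bw += peak_bw / 2;

	/* Peak bandwidth is the maximum over all requests. */
	*agg_peak = max(*agg_peak, peak_bw);

	return 0;
}
#endif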
    321/* of_icc_xlate_onecell() - Translate function using a single index.
    322 * @spec: OF phandle args to map into an interconnect node.
    323 * @data: private data (pointer to struct icc_onecell_data)
    324 *
    325 * This is a generic translate function that can be used to model simple
    326 * interconnect providers that have one device tree node and provide
    327 * multiple interconnect nodes. A single cell is used as an index into
    328 * an array of icc nodes specified in the icc_onecell_data struct when
    329 * registering the provider.
    330 */
    331struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
    332				      void *data)
    333{
    334	struct icc_onecell_data *icc_data = data;
    335	unsigned int idx = spec->args[0];
    336
    337	if (idx >= icc_data->num_nodes) {
    338		pr_err("%s: invalid index %u\n", __func__, idx);
    339		return ERR_PTR(-EINVAL);
    340	}
    341
    342	return icc_data->nodes[idx];
    343}
    344EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
    345
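/*
 * Illustrative sketch, not part of the original file: backing
 * of_icc_xlate_onecell() with an icc_onecell_data table. The foo_* names and
 * foo_num_nodes are hypothetical; in-tree providers with a flat node table
 * follow the same pattern.
 */
#if 0	/* illustrative only */
static int foo_setup_xlate(struct device *dev, struct icc_provider *provider,
			   unsigned int foo_num_nodes)
{
	struct icc_onecell_data *data;

	/* nodes[] is a flexible array member, so size the allocation for it. */
	data = devm_kzalloc(dev, struct_size(data, nodes, foo_num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->num_nodes = foo_num_nodes;
	/* data->nodes[i] is filled in as each icc_node is created. */

	provider->xlate = of_icc_xlate_onecell;
	provider->data = data;

	return 0;
}
#endif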
    346/**
    347 * of_icc_get_from_provider() - Look-up interconnect node
    348 * @spec: OF phandle args to use for look-up
    349 *
    350 * Looks for interconnect provider under the node specified by @spec and if
    351 * found, uses xlate function of the provider to map phandle args to node.
    352 *
    353 * Returns a valid pointer to struct icc_node_data on success or ERR_PTR()
    354 * on failure.
    355 */
    356struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
    357{
    358	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
    359	struct icc_node_data *data = NULL;
    360	struct icc_provider *provider;
    361
    362	if (!spec)
    363		return ERR_PTR(-EINVAL);
    364
    365	mutex_lock(&icc_lock);
    366	list_for_each_entry(provider, &icc_providers, provider_list) {
    367		if (provider->dev->of_node == spec->np) {
    368			if (provider->xlate_extended) {
    369				data = provider->xlate_extended(spec, provider->data);
    370				if (!IS_ERR(data)) {
    371					node = data->node;
    372					break;
    373				}
    374			} else {
    375				node = provider->xlate(spec, provider->data);
    376				if (!IS_ERR(node))
    377					break;
    378			}
    379		}
    380	}
    381	mutex_unlock(&icc_lock);
    382
    383	if (IS_ERR(node))
    384		return ERR_CAST(node);
    385
    386	if (!data) {
    387		data = kzalloc(sizeof(*data), GFP_KERNEL);
    388		if (!data)
    389			return ERR_PTR(-ENOMEM);
    390		data->node = node;
    391	}
    392
    393	return data;
    394}
    395EXPORT_SYMBOL_GPL(of_icc_get_from_provider);
    396
    397static void devm_icc_release(struct device *dev, void *res)
    398{
    399	icc_put(*(struct icc_path **)res);
    400}
    401
    402struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
    403{
    404	struct icc_path **ptr, *path;
    405
    406	ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
    407	if (!ptr)
    408		return ERR_PTR(-ENOMEM);
    409
    410	path = of_icc_get(dev, name);
    411	if (!IS_ERR(path)) {
    412		*ptr = path;
    413		devres_add(dev, ptr);
    414	} else {
    415		devres_free(ptr);
    416	}
    417
    418	return path;
    419}
    420EXPORT_SYMBOL_GPL(devm_of_icc_get);
    421
    422/**
    423 * of_icc_get_by_index() - get a path handle from a DT node based on index
    424 * @dev: device pointer for the consumer device
    425 * @idx: interconnect path index
    426 *
    427 * This function will search for a path between two endpoints and return an
    428 * icc_path handle on success. Use icc_put() to release constraints when they
    429 * are not needed anymore.
    430 * If the interconnect API is disabled, NULL is returned and the consumer
    431 * drivers will still build. Drivers are free to handle this specifically,
    432 * but they don't have to.
    433 *
    434 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
    435 * when the API is disabled or the "interconnects" DT property is missing.
    436 */
    437struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
    438{
    439	struct icc_path *path;
    440	struct icc_node_data *src_data, *dst_data;
    441	struct device_node *np;
    442	struct of_phandle_args src_args, dst_args;
    443	int ret;
    444
    445	if (!dev || !dev->of_node)
    446		return ERR_PTR(-ENODEV);
    447
    448	np = dev->of_node;
    449
    450	/*
     451	 * When the consumer DT node does not have an "interconnects" property,
    452	 * return a NULL path to skip setting constraints.
    453	 */
    454	if (!of_find_property(np, "interconnects", NULL))
    455		return NULL;
    456
    457	/*
    458	 * We use a combination of phandle and specifier for endpoint. For now
     459	 * let's support only global IDs and extend this in the future if needed
    460	 * without breaking DT compatibility.
    461	 */
    462	ret = of_parse_phandle_with_args(np, "interconnects",
    463					 "#interconnect-cells", idx * 2,
    464					 &src_args);
    465	if (ret)
    466		return ERR_PTR(ret);
    467
    468	of_node_put(src_args.np);
    469
    470	ret = of_parse_phandle_with_args(np, "interconnects",
    471					 "#interconnect-cells", idx * 2 + 1,
    472					 &dst_args);
    473	if (ret)
    474		return ERR_PTR(ret);
    475
    476	of_node_put(dst_args.np);
    477
    478	src_data = of_icc_get_from_provider(&src_args);
    479
    480	if (IS_ERR(src_data)) {
    481		dev_err_probe(dev, PTR_ERR(src_data), "error finding src node\n");
    482		return ERR_CAST(src_data);
    483	}
    484
    485	dst_data = of_icc_get_from_provider(&dst_args);
    486
    487	if (IS_ERR(dst_data)) {
    488		dev_err_probe(dev, PTR_ERR(dst_data), "error finding dst node\n");
    489		kfree(src_data);
    490		return ERR_CAST(dst_data);
    491	}
    492
    493	mutex_lock(&icc_lock);
    494	path = path_find(dev, src_data->node, dst_data->node);
    495	mutex_unlock(&icc_lock);
    496	if (IS_ERR(path)) {
    497		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
    498		goto free_icc_data;
    499	}
    500
    501	if (src_data->tag && src_data->tag == dst_data->tag)
    502		icc_set_tag(path, src_data->tag);
    503
    504	path->name = kasprintf(GFP_KERNEL, "%s-%s",
    505			       src_data->node->name, dst_data->node->name);
    506	if (!path->name) {
    507		kfree(path);
    508		path = ERR_PTR(-ENOMEM);
    509	}
    510
    511free_icc_data:
    512	kfree(src_data);
    513	kfree(dst_data);
    514	return path;
    515}
    516EXPORT_SYMBOL_GPL(of_icc_get_by_index);
    517
    518/**
    519 * of_icc_get() - get a path handle from a DT node based on name
    520 * @dev: device pointer for the consumer device
    521 * @name: interconnect path name
    522 *
    523 * This function will search for a path between two endpoints and return an
    524 * icc_path handle on success. Use icc_put() to release constraints when they
    525 * are not needed anymore.
    526 * If the interconnect API is disabled, NULL is returned and the consumer
    527 * drivers will still build. Drivers are free to handle this specifically,
    528 * but they don't have to.
    529 *
    530 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
    531 * when the API is disabled or the "interconnects" DT property is missing.
    532 */
    533struct icc_path *of_icc_get(struct device *dev, const char *name)
    534{
    535	struct device_node *np;
    536	int idx = 0;
    537
    538	if (!dev || !dev->of_node)
    539		return ERR_PTR(-ENODEV);
    540
    541	np = dev->of_node;
    542
    543	/*
     544	 * When the consumer DT node does not have an "interconnects" property,
    545	 * return a NULL path to skip setting constraints.
    546	 */
    547	if (!of_find_property(np, "interconnects", NULL))
    548		return NULL;
    549
    550	/*
    551	 * We use a combination of phandle and specifier for endpoint. For now
     552	 * let's support only global IDs and extend this in the future if needed
    553	 * without breaking DT compatibility.
    554	 */
    555	if (name) {
    556		idx = of_property_match_string(np, "interconnect-names", name);
    557		if (idx < 0)
    558			return ERR_PTR(idx);
    559	}
    560
    561	return of_icc_get_by_index(dev, idx);
    562}
    563EXPORT_SYMBOL_GPL(of_icc_get);
    564
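/*
 * Illustrative consumer sketch, not part of the original file. The foo_*
 * names, the "cpu-mem" path name and the bandwidth values are hypothetical;
 * the path name must match an entry in the consumer's "interconnect-names"
 * DT property.
 */
#if 0	/* illustrative only */
static int foo_start(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "cpu-mem");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* A NULL path means the API is disabled or DT has no "interconnects". */
	ret = icc_set_bw(path, 100000 /* avg kB/s */, 200000 /* peak kB/s */);
	if (ret) {
		icc_put(path);
		return ret;
	}

	/* ... program and use the hardware ... */

	icc_put(path);	/* drop the constraints once they are not needed */
	return 0;
}
#endif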
    565/**
    566 * icc_set_tag() - set an optional tag on a path
    567 * @path: the path we want to tag
    568 * @tag: the tag value
    569 *
    570 * This function allows consumers to append a tag to the requests associated
    571 * with a path, so that a different aggregation could be done based on this tag.
    572 */
    573void icc_set_tag(struct icc_path *path, u32 tag)
    574{
    575	int i;
    576
    577	if (!path)
    578		return;
    579
    580	mutex_lock(&icc_lock);
    581
    582	for (i = 0; i < path->num_nodes; i++)
    583		path->reqs[i].tag = tag;
    584
    585	mutex_unlock(&icc_lock);
    586}
    587EXPORT_SYMBOL_GPL(icc_set_tag);
    588
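/*
 * Illustrative sketch, not part of the original file: tags are opaque to the
 * framework and only interpreted by the provider's aggregate() callback.
 * FOO_TAG_ACTIVE_ONLY is a hypothetical value that the platform's
 * aggregate() would understand.
 */
#if 0	/* illustrative only */
static struct icc_path *foo_get_tagged_path(struct device *dev)
{
	struct icc_path *path;

	path = of_icc_get(dev, "cpu-mem");
	if (IS_ERR_OR_NULL(path))
		return path;

	icc_set_tag(path, FOO_TAG_ACTIVE_ONLY);

	/* the tagged path is then used with icc_set_bw()/icc_put() as usual */
	return path;
}
#endif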
    589/**
    590 * icc_get_name() - Get name of the icc path
    591 * @path: reference to the path returned by icc_get()
    592 *
    593 * This function is used by an interconnect consumer to get the name of the icc
    594 * path.
    595 *
    596 * Returns a valid pointer on success, or NULL otherwise.
    597 */
    598const char *icc_get_name(struct icc_path *path)
    599{
    600	if (!path)
    601		return NULL;
    602
    603	return path->name;
    604}
    605EXPORT_SYMBOL_GPL(icc_get_name);
    606
    607/**
    608 * icc_set_bw() - set bandwidth constraints on an interconnect path
    609 * @path: reference to the path returned by icc_get()
    610 * @avg_bw: average bandwidth in kilobytes per second
    611 * @peak_bw: peak bandwidth in kilobytes per second
    612 *
    613 * This function is used by an interconnect consumer to express its own needs
    614 * in terms of bandwidth for a previously requested path between two endpoints.
    615 * The requests are aggregated and each node is updated accordingly. The entire
    616 * path is locked by a mutex to ensure that the set() is completed.
     617	 * The @path can be NULL when the "interconnects" DT property is missing,
    618 * which will mean that no constraints will be set.
    619 *
    620 * Returns 0 on success, or an appropriate error code otherwise.
    621 */
    622int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
    623{
    624	struct icc_node *node;
    625	u32 old_avg, old_peak;
    626	size_t i;
    627	int ret;
    628
    629	if (!path)
    630		return 0;
    631
    632	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
    633		return -EINVAL;
    634
    635	mutex_lock(&icc_lock);
    636
    637	old_avg = path->reqs[0].avg_bw;
    638	old_peak = path->reqs[0].peak_bw;
    639
    640	for (i = 0; i < path->num_nodes; i++) {
    641		node = path->reqs[i].node;
    642
    643		/* update the consumer request for this path */
    644		path->reqs[i].avg_bw = avg_bw;
    645		path->reqs[i].peak_bw = peak_bw;
    646
    647		/* aggregate requests for this node */
    648		aggregate_requests(node);
    649
    650		trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
    651	}
    652
    653	ret = apply_constraints(path);
    654	if (ret) {
    655		pr_debug("interconnect: error applying constraints (%d)\n",
    656			 ret);
    657
    658		for (i = 0; i < path->num_nodes; i++) {
    659			node = path->reqs[i].node;
    660			path->reqs[i].avg_bw = old_avg;
    661			path->reqs[i].peak_bw = old_peak;
    662			aggregate_requests(node);
    663		}
    664		apply_constraints(path);
    665	}
    666
    667	mutex_unlock(&icc_lock);
    668
    669	trace_icc_set_bw_end(path, ret);
    670
    671	return ret;
    672}
    673EXPORT_SYMBOL_GPL(icc_set_bw);
    674
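/*
 * Illustrative sketch, not part of the original file: repeated icc_set_bw()
 * calls on the same path replace this consumer's request and trigger
 * re-aggregation; requests from other consumers on shared nodes are kept.
 * The foo_* name and the kB/s values are hypothetical.
 */
#if 0	/* illustrative only */
static int foo_set_performance(struct icc_path *path, bool busy)
{
	if (busy)
		return icc_set_bw(path, 500000, 1000000);

	/* Keep the path but request no bandwidth while idle. */
	return icc_set_bw(path, 0, 0);
}
#endif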
    675static int __icc_enable(struct icc_path *path, bool enable)
    676{
    677	int i;
    678
    679	if (!path)
    680		return 0;
    681
    682	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
    683		return -EINVAL;
    684
    685	mutex_lock(&icc_lock);
    686
    687	for (i = 0; i < path->num_nodes; i++)
    688		path->reqs[i].enabled = enable;
    689
    690	mutex_unlock(&icc_lock);
    691
    692	return icc_set_bw(path, path->reqs[0].avg_bw,
    693			  path->reqs[0].peak_bw);
    694}
    695
    696int icc_enable(struct icc_path *path)
    697{
    698	return __icc_enable(path, true);
    699}
    700EXPORT_SYMBOL_GPL(icc_enable);
    701
    702int icc_disable(struct icc_path *path)
    703{
    704	return __icc_enable(path, false);
    705}
    706EXPORT_SYMBOL_GPL(icc_disable);
    707
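/*
 * Illustrative sketch, not part of the original file: icc_disable() keeps the
 * stored bandwidth values but removes this path's requests from aggregation,
 * and icc_enable() re-applies them. A hypothetical runtime-PM style use:
 */
#if 0	/* illustrative only */
static int foo_runtime_suspend(struct icc_path *path)
{
	return icc_disable(path);	/* requests drop out of the aggregate */
}

static int foo_runtime_resume(struct icc_path *path)
{
	return icc_enable(path);	/* previous bandwidth request is restored */
}
#endif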
    708/**
    709 * icc_get() - return a handle for path between two endpoints
    710 * @dev: the device requesting the path
    711 * @src_id: source device port id
    712 * @dst_id: destination device port id
    713 *
    714 * This function will search for a path between two endpoints and return an
    715 * icc_path handle on success. Use icc_put() to release
    716 * constraints when they are not needed anymore.
    717 * If the interconnect API is disabled, NULL is returned and the consumer
    718 * drivers will still build. Drivers are free to handle this specifically,
    719 * but they don't have to.
    720 *
    721 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
    722 * interconnect API is disabled.
    723 */
    724struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
    725{
    726	struct icc_node *src, *dst;
    727	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
    728
    729	mutex_lock(&icc_lock);
    730
    731	src = node_find(src_id);
    732	if (!src)
    733		goto out;
    734
    735	dst = node_find(dst_id);
    736	if (!dst)
    737		goto out;
    738
    739	path = path_find(dev, src, dst);
    740	if (IS_ERR(path)) {
    741		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
    742		goto out;
    743	}
    744
    745	path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
    746	if (!path->name) {
    747		kfree(path);
    748		path = ERR_PTR(-ENOMEM);
    749	}
    750out:
    751	mutex_unlock(&icc_lock);
    752	return path;
    753}
    754EXPORT_SYMBOL_GPL(icc_get);
    755
    756/**
    757 * icc_put() - release the reference to the icc_path
    758 * @path: interconnect path
    759 *
    760 * Use this function to release the constraints on a path when the path is
    761 * no longer needed. The constraints will be re-aggregated.
    762 */
    763void icc_put(struct icc_path *path)
    764{
    765	struct icc_node *node;
    766	size_t i;
    767	int ret;
    768
    769	if (!path || WARN_ON(IS_ERR(path)))
    770		return;
    771
    772	ret = icc_set_bw(path, 0, 0);
    773	if (ret)
    774		pr_err("%s: error (%d)\n", __func__, ret);
    775
    776	mutex_lock(&icc_lock);
    777	for (i = 0; i < path->num_nodes; i++) {
    778		node = path->reqs[i].node;
    779		hlist_del(&path->reqs[i].req_node);
    780		if (!WARN_ON(!node->provider->users))
    781			node->provider->users--;
    782	}
    783	mutex_unlock(&icc_lock);
    784
    785	kfree_const(path->name);
    786	kfree(path);
    787}
    788EXPORT_SYMBOL_GPL(icc_put);
    789
    790static struct icc_node *icc_node_create_nolock(int id)
    791{
    792	struct icc_node *node;
    793
    794	/* check if node already exists */
    795	node = node_find(id);
    796	if (node)
    797		return node;
    798
    799	node = kzalloc(sizeof(*node), GFP_KERNEL);
    800	if (!node)
    801		return ERR_PTR(-ENOMEM);
    802
    803	id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
    804	if (id < 0) {
    805		WARN(1, "%s: couldn't get idr\n", __func__);
    806		kfree(node);
    807		return ERR_PTR(id);
    808	}
    809
    810	node->id = id;
    811
    812	return node;
    813}
    814
    815/**
    816 * icc_node_create() - create a node
    817 * @id: node id
    818 *
    819 * Return: icc_node pointer on success, or ERR_PTR() on error
    820 */
    821struct icc_node *icc_node_create(int id)
    822{
    823	struct icc_node *node;
    824
    825	mutex_lock(&icc_lock);
    826
    827	node = icc_node_create_nolock(id);
    828
    829	mutex_unlock(&icc_lock);
    830
    831	return node;
    832}
    833EXPORT_SYMBOL_GPL(icc_node_create);
    834
    835/**
    836 * icc_node_destroy() - destroy a node
    837 * @id: node id
    838 */
    839void icc_node_destroy(int id)
    840{
    841	struct icc_node *node;
    842
    843	mutex_lock(&icc_lock);
    844
    845	node = node_find(id);
    846	if (node) {
    847		idr_remove(&icc_idr, node->id);
    848		WARN_ON(!hlist_empty(&node->req_list));
    849	}
    850
    851	mutex_unlock(&icc_lock);
    852
    853	kfree(node);
    854}
    855EXPORT_SYMBOL_GPL(icc_node_destroy);
    856
    857/**
    858 * icc_link_create() - create a link between two nodes
    859 * @node: source node id
    860 * @dst_id: destination node id
    861 *
    862 * Create a link between two nodes. The nodes might belong to different
    863 * interconnect providers and the @dst_id node might not exist (if the
    864 * provider driver has not probed yet). So just create the @dst_id node
    865 * and when the actual provider driver is probed, the rest of the node
    866 * data is filled.
    867 *
    868 * Return: 0 on success, or an error code otherwise
    869 */
    870int icc_link_create(struct icc_node *node, const int dst_id)
    871{
    872	struct icc_node *dst;
    873	struct icc_node **new;
    874	int ret = 0;
    875
    876	if (!node->provider)
    877		return -EINVAL;
    878
    879	mutex_lock(&icc_lock);
    880
    881	dst = node_find(dst_id);
    882	if (!dst) {
    883		dst = icc_node_create_nolock(dst_id);
    884
    885		if (IS_ERR(dst)) {
    886			ret = PTR_ERR(dst);
    887			goto out;
    888		}
    889	}
    890
    891	new = krealloc(node->links,
    892		       (node->num_links + 1) * sizeof(*node->links),
    893		       GFP_KERNEL);
    894	if (!new) {
    895		ret = -ENOMEM;
    896		goto out;
    897	}
    898
    899	node->links = new;
    900	node->links[node->num_links++] = dst;
    901
    902out:
    903	mutex_unlock(&icc_lock);
    904
    905	return ret;
    906}
    907EXPORT_SYMBOL_GPL(icc_link_create);
    908
    909/**
    910 * icc_link_destroy() - destroy a link between two nodes
    911 * @src: pointer to source node
    912 * @dst: pointer to destination node
    913 *
    914 * Return: 0 on success, or an error code otherwise
    915 */
    916int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
    917{
    918	struct icc_node **new;
    919	size_t slot;
    920	int ret = 0;
    921
    922	if (IS_ERR_OR_NULL(src))
    923		return -EINVAL;
    924
    925	if (IS_ERR_OR_NULL(dst))
    926		return -EINVAL;
    927
    928	mutex_lock(&icc_lock);
    929
    930	for (slot = 0; slot < src->num_links; slot++)
    931		if (src->links[slot] == dst)
    932			break;
    933
    934	if (WARN_ON(slot == src->num_links)) {
    935		ret = -ENXIO;
    936		goto out;
    937	}
    938
    939	src->links[slot] = src->links[--src->num_links];
    940
    941	new = krealloc(src->links, src->num_links * sizeof(*src->links),
    942		       GFP_KERNEL);
    943	if (new)
    944		src->links = new;
    945	else
    946		ret = -ENOMEM;
    947
    948out:
    949	mutex_unlock(&icc_lock);
    950
    951	return ret;
    952}
    953EXPORT_SYMBOL_GPL(icc_link_destroy);
    954
    955/**
    956 * icc_node_add() - add interconnect node to interconnect provider
    957 * @node: pointer to the interconnect node
    958 * @provider: pointer to the interconnect provider
    959 */
    960void icc_node_add(struct icc_node *node, struct icc_provider *provider)
    961{
    962	if (WARN_ON(node->provider))
    963		return;
    964
    965	mutex_lock(&icc_lock);
    966
    967	node->provider = provider;
    968	list_add_tail(&node->node_list, &provider->nodes);
    969
    970	/* get the initial bandwidth values and sync them with hardware */
    971	if (provider->get_bw) {
    972		provider->get_bw(node, &node->init_avg, &node->init_peak);
    973	} else {
    974		node->init_avg = INT_MAX;
    975		node->init_peak = INT_MAX;
    976	}
    977	node->avg_bw = node->init_avg;
    978	node->peak_bw = node->init_peak;
    979
    980	if (provider->pre_aggregate)
    981		provider->pre_aggregate(node);
    982
    983	if (provider->aggregate)
    984		provider->aggregate(node, 0, node->init_avg, node->init_peak,
    985				    &node->avg_bw, &node->peak_bw);
    986
    987	provider->set(node, node);
    988	node->avg_bw = 0;
    989	node->peak_bw = 0;
    990
    991	mutex_unlock(&icc_lock);
    992}
    993EXPORT_SYMBOL_GPL(icc_node_add);
    994
    995/**
    996 * icc_node_del() - delete interconnect node from interconnect provider
    997 * @node: pointer to the interconnect node
    998 */
    999void icc_node_del(struct icc_node *node)
   1000{
   1001	mutex_lock(&icc_lock);
   1002
   1003	list_del(&node->node_list);
   1004
   1005	mutex_unlock(&icc_lock);
   1006}
   1007EXPORT_SYMBOL_GPL(icc_node_del);
   1008
   1009/**
   1010 * icc_nodes_remove() - remove all previously added nodes from provider
   1011 * @provider: the interconnect provider we are removing nodes from
   1012 *
   1013 * Return: 0 on success, or an error code otherwise
   1014 */
   1015int icc_nodes_remove(struct icc_provider *provider)
   1016{
   1017	struct icc_node *n, *tmp;
   1018
   1019	if (WARN_ON(IS_ERR_OR_NULL(provider)))
   1020		return -EINVAL;
   1021
   1022	list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
   1023		icc_node_del(n);
   1024		icc_node_destroy(n->id);
   1025	}
   1026
   1027	return 0;
   1028}
   1029EXPORT_SYMBOL_GPL(icc_nodes_remove);
   1030
   1031/**
   1032 * icc_provider_add() - add a new interconnect provider
   1033 * @provider: the interconnect provider that will be added into topology
   1034 *
   1035 * Return: 0 on success, or an error code otherwise
   1036 */
   1037int icc_provider_add(struct icc_provider *provider)
   1038{
   1039	if (WARN_ON(!provider->set))
   1040		return -EINVAL;
   1041	if (WARN_ON(!provider->xlate && !provider->xlate_extended))
   1042		return -EINVAL;
   1043
   1044	mutex_lock(&icc_lock);
   1045
   1046	INIT_LIST_HEAD(&provider->nodes);
   1047	list_add_tail(&provider->provider_list, &icc_providers);
   1048
   1049	mutex_unlock(&icc_lock);
   1050
   1051	dev_dbg(provider->dev, "interconnect provider added to topology\n");
   1052
   1053	return 0;
   1054}
   1055EXPORT_SYMBOL_GPL(icc_provider_add);
   1056
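/*
 * Illustrative sketch, not part of the original file: a condensed provider
 * registration flow. FOO_MASTER/FOO_SLAVE, foo_set() and foo_xlate() are
 * hypothetical; real drivers also fill node->data and typically use
 * of_icc_xlate_onecell() or xlate_extended().
 */
#if 0	/* illustrative only */
static int foo_register_provider(struct device *dev,
				 struct icc_provider *provider)
{
	struct icc_node *master, *slave;
	int ret;

	provider->dev = dev;
	provider->set = foo_set;		/* applies constraints to hardware */
	provider->aggregate = icc_std_aggregate;
	provider->xlate = foo_xlate;

	ret = icc_provider_add(provider);
	if (ret)
		return ret;

	master = icc_node_create(FOO_MASTER);
	if (IS_ERR(master)) {
		ret = PTR_ERR(master);
		goto err;
	}
	master->name = "foo_master";
	icc_node_add(master, provider);

	slave = icc_node_create(FOO_SLAVE);
	if (IS_ERR(slave)) {
		ret = PTR_ERR(slave);
		goto err;
	}
	slave->name = "foo_slave";
	icc_node_add(slave, provider);

	/* topology: master -> slave */
	ret = icc_link_create(master, FOO_SLAVE);
	if (ret)
		goto err;

	return 0;

err:
	icc_nodes_remove(provider);
	icc_provider_del(provider);
	return ret;
}
#endif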
   1057/**
   1058 * icc_provider_del() - delete previously added interconnect provider
   1059 * @provider: the interconnect provider that will be removed from topology
   1060 *
   1061 * Return: 0 on success, or an error code otherwise
   1062 */
   1063int icc_provider_del(struct icc_provider *provider)
   1064{
   1065	mutex_lock(&icc_lock);
   1066	if (provider->users) {
   1067		pr_warn("interconnect provider still has %d users\n",
   1068			provider->users);
   1069		mutex_unlock(&icc_lock);
   1070		return -EBUSY;
   1071	}
   1072
   1073	if (!list_empty(&provider->nodes)) {
   1074		pr_warn("interconnect provider still has nodes\n");
   1075		mutex_unlock(&icc_lock);
   1076		return -EBUSY;
   1077	}
   1078
   1079	list_del(&provider->provider_list);
   1080	mutex_unlock(&icc_lock);
   1081
   1082	return 0;
   1083}
   1084EXPORT_SYMBOL_GPL(icc_provider_del);
   1085
   1086static int of_count_icc_providers(struct device_node *np)
   1087{
   1088	struct device_node *child;
   1089	int count = 0;
   1090	const struct of_device_id __maybe_unused ignore_list[] = {
   1091		{ .compatible = "qcom,sc7180-ipa-virt" },
   1092		{ .compatible = "qcom,sdx55-ipa-virt" },
   1093		{}
   1094	};
   1095
   1096	for_each_available_child_of_node(np, child) {
   1097		if (of_property_read_bool(child, "#interconnect-cells") &&
   1098		    likely(!of_match_node(ignore_list, child)))
   1099			count++;
   1100		count += of_count_icc_providers(child);
   1101	}
   1102
   1103	return count;
   1104}
   1105
   1106void icc_sync_state(struct device *dev)
   1107{
   1108	struct icc_provider *p;
   1109	struct icc_node *n;
   1110	static int count;
   1111
   1112	count++;
   1113
   1114	if (count < providers_count)
   1115		return;
   1116
   1117	mutex_lock(&icc_lock);
   1118	synced_state = true;
   1119	list_for_each_entry(p, &icc_providers, provider_list) {
   1120		dev_dbg(p->dev, "interconnect provider is in synced state\n");
   1121		list_for_each_entry(n, &p->nodes, node_list) {
   1122			if (n->init_avg || n->init_peak) {
   1123				n->init_avg = 0;
   1124				n->init_peak = 0;
   1125				aggregate_requests(n);
   1126				p->set(n, n);
   1127			}
   1128		}
   1129	}
   1130	mutex_unlock(&icc_lock);
   1131}
   1132EXPORT_SYMBOL_GPL(icc_sync_state);
   1133
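/*
 * Illustrative sketch, not part of the original file: provider drivers opt
 * into the synced-state handling above by pointing their driver's sync_state
 * hook at icc_sync_state(). The foo_* names are hypothetical.
 */
#if 0	/* illustrative only */
static struct platform_driver foo_icc_driver = {
	.probe = foo_icc_probe,
	.remove = foo_icc_remove,
	.driver = {
		.name = "foo-interconnect",
		.of_match_table = foo_icc_of_match,
		.sync_state = icc_sync_state,
	},
};
module_platform_driver(foo_icc_driver);
#endif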
   1134static int __init icc_init(void)
   1135{
   1136	struct device_node *root = of_find_node_by_path("/");
   1137
   1138	providers_count = of_count_icc_providers(root);
   1139	of_node_put(root);
   1140
   1141	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
   1142	debugfs_create_file("interconnect_summary", 0444,
   1143			    icc_debugfs_dir, NULL, &icc_summary_fops);
   1144	debugfs_create_file("interconnect_graph", 0444,
   1145			    icc_debugfs_dir, NULL, &icc_graph_fops);
   1146	return 0;
   1147}
   1148
   1149device_initcall(icc_init);
   1150
   1151MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
   1152MODULE_DESCRIPTION("Interconnect Driver Core");
   1153MODULE_LICENSE("GPL v2");