cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spectrum_flow.c (7625B)


// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <net/net_namespace.h>

#include "spectrum.h"

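/* Allocate a flow block and initialize its binding and matchall rule lists. */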
struct mlxsw_sp_flow_block *
mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp, struct net *net)
{
	struct mlxsw_sp_flow_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;
	INIT_LIST_HEAD(&block->binding_list);
	INIT_LIST_HEAD(&block->mall.list);
	block->mlxsw_sp = mlxsw_sp;
	block->net = net;
	return block;
}

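/* Free a flow block; all port bindings must already have been removed. */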
void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block)
{
	WARN_ON(!list_empty(&block->binding_list));
	kfree(block);
}

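/* Find the binding of a block to the given port and direction, if any. */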
static struct mlxsw_sp_flow_block_binding *
mlxsw_sp_flow_block_lookup(struct mlxsw_sp_flow_block *block,
			   struct mlxsw_sp_port *mlxsw_sp_port, bool ingress)
{
	struct mlxsw_sp_flow_block_binding *binding;

	list_for_each_entry(binding, &block->binding_list, list)
		if (binding->mlxsw_sp_port == mlxsw_sp_port &&
		    binding->ingress == ingress)
			return binding;
	return NULL;
}

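/* True if an ACL ruleset is already attached to this block. */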
static bool
mlxsw_sp_flow_block_ruleset_bound(const struct mlxsw_sp_flow_block *block)
{
	return block->ruleset_zero;
}

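/* Bind a block to a port in the given direction: reject directions the
 * block's rules cannot support, bind matchall entries, record the binding
 * and attach the ACL ruleset if one is already present.
 */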
static int mlxsw_sp_flow_block_bind(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_flow_block *block,
				    struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_flow_block_binding *binding;
	int err;

	if (WARN_ON(mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress)))
		return -EEXIST;

	if (ingress && block->ingress_blocker_rule_count) {
		NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to ingress because it contains unsupported rules");
		return -EOPNOTSUPP;
	}

	if (!ingress && block->egress_blocker_rule_count) {
		NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
		return -EOPNOTSUPP;
	}

	err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port, extack);
	if (err)
		return err;

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding) {
		err = -ENOMEM;
		goto err_binding_alloc;
	}
	binding->mlxsw_sp_port = mlxsw_sp_port;
	binding->ingress = ingress;

	if (mlxsw_sp_flow_block_ruleset_bound(block)) {
		err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, block, binding);
		if (err)
			goto err_ruleset_bind;
	}

	if (ingress)
		block->ingress_binding_count++;
	else
		block->egress_binding_count++;
	list_add(&binding->list, &block->binding_list);
	return 0;

err_ruleset_bind:
	kfree(binding);
err_binding_alloc:
	mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);

	return err;
}

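/* Undo mlxsw_sp_flow_block_bind(): drop the binding, detach the ACL
 * ruleset if one is attached and unbind matchall entries from the port.
 */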
static int mlxsw_sp_flow_block_unbind(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_flow_block *block,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      bool ingress)
{
	struct mlxsw_sp_flow_block_binding *binding;

	binding = mlxsw_sp_flow_block_lookup(block, mlxsw_sp_port, ingress);
	if (!binding)
		return -ENOENT;

	list_del(&binding->list);

	if (ingress)
		block->ingress_binding_count--;
	else
		block->egress_binding_count--;

	if (mlxsw_sp_flow_block_ruleset_bound(block))
		mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);

	kfree(binding);

	mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);

	return 0;
}

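/* Dispatch tc matchall classifier offload commands for this block. */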
static int mlxsw_sp_flow_block_mall_cb(struct mlxsw_sp_flow_block *flow_block,
				       struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_flow_block_mlxsw_sp(flow_block);

	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_mall_replace(mlxsw_sp, flow_block, f);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_mall_destroy(flow_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

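/* Dispatch tc flower classifier offload commands for this block. */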
static int mlxsw_sp_flow_block_flower_cb(struct mlxsw_sp_flow_block *flow_block,
					 struct flow_cls_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_flow_block_mlxsw_sp(flow_block);

	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return mlxsw_sp_flower_replace(mlxsw_sp, flow_block, f);
	case FLOW_CLS_DESTROY:
		mlxsw_sp_flower_destroy(mlxsw_sp, flow_block, f);
		return 0;
	case FLOW_CLS_STATS:
		return mlxsw_sp_flower_stats(mlxsw_sp, flow_block, f);
	case FLOW_CLS_TMPLT_CREATE:
		return mlxsw_sp_flower_tmplt_create(mlxsw_sp, flow_block, f);
	case FLOW_CLS_TMPLT_DESTROY:
		mlxsw_sp_flower_tmplt_destroy(mlxsw_sp, flow_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

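/* Flow block callback registered with the core: route matchall and flower
 * offload requests to the handlers above unless the block is disabled.
 */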
static int mlxsw_sp_flow_block_cb(enum tc_setup_type type,
				  void *type_data, void *cb_priv)
{
	struct mlxsw_sp_flow_block *flow_block = cb_priv;

	if (mlxsw_sp_flow_block_disabled(flow_block))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return mlxsw_sp_flow_block_mall_cb(flow_block, type_data);
	case TC_SETUP_CLSFLOWER:
		return mlxsw_sp_flow_block_flower_cb(flow_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

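/* Release callback invoked when the last block_cb reference is dropped. */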
static void mlxsw_sp_tc_block_release(void *cb_priv)
{
	struct mlxsw_sp_flow_block *flow_block = cb_priv;

	mlxsw_sp_flow_block_destroy(flow_block);
}

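/* All block callbacks installed by this driver; handed to the flow
 * offload core via f->driver_block_list.
 */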
static LIST_HEAD(mlxsw_sp_block_cb_list);

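/* FLOW_BLOCK_BIND handler: reuse or create the block callback for this
 * block, bind the flow block to the port and register the callback with
 * the core on first use.
 */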
static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port,
					struct flow_block_offload *f,
					bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_flow_block *flow_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,
					mlxsw_sp);
	if (!block_cb) {
		flow_block = mlxsw_sp_flow_block_create(mlxsw_sp, f->net);
		if (!flow_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_flow_block_cb,
					       mlxsw_sp, flow_block,
					       mlxsw_sp_tc_block_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_flow_block_destroy(flow_block);
			return PTR_ERR(block_cb);
		}
		register_block = true;
	} else {
		flow_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);
	err = mlxsw_sp_flow_block_bind(mlxsw_sp, flow_block,
				       mlxsw_sp_port, ingress, f->extack);
	if (err)
		goto err_block_bind;

	if (ingress)
		mlxsw_sp_port->ing_flow_block = flow_block;
	else
		mlxsw_sp_port->eg_flow_block = flow_block;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
	}

	return 0;

err_block_bind:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}

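/* FLOW_BLOCK_UNBIND handler: unbind the flow block from the port and
 * remove the block callback once its last reference is gone.
 */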
static void mlxsw_sp_setup_tc_block_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct flow_block_offload *f,
					   bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_flow_block *flow_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_flow_block_cb,
					mlxsw_sp);
	if (!block_cb)
		return;

	if (ingress)
		mlxsw_sp_port->ing_flow_block = NULL;
	else
		mlxsw_sp_port->eg_flow_block = NULL;

	flow_block = flow_block_cb_priv(block_cb);
	err = mlxsw_sp_flow_block_unbind(mlxsw_sp, flow_block,
					 mlxsw_sp_port, ingress);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

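/* Handle clsact block offload for a port: dispatch FLOW_BLOCK_BIND and
 * FLOW_BLOCK_UNBIND to the handlers above.
 */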
int mlxsw_sp_setup_tc_block_clsact(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f,
				   bool ingress)
{
	f->driver_block_list = &mlxsw_sp_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		return mlxsw_sp_setup_tc_block_bind(mlxsw_sp_port, f, ingress);
	case FLOW_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_unbind(mlxsw_sp_port, f, ingress);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}