cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mtk_ppe_offload.c (13030B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"

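/* Flow key and action data collected from a flow_cls_offload rule while
 * it is being translated into a hardware flow table (FOE) entry.
 */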
struct mtk_flow_data {
	struct ethhdr eth;

	union {
		struct {
			__be32 src_addr;
			__be32 dst_addr;
		} v4;

		struct {
			struct in6_addr src_addr;
			struct in6_addr dst_addr;
		} v6;
	};

	__be16 src_port;
	__be16 dst_port;

	u16 vlan_in;

	struct {
		u16 id;
		__be16 proto;
		u8 num;
	} vlan;
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};

static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

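/* Copy the parsed L3/L4 tuple into the FOE entry. For IPv4, @egress
 * selects which half of the tuple is written: the original one (false)
 * or the translated, post-mangle one (true).
 */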
static int
mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
		       bool egress)
{
	return mtk_foe_entry_set_ipv4_tuple(foe, egress,
					    data->v4.src_addr, data->src_port,
					    data->v4.dst_addr, data->dst_port);
}

static int
mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
{
	return mtk_foe_entry_set_ipv6_tuple(foe,
					    data->v6.src_addr.s6_addr32, data->src_port,
					    data->v6.dst_addr.s6_addr32, data->dst_port);
}

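/* Apply an ETH-header pedit to the cached MAC addresses. Offsets 0-8
 * stay within h_dest/h_source; a zero mask means a full 32-bit write,
 * any other mask a 16-bit one, with 0xffff shifting both source and
 * destination by two bytes.
 */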
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

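/* Query the device's forward path to see whether packets for @addr leave
 * through MediaTek WED (wireless ethernet dispatch). On success, record
 * the WDMA index, queue, bss and wcid needed to offload to the wireless
 * path.
 */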
static int
mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
{
	struct net_device_path_ctx ctx = {
		.dev = dev,
	};
	struct net_device_path path = {};

	memcpy(ctx.daddr, addr, sizeof(ctx.daddr));

	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
		return -1;

	if (!dev->netdev_ops->ndo_fill_forward_path)
		return -1;

	if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
		return -1;

	if (path.type != DEV_PATH_MTK_WDMA)
		return -1;

	info->wdma_idx = path.mtk_wdma.wdma_idx;
	info->queue = path.mtk_wdma.queue;
	info->bss = path.mtk_wdma.bss;
	info->wcid = path.mtk_wdma.wcid;

	return 0;
}

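/* Translate a TCP/UDP header pedit into the cached ports. For a write at
 * offset 0 the mask tells us whether the new value lands in the low half
 * (destination port) or the high half (source port) of the word; offset
 * 2 always targets the destination port.
 */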
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

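/* Translate an IPv4 header pedit into the cached source or destination
 * address.
 */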
static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
		     struct mtk_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

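/* If @dev is a user port of a MediaTek DSA switch, rewrite it to the CPU
 * master device and return the switch port index so the FOE entry can
 * carry the matching DSA tag.
 */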
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	*dev = dp->cpu_dp->master;

	return dp->index;
#else
	return -ENODEV;
#endif
}

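/* Resolve the egress netdev into a PSE (packet switch engine) port for
 * the FOE entry: WED-backed wireless devices take the WDMA path (PSE
 * port 3 here), GMAC0 and GMAC1 map to PSE ports 1 and 2, and anything
 * else cannot be offloaded.
 */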
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
			   struct net_device *dev, const u8 *dest_mac,
			   int *wed_index)
{
	struct mtk_wdma_info info = {};
	int pse_port, dsa_port;

	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
		mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
				       info.wcid);
		pse_port = 3;
		*wed_index = info.wdma_idx;
		goto out;
	}

	dsa_port = mtk_flow_get_dsa_port(&dev);
	if (dsa_port >= 0)
		mtk_foe_entry_set_dsa(foe, dsa_port);

	if (dev == eth->netdev[0])
		pse_port = 1;
	else if (dev == eth->netdev[1])
		pse_port = 2;
	else
		return -EOPNOTSUPP;

out:
	mtk_foe_entry_set_pse_port(foe, pse_port);

	return 0;
}

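/* FLOW_CLS_REPLACE: validate and parse the classifier match and actions,
 * build the FOE entry, resolve the output device and commit the entry to
 * the PPE, indexing it by the flow cookie.
 */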
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct net_device *odev = NULL;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	int wed_index = -1;
	u16 addr_type = 0;
	u8 l4proto = 0;
	int err = 0;
	int i;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
		return -EEXIST;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
			struct flow_match_vlan match;

			flow_rule_match_vlan(rule, &match);

			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan_in = match.key->vlan_id;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 1 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.id = act->vlan.vid;
			data.vlan.proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
				    data.eth.h_source,
				    data.eth.h_dest);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);

		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(&foe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv6_addr(&foe, &data);
	}

	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(&foe, &data, true);
		if (err)
			return err;
	}

	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
		foe.bridge.vlan = data.vlan_in;

	if (data.vlan.num == 1) {
		if (data.vlan.proto != htons(ETH_P_8021Q))
			return -EOPNOTSUPP;

		mtk_foe_entry_set_vlan(&foe, data.vlan.id);
	}
	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
					 &wed_index);
	if (err)
		return err;

	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
		return err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	memcpy(&entry->data, &foe, sizeof(entry->data));
	entry->wed_index = wed_index;

	err = mtk_foe_entry_commit(eth->ppe, entry);
	if (err < 0)
		goto free;

	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear;

	return 0;

clear:
	mtk_foe_entry_clear(eth->ppe, entry);
free:
	kfree(entry);
	if (wed_index >= 0)
		mtk_wed_flow_remove(wed_index);
	return err;
}

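/* FLOW_CLS_DESTROY: invalidate the PPE entry and release its bookkeeping. */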
static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	mtk_foe_entry_clear(eth->ppe, entry);
	rhashtable_remove_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);
	if (entry->wed_index >= 0)
		mtk_wed_flow_remove(entry->wed_index);
	kfree(entry);

	return 0;
}

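/* FLOW_CLS_STATS: the PPE only tracks per-entry idle time, so report it
 * back as a "last used" timestamp.
 */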
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;
	u32 idle;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	idle = mtk_foe_entry_idle_time(eth->ppe, entry);
	f->stats.lastused = jiffies - idle * HZ;

	return 0;
}

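/* A single mutex serializes flow offload commands across all devices
 * handled by this driver.
 */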
static DEFINE_MUTEX(mtk_flow_offload_mutex);

static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int err;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		err = mtk_flow_offload_replace(eth, cls);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
		break;
	case FLOW_CLS_STATS:
		err = mtk_flow_offload_stats(eth, cls);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&mtk_flow_offload_mutex);

	return err;
}

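/* Bind or unbind the classifier block callback, refcounting it so that
 * multiple binders (e.g. TC_SETUP_BLOCK and TC_SETUP_FT) share a single
 * callback per device.
 */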
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (!eth->ppe || !eth->ppe->foe_table)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

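/* Entry point from the driver's ndo_setup_tc: both classifier block and
 * flowtable offload requests funnel into the same block handler.
 */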
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_eth_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

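/* Set up the flow table. A missing PPE is not treated as an error here;
 * flow offload is simply left unavailable.
 */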
int mtk_eth_offload_init(struct mtk_eth *eth)
{
	if (!eth->ppe || !eth->ppe->foe_table)
		return 0;

	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}