cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mtk_ppe.c (20385B)


// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"

static DEFINE_SPINLOCK(ppe_lock);

static const struct rhashtable_params mtk_flow_l2_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
	.key_len = offsetof(struct mtk_foe_bridge, key_end),
	.automatic_shrinking = true,
};

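/* Thin MMIO accessors for the PPE register window; ppe_m32() performs a
 * read-modify-write and returns the value that was written.
 */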
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	writel(val, ppe->base + reg);
}

static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
	return readl(ppe->base + reg);
}

static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
	u32 val;

	val = ppe_r32(ppe, reg);
	val &= ~mask;
	val |= set;
	ppe_w32(ppe, reg, val);

	return val;
}

static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, 0, val);
}

static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
	return ppe_m32(ppe, reg, val, 0);
}

static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
}

static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
				 !(val & MTK_PPE_GLO_CFG_BUSY),
				 20, MTK_PPE_WAIT_TIMEOUT_US);

	if (ret)
		dev_err(ppe->dev, "PPE table busy");

	return ret;
}

static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
	ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
	ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}

static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
	mtk_ppe_cache_clear(ppe);

	ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
		enable * MTK_PPE_CACHE_CTL_EN);
}

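/* Derive the FOE table bucket for an entry from its address/port tuple,
 * mirroring the hardware hash so software can find the slot the PPE will
 * use. Bridge, DS-Lite and 6RD entries are not hashed here.
 */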
static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
{
	u32 hv1, hv2, hv3;
	u32 hash;

	switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		hv1 = e->ipv4.orig.ports;
		hv2 = e->ipv4.orig.dest_ip;
		hv3 = e->ipv4.orig.src_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
		hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
		hv1 ^= e->ipv6.ports;

		hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
		hv2 ^= e->ipv6.dest_ip[0];

		hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
		hv3 ^= e->ipv6.src_ip[0];
		break;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
	default:
		WARN_ON_ONCE(1);
		return MTK_PPE_HASH_MASK;
	}

	hash = (hv1 & hv2) | ((~hv1) & hv3);
	hash = (hash >> 24) | ((hash & 0xffffff) << 8);
	hash ^= hv1 ^ hv2 ^ hv3;
	hash ^= hash >> 16;
	hash <<= 1;
	hash &= MTK_PPE_ENTRIES - 1;

	return hash;
}

static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.l2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.l2;

	return &entry->ipv4.l2;
}

static inline u32 *
mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return &entry->bridge.ib2;

	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		return &entry->ipv6.ib2;

	return &entry->ipv4.ib2;
}

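/* Initialize a software FOE entry: bind state, packet type, L4 protocol,
 * destination PSE port and the L2 header (MAC addresses, ethertype) that
 * the PPE will rewrite onto offloaded packets.
 */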
int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
			  u8 pse_port, u8 *src_mac, u8 *dest_mac)
{
	struct mtk_foe_mac_info *l2;
	u32 ports_pad, val;

	memset(entry, 0, sizeof(*entry));

	val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
	      FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
	      FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
	      MTK_FOE_IB1_BIND_TTL |
	      MTK_FOE_IB1_BIND_CACHE;
	entry->ib1 = val;

	val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
	      FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
	      FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);

	if (is_multicast_ether_addr(dest_mac))
		val |= MTK_FOE_IB2_MULTICAST;

	ports_pad = 0xa5a5a500 | (l4proto & 0xff);
	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		entry->ipv4.orig.ports = ports_pad;
	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		entry->ipv6.ports = ports_pad;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
		ether_addr_copy(entry->bridge.src_mac, src_mac);
		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
		entry->bridge.ib2 = val;
		l2 = &entry->bridge.l2;
	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
		entry->ipv6.ib2 = val;
		l2 = &entry->ipv6.l2;
	} else {
		entry->ipv4.ib2 = val;
		l2 = &entry->ipv4.l2;
	}

	l2->dest_mac_hi = get_unaligned_be32(dest_mac);
	l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
	l2->src_mac_hi = get_unaligned_be32(src_mac);
	l2->src_mac_lo = get_unaligned_be16(src_mac + 4);

	if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
		l2->etype = ETH_P_IPV6;
	else
		l2->etype = ETH_P_IP;

	return 0;
}

int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
{
	u32 *ib2 = mtk_foe_entry_ib2(entry);
	u32 val;

	val = *ib2;
	val &= ~MTK_FOE_IB2_DEST_PORT;
	val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
	*ib2 = val;

	return 0;
}

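/* Fill in the L3/L4 match tuple. For IPv4 HNAPT the egress flag selects the
 * translated (new) tuple instead of the original one; route-only types carry
 * no ports. The DS-Lite and 6RD variants store the tunnel endpoint addresses.
 */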
int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
				 __be32 src_addr, __be16 src_port,
				 __be32 dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	struct mtk_ipv4_tuple *t;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
		if (egress) {
			t = &entry->ipv4.new;
			break;
		}
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
		t = &entry->ipv4.orig;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
		entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	t->src_ip = be32_to_cpu(src_addr);
	t->dest_ip = be32_to_cpu(dest_addr);

	if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
		return 0;

	t->src_port = be16_to_cpu(src_port);
	t->dest_port = be16_to_cpu(dest_port);

	return 0;
}

int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
				 __be32 *src_addr, __be16 src_port,
				 __be32 *dest_addr, __be16 dest_port)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
	u32 *src, *dest;
	int i;

	switch (type) {
	case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
		src = entry->dslite.tunnel_src_ip;
		dest = entry->dslite.tunnel_dest_ip;
		break;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
	case MTK_PPE_PKT_TYPE_IPV6_6RD:
		entry->ipv6.src_port = be16_to_cpu(src_port);
		entry->ipv6.dest_port = be16_to_cpu(dest_port);
		fallthrough;
	case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
		src = entry->ipv6.src_ip;
		dest = entry->ipv6.dest_ip;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	for (i = 0; i < 4; i++)
		src[i] = be32_to_cpu(src_addr[i]);
	for (i = 0; i < 4; i++)
		dest[i] = be32_to_cpu(dest_addr[i]);

	return 0;
}

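/* The helpers below describe the egress L2 encapsulation: a DSA tag (encoded
 * via the etype field and the VLAN layer counter), up to two VLAN tags, a
 * PPPoE session header, and wireless (WDMA) forwarding info packed into the
 * vlan2 field.
 */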
int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	l2->etype = BIT(port);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
		entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
	else
		l2->etype |= BIT(8);

	entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;

	return 0;
}

int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
	case 0:
		entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
			      FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		l2->vlan1 = vid;
		return 0;
	case 1:
		if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
			l2->vlan1 = vid;
			l2->etype |= BIT(8);
		} else {
			l2->vlan2 = vid;
			entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
		}
		return 0;
	default:
		return -ENOSPC;
	}
}

int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);

	if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
	    (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
		l2->etype = ETH_P_PPP_SES;

	entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
	l2->pppoe_id = sid;

	return 0;
}

int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
			   int bss, int wcid)
{
	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
	u32 *ib2 = mtk_foe_entry_ib2(entry);

	*ib2 &= ~MTK_FOE_IB2_PORT_MG;
	*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
	if (wdma_idx)
		*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;

	l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
		    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
		    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);

	return 0;
}

static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}

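/* Compare a software flow against a hardware FOE entry: the UDP flag in ib1
 * must match and the L3/L4 match key must be identical; the forwarding
 * fields (ib2 and the L2 rewrite info) are not compared.
 */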
static bool
mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
{
	int type, len;

	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
		return false;

	type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
	else
		len = offsetof(struct mtk_foe_entry, ipv4.ib2);

	return !memcmp(&entry->data.data, &data->data, len - 4);
}

static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct hlist_head *head;
	struct hlist_node *tmp;

	if (entry->type == MTK_FLOW_TYPE_L2) {
		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
				       mtk_flow_l2_ht_params);

		head = &entry->l2_flows;
		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
			__mtk_foe_entry_clear(ppe, entry);
		return;
	}

	hlist_del_init(&entry->list);
	if (entry->hash != 0xffff) {
		/* return the hardware entry to the unbound state so the PPE
		 * stops using it and the slot can be reclaimed
		 */
		ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
		ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
							      MTK_FOE_STATE_UNBIND);
		dma_wmb();
	}
	entry->hash = 0xffff;

	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
		return;

	hlist_del_init(&entry->l2_data.list);
	kfree(entry);
}

static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
	u16 timestamp;
	u16 now;

	now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
	timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;

	if (timestamp > now)
		return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
	else
		return now - timestamp;
}

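/* For an L2 flow, walk all hardware subflows and propagate the most recent
 * bind timestamp back into the software entry, dropping subflows whose
 * hardware state is no longer BIND.
 */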
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_flow_entry *cur;
	struct mtk_foe_entry *hwe;
	struct hlist_node *tmp;
	int idle;

	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
		int cur_idle;
		u32 ib1;

		hwe = &ppe->foe_table[cur->hash];
		ib1 = READ_ONCE(hwe->ib1);

		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
			cur->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, cur);
			continue;
		}

		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
		if (cur_idle >= idle)
			continue;

		idle = cur_idle;
		entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
		entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
	}
}

static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	struct mtk_foe_entry *hwe;
	struct mtk_foe_entry foe;

	spin_lock_bh(&ppe_lock);

	if (entry->type == MTK_FLOW_TYPE_L2) {
		mtk_flow_entry_update_l2(ppe, entry);
		goto out;
	}

	if (entry->hash == 0xffff)
		goto out;

	hwe = &ppe->foe_table[entry->hash];
	memcpy(&foe, hwe, sizeof(foe));
	if (!mtk_flow_entry_match(entry, &foe)) {
		entry->hash = 0xffff;
		goto out;
	}

	entry->data.ib1 = foe.ib1;

out:
	spin_unlock_bh(&ppe_lock);
}

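/* Write a prepared entry into the DMA-mapped FOE table: data words first,
 * then ib1 after a write barrier so the hardware never observes a bound ib1
 * with stale data, followed by a PPE cache flush.
 */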
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
		       u16 hash)
{
	struct mtk_foe_entry *hwe;
	u16 timestamp;

	timestamp = mtk_eth_timestamp(ppe->eth);
	timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
	entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);

	hwe = &ppe->foe_table[hash];
	memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
	wmb();
	hwe->ib1 = entry->ib1;

	dma_wmb();

	mtk_ppe_cache_clear(ppe);
}

void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	spin_lock_bh(&ppe_lock);
	__mtk_foe_entry_clear(ppe, entry);
	spin_unlock_bh(&ppe_lock);
}

static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	entry->type = MTK_FLOW_TYPE_L2;

	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
				      mtk_flow_l2_ht_params);
}

int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
	u32 hash;

	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
		return mtk_foe_entry_commit_l2(ppe, entry);

	hash = mtk_ppe_hash_entry(&entry->data);
	entry->hash = 0xffff;
	spin_lock_bh(&ppe_lock);
	hlist_add_head(&entry->list, &ppe->foe_flow[hash / 2]);
	spin_unlock_bh(&ppe_lock);

	return 0;
}

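/* Instantiate a per-hash hardware subflow for a bridged (L2) flow: clone the
 * L3/L4 fields the PPE captured for this hash, overlay the L2 rewrite and
 * ib2 from the parent bridge entry, and commit the result to the FOE table.
 */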
static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
			     u16 hash)
{
	struct mtk_flow_entry *flow_info;
	struct mtk_foe_entry foe, *hwe;
	struct mtk_foe_mac_info *l2;
	u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
	int type;

	flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
			    GFP_ATOMIC);
	if (!flow_info)
		return;

	flow_info->l2_data.base_flow = entry;
	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
	flow_info->hash = hash;
	hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 2]);
	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);

	hwe = &ppe->foe_table[hash];
	memcpy(&foe, hwe, sizeof(foe));
	foe.ib1 &= ib1_mask;
	foe.ib1 |= entry->data.ib1 & ~ib1_mask;

	l2 = mtk_foe_entry_l2(&foe);
	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));

	type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
		l2->etype = ETH_P_IPV6;

	*mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;

	__mtk_foe_entry_commit(ppe, &foe, hash);
}

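/* Called from the RX path for packets the PPE hashed but did not bind:
 * either bind a matching software flow at that hash, or look up a bridged
 * flow by MAC/VLAN (parsing a possible DSA tag) and spawn a subflow for it.
 */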
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
	struct hlist_head *head = &ppe->foe_flow[hash / 2];
	struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
	struct mtk_flow_entry *entry;
	struct mtk_foe_bridge key = {};
	struct hlist_node *n;
	struct ethhdr *eh;
	bool found = false;
	u8 *tag;

	spin_lock_bh(&ppe_lock);

	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
		goto out;

	hlist_for_each_entry_safe(entry, n, head, list) {
		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
				     MTK_FOE_STATE_BIND))
				continue;

			entry->hash = 0xffff;
			__mtk_foe_entry_clear(ppe, entry);
			continue;
		}

		if (found || !mtk_flow_entry_match(entry, hwe)) {
			if (entry->hash != 0xffff)
				entry->hash = 0xffff;
			continue;
		}

		entry->hash = hash;
		__mtk_foe_entry_commit(ppe, &entry->data, hash);
		found = true;
	}

	if (found)
		goto out;

	eh = eth_hdr(skb);
	ether_addr_copy(key.dest_mac, eh->h_dest);
	ether_addr_copy(key.src_mac, eh->h_source);
	tag = skb->data - 2;
	key.vlan = 0;
	switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
	case htons(ETH_P_XDSA):
		if (!netdev_uses_dsa(skb->dev) ||
		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
			goto out;

		tag += 4;
		if (get_unaligned_be16(tag) != ETH_P_8021Q)
			break;

		fallthrough;
#endif
	case htons(ETH_P_8021Q):
		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
		break;
	default:
		break;
	}

	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
	if (!entry)
		goto out;

	mtk_foe_entry_commit_subflow(ppe, entry, hash);

out:
	spin_unlock_bh(&ppe_lock);
}

int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
	mtk_flow_entry_update(ppe, entry);

	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}

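/* Allocate the PPE context and the coherent DMA buffer backing the FOE
 * table; mtk_ppe_start() below programs the hardware and enables offload.
 */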
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
		 int version)
{
	struct device *dev = eth->dev;
	struct mtk_foe_entry *foe;
	struct mtk_ppe *ppe;

	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
	if (!ppe)
		return NULL;

	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

	/* need to allocate a separate device, since the PPE DMA access is
	 * not coherent.
	 */
	ppe->base = base;
	ppe->eth = eth;
	ppe->dev = dev;
	ppe->version = version;

	foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
				  &ppe->foe_phys, GFP_KERNEL);
	if (!foe)
		return NULL;

	ppe->foe_table = foe;

	mtk_ppe_debugfs_init(ppe);

	return ppe;
}

static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
	int i, k;

	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));

	if (!IS_ENABLED(CONFIG_SOC_MT7621))
		return;

	/* skip all entries that cross the 1024 byte boundary */
	for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
		for (k = 0; k < ARRAY_SIZE(skip); k++)
			ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
}

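/* Program the FOE table base and sizing, aging and keepalive behaviour,
 * flow-type enables and binding rate limits, then switch the PPE on.
 */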
int mtk_ppe_start(struct mtk_ppe *ppe)
{
	u32 val;

	mtk_ppe_init_foe_table(ppe);
	ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);

	val = MTK_PPE_TB_CFG_ENTRY_80B |
	      MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN |
	      FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
			 MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
	      FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
			 MTK_PPE_KEEPALIVE_DISABLE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
	      FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
			 MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
	      FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
			 MTK_PPE_ENTRIES_SHIFT);
	ppe_w32(ppe, MTK_PPE_TB_CFG, val);

	ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
		MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);

	mtk_ppe_cache_enable(ppe, true);

	val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
	      MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
	      MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
	      MTK_PPE_FLOW_CFG_IP6_6RD |
	      MTK_PPE_FLOW_CFG_IP4_NAT |
	      MTK_PPE_FLOW_CFG_IP4_NAPT |
	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

	val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
	      FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
	ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
	      FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);

	val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
	      FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
	ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);

	val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);

	val = MTK_PPE_BIND_LIMIT1_FULL |
	      FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
	ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);

	val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
	      FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
	ppe_w32(ppe, MTK_PPE_BIND_RATE, val);

	/* enable PPE */
	val = MTK_PPE_GLO_CFG_EN |
	      MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
	      MTK_PPE_GLO_CFG_IP4_CS_DROP |
	      MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
	ppe_w32(ppe, MTK_PPE_GLO_CFG, val);

	ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);

	return 0;
}

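/* Invalidate every FOE entry, disable the cache, offload engine, flow types
 * and aging, then wait for the table to become idle.
 */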
int mtk_ppe_stop(struct mtk_ppe *ppe)
{
	u32 val;
	int i;

	for (i = 0; i < MTK_PPE_ENTRIES; i++)
		ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
						   MTK_FOE_STATE_INVALID);

	mtk_ppe_cache_enable(ppe, false);

	/* disable offload engine */
	ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
	ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);

	/* disable aging */
	val = MTK_PPE_TB_CFG_AGE_NON_L4 |
	      MTK_PPE_TB_CFG_AGE_UNBIND |
	      MTK_PPE_TB_CFG_AGE_TCP |
	      MTK_PPE_TB_CFG_AGE_UDP |
	      MTK_PPE_TB_CFG_AGE_TCP_FIN;
	ppe_clear(ppe, MTK_PPE_TB_CFG, val);

	return mtk_ppe_wait_busy(ppe);
}