cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cxgb4_tc_matchall.c (15295B)


// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications.  All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_matchall.h"
#include "sched.h"
#include "cxgb4_uld.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

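/* Validate a tc police action against what the hardware scheduler
 * can express: the exceed action must be drop, the conform action
 * must be pipe or ok (and ok only if it is the last action), and
 * peakrate/avrate/overhead and packets-per-second rates are not
 * offloadable.
 */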
static int cxgb4_policer_validate(const struct flow_action *action,
				  const struct flow_action_entry *act,
				  struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload does not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}

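/* An egress matchall rule must carry exactly one police action and
 * cannot sit on a shared TC block. The policing rate must not exceed
 * the link speed, and every queue set on the port must either be
 * unbound or already bound at channel rate-limit level.
 */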
static int cxgb4_matchall_egress_validate(struct net_device *dev,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct port_info *pi = netdev2pinfo(dev);
	struct flow_action_entry *entry;
	struct ch_sched_queue qe;
	struct sched_class *e;
	u64 max_link_rate;
	u32 i, speed;
	int ret;

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload needs at least 1 policing action");
		return -EINVAL;
	} else if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload only supports 1 policing action");
		return -EINVAL;
	} else if (pi->tc_block_shared) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload not supported with shared blocks");
		return -EINVAL;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to get max speed supported by the link");
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	flow_action_for_each(i, entry, actions) {
		switch (entry->id) {
		case FLOW_ACTION_POLICE:
			ret = cxgb4_policer_validate(actions, entry, extack);
			if (ret)
				return ret;

			/* Convert bytes per second to bits per second */
			if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Specified policing max rate is larger than underlying link speed");
				return -ERANGE;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only policing action supported with Egress MATCHALL offload");
			return -EOPNOTSUPP;
		}
	}

	for (i = 0; i < pi->nqsets; i++) {
		memset(&qe, 0, sizeof(qe));
		qe.queue = i;

		e = cxgb4_sched_queue_lookup(dev, &qe);
		if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Some queues are already bound to a different class");
			return -EBUSY;
		}
	}

	return 0;
}

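/* Bind all queue sets of the port to traffic class @tc, unwinding
 * any queues already bound if a bind fails midway.
 */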
static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	int ret;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = tc;
		ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	while (i--) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}

	return ret;
}

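/* Release all queue sets of the port from their traffic class. */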
static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}
}

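/* Allocate a channel rate-limit (CH_RL) traffic class for the police
 * action, program its max rate in Kbps, and bind all queue sets of
 * the port to it.
 */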
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
				   struct tc_cls_matchall_offload *cls)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CH_RL,
		.u.params.mode = SCHED_CLASS_MODE_CLASS,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.minrate = 0,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *entry;
	struct sched_class *e;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	flow_action_for_each(i, entry, &cls->rule->action)
		if (entry->id == FLOW_ACTION_POLICE)
			break;

	ret = cxgb4_policer_validate(&cls->rule->action, entry, extack);
	if (ret)
		return ret;

	/* Convert from bytes per second to Kbps */
	p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
	p.u.params.channel = pi->tx_chan;
	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free traffic class available for policing action");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Could not bind queues to traffic class");
		goto out_free;
	}

	tc_port_matchall->egress.hwtc = e->idx;
	tc_port_matchall->egress.cookie = cls->cookie;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	cxgb4_sched_class_free(dev, e->idx);
	return ret;
}

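/* Tear down the egress offload: unbind the queues, free the traffic
 * class, and mark the egress state disabled.
 */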
static void cxgb4_matchall_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	cxgb4_matchall_tc_unbind_queues(dev);
	cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);

	tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
	tc_port_matchall->egress.cookie = 0;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
}

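/* If the rule contains a mirred action, allocate a mirror VI for
 * the port and record its id for later teardown.
 */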
static int cxgb4_matchall_mirror_alloc(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *act;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	flow_action_for_each(i, act, &cls->rule->action) {
		if (act->id == FLOW_ACTION_MIRRED) {
			ret = cxgb4_port_mirror_alloc(dev);
			if (ret) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Couldn't allocate mirror");
				return ret;
			}

			tc_port_matchall->ingress.viid_mirror = pi->viid_mirror;
			break;
		}
	}

	return 0;
}

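/* Free the port's mirror VI, if one was allocated. */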
static void cxgb4_matchall_mirror_free(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (!tc_port_matchall->ingress.viid_mirror)
		return;

	cxgb4_port_mirror_free(dev);
	tc_port_matchall->ingress.viid_mirror = 0;
}

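/* Remove the hardware filter for one filter type (IPv4 or IPv6)
 * and clear its cached TID.
 */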
static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type],
			       &tc_port_matchall->ingress.fs[filter_type]);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = 0;
	return 0;
}

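/* Install one matchall filter (IPv4 or IPv6) that matches all
 * traffic destined to this PF/VF pair and applies the rule's
 * actions to it.
 */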
static int cxgb4_matchall_add_filter(struct net_device *dev,
				     struct tc_cls_matchall_offload *cls,
				     u8 filter_type)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct ch_filter_specification *fs;
	int ret, fidx;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert rule if its prio doesn't conflict with
	 * existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET,
				   false, cls->common.prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	fs = &tc_port_matchall->ingress.fs[filter_type];
	memset(fs, 0, sizeof(*fs));

	if (fidx < adap->tids.nhpftids)
		fs->prio = 1;
	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	fs->type = filter_type;
	fs->hitcnts = 1;

	fs->val.pfvf_vld = 1;
	fs->val.pf = adap->pf;
	fs->val.vf = pi->vin;

	cxgb4_process_flow_actions(dev, &cls->rule->action, fs);

	ret = cxgb4_set_filter(dev, fidx, fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = fidx;
	return 0;
}

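/* Set up the ingress offload: allocate a mirror VI if the rule asks
 * for one, then install one filter per filter type, rolling
 * everything back on failure.
 */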
static int cxgb4_matchall_alloc_filter(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret, i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	ret = cxgb4_matchall_mirror_alloc(dev, cls);
	if (ret)
		return ret;

	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_matchall_add_filter(dev, cls, i);
		if (ret)
			goto out_free;
	}

	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	while (i-- > 0)
		cxgb4_matchall_del_filter(dev, i);

	cxgb4_matchall_mirror_free(dev);
	return ret;
}

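/* Remove all ingress matchall filters, free the mirror VI, and
 * reset the cached statistics.
 */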
static int cxgb4_matchall_free_filter(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;
	u8 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_matchall_del_filter(dev, i);
		if (ret)
			return ret;
	}

	cxgb4_matchall_mirror_free(dev);

	tc_port_matchall->ingress.packets = 0;
	tc_port_matchall->ingress.bytes = 0;
	tc_port_matchall->ingress.last_used = 0;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
	return 0;
}

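/* Offload a matchall rule. At most one ingress and one egress
 * matchall rule can be offloaded per port: ingress rules become
 * hardware filters, egress rules become a rate-limiting traffic
 * class.
 */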
int cxgb4_tc_matchall_replace(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct netlink_ext_ack *extack = cls_matchall->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (tc_port_matchall->ingress.state ==
		    CXGB4_MATCHALL_STATE_ENABLED) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only 1 Ingress MATCHALL can be offloaded");
			return -ENOMEM;
		}

		ret = cxgb4_validate_flow_actions(dev,
						  &cls_matchall->rule->action,
						  extack, 1);
		if (ret)
			return ret;

		return cxgb4_matchall_alloc_filter(dev, cls_matchall);
	}

	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 Egress MATCHALL can be offloaded");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
	if (ret)
		return ret;

	return cxgb4_matchall_alloc_tc(dev, cls_matchall);
}

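/* Remove an offloaded matchall rule, identified by the cookie
 * recorded when it was installed.
 */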
int cxgb4_tc_matchall_destroy(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		/* All the filter types of this matchall rule save the
		 * same cookie. So, checking for the first one is
		 * enough.
		 */
		if (cls_matchall->cookie !=
		    tc_port_matchall->ingress.fs[0].tc_cookie)
			return -ENOENT;

		return cxgb4_matchall_free_filter(dev);
	}

	if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
		return -ENOENT;

	cxgb4_matchall_free_tc(dev);
	return 0;
}

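/* Report ingress matchall statistics: sum the hit counters of all
 * filter types and push the delta since the last query to the TC
 * core.
 */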
int cxgb4_tc_matchall_stats(struct net_device *dev,
			    struct tc_cls_matchall_offload *cls_matchall)
{
	u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_matchall_ingress_entry *ingress;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;
	u8 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
		return -ENOENT;

	ingress = &tc_port_matchall->ingress;
	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_get_filter_counters(dev, ingress->tid[i],
						&tmp_packets, &tmp_bytes,
						ingress->fs[i].hash);
		if (ret)
			return ret;

		packets += tmp_packets;
		bytes += tmp_bytes;
	}

	if (tc_port_matchall->ingress.packets != packets) {
		flow_stats_update(&cls_matchall->stats,
				  bytes - tc_port_matchall->ingress.bytes,
				  packets - tc_port_matchall->ingress.packets,
				  0, tc_port_matchall->ingress.last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		tc_port_matchall->ingress.packets = packets;
		tc_port_matchall->ingress.bytes = bytes;
		tc_port_matchall->ingress.last_used = jiffies;
	}

	return 0;
}

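/* Force-disable any active matchall offload on the port; called
 * while tearing down the adapter's matchall state.
 */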
static void cxgb4_matchall_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_tc(dev);

	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_filter(dev);
}

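/* Allocate the adapter-wide matchall context, with one entry per
 * port.
 */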
int cxgb4_init_tc_matchall(struct adapter *adap)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_tc_matchall *tc_matchall;
	int ret;

	tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
	if (!tc_matchall)
		return -ENOMEM;

	tc_port_matchall = kcalloc(adap->params.nports,
				   sizeof(*tc_port_matchall),
				   GFP_KERNEL);
	if (!tc_port_matchall) {
		ret = -ENOMEM;
		goto out_free_matchall;
	}

	tc_matchall->port_matchall = tc_port_matchall;
	adap->tc_matchall = tc_matchall;
	return 0;

out_free_matchall:
	kfree(tc_matchall);
	return ret;
}

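/* Disable any active offloads on all ports, then free the per-port
 * entries and the matchall context itself.
 */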
void cxgb4_cleanup_tc_matchall(struct adapter *adap)
{
	u8 i;

	if (adap->tc_matchall) {
		if (adap->tc_matchall->port_matchall) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_matchall_disable_offload(dev);
			}
			kfree(adap->tc_matchall->port_matchall);
		}
		kfree(adap->tc_matchall);
	}
}