cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cxgb4_mps.c (5634B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Chelsio Communications, Inc. All rights reserved. */

#include "cxgb4.h"

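/*
 * The tracking types live in cxgb4.h; this file relies on the following
 * fields, sketched here for context (inferred from the accesses below):
 *
 *	struct mps_entries_ref {
 *		struct list_head list;	// linked on adapter->mps_ref
 *		u8 addr[ETH_ALEN];	// filter MAC address
 *		u8 mask[ETH_ALEN];	// filter MAC mask
 *		u16 idx;		// MPS TCAM index
 *		refcount_t refcnt;	// number of users of this entry
 *	};
 *
 * along with adapter->mps_ref (the list head) and adapter->mps_ref_lock.
 */

/* Drop one reference on the tracking entry matching @addr/@mask (a NULL
 * @mask matches the default all-ones mask).  Returns 0 when the last
 * reference is dropped and the entry removed, -EBUSY if other references
 * remain, -EINVAL if no matching entry exists.
 */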
static int cxgb4_mps_ref_dec_by_mac(struct adapter *adap,
				    const u8 *addr, const u8 *mask)
{
	u8 bitmask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mps_entries_ref *mps_entry, *tmp;
	int ret = -EINVAL;

	spin_lock_bh(&adap->mps_ref_lock);
	list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
		if (ether_addr_equal(mps_entry->addr, addr) &&
		    ether_addr_equal(mps_entry->mask, mask ? mask : bitmask)) {
			if (!refcount_dec_and_test(&mps_entry->refcnt)) {
				spin_unlock_bh(&adap->mps_ref_lock);
				return -EBUSY;
			}
			list_del(&mps_entry->list);
			kfree(mps_entry);
			ret = 0;
			break;
		}
	}
	spin_unlock_bh(&adap->mps_ref_lock);
	return ret;
}

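/* As cxgb4_mps_ref_dec_by_mac(), but keyed on the MPS TCAM index.  Note
 * that this variant takes mps_ref_lock without disabling bottom halves,
 * unlike the _bh users elsewhere in this file.
 */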
static int cxgb4_mps_ref_dec(struct adapter *adap, u16 idx)
{
	struct mps_entries_ref *mps_entry, *tmp;
	int ret = -EINVAL;

	spin_lock(&adap->mps_ref_lock);
	list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
		if (mps_entry->idx == idx) {
			if (!refcount_dec_and_test(&mps_entry->refcnt)) {
				spin_unlock(&adap->mps_ref_lock);
				return -EBUSY;
			}
			list_del(&mps_entry->list);
			kfree(mps_entry);
			ret = 0;
			break;
		}
	}
	spin_unlock(&adap->mps_ref_lock);
	return ret;
}

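/* Take a reference on the tracking entry for @idx, allocating a new node
 * (GFP_ATOMIC, since mps_ref_lock is held) on first use.  A NULL @mask is
 * stored as the all-ones mask.  Returns 0 on success or -ENOMEM.
 */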
static int cxgb4_mps_ref_inc(struct adapter *adap, const u8 *mac_addr,
			     u16 idx, const u8 *mask)
{
	u8 bitmask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mps_entries_ref *mps_entry;
	int ret = 0;

	spin_lock_bh(&adap->mps_ref_lock);
	list_for_each_entry(mps_entry, &adap->mps_ref, list) {
		if (mps_entry->idx == idx) {
			refcount_inc(&mps_entry->refcnt);
			goto unlock;
		}
	}
	mps_entry = kzalloc(sizeof(*mps_entry), GFP_ATOMIC);
	if (!mps_entry) {
		ret = -ENOMEM;
		goto unlock;
	}
	ether_addr_copy(mps_entry->mask, mask ? mask : bitmask);
	ether_addr_copy(mps_entry->addr, mac_addr);
	mps_entry->idx = idx;
	refcount_set(&mps_entry->refcnt, 1);
	list_add_tail(&mps_entry->list, &adap->mps_ref);
unlock:
	spin_unlock_bh(&adap->mps_ref_lock);
	return ret;
}

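/* Drop a reference on each of @naddr MAC filters; a filter is freed in
 * hardware via t4_free_mac_filt() only when its last reference goes away.
 * Returns @naddr on success or a negative error from the hardware free.
 */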
int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
			unsigned int naddr, const u8 **addr, bool sleep_ok)
{
	int ret, i;

	for (i = 0; i < naddr; i++) {
		if (!cxgb4_mps_ref_dec_by_mac(adap, addr[i], NULL)) {
			ret = t4_free_mac_filt(adap, adap->mbox, viid,
					       1, &addr[i], sleep_ok);
			if (ret < 0)
				return ret;
		}
	}

	/* return number of filters freed */
	return naddr;
}

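/* Allocate @naddr MAC filters in hardware, then take a tracking reference
 * for each successfully placed address (an index of 0xffff marks a
 * per-address failure).  If tracking fails, the filters are freed again.
 */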
int cxgb4_alloc_mac_filt(struct adapter *adap, unsigned int viid,
			 bool free, unsigned int naddr, const u8 **addr,
			 u16 *idx, u64 *hash, bool sleep_ok)
{
	int ret, i;

	ret = t4_alloc_mac_filt(adap, adap->mbox, viid, free,
				naddr, addr, idx, hash, sleep_ok);
	if (ret < 0)
		return ret;

	for (i = 0; i < naddr; i++) {
		if (idx[i] != 0xffff) {
			if (cxgb4_mps_ref_inc(adap, addr[i], idx[i], NULL)) {
				ret = -ENOMEM;
				goto error;
			}
		}
	}

	goto out;
error:
	cxgb4_free_mac_filt(adap, viid, naddr, addr, sleep_ok);

out:
	/* Returns a negative error number or the number of filters allocated */
	return ret;
}

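/* Change the MAC filter behind @tcam_idx via cxgb4_change_mac() and take a
 * tracking reference on the resulting index.
 */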
int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
			  int *tcam_idx, const u8 *addr,
			  bool persistent, u8 *smt_idx)
{
	int ret;

	ret = cxgb4_change_mac(pi, viid, tcam_idx,
			       addr, persistent, smt_idx);
	if (ret < 0)
		return ret;

	cxgb4_mps_ref_inc(pi->adapter, addr, *tcam_idx, NULL);
	return ret;
}

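/* Drop a reference on the raw (addr/mask) MAC filter at @idx; the hardware
 * entry is freed only on the last reference.
 */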
int cxgb4_free_raw_mac_filt(struct adapter *adap,
			    unsigned int viid,
			    const u8 *addr,
			    const u8 *mask,
			    unsigned int idx,
			    u8 lookup_type,
			    u8 port_id,
			    bool sleep_ok)
{
	int ret = 0;

	if (!cxgb4_mps_ref_dec(adap, idx))
		ret = t4_free_raw_mac_filt(adap, viid, addr,
					   mask, idx, lookup_type,
					   port_id, sleep_ok);

	return ret;
}

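/* Allocate a raw MAC filter and take a tracking reference on the index
 * returned by hardware; on tracking failure the hardware entry is freed
 * again and -ENOMEM is returned.
 */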
int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
			     unsigned int viid,
			     const u8 *addr,
			     const u8 *mask,
			     unsigned int idx,
			     u8 lookup_type,
			     u8 port_id,
			     bool sleep_ok)
{
	int ret;

	ret = t4_alloc_raw_mac_filt(adap, viid, addr,
				    mask, idx, lookup_type,
				    port_id, sleep_ok);
	if (ret < 0)
		return ret;

	if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
		ret = -ENOMEM;
		t4_free_raw_mac_filt(adap, viid, addr,
				     mask, idx, lookup_type,
				     port_id, sleep_ok);
	}

	return ret;
}

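/* Drop a reference on the encapsulation MAC filter at @idx; frees the
 * hardware entry on the last reference.
 */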
int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
			      int idx, bool sleep_ok)
{
	int ret = 0;

	if (!cxgb4_mps_ref_dec(adap, idx))
		ret = t4_free_encap_mac_filt(adap, viid, idx, sleep_ok);

	return ret;
}

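/* Allocate an encapsulation MAC filter matching @vni/@vni_mask (e.g. for
 * VXLAN/GENEVE offload) and take a tracking reference on it; unwinds the
 * hardware entry if tracking fails.
 */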
int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
			       const u8 *addr, const u8 *mask,
			       unsigned int vni, unsigned int vni_mask,
			       u8 dip_hit, u8 lookup_type, bool sleep_ok)
{
	int ret;

	ret = t4_alloc_encap_mac_filt(adap, viid, addr, mask, vni, vni_mask,
				      dip_hit, lookup_type, sleep_ok);
	if (ret < 0)
		return ret;

	if (cxgb4_mps_ref_inc(adap, addr, ret, mask)) {
		ret = -ENOMEM;
		t4_free_encap_mac_filt(adap, viid, ret, sleep_ok);
	}
	return ret;
}

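/* Initialise the reference-tracking list and its lock. */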
int cxgb4_init_mps_ref_entries(struct adapter *adap)
{
	spin_lock_init(&adap->mps_ref_lock);
	INIT_LIST_HEAD(&adap->mps_ref);

	return 0;
}

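/* Release every remaining tracking node, typically at adapter teardown. */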
void cxgb4_free_mps_ref_entries(struct adapter *adap)
{
	struct mps_entries_ref *mps_entry, *tmp;

	if (list_empty(&adap->mps_ref))
		return;

	spin_lock(&adap->mps_ref_lock);
	list_for_each_entry_safe(mps_entry, tmp, &adap->mps_ref, list) {
		list_del(&mps_entry->list);
		kfree(mps_entry);
	}
	spin_unlock(&adap->mps_ref_lock);
}
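
/*
 * Usage sketch (hypothetical caller, not part of this file): two users of
 * the same raw filter each take a reference, and the hardware entry is
 * freed only once the last user drops theirs.
 *
 *	// first user: hardware entry written, refcnt = 1
 *	idx = cxgb4_alloc_raw_mac_filt(adap, viid, mac, mask, idx,
 *				       lookup, port, true);
 *	// second user of the same index: refcnt = 2
 *	cxgb4_alloc_raw_mac_filt(adap, viid, mac, mask, idx,
 *				 lookup, port, true);
 *	// refcnt = 1, t4_free_raw_mac_filt() is skipped
 *	cxgb4_free_raw_mac_filt(adap, viid, mac, mask, idx, lookup, port, true);
 *	// refcnt = 0, hardware entry is freed
 *	cxgb4_free_raw_mac_filt(adap, viid, mac, mask, idx, lookup, port, true);
 */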