cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qos_burst.sh (13026B)


#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# This test sends 1Gbps of traffic through the switch, into which it then
# injects a burst of traffic and tests that there are no drops.
#
# The 1Gbps stream is created by sending a >1Gbps stream from H1. This stream
# ingresses through $swp1, and is forwarded through a small temporary pool to a
# 1Gbps $swp3.
#
# Thus a 1Gbps stream enters $swp4, and is forwarded through a large pool to
# $swp2, and eventually to H2. Since $swp2 is a 1Gbps port as well, no backlog
# is generated.
#
# At this point, a burst of traffic is forwarded from H3. This enters $swp5, is
# forwarded to $swp2, which is fully subscribed by the 1Gbps stream. The
# expectation is that the burst is wholly absorbed by the large pool and no
# drops are caused. After the burst, there should be a backlog that is hard to
# get rid of, because $swp2 is fully subscribed. But because each individual
# packet is scheduled soon after getting enqueued, SLL and HLL do not impact
# the test.
#
# +-----------------------+                           +-----------------------+
# | H1                    |                           | H3                    |
# |   + $h1.111           |                           |          $h3.111 +    |
# |   | 192.0.2.33/28     |                           |    192.0.2.35/28 |    |
# |   |                   |                           |                  |    |
# |   + $h1               |                           |              $h3 +    |
# +---|-------------------+  +--------------------+   +------------------|----+
#     |                      |                    |                      |
# +---|----------------------|--------------------|----------------------|----+
# |   + $swp1          $swp3 +                    + $swp4          $swp5 +    |
# |   | iPOOL1        iPOOL0 |                    | iPOOL2        iPOOL2 |    |
# |   | ePOOL4        ePOOL5 |                    | ePOOL4        ePOOL4 |    |
# |   |                1Gbps |                    | 1Gbps                |    |
# | +-|----------------------|-+                +-|----------------------|-+  |
# | | + $swp1.111  $swp3.111 + |                | + $swp4.111  $swp5.111 + |  |
# | |                          |                |                          |  |
# | | BR1                      |                | BR2                      |  |
# | |                          |                |                          |  |
# | |                          |                |         + $swp2.111      |  |
# | +--------------------------+                +---------|----------------+  |
# |                                                       |                   |
# | iPOOL0: 500KB dynamic                                 |                   |
# | iPOOL1: 500KB dynamic                                 |                   |
# | iPOOL2: 10MB dynamic                                  + $swp2             |
# | ePOOL4: 500KB dynamic                                 | iPOOL0            |
# | ePOOL5: 500KB dynamic                                 | ePOOL6            |
# | ePOOL6: 10MB dynamic                                  | 1Gbps             |
# +-------------------------------------------------------|-------------------+
#                                                         |
#                                                     +---|-------------------+
#                                                     |   + $h2            H2 |
#                                                     |   | 1Gbps             |
#                                                     |   |                   |
#                                                     |   + $h2.111           |
#                                                     |     192.0.2.34/28     |
#                                                     +-----------------------+
#
# iPOOL0+ePOOL4 are helper pools for control traffic etc.
# iPOOL1+ePOOL5 are helper pools for modeling the 1Gbps stream.
# iPOOL2+ePOOL6 are pools for soaking the burst traffic.
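#
# The pool configuration below is applied through the devlink shared-buffer
# API, via helpers from devlink_lib.sh. For reference, the resulting state
# can be inspected manually, e.g. (the device address is only an example):
#
#   devlink sb pool show pci/0000:03:00.0 pool 2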

ALL_TESTS="
	ping_ipv4
	test_8K
	test_800
"

lib_dir=$(dirname $0)/../../../net/forwarding

NUM_NETIFS=8
source $lib_dir/lib.sh
source $lib_dir/devlink_lib.sh
source qos_lib.sh
source mlxsw_lib.sh

_1KB=1000
_500KB=$((500 * _1KB))
_1MB=$((1000 * _1KB))

# The failure mode that this test specifically targets is exhaustion of the
# descriptor buffer. The point is to produce a burst that the shared buffer
# should be able to accommodate, but to build it from packets small enough
# that the machine runs out of descriptor buffer space with the default
# configuration.
#
# The machine therefore needs to be able to produce line rate with packets as
# small as possible, and at the same time have a buffer large enough that,
# when filled with these small packets, it runs out of descriptors.
# Spectrum-2 comes very close, but cannot perform this test. Therefore require
# Spectrum-3 as a minimum, and permit a larger burst size, and therefore
# larger packets, to reduce spurious failures.
#
mlxsw_only_on_spectrum 3+ || exit

BURST_SIZE=$((50000000))
POOL_SIZE=$BURST_SIZE
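
# Back-of-the-envelope: with the 800-byte packets of test_800 below, a 50MB
# burst is roughly 50000000 / 800 = 62500 packets, each of which ties up
# descriptor state while queued. (Illustrative arithmetic; the exact
# per-packet descriptor cost is a device internal.)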

h1_create()
{
	simple_if_init $h1
	mtu_set $h1 10000

	vlan_create $h1 111 v$h1 192.0.2.33/28
	ip link set dev $h1.111 type vlan egress-qos-map 0:1
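	# (The egress-qos-map tags this host's traffic with VLAN PCP 1; the
	# ingress-qos-map on $swp1.111 below maps it back to switch prio 1.)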
}

h1_destroy()
{
	vlan_destroy $h1 111

	mtu_restore $h1
	simple_if_fini $h1
}

h2_create()
{
	simple_if_init $h2
	mtu_set $h2 10000
	ethtool -s $h2 speed 1000 autoneg off

	vlan_create $h2 111 v$h2 192.0.2.34/28
}

h2_destroy()
{
	vlan_destroy $h2 111

	ethtool -s $h2 autoneg on
	mtu_restore $h2
	simple_if_fini $h2
}

h3_create()
{
	simple_if_init $h3
	mtu_set $h3 10000

	vlan_create $h3 111 v$h3 192.0.2.35/28
}

h3_destroy()
{
	vlan_destroy $h3 111

	mtu_restore $h3
	simple_if_fini $h3
}

switch_create()
{
	# pools
	# -----

	devlink_pool_size_thtype_save 0
	devlink_pool_size_thtype_save 4
	devlink_pool_size_thtype_save 1
	devlink_pool_size_thtype_save 5
	devlink_pool_size_thtype_save 2
	devlink_pool_size_thtype_save 6

	devlink_port_pool_th_save $swp1 1
	devlink_port_pool_th_save $swp2 6
	devlink_port_pool_th_save $swp3 5
	devlink_port_pool_th_save $swp4 2
	devlink_port_pool_th_save $swp5 2

	devlink_tc_bind_pool_th_save $swp1 1 ingress
	devlink_tc_bind_pool_th_save $swp2 1 egress
	devlink_tc_bind_pool_th_save $swp3 1 egress
	devlink_tc_bind_pool_th_save $swp4 1 ingress
	devlink_tc_bind_pool_th_save $swp5 1 ingress

	# Control traffic pools. Just reduce the size.
	devlink_pool_size_thtype_set 0 dynamic $_500KB
	devlink_pool_size_thtype_set 4 dynamic $_500KB

	# Stream modeling pools.
	devlink_pool_size_thtype_set 1 dynamic $_500KB
	devlink_pool_size_thtype_set 5 dynamic $_500KB

	# Burst soak pools.
	devlink_pool_size_thtype_set 2 static $POOL_SIZE
	devlink_pool_size_thtype_set 6 static $POOL_SIZE
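
	# The helpers above boil down to "devlink sb pool set" invocations,
	# e.g. (illustrative; substitute the actual devlink device):
	#
	#   devlink sb pool set pci/0000:03:00.0 pool 2 size $POOL_SIZE \
	#	thtype static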

	# $swp1
	# -----

	ip link set dev $swp1 up
	mtu_set $swp1 10000
	vlan_create $swp1 111
	ip link set dev $swp1.111 type vlan ingress-qos-map 0:0 1:1

	devlink_port_pool_th_set $swp1 1 16
	devlink_tc_bind_pool_th_set $swp1 1 ingress 1 16

	# Configure qdisc...
	tc qdisc replace dev $swp1 root handle 1: \
	   ets bands 8 strict 8 priomap 7 6
	# ... so that we can assign prio1 traffic to PG1.
	dcb buffer set dev $swp1 prio-buffer all:0 1:1
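	# The priomap above sends prio 0 to band 7 (TC0) and prio 1 to band 6
	# (TC1); the dcb command then steers prio 1 into priority group (PG) 1
	# on ingress, matching the TC 1 pool binding configured above.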

	# $swp2
	# -----

	ip link set dev $swp2 up
	mtu_set $swp2 10000
	ethtool -s $swp2 speed 1000 autoneg off
	vlan_create $swp2 111
	ip link set dev $swp2.111 type vlan egress-qos-map 0:0 1:1

	devlink_port_pool_th_set $swp2 6 $POOL_SIZE
	devlink_tc_bind_pool_th_set $swp2 1 egress 6 $POOL_SIZE

	# prio 0->TC0 (band 7), 1->TC1 (band 6)
	tc qdisc replace dev $swp2 root handle 1: \
	   ets bands 8 strict 8 priomap 7 6

	# $swp3
	# -----

	ip link set dev $swp3 up
	mtu_set $swp3 10000
	ethtool -s $swp3 speed 1000 autoneg off
	vlan_create $swp3 111
	ip link set dev $swp3.111 type vlan egress-qos-map 0:0 1:1

	devlink_port_pool_th_set $swp3 5 16
	devlink_tc_bind_pool_th_set $swp3 1 egress 5 16

	# prio 0->TC0 (band 7), 1->TC1 (band 6)
	tc qdisc replace dev $swp3 root handle 1: \
	   ets bands 8 strict 8 priomap 7 6

	# $swp4
	# -----

	ip link set dev $swp4 up
	mtu_set $swp4 10000
	ethtool -s $swp4 speed 1000 autoneg off
	vlan_create $swp4 111
	ip link set dev $swp4.111 type vlan ingress-qos-map 0:0 1:1

	devlink_port_pool_th_set $swp4 2 $POOL_SIZE
	devlink_tc_bind_pool_th_set $swp4 1 ingress 2 $POOL_SIZE

	# Configure qdisc...
	tc qdisc replace dev $swp4 root handle 1: \
	   ets bands 8 strict 8 priomap 7 6
	# ... so that we can assign prio1 traffic to PG1.
	dcb buffer set dev $swp4 prio-buffer all:0 1:1

	# $swp5
	# -----

	ip link set dev $swp5 up
	mtu_set $swp5 10000
	vlan_create $swp5 111
	ip link set dev $swp5.111 type vlan ingress-qos-map 0:0 1:1

	devlink_port_pool_th_set $swp5 2 $POOL_SIZE
	devlink_tc_bind_pool_th_set $swp5 1 ingress 2 $POOL_SIZE

	# Configure qdisc...
	tc qdisc replace dev $swp5 root handle 1: \
	   ets bands 8 strict 8 priomap 7 6
	# ... so that we can assign prio1 traffic to PG1.
	dcb buffer set dev $swp5 prio-buffer all:0 1:1

	# bridges
	# -------

	ip link add name br1 type bridge vlan_filtering 0
	ip link set dev $swp1.111 master br1
	ip link set dev $swp3.111 master br1
	ip link set dev br1 up

	ip link add name br2 type bridge vlan_filtering 0
	ip link set dev $swp2.111 master br2
	ip link set dev $swp4.111 master br2
	ip link set dev $swp5.111 master br2
	ip link set dev br2 up
}

switch_destroy()
{
	# Do this first so that we can reset the limits to values that are only
	# valid for the original static / dynamic setting.
	devlink_pool_size_thtype_restore 6
	devlink_pool_size_thtype_restore 5
	devlink_pool_size_thtype_restore 4
	devlink_pool_size_thtype_restore 2
	devlink_pool_size_thtype_restore 1
	devlink_pool_size_thtype_restore 0

	# bridges
	# -------

	ip link set dev br2 down
	ip link set dev $swp5.111 nomaster
	ip link set dev $swp4.111 nomaster
	ip link set dev $swp2.111 nomaster
	ip link del dev br2

	ip link set dev br1 down
	ip link set dev $swp3.111 nomaster
	ip link set dev $swp1.111 nomaster
	ip link del dev br1

	# $swp5
	# -----

	dcb buffer set dev $swp5 prio-buffer all:0
	tc qdisc del dev $swp5 root

	devlink_tc_bind_pool_th_restore $swp5 1 ingress
	devlink_port_pool_th_restore $swp5 2

	vlan_destroy $swp5 111
	mtu_restore $swp5
	ip link set dev $swp5 down

	# $swp4
	# -----

	dcb buffer set dev $swp4 prio-buffer all:0
	tc qdisc del dev $swp4 root

	devlink_tc_bind_pool_th_restore $swp4 1 ingress
	devlink_port_pool_th_restore $swp4 2

	vlan_destroy $swp4 111
	ethtool -s $swp4 autoneg on
	mtu_restore $swp4
	ip link set dev $swp4 down

	# $swp3
	# -----

	tc qdisc del dev $swp3 root

	devlink_tc_bind_pool_th_restore $swp3 1 egress
	devlink_port_pool_th_restore $swp3 5

	vlan_destroy $swp3 111
	ethtool -s $swp3 autoneg on
	mtu_restore $swp3
	ip link set dev $swp3 down

	# $swp2
	# -----

	tc qdisc del dev $swp2 root

	devlink_tc_bind_pool_th_restore $swp2 1 egress
	devlink_port_pool_th_restore $swp2 6

	vlan_destroy $swp2 111
	ethtool -s $swp2 autoneg on
	mtu_restore $swp2
	ip link set dev $swp2 down

	# $swp1
	# -----

	dcb buffer set dev $swp1 prio-buffer all:0
	tc qdisc del dev $swp1 root

	devlink_tc_bind_pool_th_restore $swp1 1 ingress
	devlink_port_pool_th_restore $swp1 1

	vlan_destroy $swp1 111
	mtu_restore $swp1
	ip link set dev $swp1 down
}

setup_prepare()
{
	h1=${NETIFS[p1]}
	swp1=${NETIFS[p2]}

	swp2=${NETIFS[p3]}
	h2=${NETIFS[p4]}

	swp3=${NETIFS[p5]}
	swp4=${NETIFS[p6]}

	swp5=${NETIFS[p7]}
	h3=${NETIFS[p8]}

	h2mac=$(mac_get $h2)

	vrf_prepare

	h1_create
	h2_create
	h3_create
	switch_create
}

cleanup()
{
	pre_cleanup

	switch_destroy
	h3_destroy
	h2_destroy
	h1_destroy

	vrf_cleanup
}

ping_ipv4()
{
	ping_test $h1 192.0.2.34 " h1->h2"
	ping_test $h3 192.0.2.34 " h3->h2"
}

__test_qos_burst()
{
	local pktsize=$1; shift

	RET=0

	start_traffic_pktsize $pktsize $h1.111 192.0.2.33 192.0.2.34 $h2mac
	sleep 1

	local q0=$(ethtool_stats_get $swp2 tc_transmit_queue_tc_1)
	((q0 == 0))
	check_err $? "Transmit queue non-zero?"
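	# (The arithmetic command exits zero only when q0 == 0 holds, so
	# check_err flags any backlog that predates the burst.)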

	local d0=$(ethtool_stats_get $swp2 tc_no_buffer_discard_uc_tc_1)

	local cell_size=$(devlink_cell_size_get)
	local cells=$((BURST_SIZE / cell_size))
	# Each packet is $pktsize of payload + headers.
	local pkt_cells=$(((pktsize + 50 + cell_size - 1) / cell_size))
	# How many packets can we admit:
	local pkts=$((cells / pkt_cells))
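	# Worked example, assuming a hypothetical 144-byte cell: an 800-byte
	# packet plus ~50 bytes of headers takes ceil(850 / 144) = 6 cells,
	# the 50MB burst is 50000000 / 144 ~= 347222 cells, and so about
	# 347222 / 6 ~= 57870 packets fit within the burst budget.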

	$MZ $h3 -p $pktsize -Q 1:111 -A 192.0.2.35 -B 192.0.2.34 \
		-a own -b $h2mac -c $pkts -t udp -q
	sleep 1

	local d1=$(ethtool_stats_get $swp2 tc_no_buffer_discard_uc_tc_1)
	((d1 == d0))
	check_err $? "Drops seen on egress port: $d0 -> $d1 ($((d1 - d0)))"

	# Check that the queue is somewhat close to the burst size. This
	# makes sure that the lack of drops above was not due to port
	# undersubscription.
	local q0=$(ethtool_stats_get $swp2 tc_transmit_queue_tc_1)
	local qe=$((90 * BURST_SIZE / 100))
	((q0 > qe))
	check_err $? "Queue size expected >$qe, got $q0"
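	# (With BURST_SIZE of 50MB this demands a standing queue of more than
	# 45MB, i.e. the burst was genuinely absorbed rather than merely
	# trickling through.)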

	stop_traffic
	sleep 2

	log_test "Burst: absorb $pkts ${pktsize}-B packets"
}

test_8K()
{
	__test_qos_burst 8000
}

test_800()
{
	__test_qos_burst 800
}

bail_on_lldpad

trap cleanup EXIT
setup_prepare
setup_wait
tests_run

exit $EXIT_STATUS