cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ocelot_devlink.c (27189B)


// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Copyright 2020-2021 NXP
 */
#include <net/devlink.h>
#include "ocelot.h"

/* The queue system tracks four resource consumptions:
 * Resource 0: Memory tracked per source port
 * Resource 1: Frame references tracked per source port
 * Resource 2: Memory tracked per destination port
 * Resource 3: Frame references tracked per destination port
 */
#define OCELOT_RESOURCE_SZ		256
#define OCELOT_NUM_RESOURCES		4

#define BUF_xxxx_I			(0 * OCELOT_RESOURCE_SZ)
#define REF_xxxx_I			(1 * OCELOT_RESOURCE_SZ)
#define BUF_xxxx_E			(2 * OCELOT_RESOURCE_SZ)
#define REF_xxxx_E			(3 * OCELOT_RESOURCE_SZ)

/* For each resource type there are 4 types of watermarks:
 * Q_RSRV: reservation per QoS class per port
 * PRIO_SHR: sharing watermark per QoS class across all ports
 * P_RSRV: reservation per port
 * COL_SHR: sharing watermark per color (drop precedence) across all ports
 */
#define xxx_Q_RSRV_x			0
#define xxx_PRIO_SHR_x			216
#define xxx_P_RSRV_x			224
#define xxx_COL_SHR_x			254

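/* An index into QSYS_RES_CFG / QSYS_RES_STAT is therefore built as
 * "resource block base + watermark group offset + entry", for a total of
 * 4 * 256 = 1024 entries. Worked examples, derived purely from the offsets
 * above for illustration:
 * - per-port reservation of egress frame references for port 5:
 *   3 * 256 + 224 + 5 = 997
 * - ingress memory sharing watermark for QoS class 7:
 *   0 * 256 + 216 + 7 = 223
 */
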
/* Reservation Watermarks
 * ----------------------
 *
 * For setting up the reserved areas, reservation watermarks exist per port
 * and per QoS class, for both ingress and egress.
 */

/*  Amount of packet buffer
 *  |  per QoS class
 *  |  |  reserved
 *  |  |  |   per egress port
 *  |  |  |   |
 *  V  V  v   v
 * BUF_Q_RSRV_E
 */
#define BUF_Q_RSRV_E(port, prio) \
	(BUF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))

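/* Worked example for the macro above, assuming OCELOT_NUM_TC == 8 (the switch
 * has 8 traffic classes, see the sharing watermark comment further down):
 * BUF_Q_RSRV_E(2, 3) = 2 * 256 + 0 + 8 * 2 + 3 = 531, i.e. the egress memory
 * reservation of port 2, QoS class 3.
 */
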
/*  Amount of packet buffer
 *  |  for all of the port's traffic classes
 *  |  |  reserved
 *  |  |  |   per egress port
 *  |  |  |   |
 *  V  V  v   v
 * BUF_P_RSRV_E
 */
#define BUF_P_RSRV_E(port) \
	(BUF_xxxx_E + xxx_P_RSRV_x + (port))

/*  Amount of packet buffer
 *  |  per QoS class
 *  |  |  reserved
 *  |  |  |   per ingress port
 *  |  |  |   |
 *  V  V  v   v
 * BUF_Q_RSRV_I
 */
#define BUF_Q_RSRV_I(port, prio) \
	(BUF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))

/*  Amount of packet buffer
 *  |  for all of the port's traffic classes
 *  |  |  reserved
 *  |  |  |   per ingress port
 *  |  |  |   |
 *  V  V  v   v
 * BUF_P_RSRV_I
 */
#define BUF_P_RSRV_I(port) \
	(BUF_xxxx_I + xxx_P_RSRV_x + (port))

/*  Amount of frame references
 *  |  per QoS class
 *  |  |  reserved
 *  |  |  |   per egress port
 *  |  |  |   |
 *  V  V  v   v
 * REF_Q_RSRV_E
 */
#define REF_Q_RSRV_E(port, prio) \
	(REF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))

/*  Amount of frame references
 *  |  for all of the port's traffic classes
 *  |  |  reserved
 *  |  |  |   per egress port
 *  |  |  |   |
 *  V  V  v   v
 * REF_P_RSRV_E
 */
#define REF_P_RSRV_E(port) \
	(REF_xxxx_E + xxx_P_RSRV_x + (port))

/*  Amount of frame references
 *  |  per QoS class
 *  |  |  reserved
 *  |  |  |   per ingress port
 *  |  |  |   |
 *  V  V  v   v
 * REF_Q_RSRV_I
 */
#define REF_Q_RSRV_I(port, prio) \
	(REF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))

/*  Amount of frame references
 *  |  for all of the port's traffic classes
 *  |  |  reserved
 *  |  |  |   per ingress port
 *  |  |  |   |
 *  V  V  v   v
 * REF_P_RSRV_I
 */
#define REF_P_RSRV_I(port) \
	(REF_xxxx_I + xxx_P_RSRV_x + (port))

/* Sharing Watermarks
 * ------------------
 *
 * The shared memory area is shared between all ports.
 */

/* Amount of buffer
 *  |   per QoS class
 *  |   |    from the shared memory area
 *  |   |    |  for egress traffic
 *  |   |    |  |
 *  V   V    v  v
 * BUF_PRIO_SHR_E
 */
#define BUF_PRIO_SHR_E(prio) \
	(BUF_xxxx_E + xxx_PRIO_SHR_x + (prio))

/* Amount of buffer
 *  |   per color (drop precedence level)
 *  |   |   from the shared memory area
 *  |   |   |  for egress traffic
 *  |   |   |  |
 *  V   V   v  v
 * BUF_COL_SHR_E
 */
#define BUF_COL_SHR_E(dp) \
	(BUF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))

/* Amount of buffer
 *  |   per QoS class
 *  |   |    from the shared memory area
 *  |   |    |  for ingress traffic
 *  |   |    |  |
 *  V   V    v  v
 * BUF_PRIO_SHR_I
 */
#define BUF_PRIO_SHR_I(prio) \
	(BUF_xxxx_I + xxx_PRIO_SHR_x + (prio))

/* Amount of buffer
 *  |   per color (drop precedence level)
 *  |   |   from the shared memory area
 *  |   |   |  for ingress traffic
 *  |   |   |  |
 *  V   V   v  v
 * BUF_COL_SHR_I
 */
#define BUF_COL_SHR_I(dp) \
	(BUF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))

/* Amount of frame references
 *  |   per QoS class
 *  |   |    from the shared area
 *  |   |    |  for egress traffic
 *  |   |    |  |
 *  V   V    v  v
 * REF_PRIO_SHR_E
 */
#define REF_PRIO_SHR_E(prio) \
	(REF_xxxx_E + xxx_PRIO_SHR_x + (prio))

/* Amount of frame references
 *  |   per color (drop precedence level)
 *  |   |   from the shared area
 *  |   |   |  for egress traffic
 *  |   |   |  |
 *  V   V   v  v
 * REF_COL_SHR_E
 */
#define REF_COL_SHR_E(dp) \
	(REF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))

/* Amount of frame references
 *  |   per QoS class
 *  |   |    from the shared area
 *  |   |    |  for ingress traffic
 *  |   |    |  |
 *  V   V    v  v
 * REF_PRIO_SHR_I
 */
#define REF_PRIO_SHR_I(prio) \
	(REF_xxxx_I + xxx_PRIO_SHR_x + (prio))

/* Amount of frame references
 *  |   per color (drop precedence level)
 *  |   |   from the shared area
 *  |   |   |  for ingress traffic
 *  |   |   |  |
 *  V   V   v  v
 * REF_COL_SHR_I
 */
#define REF_COL_SHR_I(dp) \
	(REF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))

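/* Note the "1 - (dp)" in the COL_SHR macros above: within each resource
 * block, drop precedence 0 uses entry 255 and drop precedence 1 uses entry
 * 254. For example, BUF_COL_SHR_I(0) = 0 * 256 + 254 + 1 = 255 and
 * BUF_COL_SHR_E(1) = 2 * 256 + 254 + 0 = 766.
 */
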
static u32 ocelot_wm_read(struct ocelot *ocelot, int index)
{
	int wm = ocelot_read_gix(ocelot, QSYS_RES_CFG, index);

	return ocelot->ops->wm_dec(wm);
}

static void ocelot_wm_write(struct ocelot *ocelot, int index, u32 val)
{
	u32 wm = ocelot->ops->wm_enc(val);

	ocelot_write_gix(ocelot, wm, QSYS_RES_CFG, index);
}

static void ocelot_wm_status(struct ocelot *ocelot, int index, u32 *inuse,
			     u32 *maxuse)
{
	int res_stat = ocelot_read_gix(ocelot, QSYS_RES_STAT, index);

	return ocelot->ops->wm_stat(res_stat, inuse, maxuse);
}

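/* Illustrative sketch only, not used by the driver: the accessors above work
 * in decoded (watermark unit) values, with the SoC-specific wm_enc/wm_dec ops
 * hiding the register encoding. A read-modify-write of one watermark, under
 * the assumption that the caller serializes access to QSYS_RES_CFG, could
 * look like this; the helper name is hypothetical.
 */
static inline void ocelot_wm_add(struct ocelot *ocelot, int index, u32 delta)
{
	u32 wm = ocelot_wm_read(ocelot, index);	/* decoded units */

	ocelot_wm_write(ocelot, index, wm + delta);
}
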
/* The hardware comes out of reset with strange defaults: the sum of all
 * reservations for frame memory is larger than the total buffer size.
 * One has to wonder how the reservation watermarks can still guarantee
 * anything under congestion.
 * Bring some sense into the hardware by changing the defaults to disable all
 * reservations and rely only on the sharing watermark for frames with drop
 * precedence 0. The user can still explicitly request reservations per port
 * and per port-tc through devlink-sb.
 */
static void ocelot_disable_reservation_watermarks(struct ocelot *ocelot,
						  int port)
{
	int prio;

	for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
		ocelot_wm_write(ocelot, BUF_Q_RSRV_I(port, prio), 0);
		ocelot_wm_write(ocelot, BUF_Q_RSRV_E(port, prio), 0);
		ocelot_wm_write(ocelot, REF_Q_RSRV_I(port, prio), 0);
		ocelot_wm_write(ocelot, REF_Q_RSRV_E(port, prio), 0);
	}

	ocelot_wm_write(ocelot, BUF_P_RSRV_I(port), 0);
	ocelot_wm_write(ocelot, BUF_P_RSRV_E(port), 0);
	ocelot_wm_write(ocelot, REF_P_RSRV_I(port), 0);
	ocelot_wm_write(ocelot, REF_P_RSRV_E(port), 0);
}

/* We want the sharing watermarks to consume all non-reserved resources, for
 * efficient resource utilization (a single traffic flow should be able to use
 * up the entire buffer space and frame resources as long as there's no
 * interference).
 * The switch has 10 sharing watermarks per lookup: 8 per traffic class and 2
 * per color (drop precedence).
 * The trouble with configuring these sharing watermarks is that:
 * (1) There's a risk that we overcommit the resources if we configure
 *     (a) all 8 per-TC sharing watermarks to the max
 *     (b) all 2 per-color sharing watermarks to the max
 * (2) There's a risk that we undercommit the resources if we configure
 *     (a) all 8 per-TC sharing watermarks to "max / 8"
 *     (b) all 2 per-color sharing watermarks to "max / 2"
 * So for Linux, let's just disable the per-traffic-class sharing watermarks
 * (setting them to 0 makes them always exceeded) and rely only on the sharing
 * watermark for drop precedence 0. Frames with drop precedence 1, assigned by
 * QoS classification or policing, will still be allowed, but only as long as
 * the port and port-TC reservations are not exceeded.
 */
static void ocelot_disable_tc_sharing_watermarks(struct ocelot *ocelot)
{
	int prio;

	for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
		ocelot_wm_write(ocelot, BUF_PRIO_SHR_I(prio), 0);
		ocelot_wm_write(ocelot, BUF_PRIO_SHR_E(prio), 0);
		ocelot_wm_write(ocelot, REF_PRIO_SHR_I(prio), 0);
		ocelot_wm_write(ocelot, REF_PRIO_SHR_E(prio), 0);
	}
}

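/* A numerical illustration of the trade-off above, with a hypothetical 256 KB
 * shared area and 8 traffic classes: letting every per-TC sharing watermark
 * use the full 256 KB could admit up to 8 * 256 KB = 2 MB before any per-TC
 * check fires (overcommit), while capping each TC at 256 KB / 8 = 32 KB would
 * prevent a single congested flow from ever using more than an eighth of the
 * memory (undercommit).
 */
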
static void ocelot_get_buf_rsrv(struct ocelot *ocelot, u32 *buf_rsrv_i,
				u32 *buf_rsrv_e)
{
	int port, prio;

	*buf_rsrv_i = 0;
	*buf_rsrv_e = 0;

	for (port = 0; port <= ocelot->num_phys_ports; port++) {
		for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
			*buf_rsrv_i += ocelot_wm_read(ocelot,
						      BUF_Q_RSRV_I(port, prio));
			*buf_rsrv_e += ocelot_wm_read(ocelot,
						      BUF_Q_RSRV_E(port, prio));
		}

		*buf_rsrv_i += ocelot_wm_read(ocelot, BUF_P_RSRV_I(port));
		*buf_rsrv_e += ocelot_wm_read(ocelot, BUF_P_RSRV_E(port));
	}

	*buf_rsrv_i *= OCELOT_BUFFER_CELL_SZ;
	*buf_rsrv_e *= OCELOT_BUFFER_CELL_SZ;
}

static void ocelot_get_ref_rsrv(struct ocelot *ocelot, u32 *ref_rsrv_i,
				u32 *ref_rsrv_e)
{
	int port, prio;

	*ref_rsrv_i = 0;
	*ref_rsrv_e = 0;

	for (port = 0; port <= ocelot->num_phys_ports; port++) {
		for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
			*ref_rsrv_i += ocelot_wm_read(ocelot,
						      REF_Q_RSRV_I(port, prio));
			*ref_rsrv_e += ocelot_wm_read(ocelot,
						      REF_Q_RSRV_E(port, prio));
		}

		*ref_rsrv_i += ocelot_wm_read(ocelot, REF_P_RSRV_I(port));
		*ref_rsrv_e += ocelot_wm_read(ocelot, REF_P_RSRV_E(port));
	}
}

/* Calculate all reservations, then set up the sharing watermark for DP=0 to
 * consume the remaining resources up to the pool's configured size.
 */
static void ocelot_setup_sharing_watermarks(struct ocelot *ocelot)
{
	u32 buf_rsrv_i, buf_rsrv_e;
	u32 ref_rsrv_i, ref_rsrv_e;
	u32 buf_shr_i, buf_shr_e;
	u32 ref_shr_i, ref_shr_e;

	ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
	ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);

	buf_shr_i = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] -
		    buf_rsrv_i;
	buf_shr_e = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] -
		    buf_rsrv_e;
	ref_shr_i = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] -
		    ref_rsrv_i;
	ref_shr_e = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] -
		    ref_rsrv_e;

	buf_shr_i /= OCELOT_BUFFER_CELL_SZ;
	buf_shr_e /= OCELOT_BUFFER_CELL_SZ;

	ocelot_wm_write(ocelot, BUF_COL_SHR_I(0), buf_shr_i);
	ocelot_wm_write(ocelot, BUF_COL_SHR_E(0), buf_shr_e);
	ocelot_wm_write(ocelot, REF_COL_SHR_E(0), ref_shr_e);
	ocelot_wm_write(ocelot, REF_COL_SHR_I(0), ref_shr_i);
	ocelot_wm_write(ocelot, BUF_COL_SHR_I(1), 0);
	ocelot_wm_write(ocelot, BUF_COL_SHR_E(1), 0);
	ocelot_wm_write(ocelot, REF_COL_SHR_E(1), 0);
	ocelot_wm_write(ocelot, REF_COL_SHR_I(1), 0);
}

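/* Worked example for the function above (hypothetical numbers): with an
 * ingress buffer pool of 120000 bytes and 24000 bytes of ingress buffer
 * reservations, buf_shr_i = 96000 bytes, or 1600 cells assuming the usual
 * 60-byte OCELOT_BUFFER_CELL_SZ, which is what gets written to
 * BUF_COL_SHR_I(0). The DP=1 watermark is zeroed, so yellow frames can only
 * rely on per-port and per-port-TC reservations.
 */
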
/* Ensure that all reservations can be enforced */
static int ocelot_watermark_validate(struct ocelot *ocelot,
				     struct netlink_ext_ack *extack)
{
	u32 buf_rsrv_i, buf_rsrv_e;
	u32 ref_rsrv_i, ref_rsrv_e;

	ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
	ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);

	if (buf_rsrv_i > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress frame reservations exceed pool size");
		return -ERANGE;
	}
	if (buf_rsrv_e > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress frame reservations exceed pool size");
		return -ERANGE;
	}
	if (ref_rsrv_i > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress reference reservations exceed pool size");
		return -ERANGE;
	}
	if (ref_rsrv_e > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress reference reservations exceed pool size");
		return -ERANGE;
	}

	return 0;
}

/* The hardware works like this:
 *
 *                         Frame forwarding decision taken
 *                                       |
 *                                       v
 *       +--------------------+--------------------+--------------------+
 *       |                    |                    |                    |
 *       v                    v                    v                    v
 * Ingress memory       Egress memory        Ingress frame        Egress frame
 *     check                check           reference check      reference check
 *       |                    |                    |                    |
 *       v                    v                    v                    v
 *  BUF_Q_RSRV_I   ok    BUF_Q_RSRV_E   ok    REF_Q_RSRV_I   ok     REF_Q_RSRV_E   ok
 *(src port, prio) -+  (dst port, prio) -+  (src port, prio) -+   (dst port, prio) -+
 *       |          |         |          |         |          |         |           |
 *       |exceeded  |         |exceeded  |         |exceeded  |         |exceeded   |
 *       v          |         v          |         v          |         v           |
 *  BUF_P_RSRV_I  ok|    BUF_P_RSRV_E  ok|    REF_P_RSRV_I  ok|    REF_P_RSRV_E   ok|
 *   (src port) ----+     (dst port) ----+     (src port) ----+     (dst port) -----+
 *       |          |         |          |         |          |         |           |
 *       |exceeded  |         |exceeded  |         |exceeded  |         |exceeded   |
 *       v          |         v          |         v          |         v           |
 * BUF_PRIO_SHR_I ok|   BUF_PRIO_SHR_E ok|   REF_PRIO_SHR_I ok|   REF_PRIO_SHR_E  ok|
 *     (prio) ------+       (prio) ------+       (prio) ------+       (prio) -------+
 *       |          |         |          |         |          |         |           |
 *       |exceeded  |         |exceeded  |         |exceeded  |         |exceeded   |
 *       v          |         v          |         v          |         v           |
 * BUF_COL_SHR_I  ok|   BUF_COL_SHR_E  ok|   REF_COL_SHR_I  ok|   REF_COL_SHR_E   ok|
 *      (dp) -------+        (dp) -------+        (dp) -------+        (dp) --------+
 *       |          |         |          |         |          |         |           |
 *       |exceeded  |         |exceeded  |         |exceeded  |         |exceeded   |
 *       v          v         v          v         v          v         v           v
 *      fail     success     fail     success     fail     success     fail      success
 *       |          |         |          |         |          |         |           |
 *       v          v         v          v         v          v         v           v
 *       +-----+----+         +-----+----+         +-----+----+         +-----+-----+
 *             |                    |                    |                    |
 *             +-------> OR <-------+                    +-------> OR <-------+
 *                        |                                        |
 *                        v                                        v
 *                        +----------------> AND <-----------------+
 *                                            |
 *                                            v
 *                                    FIFO drop / accept
 *
 * We are modeling each of the 4 parallel lookups as a devlink-sb pool.
 * At least one (ingress or egress) memory pool and one (ingress or egress)
 * frame reference pool need to have resources for frame acceptance to succeed.
 *
 * The following watermarks are controlled explicitly through devlink-sb:
 * BUF_Q_RSRV_I, BUF_Q_RSRV_E, REF_Q_RSRV_I, REF_Q_RSRV_E
 * BUF_P_RSRV_I, BUF_P_RSRV_E, REF_P_RSRV_I, REF_P_RSRV_E
 * The following watermarks are controlled implicitly through devlink-sb:
 * BUF_COL_SHR_I, BUF_COL_SHR_E, REF_COL_SHR_I, REF_COL_SHR_E
 * The following watermarks are unused and disabled:
 * BUF_PRIO_SHR_I, BUF_PRIO_SHR_E, REF_PRIO_SHR_I, REF_PRIO_SHR_E
 *
 * This function overrides the hardware defaults with more sane ones (no
 * reservations by default, let sharing use all resources) and disables the
 * unused watermarks.
 */
static void ocelot_watermark_init(struct ocelot *ocelot)
{
	int all_tcs = GENMASK(OCELOT_NUM_TC - 1, 0);
	int port;

	ocelot_write(ocelot, all_tcs, QSYS_RES_QOS_MODE);

	for (port = 0; port <= ocelot->num_phys_ports; port++)
		ocelot_disable_reservation_watermarks(ocelot, port);

	ocelot_disable_tc_sharing_watermarks(ocelot);
	ocelot_setup_sharing_watermarks(ocelot);
}

/* Pool size and type are fixed up at runtime. We keep this structure around
 * to look up the cell size multipliers.
 */
static const struct devlink_sb_pool_info ocelot_sb_pool[] = {
	[OCELOT_SB_BUF] = {
		.cell_size = OCELOT_BUFFER_CELL_SZ,
		.threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
	},
	[OCELOT_SB_REF] = {
		.cell_size = 1,
		.threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
	},
};

/* Returns the pool size configured through ocelot_sb_pool_set */
int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index,
		       u16 pool_index,
		       struct devlink_sb_pool_info *pool_info)
{
	if (sb_index >= OCELOT_SB_NUM)
		return -ENODEV;
	if (pool_index >= OCELOT_SB_POOL_NUM)
		return -ENODEV;

	*pool_info = ocelot_sb_pool[sb_index];
	pool_info->size = ocelot->pool_size[sb_index][pool_index];
	if (pool_index == OCELOT_SB_POOL_ING)
		pool_info->pool_type = DEVLINK_SB_POOL_TYPE_INGRESS;
	else
		pool_info->pool_type = DEVLINK_SB_POOL_TYPE_EGRESS;

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_pool_get);

/* The pool size received here configures the total amount of resources used on
 * ingress (or on egress, depending upon the pool index). The pool size, minus
 * the values for the port and port-tc reservations, is written into the
 * COL_SHR(dp=0) sharing watermark.
 */
int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index,
		       u16 pool_index, u32 size,
		       enum devlink_sb_threshold_type threshold_type,
		       struct netlink_ext_ack *extack)
{
	u32 old_pool_size;
	int err;

	if (sb_index >= OCELOT_SB_NUM) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid sb, use 0 for buffers and 1 for frame references");
		return -ENODEV;
	}
	if (pool_index >= OCELOT_SB_POOL_NUM) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid pool, use 0 for ingress and 1 for egress");
		return -ENODEV;
	}
	if (threshold_type != DEVLINK_SB_THRESHOLD_TYPE_STATIC) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only static threshold supported");
		return -EOPNOTSUPP;
	}

	old_pool_size = ocelot->pool_size[sb_index][pool_index];
	ocelot->pool_size[sb_index][pool_index] = size;

	err = ocelot_watermark_validate(ocelot, extack);
	if (err) {
		ocelot->pool_size[sb_index][pool_index] = old_pool_size;
		return err;
	}

	ocelot_setup_sharing_watermarks(ocelot);

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_pool_set);
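
/* Illustrative devlink-sb usage for the callback above (the device name and
 * size are hypothetical): shrink the ingress buffer pool of shared buffer 0
 * to 100000 bytes; whatever is left after reservations becomes the DP=0
 * sharing watermark.
 *
 *   devlink sb pool set pci/0000:00:00.5 sb 0 pool 0 size 100000 thtype static
 */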

/* This retrieves the configuration made with ocelot_sb_port_pool_set */
int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port,
			    unsigned int sb_index, u16 pool_index,
			    u32 *p_threshold)
{
	int wm_index;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		if (pool_index == OCELOT_SB_POOL_ING)
			wm_index = BUF_P_RSRV_I(port);
		else
			wm_index = BUF_P_RSRV_E(port);
		break;
	case OCELOT_SB_REF:
		if (pool_index == OCELOT_SB_POOL_ING)
			wm_index = REF_P_RSRV_I(port);
		else
			wm_index = REF_P_RSRV_E(port);
		break;
	default:
		return -ENODEV;
	}

	*p_threshold = ocelot_wm_read(ocelot, wm_index);
	*p_threshold *= ocelot_sb_pool[sb_index].cell_size;

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_port_pool_get);

/* This configures the P_RSRV per-port reserved resource watermark */
int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port,
			    unsigned int sb_index, u16 pool_index,
			    u32 threshold, struct netlink_ext_ack *extack)
{
	int wm_index, err;
	u32 old_thr;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		if (pool_index == OCELOT_SB_POOL_ING)
			wm_index = BUF_P_RSRV_I(port);
		else
			wm_index = BUF_P_RSRV_E(port);
		break;
	case OCELOT_SB_REF:
		if (pool_index == OCELOT_SB_POOL_ING)
			wm_index = REF_P_RSRV_I(port);
		else
			wm_index = REF_P_RSRV_E(port);
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
		return -ENODEV;
	}

	threshold /= ocelot_sb_pool[sb_index].cell_size;

	old_thr = ocelot_wm_read(ocelot, wm_index);
	ocelot_wm_write(ocelot, wm_index, threshold);

	err = ocelot_watermark_validate(ocelot, extack);
	if (err) {
		ocelot_wm_write(ocelot, wm_index, old_thr);
		return err;
	}

	ocelot_setup_sharing_watermarks(ocelot);

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_port_pool_set);
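
/* Illustrative devlink-sb usage for the callback above (device name and
 * numbers hypothetical): reserve 3000 bytes of ingress packet buffer for
 * port 1. The threshold is given in bytes and divided by the buffer cell
 * size before being written to BUF_P_RSRV_I(1).
 *
 *   devlink sb port pool set pci/0000:00:00.5/1 sb 0 pool 0 th 3000
 */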

/* This retrieves the configuration done by ocelot_sb_tc_pool_bind_set */
int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port,
			       unsigned int sb_index, u16 tc_index,
			       enum devlink_sb_pool_type pool_type,
			       u16 *p_pool_index, u32 *p_threshold)
{
	int wm_index;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
			wm_index = BUF_Q_RSRV_I(port, tc_index);
		else
			wm_index = BUF_Q_RSRV_E(port, tc_index);
		break;
	case OCELOT_SB_REF:
		if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
			wm_index = REF_Q_RSRV_I(port, tc_index);
		else
			wm_index = REF_Q_RSRV_E(port, tc_index);
		break;
	default:
		return -ENODEV;
	}

	*p_threshold = ocelot_wm_read(ocelot, wm_index);
	*p_threshold *= ocelot_sb_pool[sb_index].cell_size;

	if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
		*p_pool_index = 0;
	else
		*p_pool_index = 1;

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_get);

/* This configures the Q_RSRV per-port-tc reserved resource watermark */
int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port,
			       unsigned int sb_index, u16 tc_index,
			       enum devlink_sb_pool_type pool_type,
			       u16 pool_index, u32 threshold,
			       struct netlink_ext_ack *extack)
{
	int wm_index, err;
	u32 old_thr;

	/* Sanity check that the pool index is consistent with the pool type */
	if (pool_index == OCELOT_SB_POOL_ING &&
	    pool_type != DEVLINK_SB_POOL_TYPE_INGRESS)
		return -EINVAL;
	if (pool_index == OCELOT_SB_POOL_EGR &&
	    pool_type != DEVLINK_SB_POOL_TYPE_EGRESS)
		return -EINVAL;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
			wm_index = BUF_Q_RSRV_I(port, tc_index);
		else
			wm_index = BUF_Q_RSRV_E(port, tc_index);
		break;
	case OCELOT_SB_REF:
		if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
			wm_index = REF_Q_RSRV_I(port, tc_index);
		else
			wm_index = REF_Q_RSRV_E(port, tc_index);
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
		return -ENODEV;
	}

	threshold /= ocelot_sb_pool[sb_index].cell_size;

	old_thr = ocelot_wm_read(ocelot, wm_index);
	ocelot_wm_write(ocelot, wm_index, threshold);
	err = ocelot_watermark_validate(ocelot, extack);
	if (err) {
		ocelot_wm_write(ocelot, wm_index, old_thr);
		return err;
	}

	ocelot_setup_sharing_watermarks(ocelot);

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_set);
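
/* Illustrative devlink-sb usage for the callback above (device name and
 * numbers hypothetical): reserve 10 ingress frame references for traffic
 * class 7 of port 1. Shared buffer 1 counts frame references, so the cell
 * size is 1 and the threshold is written to REF_Q_RSRV_I(1, 7) as-is.
 *
 *   devlink sb tc bind set pci/0000:00:00.5/1 sb 1 tc 7 type ingress pool 0 th 10
 */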

/* The hardware does not support atomic snapshots; we read out the occupancy
 * registers individually, so this is just a stub.
 */
int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index)
{
	return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_snapshot);

/* The watermark occupancy registers are cleared upon read, so reading them
 * all is enough to clear the maximums.
 */
int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index)
{
	u32 inuse, maxuse;
	int port, prio;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		for (port = 0; port <= ocelot->num_phys_ports; port++) {
			for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
				ocelot_wm_status(ocelot, BUF_Q_RSRV_I(port, prio),
						 &inuse, &maxuse);
				ocelot_wm_status(ocelot, BUF_Q_RSRV_E(port, prio),
						 &inuse, &maxuse);
			}
			ocelot_wm_status(ocelot, BUF_P_RSRV_I(port),
					 &inuse, &maxuse);
			ocelot_wm_status(ocelot, BUF_P_RSRV_E(port),
					 &inuse, &maxuse);
		}
		break;
	case OCELOT_SB_REF:
		for (port = 0; port <= ocelot->num_phys_ports; port++) {
			for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
				ocelot_wm_status(ocelot, REF_Q_RSRV_I(port, prio),
						 &inuse, &maxuse);
				ocelot_wm_status(ocelot, REF_Q_RSRV_E(port, prio),
						 &inuse, &maxuse);
			}
			ocelot_wm_status(ocelot, REF_P_RSRV_I(port),
					 &inuse, &maxuse);
			ocelot_wm_status(ocelot, REF_P_RSRV_E(port),
					 &inuse, &maxuse);
		}
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_max_clear);

/* This retrieves the watermark occupancy for per-port P_RSRV watermarks */
int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port,
				unsigned int sb_index, u16 pool_index,
				u32 *p_cur, u32 *p_max)
{
	int wm_index;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		if (pool_index == OCELOT_SB_POOL_ING)
			wm_index = BUF_P_RSRV_I(port);
		else
			wm_index = BUF_P_RSRV_E(port);
		break;
	case OCELOT_SB_REF:
		if (pool_index == OCELOT_SB_POOL_ING)
			wm_index = REF_P_RSRV_I(port);
		else
			wm_index = REF_P_RSRV_E(port);
		break;
	default:
		return -ENODEV;
	}

	ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
	*p_cur *= ocelot_sb_pool[sb_index].cell_size;
	*p_max *= ocelot_sb_pool[sb_index].cell_size;

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_port_pool_get);

/* This retrieves the watermark occupancy for per-port-tc Q_RSRV watermarks */
int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port,
				   unsigned int sb_index, u16 tc_index,
				   enum devlink_sb_pool_type pool_type,
				   u32 *p_cur, u32 *p_max)
{
	int wm_index;

	switch (sb_index) {
	case OCELOT_SB_BUF:
		if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
			wm_index = BUF_Q_RSRV_I(port, tc_index);
		else
			wm_index = BUF_Q_RSRV_E(port, tc_index);
		break;
	case OCELOT_SB_REF:
		if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
			wm_index = REF_Q_RSRV_I(port, tc_index);
		else
			wm_index = REF_Q_RSRV_E(port, tc_index);
		break;
	default:
		return -ENODEV;
	}

	ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
	*p_cur *= ocelot_sb_pool[sb_index].cell_size;
	*p_max *= ocelot_sb_pool[sb_index].cell_size;

	return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_tc_port_bind_get);
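
/* A typical occupancy inspection sequence from user space could look like
 * this (device name hypothetical): clear the running maximums, run traffic,
 * then snapshot and display the per-port counters.
 *
 *   devlink sb occupancy clearmax pci/0000:00:00.5
 *   devlink sb occupancy snapshot pci/0000:00:00.5
 *   devlink sb occupancy show pci/0000:00:00.5/1
 */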

int ocelot_devlink_sb_register(struct ocelot *ocelot)
{
	int err;

	err = devlink_sb_register(ocelot->devlink, OCELOT_SB_BUF,
				  ocelot->packet_buffer_size, 1, 1,
				  OCELOT_NUM_TC, OCELOT_NUM_TC);
	if (err)
		return err;

	err = devlink_sb_register(ocelot->devlink, OCELOT_SB_REF,
				  ocelot->num_frame_refs, 1, 1,
				  OCELOT_NUM_TC, OCELOT_NUM_TC);
	if (err) {
		devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
		return err;
	}

	ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] = ocelot->packet_buffer_size;
	ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] = ocelot->packet_buffer_size;
	ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] = ocelot->num_frame_refs;
	ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] = ocelot->num_frame_refs;

	ocelot_watermark_init(ocelot);

	return 0;
}
EXPORT_SYMBOL(ocelot_devlink_sb_register);
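
/* After registration, both shared buffers should be visible from user space,
 * e.g. (device name hypothetical):
 *
 *   devlink sb show pci/0000:00:00.5
 *
 * listing sb 0 (packet buffer, sized ocelot->packet_buffer_size) and sb 1
 * (frame references, sized ocelot->num_frame_refs), each with one ingress
 * pool, one egress pool and OCELOT_NUM_TC ingress/egress TC bindings.
 */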

void ocelot_devlink_sb_unregister(struct ocelot *ocelot)
{
	devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
	devlink_sb_unregister(ocelot->devlink, OCELOT_SB_REF);
}
EXPORT_SYMBOL(ocelot_devlink_sb_unregister);