cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gen_stats.c (13532B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/gen_stats.c
 *
 * Authors:  Thomas Graf <tgraf@suug.ch>
 *           Jamal Hadi Salim
 *           Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * See Documentation/networking/gen_stats.rst
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/gen_stats.h>
#include <net/netlink.h>
#include <net/gen_stats.h>
#include <net/sch_generic.h>
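
/*
 * Internal helper to append one statistics TLV to the dump. On failure it
 * tears the dump down on behalf of the caller: the statistics lock (if
 * any) is released and a previously kmemdup()ed xstats copy is freed, so
 * callers can simply propagate the -1.
 */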
static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
{
	if (nla_put_64bit(d->skb, type, size, buf, padattr))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return -1;
}

/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistics TLVs.
 *
 * The dumping handle is marked to be in backward compatibility mode telling
 * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
			     int xstats_type, spinlock_t *lock,
			     struct gnet_dump *d, int padattr)
	__acquires(lock)
{
	memset(d, 0, sizeof(*d));

	if (type)
		d->tail = (struct nlattr *)skb_tail_pointer(skb);
	d->skb = skb;
	d->compat_tc_stats = tc_stats_type;
	d->compat_xstats = xstats_type;
	d->padattr = padattr;
	if (lock) {
		d->lock = lock;
		spin_lock_bh(lock);
	}
	if (d->tail) {
		int ret = gnet_stats_copy(d, type, NULL, 0, padattr);

		/* The initial attribute added in gnet_stats_copy() may be
		 * preceded by a padding attribute, in which case d->tail will
		 * end up pointing at the padding instead of the real attribute.
		 * Fix this so gnet_stats_finish_copy() adjusts the length of
		 * the right attribute.
		 */
		if (ret == 0 && d->tail->nla_type == padattr)
			d->tail = (struct nlattr *)((char *)d->tail +
						    NLA_ALIGN(d->tail->nla_len));
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);

/**
 * gnet_stats_start_copy - start dumping procedure
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistics TLVs. Unlike gnet_stats_start_copy_compat(), no backward
 * compatibility TLVs are produced.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
		      struct gnet_dump *d, int padattr)
{
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);
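
/*
 * Usage sketch (hypothetical caller, names for illustration only): a
 * typical dump grabs the handle once, appends the individual statistic
 * TLVs and then closes the container:
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy(skb, TCA_STATS2, NULL, &d, TCA_PAD) < 0 ||
 *	    gnet_stats_copy_basic(&d, q->cpu_bstats, &q->bstats, false) < 0 ||
 *	    gnet_stats_copy_queue(&d, q->cpu_qstats, &q->qstats, qlen) < 0)
 *		goto nla_put_failure;
 *	if (gnet_stats_finish_copy(&d) < 0)
 *		goto nla_put_failure;
 */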

/* Must not be inlined, due to u64_stats seqcount_t lockdep key */
void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
{
	u64_stats_set(&b->bytes, 0);
	u64_stats_set(&b->packets, 0);
	u64_stats_init(&b->syncp);
}
EXPORT_SYMBOL(gnet_stats_basic_sync_init);
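
/*
 * Every gnet_stats_basic_sync must be initialized through this helper
 * before first use: u64_stats_init() sets up the embedded seqcount, and
 * keeping the function out of line gives all instances a single lockdep
 * class for that seqcount (hence the comment above).
 */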

static void gnet_stats_add_basic_cpu(struct gnet_stats_basic_sync *bstats,
				     struct gnet_stats_basic_sync __percpu *cpu)
{
	u64 t_bytes = 0, t_packets = 0;
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes, packets;

		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = u64_stats_read(&bcpu->bytes);
			packets = u64_stats_read(&bcpu->packets);
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		t_bytes += bytes;
		t_packets += packets;
	}
	_bstats_update(bstats, t_bytes, t_packets);
}
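
/*
 * The fetch_begin/fetch_retry pair above is the usual u64_stats read-side
 * pattern: on 64-bit kernels the seqcount is compiled out and the loop runs
 * exactly once, while on 32-bit kernels it retries until a writer-free
 * window yields a consistent 64-bit snapshot of bytes and packets.
 */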

void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
			  struct gnet_stats_basic_sync __percpu *cpu,
			  struct gnet_stats_basic_sync *b, bool running)
{
	unsigned int start;
	u64 bytes = 0;
	u64 packets = 0;

	WARN_ON_ONCE((cpu || running) && in_hardirq());

	if (cpu) {
		gnet_stats_add_basic_cpu(bstats, cpu);
		return;
	}
	do {
		if (running)
			start = u64_stats_fetch_begin_irq(&b->syncp);
		bytes = u64_stats_read(&b->bytes);
		packets = u64_stats_read(&b->packets);
	} while (running && u64_stats_fetch_retry_irq(&b->syncp, start));

	_bstats_update(bstats, bytes, packets);
}
EXPORT_SYMBOL(gnet_stats_add_basic);
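
/*
 * Usage sketch (hypothetical, for illustration): an mq-style qdisc that
 * aggregates the counters of its per-queue children into its own totals:
 *
 *	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
 *		struct Qdisc *qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
 *
 *		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
 *				     &qdisc->bstats, false);
 *		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
 *				     &qdisc->qstats);
 *	}
 */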

static void gnet_stats_read_basic(u64 *ret_bytes, u64 *ret_packets,
				  struct gnet_stats_basic_sync __percpu *cpu,
				  struct gnet_stats_basic_sync *b, bool running)
{
	unsigned int start;

	if (cpu) {
		u64 t_bytes = 0, t_packets = 0;
		int i;

		for_each_possible_cpu(i) {
			struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
			unsigned int start;
			u64 bytes, packets;

			do {
				start = u64_stats_fetch_begin_irq(&bcpu->syncp);
				bytes = u64_stats_read(&bcpu->bytes);
				packets = u64_stats_read(&bcpu->packets);
			} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

			t_bytes += bytes;
			t_packets += packets;
		}
		*ret_bytes = t_bytes;
		*ret_packets = t_packets;
		return;
	}
	do {
		if (running)
			start = u64_stats_fetch_begin_irq(&b->syncp);
		*ret_bytes = u64_stats_read(&b->bytes);
		*ret_packets = u64_stats_read(&b->packets);
	} while (running && u64_stats_fetch_retry_irq(&b->syncp, start));
}

static int
___gnet_stats_copy_basic(struct gnet_dump *d,
			 struct gnet_stats_basic_sync __percpu *cpu,
			 struct gnet_stats_basic_sync *b,
			 int type, bool running)
{
	u64 bstats_bytes, bstats_packets;

	gnet_stats_read_basic(&bstats_bytes, &bstats_packets, cpu, b, running);

	if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
		d->tc_stats.bytes = bstats_bytes;
		d->tc_stats.packets = bstats_packets;
	}

	if (d->tail) {
		struct gnet_stats_basic sb;
		int res;

		memset(&sb, 0, sizeof(sb));
		sb.bytes = bstats_bytes;
		sb.packets = bstats_packets;
		res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD);
		if (res < 0 || sb.packets == bstats_packets)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats_packets,
				       sizeof(bstats_packets), TCA_STATS_PAD);
	}
	return 0;
}
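
/*
 * The sb.packets == bstats_packets comparison above works because struct
 * gnet_stats_basic carries the packet count as a __u32: assigning the
 * 64-bit total truncates it, so the values differ exactly when the count
 * no longer fits and the full value must be sent in TCA_STATS_PKT64.
 */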

/**
 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
 * @d: dumping handle
 * @cpu: copy statistic per cpu
 * @b: basic statistics
 * @running: true if @b represents a running qdisc, thus @b's
 *           internal values might change during basic reads.
 *           Only used if @cpu is NULL.
 *
 * Context: task; must not be run from IRQ or BH contexts
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic(struct gnet_dump *d,
		      struct gnet_stats_basic_sync __percpu *cpu,
		      struct gnet_stats_basic_sync *b,
		      bool running)
{
	return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC, running);
}
EXPORT_SYMBOL(gnet_stats_copy_basic);

/**
 * gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV
 * @d: dumping handle
 * @cpu: copy statistic per cpu
 * @b: basic statistics
 * @running: true if @b represents a running qdisc, thus @b's
 *           internal values might change during basic reads.
 *           Only used if @cpu is NULL.
 *
 * Context: task; must not be run from IRQ or BH contexts
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic_hw(struct gnet_dump *d,
			 struct gnet_stats_basic_sync __percpu *cpu,
			 struct gnet_stats_basic_sync *b,
			 bool running)
{
	return ___gnet_stats_copy_basic(d, cpu, b, TCA_STATS_BASIC_HW, running);
}
EXPORT_SYMBOL(gnet_stats_copy_basic_hw);

/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @rate_est: rate estimator
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 struct net_rate_estimator __rcu **rate_est)
{
	struct gnet_stats_rate_est64 sample;
	struct gnet_stats_rate_est est;
	int res;

	if (!gen_estimator_read(rate_est, &sample))
		return 0;
	est.bps = min_t(u64, UINT_MAX, sample.bps);
	/* we have some time before reaching 2^32 packets per second */
	est.pps = sample.pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
				      TCA_STATS_PAD);
		if (res < 0 || est.bps == sample.bps)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, &sample,
				       sizeof(sample), TCA_STATS_PAD);
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
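
/*
 * Same 32/64-bit scheme as the basic counters: TCA_STATS_RATE_EST carries
 * 32-bit fields, so when the byte rate was clamped to UINT_MAX above, the
 * unclamped sample is additionally emitted as TCA_STATS_RATE_EST64. A
 * reader that understands both should prefer the 64-bit attribute when it
 * is present.
 */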

static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats,
				     const struct gnet_stats_queue __percpu *q)
{
	int i;

	for_each_possible_cpu(i) {
		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);

		qstats->qlen += qcpu->qlen;
		qstats->backlog += qcpu->backlog;
		qstats->drops += qcpu->drops;
		qstats->requeues += qcpu->requeues;
		qstats->overlimits += qcpu->overlimits;
	}
}

void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
			  const struct gnet_stats_queue __percpu *cpu,
			  const struct gnet_stats_queue *q)
{
	if (cpu) {
		gnet_stats_add_queue_cpu(qstats, cpu);
	} else {
		qstats->qlen += q->qlen;
		qstats->backlog += q->backlog;
		qstats->drops += q->drops;
		qstats->requeues += q->requeues;
		qstats->overlimits += q->overlimits;
	}
}
EXPORT_SYMBOL(gnet_stats_add_queue);
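
/*
 * Note that gnet_stats_copy_queue() below overwrites the aggregated qlen
 * with its @qlen argument; callers typically derive that value with
 * qdisc_qlen_sum(), which accounts for per-CPU queue lengths as well.
 */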

/**
 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
 * @d: dumping handle
 * @cpu_q: per cpu queue statistics
 * @q: queue statistics
 * @qlen: queue length statistics
 *
 * Appends the queue statistics to the top level TLV created by
 * gnet_stats_start_copy(). Per cpu queue statistics are used
 * if they are available.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_queue(struct gnet_dump *d,
		      struct gnet_stats_queue __percpu *cpu_q,
		      struct gnet_stats_queue *q, __u32 qlen)
{
	struct gnet_stats_queue qstats = {0};

	gnet_stats_add_queue(&qstats, cpu_q, q);
	qstats.qlen = qlen;

	if (d->compat_tc_stats) {
		d->tc_stats.drops = qstats.drops;
		d->tc_stats.qlen = qstats.qlen;
		d->tc_stats.backlog = qstats.backlog;
		d->tc_stats.overlimits = qstats.overlimits;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_QUEUE,
				       &qstats, sizeof(qstats),
				       TCA_STATS_PAD);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);

/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created by
 * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
 * handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
	if (d->compat_xstats) {
		d->xstats = kmemdup(st, len, GFP_ATOMIC);
		if (!d->xstats)
			goto err_out;
		d->xstats_len = len;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_APP, st, len,
				       TCA_STATS_PAD);

	return 0;

err_out:
	if (d->lock)
		spin_unlock_bh(d->lock);
	d->xstats_len = 0;
	return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);
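
/*
 * The xstats copy is duplicated with kmemdup(GFP_ATOMIC) because the
 * statistics spinlock may be held with BH disabled at this point, and the
 * data must outlive the caller's buffer until gnet_stats_finish_copy()
 * emits the compat TLV and frees it.
 */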

/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
				    sizeof(d->tc_stats), d->padattr) < 0)
			return -1;

	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
				    d->xstats_len, d->padattr) < 0)
			return -1;
	}

	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);
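
/*
 * Compat-mode usage sketch (hypothetical, modeled loosely on qdisc
 * dumping): old userspace sees struct tc_stats in TCA_STATS and xstats in
 * TCA_XSTATS, while new userspace parses the nested TCA_STATS2 container,
 * all produced in the same dump pass:
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
 *					 TCA_XSTATS, NULL, &d, TCA_PAD) < 0)
 *		goto nla_put_failure;
 *	if (gnet_stats_copy_basic(&d, q->cpu_bstats, &q->bstats, false) < 0 ||
 *	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
 *	    gnet_stats_copy_queue(&d, q->cpu_qstats, &q->qstats, qlen) < 0)
 *		goto nla_put_failure;
 *	if (gnet_stats_finish_copy(&d) < 0)
 *		goto nla_put_failure;
 */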