cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

cls_cgroup.h (2091B)


      1/* SPDX-License-Identifier: GPL-2.0-or-later */
      2/*
      3 * cls_cgroup.h			Control Group Classifier
      4 *
      5 * Authors:	Thomas Graf <tgraf@suug.ch>
      6 */
      7
      8#ifndef _NET_CLS_CGROUP_H
      9#define _NET_CLS_CGROUP_H
     10
     11#include <linux/cgroup.h>
     12#include <linux/hardirq.h>
     13#include <linux/rcupdate.h>
     14#include <net/sock.h>
     15#include <net/inet_sock.h>
     16
     17#ifdef CONFIG_CGROUP_NET_CLASSID
/*
 * Per-cgroup state of the net_cls controller: each cgroup carries one
 * 32-bit class identifier that packet classifiers read back.
 */
struct cgroup_cls_state {
	struct cgroup_subsys_state css;
	u32 classid;	/* class id of this cgroup; 0 is used as "no class" (see callers below) */
};

/* Resolve @p's net_cls cgroup state; defined out of line elsewhere in the tree. */
struct cgroup_cls_state *task_cls_state(struct task_struct *p);
     24
/*
 * task_cls_classid - net_cls classid of @p's cgroup.
 * @p: task to classify
 *
 * Returns 0 when called from interrupt context, where the task cgroup
 * association is not meaningful for classification.
 */
static inline u32 task_cls_classid(struct task_struct *p)
{
	u32 classid;

	/* In interrupt context the running task is unrelated to the packet. */
	if (in_interrupt())
		return 0;

	/* The css is RCU-protected; hold the read lock across the dereference
	 * so the state cannot be freed while we read ->classid.
	 */
	rcu_read_lock();
	classid = container_of(task_css(p, net_cls_cgrp_id),
			       struct cgroup_cls_state, css)->classid;
	rcu_read_unlock();

	return classid;
}
     39
     40static inline void sock_update_classid(struct sock_cgroup_data *skcd)
     41{
     42	u32 classid;
     43
     44	classid = task_cls_classid(current);
     45	sock_cgroup_set_classid(skcd, classid);
     46}
     47
     48static inline u32 __task_get_classid(struct task_struct *task)
     49{
     50	return task_cls_state(task)->classid;
     51}
     52
     53static inline u32 task_get_classid(const struct sk_buff *skb)
     54{
     55	u32 classid = __task_get_classid(current);
     56
     57	/* Due to the nature of the classifier it is required to ignore all
     58	 * packets originating from softirq context as accessing `current'
     59	 * would lead to false results.
     60	 *
     61	 * This test assumes that all callers of dev_queue_xmit() explicitly
     62	 * disable bh. Knowing this, it is possible to detect softirq based
     63	 * calls by looking at the number of nested bh disable calls because
     64	 * softirqs always disables bh.
     65	 */
     66	if (in_serving_softirq()) {
     67		struct sock *sk = skb_to_full_sk(skb);
     68
     69		/* If there is an sock_cgroup_classid we'll use that. */
     70		if (!sk || !sk_fullsock(sk))
     71			return 0;
     72
     73		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
     74	}
     75
     76	return classid;
     77}
     78#else /* !CONFIG_CGROUP_NET_CLASSID */
/* CONFIG_CGROUP_NET_CLASSID disabled: nothing to record on the socket. */
static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
}
     82
/* CONFIG_CGROUP_NET_CLASSID disabled: every packet classifies as 0. */
static inline u32 task_get_classid(const struct sk_buff *skb)
{
	return 0;
}
     87#endif /* CONFIG_CGROUP_NET_CLASSID */
     88#endif  /* _NET_CLS_CGROUP_H */