cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cgroup-internal.h (9069B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CGROUP_INTERNAL_H
#define __CGROUP_INTERNAL_H

#include <linux/cgroup.h>
#include <linux/kernfs.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/fs_parser.h>

#define TRACE_CGROUP_PATH_LEN 1024
extern spinlock_t trace_cgroup_path_lock;
extern char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
extern void __init enable_debug_cgroup(void);

/*
 * cgroup_path() takes a spin lock. It is good practice not to take
 * spin locks within trace point handlers, as they are mostly hidden
 * from normal view. As cgroup_path() can take the kernfs_rename_lock
 * spin lock, it is best to not call that function from the trace event
 * handler.
 *
 * Note: trace_cgroup_##type##_enabled() is a static branch that will only
 *       be set when the trace event is enabled.
 */
#define TRACE_CGROUP_PATH(type, cgrp, ...)				\
	do {								\
		if (trace_cgroup_##type##_enabled()) {			\
			unsigned long flags;				\
			spin_lock_irqsave(&trace_cgroup_path_lock,	\
					  flags);			\
			cgroup_path(cgrp, trace_cgroup_path,		\
				    TRACE_CGROUP_PATH_LEN);		\
			trace_cgroup_##type(cgrp, trace_cgroup_path,	\
					    ##__VA_ARGS__);		\
			spin_unlock_irqrestore(&trace_cgroup_path_lock, \
					       flags);			\
		}							\
	} while (0)
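
/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * callers in cgroup.c emit a path-annotated trace event with, e.g.,
 *
 *	TRACE_CGROUP_PATH(mkdir, cgrp);
 *
 * The trace_cgroup_##type##_enabled() static branch keeps the
 * cgroup_path() call and the global-buffer lock off the fast path
 * whenever the corresponding trace event is disabled.
 */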

/*
 * The cgroup filesystem superblock creation/mount context.
 */
struct cgroup_fs_context {
	struct kernfs_fs_context kfc;
	struct cgroup_root	*root;
	struct cgroup_namespace	*ns;
	unsigned int	flags;			/* CGRP_ROOT_* flags */

	/* cgroup1 bits */
	bool		cpuset_clone_children;
	bool		none;			/* User explicitly requested empty subsystem */
	bool		all_ss;			/* Seen 'all' option */
	u16		subsys_mask;		/* Selected subsystems */
	char		*name;			/* Hierarchy name */
	char		*release_agent;		/* Path for release notifications */
};

static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;

	return container_of(kfc, struct cgroup_fs_context, kfc);
}
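
/*
 * Illustrative sketch (editor's addition): fs_context callbacks recover the
 * cgroup-specific context from the generic handle; container_of() maps the
 * embedded kernfs_fs_context back to its enclosing cgroup_fs_context.
 * example_get_tree() is a hypothetical callback, not part of this file:
 *
 *	static int example_get_tree(struct fs_context *fc)
 *	{
 *		struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
 *
 *		return ctx->root ? 0 : -EINVAL;
 *	}
 */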

struct cgroup_pidlist;

struct cgroup_file_ctx {
	struct cgroup_namespace	*ns;

	struct {
		void			*trigger;
	} psi;

	struct {
		bool			started;
		struct css_task_iter	iter;
	} procs;

	struct {
		struct cgroup_pidlist	*pidlist;
	} procs1;
};

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};
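
/*
 * Illustrative sketch (editor's addition, assumes css_set_lock is held):
 * walking one side of the M:N mapping, here every css_set linked to a
 * cgroup; visit_cset() is a hypothetical helper:
 *
 *	struct cgrp_cset_link *link;
 *
 *	list_for_each_entry(link, &cgrp->cset_links, cset_link)
 *		visit_cset(link->cset);
 *
 * The reverse direction walks css_set->cgrp_links via the cgrp_link member.
 */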

/* used to track tasks and csets during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/* the number of tasks in the set */
	int			nr_tasks;

	/* the subsys currently being processed */
	int			ssid;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets points to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};
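
/*
 * Illustrative sketch (editor's addition): subsystem ->can_attach()/->attach()
 * methods walk the set with cgroup_taskset_for_each() from <linux/cgroup.h>;
 * do_per_task_work() is a hypothetical helper:
 *
 *	struct task_struct *task;
 *	struct cgroup_subsys_state *css;
 *
 *	cgroup_taskset_for_each(task, css, tset)
 *		do_per_task_work(task, css);
 */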

/* migration context also tracks preloading */
struct cgroup_mgctx {
	/*
	 * Preloaded source and destination csets.  Used to guarantee
	 * atomic success or failure on actual migration.
	 */
	struct list_head	preloaded_src_csets;
	struct list_head	preloaded_dst_csets;

	/* tasks and csets to migrate */
	struct cgroup_taskset	tset;

	/* subsystems affected by migration */
	u16			ss_mask;
};

#define CGROUP_TASKSET_INIT(tset)						\
{										\
	.src_csets		= LIST_HEAD_INIT(tset.src_csets),		\
	.dst_csets		= LIST_HEAD_INIT(tset.dst_csets),		\
	.csets			= &tset.src_csets,				\
}

#define CGROUP_MGCTX_INIT(name)							\
{										\
	LIST_HEAD_INIT(name.preloaded_src_csets),				\
	LIST_HEAD_INIT(name.preloaded_dst_csets),				\
	CGROUP_TASKSET_INIT(name.tset),						\
}

#define DEFINE_CGROUP_MGCTX(name)						\
	struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
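
/*
 * Illustrative sketch (editor's addition): the preload/commit migration flow
 * built on these helpers, simplified from cgroup_attach_task() with locking
 * and error handling elided:
 *
 *	DEFINE_CGROUP_MGCTX(mgctx);
 *	int ret;
 *
 *	cgroup_migrate_add_src(src_cset, dst_cgrp, &mgctx);
 *	ret = cgroup_migrate_prepare_dst(&mgctx);
 *	if (!ret)
 *		ret = cgroup_migrate(leader, threadgroup, &mgctx);
 *	cgroup_migrate_finish(&mgctx);
 */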

extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
extern struct file_system_type cgroup_fs_type;

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
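
/*
 * Illustrative sketch (editor's addition): the "|| true" in the loop
 * condition makes the cursor assignment an unconditional side effect, so
 * the bound is CGROUP_SUBSYS_COUNT alone.  Typical use simply walks every
 * subsystem in cgroup_subsys[]:
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys(ss, ssid)
 *		pr_info("subsys %s\n", ss->name);
 */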

static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

static inline bool notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

void put_css_set_locked(struct css_set *cset);

static inline void put_css_set(struct css_set *cset)
{
	unsigned long flags;

	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for a
	 * spinlock taken with interrupts disabled.
	 */
	if (refcount_dec_not_one(&cset->refcount))
		return;

	spin_lock_irqsave(&css_set_lock, flags);
	put_css_set_locked(cset);
	spin_unlock_irqrestore(&css_set_lock, flags);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	refcount_inc(&cset->refcount);
}
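
/*
 * Illustrative sketch (editor's addition): a typical get/put pairing.  The
 * reference is taken while the cset is visible under css_set_lock and may
 * be dropped later without the lock:
 *
 *	struct css_set *cset;
 *
 *	spin_lock_irq(&css_set_lock);
 *	cset = task_css_set(task);
 *	get_css_set(cset);
 *	spin_unlock_irq(&css_set_lock);
 *	...
 *	put_css_set(cset);
 */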

bool cgroup_ssid_enabled(int ssid);
bool cgroup_on_dfl(const struct cgroup *cgrp);
bool cgroup_is_thread_root(struct cgroup *cgrp);
bool cgroup_is_threaded(struct cgroup *cgrp);

struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root);
struct cgroup *task_cgroup_from_root(struct task_struct *task,
				     struct cgroup_root *root);
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline);
void cgroup_kn_unlock(struct kernfs_node *kn);
int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
			  struct cgroup_namespace *ns);
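
/*
 * Illustrative sketch (editor's addition): kernfs syscall handlers bracket
 * their work with cgroup_kn_lock_live()/cgroup_kn_unlock(), bailing out if
 * the cgroup died in the meantime:
 *
 *	cgrp = cgroup_kn_lock_live(kn, false);
 *	if (!cgrp)
 *		return -ENODEV;
 *	...
 *	cgroup_kn_unlock(kn);
 */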

void cgroup_free_root(struct cgroup_root *root);
void init_cgroup_root(struct cgroup_fs_context *ctx);
int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask);
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
int cgroup_do_get_tree(struct fs_context *fc);

int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp);
void cgroup_migrate_finish(struct cgroup_mgctx *mgctx);
void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp,
			    struct cgroup_mgctx *mgctx);
int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx);
int cgroup_migrate(struct task_struct *leader, bool threadgroup,
		   struct cgroup_mgctx *mgctx);

int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
		       bool threadgroup);
struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
					     bool *locked)
	__acquires(&cgroup_threadgroup_rwsem);
void cgroup_procs_write_finish(struct task_struct *task, bool locked)
	__releases(&cgroup_threadgroup_rwsem);

void cgroup_lock_and_drain_offline(struct cgroup *cgrp);

int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode);
int cgroup_rmdir(struct kernfs_node *kn);
int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
		     struct kernfs_root *kf_root);

int __cgroup_task_count(const struct cgroup *cgrp);
int cgroup_task_count(const struct cgroup *cgrp);

/*
 * rstat.c
 */
int cgroup_rstat_init(struct cgroup *cgrp);
void cgroup_rstat_exit(struct cgroup *cgrp);
void cgroup_rstat_boot(void);
void cgroup_base_stat_cputime_show(struct seq_file *seq);

/*
 * namespace.c
 */
extern const struct proc_ns_operations cgroupns_operations;

/*
 * cgroup-v1.c
 */
extern struct cftype cgroup1_base_files[];
extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops;
extern const struct fs_parameter_spec cgroup1_fs_parameters[];

int proc_cgroupstats_show(struct seq_file *m, void *v);
bool cgroup1_ssid_disabled(int ssid);
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp);
void cgroup1_release_agent(struct work_struct *work);
void cgroup1_check_for_release(struct cgroup *cgrp);
int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param);
int cgroup1_get_tree(struct fs_context *fc);
int cgroup1_reconfigure(struct fs_context *ctx);

#endif /* __CGROUP_INTERNAL_H */