cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cgroup.h (28830B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef _LINUX_CGROUP_H
      3#define _LINUX_CGROUP_H
      4/*
      5 *  cgroup interface
      6 *
      7 *  Copyright (C) 2003 BULL SA
      8 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
      9 *
     10 */
     11
     12#include <linux/sched.h>
     13#include <linux/cpumask.h>
     14#include <linux/nodemask.h>
     15#include <linux/rculist.h>
     16#include <linux/cgroupstats.h>
     17#include <linux/fs.h>
     18#include <linux/seq_file.h>
     19#include <linux/kernfs.h>
     20#include <linux/jump_label.h>
     21#include <linux/types.h>
     22#include <linux/ns_common.h>
     23#include <linux/nsproxy.h>
     24#include <linux/user_namespace.h>
     25#include <linux/refcount.h>
     26#include <linux/kernel_stat.h>
     27
     28#include <linux/cgroup-defs.h>
     29
     30struct kernel_clone_args;
     31
     32#ifdef CONFIG_CGROUPS
     33
     34/*
     35 * All weight knobs on the default hierarchy should use the following min,
     36 * default and max values.  The default value is the logarithmic center of
     37 * MIN and MAX and allows 100x to be expressed in both directions.
     38 */
     39#define CGROUP_WEIGHT_MIN		1
     40#define CGROUP_WEIGHT_DFL		100
     41#define CGROUP_WEIGHT_MAX		10000
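
        /*
         * For example, on the default hierarchy a cgroup whose cpu.weight is
         * set to 200 is entitled to roughly twice the CPU time of a sibling
         * left at the default 100; CGROUP_WEIGHT_MIN and CGROUP_WEIGHT_MAX
         * bound what such a knob will accept.
         */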
     42
     43/* walk only threadgroup leaders */
     44#define CSS_TASK_ITER_PROCS		(1U << 0)
     45/* walk all threaded css_sets in the domain */
     46#define CSS_TASK_ITER_THREADED		(1U << 1)
     47
     48/* internal flags */
     49#define CSS_TASK_ITER_SKIPPED		(1U << 16)
     50
     51/* a css_task_iter should be treated as an opaque object */
     52struct css_task_iter {
     53	struct cgroup_subsys		*ss;
     54	unsigned int			flags;
     55
     56	struct list_head		*cset_pos;
     57	struct list_head		*cset_head;
     58
     59	struct list_head		*tcset_pos;
     60	struct list_head		*tcset_head;
     61
     62	struct list_head		*task_pos;
     63
     64	struct list_head		*cur_tasks_head;
     65	struct css_set			*cur_cset;
     66	struct css_set			*cur_dcset;
     67	struct task_struct		*cur_task;
     68	struct list_head		iters_node;	/* css_set->task_iters */
     69};
     70
     71extern struct cgroup_root cgrp_dfl_root;
     72extern struct css_set init_css_set;
     73
     74#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
     75#include <linux/cgroup_subsys.h>
     76#undef SUBSYS
     77
     78#define SUBSYS(_x)								\
     79	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;		\
     80	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
     81#include <linux/cgroup_subsys.h>
     82#undef SUBSYS
     83
     84/**
     85 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
     86 * @ss: subsystem in question
     87 */
     88#define cgroup_subsys_enabled(ss)						\
     89	static_branch_likely(&ss ## _enabled_key)
     90
     91/**
     92 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
     93 * @ss: subsystem in question
     94 */
     95#define cgroup_subsys_on_dfl(ss)						\
     96	static_branch_likely(&ss ## _on_dfl_key)
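
        /*
         * Both macros paste the subsystem's variable name into the key
         * symbols declared above, e.g. (assuming the memory controller is
         * compiled in):
         *
         *	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
         *		pr_debug("memory controller is on cgroup2\n");
         */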
     97
     98bool css_has_online_children(struct cgroup_subsys_state *css);
     99struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
    100struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
    101					 struct cgroup_subsys *ss);
    102struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
    103					     struct cgroup_subsys *ss);
    104struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
    105						       struct cgroup_subsys *ss);
    106
    107struct cgroup *cgroup_get_from_path(const char *path);
    108struct cgroup *cgroup_get_from_fd(int fd);
    109
    110int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
    111int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
    112
    113int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
    114int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
    115int cgroup_rm_cftypes(struct cftype *cfts);
    116void cgroup_file_notify(struct cgroup_file *cfile);
    117
    118int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
    119int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
    120int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
    121		     struct pid *pid, struct task_struct *tsk);
    122
    123void cgroup_fork(struct task_struct *p);
    124extern int cgroup_can_fork(struct task_struct *p,
    125			   struct kernel_clone_args *kargs);
    126extern void cgroup_cancel_fork(struct task_struct *p,
    127			       struct kernel_clone_args *kargs);
    128extern void cgroup_post_fork(struct task_struct *p,
    129			     struct kernel_clone_args *kargs);
    130void cgroup_exit(struct task_struct *p);
    131void cgroup_release(struct task_struct *p);
    132void cgroup_free(struct task_struct *p);
    133
    134int cgroup_init_early(void);
    135int cgroup_init(void);
    136
    137int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
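
        /*
         * cgroup_parse_float() reads a decimal fraction into a fixed-point
         * integer scaled by 10^@dec_shift and returns 0 on success.  A small
         * sketch of the expected behavior:
         *
         *	s64 v;
         *
         *	if (!cgroup_parse_float("12.34", 2, &v))
         *		;	// v == 1234
         */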
    138
    139/*
    140 * Iteration helpers and macros.
    141 */
    142
    143struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
    144					   struct cgroup_subsys_state *parent);
    145struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
    146						    struct cgroup_subsys_state *css);
    147struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
    148struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
    149						     struct cgroup_subsys_state *css);
    150
    151struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
    152					 struct cgroup_subsys_state **dst_cssp);
    153struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
    154					struct cgroup_subsys_state **dst_cssp);
    155
    156void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
    157			 struct css_task_iter *it);
    158struct task_struct *css_task_iter_next(struct css_task_iter *it);
    159void css_task_iter_end(struct css_task_iter *it);
    160
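        /*
         * A minimal sketch of the iterator API: walk each threadgroup leader
         * attached to @css (handle_proc() is a hypothetical callback).
         *
         *	struct css_task_iter it;
         *	struct task_struct *task;
         *
         *	css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
         *	while ((task = css_task_iter_next(&it)))
         *		handle_proc(task);
         *	css_task_iter_end(&it);
         */
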
    161/**
    162 * css_for_each_child - iterate through children of a css
    163 * @pos: the css * to use as the loop cursor
    164 * @parent: css whose children to walk
    165 *
    166 * Walk @parent's children.  Must be called under rcu_read_lock().
    167 *
    168 * If a subsystem synchronizes ->css_online() and the start of iteration, a
    169 * css which finished ->css_online() is guaranteed to be visible in the
    170 * future iterations and will stay visible until the last reference is put.
    171 * A css which hasn't finished ->css_online() or already finished
    172 * ->css_offline() may show up during traversal.  It's each subsystem's
    173 * responsibility to synchronize against on/offlining.
    174 *
    175 * It is allowed to temporarily drop RCU read lock during iteration.  The
    176 * caller is responsible for ensuring that @pos remains accessible until
    177 * the start of the next iteration by, for example, bumping the css refcnt.
    178 */
    179#define css_for_each_child(pos, parent)					\
    180	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
    181	     (pos) = css_next_child((pos), (parent)))
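
        /*
         * For example, a sketch that counts @parent_css's children under RCU
         * (the iteration itself takes no references):
         *
         *	struct cgroup_subsys_state *child;
         *	int n = 0;
         *
         *	rcu_read_lock();
         *	css_for_each_child(child, parent_css)
         *		n++;
         *	rcu_read_unlock();
         */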
    182
    183/**
    184 * css_for_each_descendant_pre - pre-order walk of a css's descendants
    185 * @pos: the css * to use as the loop cursor
     186 * @css: css whose descendants to walk
     187 *
     188 * Walk @css's descendants.  @css is included in the iteration and the
    189 * first node to be visited.  Must be called under rcu_read_lock().
    190 *
    191 * If a subsystem synchronizes ->css_online() and the start of iteration, a
    192 * css which finished ->css_online() is guaranteed to be visible in the
    193 * future iterations and will stay visible until the last reference is put.
    194 * A css which hasn't finished ->css_online() or already finished
    195 * ->css_offline() may show up during traversal.  It's each subsystem's
    196 * responsibility to synchronize against on/offlining.
    197 *
    198 * For example, the following guarantees that a descendant can't escape
    199 * state updates of its ancestors.
    200 *
    201 * my_online(@css)
    202 * {
    203 *	Lock @css's parent and @css;
    204 *	Inherit state from the parent;
    205 *	Unlock both.
    206 * }
    207 *
    208 * my_update_state(@css)
    209 * {
    210 *	css_for_each_descendant_pre(@pos, @css) {
    211 *		Lock @pos;
    212 *		if (@pos == @css)
    213 *			Update @css's state;
    214 *		else
    215 *			Verify @pos is alive and inherit state from its parent;
    216 *		Unlock @pos;
    217 *	}
    218 * }
    219 *
    220 * As long as the inheriting step, including checking the parent state, is
    221 * enclosed inside @pos locking, double-locking the parent isn't necessary
    222 * while inheriting.  The state update to the parent is guaranteed to be
    223 * visible by walking order and, as long as inheriting operations to the
    224 * same @pos are atomic to each other, multiple updates racing each other
     225 * still result in the correct state.  It's guaranteed that at least one
    226 * inheritance happens for any css after the latest update to its parent.
    227 *
    228 * If checking parent's state requires locking the parent, each inheriting
    229 * iteration should lock and unlock both @pos->parent and @pos.
    230 *
    231 * Alternatively, a subsystem may choose to use a single global lock to
    232 * synchronize ->css_online() and ->css_offline() against tree-walking
    233 * operations.
    234 *
    235 * It is allowed to temporarily drop RCU read lock during iteration.  The
    236 * caller is responsible for ensuring that @pos remains accessible until
    237 * the start of the next iteration by, for example, bumping the css refcnt.
    238 */
    239#define css_for_each_descendant_pre(pos, css)				\
    240	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
    241	     (pos) = css_next_descendant_pre((pos), (css)))
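
        /*
         * A sketch of the "drop RCU during iteration" rule above: pin @pos,
         * sleep outside the read-side section, and only drop the reference
         * once the next position has been fetched (sleepable_work() is
         * hypothetical).
         *
         *	struct cgroup_subsys_state *pos, *next;
         *
         *	rcu_read_lock();
         *	pos = css_next_descendant_pre(NULL, root_css);
         *	while (pos) {
         *		css_get(pos);
         *		rcu_read_unlock();
         *		sleepable_work(pos);
         *		rcu_read_lock();
         *		next = css_next_descendant_pre(pos, root_css);
         *		css_put(pos);
         *		pos = next;
         *	}
         *	rcu_read_unlock();
         */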
    242
    243/**
    244 * css_for_each_descendant_post - post-order walk of a css's descendants
    245 * @pos: the css * to use as the loop cursor
    246 * @css: css whose descendants to walk
    247 *
    248 * Similar to css_for_each_descendant_pre() but performs post-order
     249 * traversal instead.  @css is included in the iteration and the last
    250 * node to be visited.
    251 *
    252 * If a subsystem synchronizes ->css_online() and the start of iteration, a
    253 * css which finished ->css_online() is guaranteed to be visible in the
    254 * future iterations and will stay visible until the last reference is put.
    255 * A css which hasn't finished ->css_online() or already finished
    256 * ->css_offline() may show up during traversal.  It's each subsystem's
    257 * responsibility to synchronize against on/offlining.
    258 *
     259 * Note that the walk visibility guarantee example described for the
     260 * pre-order walk does not apply the same way to post-order walks.
    261 */
    262#define css_for_each_descendant_post(pos, css)				\
    263	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
    264	     (pos) = css_next_descendant_post((pos), (css)))
    265
    266/**
    267 * cgroup_taskset_for_each - iterate cgroup_taskset
    268 * @task: the loop cursor
    269 * @dst_css: the destination css
    270 * @tset: taskset to iterate
    271 *
    272 * @tset may contain multiple tasks and they may belong to multiple
    273 * processes.
    274 *
    275 * On the v2 hierarchy, there may be tasks from multiple processes and they
    276 * may not share the source or destination csses.
    277 *
    278 * On traditional hierarchies, when there are multiple tasks in @tset, if a
    279 * task of a process is in @tset, all tasks of the process are in @tset.
    280 * Also, all are guaranteed to share the same source and destination csses.
    281 *
    282 * Iteration is not in any specific order.
    283 */
    284#define cgroup_taskset_for_each(task, dst_css, tset)			\
    285	for ((task) = cgroup_taskset_first((tset), &(dst_css));		\
    286	     (task);							\
    287	     (task) = cgroup_taskset_next((tset), &(dst_css)))
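
        /*
         * For example, a hypothetical subsystem's ->attach() method moving
         * every task in the set to its destination css:
         *
         *	static void foo_attach(struct cgroup_taskset *tset)
         *	{
         *		struct cgroup_subsys_state *dst_css;
         *		struct task_struct *task;
         *
         *		cgroup_taskset_for_each(task, dst_css, tset)
         *			foo_move_task(task, dst_css);	// hypothetical
         *	}
         */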
    288
    289/**
    290 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
    291 * @leader: the loop cursor
    292 * @dst_css: the destination css
    293 * @tset: taskset to iterate
    294 *
    295 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
    296 * may not contain any.
    297 */
    298#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
    299	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
    300	     (leader);							\
    301	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
    302		if ((leader) != (leader)->group_leader)			\
    303			;						\
    304		else
    305
    306/*
    307 * Inline functions.
    308 */
    309
    310static inline u64 cgroup_id(const struct cgroup *cgrp)
    311{
    312	return cgrp->kn->id;
    313}
    314
    315/**
    316 * css_get - obtain a reference on the specified css
    317 * @css: target css
    318 *
    319 * The caller must already have a reference.
    320 */
    321static inline void css_get(struct cgroup_subsys_state *css)
    322{
    323	if (!(css->flags & CSS_NO_REF))
    324		percpu_ref_get(&css->refcnt);
    325}
    326
    327/**
    328 * css_get_many - obtain references on the specified css
    329 * @css: target css
    330 * @n: number of references to get
    331 *
    332 * The caller must already have a reference.
    333 */
    334static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
    335{
    336	if (!(css->flags & CSS_NO_REF))
    337		percpu_ref_get_many(&css->refcnt, n);
    338}
    339
    340/**
    341 * css_tryget - try to obtain a reference on the specified css
    342 * @css: target css
    343 *
    344 * Obtain a reference on @css unless it already has reached zero and is
    345 * being released.  This function doesn't care whether @css is on or
    346 * offline.  The caller naturally needs to ensure that @css is accessible
    347 * but doesn't have to be holding a reference on it - IOW, RCU protected
    348 * access is good enough for this function.  Returns %true if a reference
    349 * count was successfully obtained; %false otherwise.
    350 */
    351static inline bool css_tryget(struct cgroup_subsys_state *css)
    352{
    353	if (!(css->flags & CSS_NO_REF))
    354		return percpu_ref_tryget(&css->refcnt);
    355	return true;
    356}
    357
    358/**
    359 * css_tryget_online - try to obtain a reference on the specified css if online
    360 * @css: target css
    361 *
    362 * Obtain a reference on @css if it's online.  The caller naturally needs
    363 * to ensure that @css is accessible but doesn't have to be holding a
    364 * reference on it - IOW, RCU protected access is good enough for this
    365 * function.  Returns %true if a reference count was successfully obtained;
    366 * %false otherwise.
    367 */
    368static inline bool css_tryget_online(struct cgroup_subsys_state *css)
    369{
    370	if (!(css->flags & CSS_NO_REF))
    371		return percpu_ref_tryget_live(&css->refcnt);
    372	return true;
    373}
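
        /*
         * Typical lookup pattern for both tryget variants: find the css under
         * RCU, pin it before leaving the read-side section, and drop the
         * reference with css_put() when done (foo_lookup() is hypothetical).
         *
         *	rcu_read_lock();
         *	css = foo_lookup();
         *	if (css && !css_tryget_online(css))
         *		css = NULL;
         *	rcu_read_unlock();
         */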
    374
    375/**
    376 * css_is_dying - test whether the specified css is dying
    377 * @css: target css
    378 *
    379 * Test whether @css is in the process of offlining or already offline.  In
    380 * most cases, ->css_online() and ->css_offline() callbacks should be
    381 * enough; however, the actual offline operations are RCU delayed and this
    382 * test returns %true also when @css is scheduled to be offlined.
    383 *
    384 * This is useful, for example, when the use case requires synchronous
    385 * behavior with respect to cgroup removal.  cgroup removal schedules css
    386 * offlining but the css can seem alive while the operation is being
    387 * delayed.  If the delay affects user visible semantics, this test can be
    388 * used to resolve the situation.
    389 */
    390static inline bool css_is_dying(struct cgroup_subsys_state *css)
    391{
    392	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
    393}
    394
    395/**
    396 * css_put - put a css reference
    397 * @css: target css
    398 *
    399 * Put a reference obtained via css_get() and css_tryget_online().
    400 */
    401static inline void css_put(struct cgroup_subsys_state *css)
    402{
    403	if (!(css->flags & CSS_NO_REF))
    404		percpu_ref_put(&css->refcnt);
    405}
    406
    407/**
    408 * css_put_many - put css references
    409 * @css: target css
    410 * @n: number of references to put
    411 *
    412 * Put references obtained via css_get() and css_tryget_online().
    413 */
    414static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
    415{
    416	if (!(css->flags & CSS_NO_REF))
    417		percpu_ref_put_many(&css->refcnt, n);
    418}
    419
    420static inline void cgroup_get(struct cgroup *cgrp)
    421{
    422	css_get(&cgrp->self);
    423}
    424
    425static inline bool cgroup_tryget(struct cgroup *cgrp)
    426{
    427	return css_tryget(&cgrp->self);
    428}
    429
    430static inline void cgroup_put(struct cgroup *cgrp)
    431{
    432	css_put(&cgrp->self);
    433}
    434
    435/**
    436 * task_css_set_check - obtain a task's css_set with extra access conditions
    437 * @task: the task to obtain css_set for
    438 * @__c: extra condition expression to be passed to rcu_dereference_check()
    439 *
    440 * A task's css_set is RCU protected, initialized and exited while holding
    441 * task_lock(), and can only be modified while holding both cgroup_mutex
    442 * and task_lock() while the task is alive.  This macro verifies that the
    443 * caller is inside proper critical section and returns @task's css_set.
    444 *
    445 * The caller can also specify additional allowed conditions via @__c, such
    446 * as locks used during the cgroup_subsys::attach() methods.
    447 */
    448#ifdef CONFIG_PROVE_RCU
    449extern struct mutex cgroup_mutex;
    450extern spinlock_t css_set_lock;
    451#define task_css_set_check(task, __c)					\
    452	rcu_dereference_check((task)->cgroups,				\
    453		rcu_read_lock_sched_held() ||				\
    454		lockdep_is_held(&cgroup_mutex) ||			\
    455		lockdep_is_held(&css_set_lock) ||			\
    456		((task)->flags & PF_EXITING) || (__c))
    457#else
    458#define task_css_set_check(task, __c)					\
    459	rcu_dereference((task)->cgroups)
    460#endif
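
        /*
         * For example, a subsystem that accesses css_sets under its own lock
         * can pass the lockdep condition through @__c (foo_lock is
         * hypothetical):
         *
         *	cset = task_css_set_check(task, lockdep_is_held(&foo_lock));
         */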
    461
    462/**
    463 * task_css_check - obtain css for (task, subsys) w/ extra access conds
    464 * @task: the target task
    465 * @subsys_id: the target subsystem ID
    466 * @__c: extra condition expression to be passed to rcu_dereference_check()
    467 *
    468 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
    469 * synchronization rules are the same as task_css_set_check().
    470 */
    471#define task_css_check(task, subsys_id, __c)				\
    472	task_css_set_check((task), (__c))->subsys[(subsys_id)]
    473
    474/**
    475 * task_css_set - obtain a task's css_set
    476 * @task: the task to obtain css_set for
    477 *
    478 * See task_css_set_check().
    479 */
    480static inline struct css_set *task_css_set(struct task_struct *task)
    481{
    482	return task_css_set_check(task, false);
    483}
    484
    485/**
    486 * task_css - obtain css for (task, subsys)
    487 * @task: the target task
    488 * @subsys_id: the target subsystem ID
    489 *
    490 * See task_css_check().
    491 */
    492static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
    493						   int subsys_id)
    494{
    495	return task_css_check(task, subsys_id, false);
    496}
    497
    498/**
    499 * task_get_css - find and get the css for (task, subsys)
    500 * @task: the target task
    501 * @subsys_id: the target subsystem ID
    502 *
    503 * Find the css for the (@task, @subsys_id) combination, increment a
    504 * reference on and return it.  This function is guaranteed to return a
    505 * valid css.  The returned css may already have been offlined.
    506 */
    507static inline struct cgroup_subsys_state *
    508task_get_css(struct task_struct *task, int subsys_id)
    509{
    510	struct cgroup_subsys_state *css;
    511
    512	rcu_read_lock();
    513	while (true) {
    514		css = task_css(task, subsys_id);
    515		/*
    516		 * Can't use css_tryget_online() here.  A task which has
    517		 * PF_EXITING set may stay associated with an offline css.
    518		 * If such task calls this function, css_tryget_online()
    519		 * will keep failing.
    520		 */
    521		if (likely(css_tryget(css)))
    522			break;
    523		cpu_relax();
    524	}
    525	rcu_read_unlock();
    526	return css;
    527}
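
        /*
         * E.g. pin the cpu css of @task, use it outside RCU and release it
         * (assumes the cpu controller is compiled in):
         *
         *	struct cgroup_subsys_state *css;
         *
         *	css = task_get_css(task, cpu_cgrp_id);
         *	...
         *	css_put(css);
         */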
    528
    529/**
    530 * task_css_is_root - test whether a task belongs to the root css
    531 * @task: the target task
    532 * @subsys_id: the target subsystem ID
    533 *
    534 * Test whether @task belongs to the root css on the specified subsystem.
    535 * May be invoked in any context.
    536 */
    537static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
    538{
    539	return task_css_check(task, subsys_id, true) ==
    540		init_css_set.subsys[subsys_id];
    541}
    542
    543static inline struct cgroup *task_cgroup(struct task_struct *task,
    544					 int subsys_id)
    545{
    546	return task_css(task, subsys_id)->cgroup;
    547}
    548
    549static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
    550{
    551	return task_css_set(task)->dfl_cgrp;
    552}
    553
    554static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
    555{
    556	struct cgroup_subsys_state *parent_css = cgrp->self.parent;
    557
    558	if (parent_css)
    559		return container_of(parent_css, struct cgroup, self);
    560	return NULL;
    561}
    562
    563/**
    564 * cgroup_is_descendant - test ancestry
    565 * @cgrp: the cgroup to be tested
    566 * @ancestor: possible ancestor of @cgrp
    567 *
    568 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
    569 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
    570 * and @ancestor are accessible.
    571 */
    572static inline bool cgroup_is_descendant(struct cgroup *cgrp,
    573					struct cgroup *ancestor)
    574{
    575	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
    576		return false;
    577	return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
    578}
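
        /*
         * E.g. given the hierarchy a/b/c, cgroup_is_descendant(c, a) and
         * cgroup_is_descendant(c, c) return true, while
         * cgroup_is_descendant(a, c) returns false.
         */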
    579
    580/**
    581 * cgroup_ancestor - find ancestor of cgroup
    582 * @cgrp: cgroup to find ancestor of
    583 * @ancestor_level: level of ancestor to find starting from root
    584 *
    585 * Find ancestor of cgroup at specified level starting from root if it exists
    586 * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
    587 * @ancestor_level.
    588 *
    589 * This function is safe to call as long as @cgrp is accessible.
    590 */
    591static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
    592					     int ancestor_level)
    593{
    594	if (cgrp->level < ancestor_level)
    595		return NULL;
    596	while (cgrp && cgrp->level > ancestor_level)
    597		cgrp = cgroup_parent(cgrp);
    598	return cgrp;
    599}
    600
    601/**
    602 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
    603 * @task: the task to be tested
    604 * @ancestor: possible ancestor of @task's cgroup
    605 *
    606 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
     607 * It follows all the same rules as cgroup_is_descendant(), and only applies
    608 * to the default hierarchy.
    609 */
    610static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
    611					       struct cgroup *ancestor)
    612{
    613	struct css_set *cset = task_css_set(task);
    614
    615	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
    616}
    617
    618/* no synchronization, the result can only be used as a hint */
    619static inline bool cgroup_is_populated(struct cgroup *cgrp)
    620{
    621	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
    622		cgrp->nr_populated_threaded_children;
    623}
    624
    625/* returns ino associated with a cgroup */
    626static inline ino_t cgroup_ino(struct cgroup *cgrp)
    627{
    628	return kernfs_ino(cgrp->kn);
    629}
    630
    631/* cft/css accessors for cftype->write() operation */
    632static inline struct cftype *of_cft(struct kernfs_open_file *of)
    633{
    634	return of->kn->priv;
    635}
    636
    637struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);
    638
    639/* cft/css accessors for cftype->seq_*() operations */
    640static inline struct cftype *seq_cft(struct seq_file *seq)
    641{
    642	return of_cft(seq->private);
    643}
    644
    645static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
    646{
    647	return of_css(seq->private);
    648}
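
        /*
         * For example, a hypothetical cftype ->seq_show() handler resolving
         * its css with seq_css():
         *
         *	static int foo_stat_show(struct seq_file *seq, void *v)
         *	{
         *		struct cgroup_subsys_state *css = seq_css(seq);
         *
         *		seq_printf(seq, "%llu\n", foo_read_stat(css));
         *		return 0;
         *	}
         */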
    649
    650/*
    651 * Name / path handling functions.  All are thin wrappers around the kernfs
    652 * counterparts and can be called under any context.
    653 */
    654
    655static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
    656{
    657	return kernfs_name(cgrp->kn, buf, buflen);
    658}
    659
    660static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
    661{
    662	return kernfs_path(cgrp->kn, buf, buflen);
    663}
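
        /*
         * E.g. format a cgroup's path into a buffer; like the kernfs
         * counterpart, the return value is the full path length, so a result
         * >= buflen (or < 0) means the buffer contents are unusable:
         *
         *	char buf[128];		// illustrative size
         *	int len = cgroup_path(cgrp, buf, sizeof(buf));
         *
         *	if (len >= 0 && len < sizeof(buf))
         *		pr_info("%s\n", buf);
         */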
    664
    665static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
    666{
    667	pr_cont_kernfs_name(cgrp->kn);
    668}
    669
    670static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
    671{
    672	pr_cont_kernfs_path(cgrp->kn);
    673}
    674
    675static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
    676{
    677	return &cgrp->psi;
    678}
    679
    680bool cgroup_psi_enabled(void);
    681
    682static inline void cgroup_init_kthreadd(void)
    683{
    684	/*
    685	 * kthreadd is inherited by all kthreads, keep it in the root so
    686	 * that the new kthreads are guaranteed to stay in the root until
    687	 * initialization is finished.
    688	 */
    689	current->no_cgroup_migration = 1;
    690}
    691
    692static inline void cgroup_kthread_ready(void)
    693{
    694	/*
    695	 * This kthread finished initialization.  The creator should have
    696	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
    697	 */
    698	current->no_cgroup_migration = 0;
    699}
    700
    701void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
    702struct cgroup *cgroup_get_from_id(u64 id);
    703#else /* !CONFIG_CGROUPS */
    704
    705struct cgroup_subsys_state;
    706struct cgroup;
    707
    708static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
    709static inline void css_get(struct cgroup_subsys_state *css) {}
    710static inline void css_put(struct cgroup_subsys_state *css) {}
    711static inline int cgroup_attach_task_all(struct task_struct *from,
    712					 struct task_struct *t) { return 0; }
    713static inline int cgroupstats_build(struct cgroupstats *stats,
    714				    struct dentry *dentry) { return -EINVAL; }
    715
    716static inline void cgroup_fork(struct task_struct *p) {}
    717static inline int cgroup_can_fork(struct task_struct *p,
    718				  struct kernel_clone_args *kargs) { return 0; }
    719static inline void cgroup_cancel_fork(struct task_struct *p,
    720				      struct kernel_clone_args *kargs) {}
    721static inline void cgroup_post_fork(struct task_struct *p,
    722				    struct kernel_clone_args *kargs) {}
    723static inline void cgroup_exit(struct task_struct *p) {}
    724static inline void cgroup_release(struct task_struct *p) {}
    725static inline void cgroup_free(struct task_struct *p) {}
    726
    727static inline int cgroup_init_early(void) { return 0; }
    728static inline int cgroup_init(void) { return 0; }
    729static inline void cgroup_init_kthreadd(void) {}
    730static inline void cgroup_kthread_ready(void) {}
    731
    732static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
    733{
    734	return NULL;
    735}
    736
    737static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
    738{
    739	return NULL;
    740}
    741
    742static inline bool cgroup_psi_enabled(void)
    743{
    744	return false;
    745}
    746
    747static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
    748					       struct cgroup *ancestor)
    749{
    750	return true;
    751}
    752
    753static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
    754{}
    755
    756static inline struct cgroup *cgroup_get_from_id(u64 id)
    757{
    758	return NULL;
    759}
    760#endif /* !CONFIG_CGROUPS */
    761
    762#ifdef CONFIG_CGROUPS
    763/*
    764 * cgroup scalable recursive statistics.
    765 */
    766void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
    767void cgroup_rstat_flush(struct cgroup *cgrp);
    768void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
    769void cgroup_rstat_flush_hold(struct cgroup *cgrp);
    770void cgroup_rstat_flush_release(void);
    771
    772/*
    773 * Basic resource stats.
    774 */
    775#ifdef CONFIG_CGROUP_CPUACCT
    776void cpuacct_charge(struct task_struct *tsk, u64 cputime);
    777void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
    778#else
    779static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
    780static inline void cpuacct_account_field(struct task_struct *tsk, int index,
    781					 u64 val) {}
    782#endif
    783
    784void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
    785void __cgroup_account_cputime_field(struct cgroup *cgrp,
    786				    enum cpu_usage_stat index, u64 delta_exec);
    787
    788static inline void cgroup_account_cputime(struct task_struct *task,
    789					  u64 delta_exec)
    790{
    791	struct cgroup *cgrp;
    792
    793	cpuacct_charge(task, delta_exec);
    794
    795	cgrp = task_dfl_cgroup(task);
    796	if (cgroup_parent(cgrp))
    797		__cgroup_account_cputime(cgrp, delta_exec);
    798}
    799
    800static inline void cgroup_account_cputime_field(struct task_struct *task,
    801						enum cpu_usage_stat index,
    802						u64 delta_exec)
    803{
    804	struct cgroup *cgrp;
    805
    806	cpuacct_account_field(task, index, delta_exec);
    807
    808	cgrp = task_dfl_cgroup(task);
    809	if (cgroup_parent(cgrp))
    810		__cgroup_account_cputime_field(cgrp, index, delta_exec);
    811}
    812
    813#else	/* CONFIG_CGROUPS */
    814
    815static inline void cgroup_account_cputime(struct task_struct *task,
    816					  u64 delta_exec) {}
    817static inline void cgroup_account_cputime_field(struct task_struct *task,
    818						enum cpu_usage_stat index,
    819						u64 delta_exec) {}
    820
    821#endif	/* CONFIG_CGROUPS */
    822
    823/*
    824 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
    825 * definition in cgroup-defs.h.
    826 */
    827#ifdef CONFIG_SOCK_CGROUP_DATA
    828
    829void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
    830void cgroup_sk_clone(struct sock_cgroup_data *skcd);
    831void cgroup_sk_free(struct sock_cgroup_data *skcd);
    832
    833static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
    834{
    835	return skcd->cgroup;
    836}
    837
     838#else	/* CONFIG_SOCK_CGROUP_DATA */
    839
    840static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
    841static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
    842static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
    843
     844#endif	/* CONFIG_SOCK_CGROUP_DATA */
    845
    846struct cgroup_namespace {
    847	struct ns_common	ns;
    848	struct user_namespace	*user_ns;
    849	struct ucounts		*ucounts;
    850	struct css_set          *root_cset;
    851};
    852
    853extern struct cgroup_namespace init_cgroup_ns;
    854
    855#ifdef CONFIG_CGROUPS
    856
    857void free_cgroup_ns(struct cgroup_namespace *ns);
    858
    859struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
    860					struct user_namespace *user_ns,
    861					struct cgroup_namespace *old_ns);
    862
    863int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
    864		   struct cgroup_namespace *ns);
    865
    866#else /* !CONFIG_CGROUPS */
    867
    868static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
    869static inline struct cgroup_namespace *
    870copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
    871	       struct cgroup_namespace *old_ns)
    872{
    873	return old_ns;
    874}
    875
    876#endif /* !CONFIG_CGROUPS */
    877
    878static inline void get_cgroup_ns(struct cgroup_namespace *ns)
    879{
    880	if (ns)
    881		refcount_inc(&ns->ns.count);
    882}
    883
    884static inline void put_cgroup_ns(struct cgroup_namespace *ns)
    885{
    886	if (ns && refcount_dec_and_test(&ns->ns.count))
    887		free_cgroup_ns(ns);
    888}
    889
    890#ifdef CONFIG_CGROUPS
    891
    892void cgroup_enter_frozen(void);
    893void cgroup_leave_frozen(bool always_leave);
    894void cgroup_update_frozen(struct cgroup *cgrp);
    895void cgroup_freeze(struct cgroup *cgrp, bool freeze);
    896void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
    897				 struct cgroup *dst);
    898
    899static inline bool cgroup_task_frozen(struct task_struct *task)
    900{
    901	return task->frozen;
    902}
    903
    904#else /* !CONFIG_CGROUPS */
    905
    906static inline void cgroup_enter_frozen(void) { }
    907static inline void cgroup_leave_frozen(bool always_leave) { }
    908static inline bool cgroup_task_frozen(struct task_struct *task)
    909{
    910	return false;
    911}
    912
    913#endif /* !CONFIG_CGROUPS */
    914
    915#ifdef CONFIG_CGROUP_BPF
    916static inline void cgroup_bpf_get(struct cgroup *cgrp)
    917{
    918	percpu_ref_get(&cgrp->bpf.refcnt);
    919}
    920
    921static inline void cgroup_bpf_put(struct cgroup *cgrp)
    922{
    923	percpu_ref_put(&cgrp->bpf.refcnt);
    924}
    925
    926#else /* CONFIG_CGROUP_BPF */
    927
    928static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
    929static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
    930
    931#endif /* CONFIG_CGROUP_BPF */
    932
    933#endif /* _LINUX_CGROUP_H */