cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rbtree_augmented.h (9756B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
  Red Black Trees
  (C) 1999  Andrea Arcangeli <andrea@suse.de>
  (C) 2002  David Woodhouse <dwmw2@infradead.org>
  (C) 2012  Michel Lespinasse <walken@google.com>


  linux/include/linux/rbtree_augmented.h
*/

#ifndef _LINUX_RBTREE_AUGMENTED_H
#define _LINUX_RBTREE_AUGMENTED_H

#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>

/*
 * Please note - only struct rb_augment_callbacks and the prototypes for
 * rb_insert_augmented() and rb_erase_augmented() are intended to be public.
 * The rest are implementation details you are not expected to depend on.
 *
 * See Documentation/core-api/rbtree.rst for documentation and samples.
 */

struct rb_augment_callbacks {
	void (*propagate)(struct rb_node *node, struct rb_node *stop);
	void (*copy)(struct rb_node *old, struct rb_node *new);
	void (*rotate)(struct rb_node *old, struct rb_node *new);
};

extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
	void (*augment_rotate)(struct rb_node *old, struct rb_node *new));

/*
 * Fixup the rbtree and update the augmented information when rebalancing.
 *
 * On insertion, the user must update the augmented information on the path
 * leading to the inserted node, then call rb_link_node() as usual and
 * rb_insert_augmented() instead of the usual rb_insert_color() call.
 * If rb_insert_augmented() rebalances the rbtree, it will call back into
 * a user-provided function to update the augmented information on the
 * affected subtrees.
 */
static inline void
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
		    const struct rb_augment_callbacks *augment)
{
	__rb_insert_augmented(node, root, augment->rotate);
}

static inline void
rb_insert_augmented_cached(struct rb_node *node,
			   struct rb_root_cached *root, bool newleft,
			   const struct rb_augment_callbacks *augment)
{
	if (newleft)
		root->rb_leftmost = node;
	rb_insert_augmented(node, &root->rb_root, augment);
}
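
/*
 * A minimal usage sketch of the insertion protocol described above
 * (hypothetical, not part of the kernel API): update the augmented data
 * on the way down, link the node, then call rb_insert_augmented().
 * "struct item", its "key"/"subtree_max" fields and the callbacks
 * argument are made-up placeholders.
 */
struct item {
	struct rb_node rb;
	unsigned long key;
	unsigned long subtree_max;	/* max key within this subtree */
};

static inline void item_insert(struct item *new, struct rb_root *root,
			       const struct rb_augment_callbacks *cb)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct item *cur = rb_entry(*link, struct item, rb);

		parent = *link;
		/* maintain the augmented data along the search path */
		if (new->key > cur->subtree_max)
			cur->subtree_max = new->key;
		link = new->key < cur->key ? &cur->rb.rb_left
					   : &cur->rb.rb_right;
	}

	new->subtree_max = new->key;
	rb_link_node(&new->rb, parent, link);
	rb_insert_augmented(&new->rb, root, cb);
}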

/*
 * Template for declaring augmented rbtree callbacks (generic case)
 *
 * RBSTATIC:    'static' or empty
 * RBNAME:      name of the rb_augment_callbacks structure
 * RBSTRUCT:    struct type of the tree nodes
 * RBFIELD:     name of struct rb_node field within RBSTRUCT
 * RBAUGMENTED: name of field within RBSTRUCT holding data for subtree
 * RBCOMPUTE:   name of function that recomputes the RBAUGMENTED data
 */

#define RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME,				\
			     RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE)	\
static inline void							\
RBNAME ## _propagate(struct rb_node *rb, struct rb_node *stop)		\
{									\
	while (rb != stop) {						\
		RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD);	\
		if (RBCOMPUTE(node, true))				\
			break;						\
		rb = rb_parent(&node->RBFIELD);				\
	}								\
}									\
static inline void							\
RBNAME ## _copy(struct rb_node *rb_old, struct rb_node *rb_new)		\
{									\
	RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD);		\
	RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD);		\
	new->RBAUGMENTED = old->RBAUGMENTED;				\
}									\
static void								\
RBNAME ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new)	\
{									\
	RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD);		\
	RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD);		\
	new->RBAUGMENTED = old->RBAUGMENTED;				\
	RBCOMPUTE(old, false);						\
}									\
RBSTATIC const struct rb_augment_callbacks RBNAME = {			\
	.propagate = RBNAME ## _propagate,				\
	.copy = RBNAME ## _copy,					\
	.rotate = RBNAME ## _rotate					\
};
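
/*
 * A minimal sketch of instantiating the template above (hypothetical,
 * not part of the kernel; in practice this would live in the user's own
 * source file): each node caches the size of its subtree.
 * "struct sized_node" and sized_node_count() are made-up names.
 */
struct sized_node {
	struct rb_node rb;
	unsigned long subtree_size;	/* 1 + sizes of both child subtrees */
};

static inline bool sized_node_count(struct sized_node *node, bool exit)
{
	unsigned long size = 1;
	struct sized_node *child;

	if (node->rb.rb_left) {
		child = rb_entry(node->rb.rb_left, struct sized_node, rb);
		size += child->subtree_size;
	}
	if (node->rb.rb_right) {
		child = rb_entry(node->rb.rb_right, struct sized_node, rb);
		size += child->subtree_size;
	}
	if (exit && node->subtree_size == size)
		return true;	/* unchanged, propagation can stop early */
	node->subtree_size = size;
	return false;
}

RB_DECLARE_CALLBACKS(static, sized_node_callbacks,
		     struct sized_node, rb, subtree_size, sized_node_count)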

/*
 * Template for declaring augmented rbtree callbacks,
 * computing RBAUGMENTED scalar as max(RBCOMPUTE(node)) for all subtree nodes.
 *
 * RBSTATIC:    'static' or empty
 * RBNAME:      name of the rb_augment_callbacks structure
 * RBSTRUCT:    struct type of the tree nodes
 * RBFIELD:     name of struct rb_node field within RBSTRUCT
 * RBTYPE:      type of the RBAUGMENTED field
 * RBAUGMENTED: name of RBTYPE field within RBSTRUCT holding data for subtree
 * RBCOMPUTE:   name of function that returns the per-node RBTYPE scalar
 */

#define RB_DECLARE_CALLBACKS_MAX(RBSTATIC, RBNAME, RBSTRUCT, RBFIELD,	      \
				 RBTYPE, RBAUGMENTED, RBCOMPUTE)	      \
static inline bool RBNAME ## _compute_max(RBSTRUCT *node, bool exit)	      \
{									      \
	RBSTRUCT *child;						      \
	RBTYPE max = RBCOMPUTE(node);					      \
	if (node->RBFIELD.rb_left) {					      \
		child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD);   \
		if (child->RBAUGMENTED > max)				      \
			max = child->RBAUGMENTED;			      \
	}								      \
	if (node->RBFIELD.rb_right) {					      \
		child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD);  \
		if (child->RBAUGMENTED > max)				      \
			max = child->RBAUGMENTED;			      \
	}								      \
	if (exit && node->RBAUGMENTED == max)				      \
		return true;						      \
	node->RBAUGMENTED = max;					      \
	return false;							      \
}									      \
RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME,					      \
		     RBSTRUCT, RBFIELD, RBAUGMENTED, RBNAME ## _compute_max)

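/*
 * A minimal sketch of instantiating the _MAX template (hypothetical,
 * interval-tree style, not part of the kernel): each node caches the
 * largest "last" endpoint found in its subtree. "struct span", its
 * fields and SPAN_LAST() are made-up names.
 */
struct span {
	struct rb_node rb;
	unsigned long start, last;
	unsigned long subtree_last;	/* max of ->last over this subtree */
};

#define SPAN_LAST(node) ((node)->last)	/* per-node scalar for the template */

RB_DECLARE_CALLBACKS_MAX(static, span_callbacks, struct span, rb,
			 unsigned long, subtree_last, SPAN_LAST)
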
#define	RB_RED		0
#define	RB_BLACK	1

#define __rb_parent(pc)    ((struct rb_node *)(pc & ~3))

#define __rb_color(pc)     ((pc) & 1)
#define __rb_is_black(pc)  __rb_color(pc)
#define __rb_is_red(pc)    (!__rb_color(pc))
#define rb_color(rb)       __rb_color((rb)->__rb_parent_color)
#define rb_is_red(rb)      __rb_is_red((rb)->__rb_parent_color)
#define rb_is_black(rb)    __rb_is_black((rb)->__rb_parent_color)

static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
{
	rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
}

static inline void rb_set_parent_color(struct rb_node *rb,
				       struct rb_node *p, int color)
{
	rb->__rb_parent_color = (unsigned long)p | color;
}

static inline void
__rb_change_child(struct rb_node *old, struct rb_node *new,
		  struct rb_node *parent, struct rb_root *root)
{
	if (parent) {
		if (parent->rb_left == old)
			WRITE_ONCE(parent->rb_left, new);
		else
			WRITE_ONCE(parent->rb_right, new);
	} else
		WRITE_ONCE(root->rb_node, new);
}

static inline void
__rb_change_child_rcu(struct rb_node *old, struct rb_node *new,
		      struct rb_node *parent, struct rb_root *root)
{
	if (parent) {
		if (parent->rb_left == old)
			rcu_assign_pointer(parent->rb_left, new);
		else
			rcu_assign_pointer(parent->rb_right, new);
	} else
		rcu_assign_pointer(root->rb_node, new);
}

extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
	void (*augment_rotate)(struct rb_node *old, struct rb_node *new));

static __always_inline struct rb_node *
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
		     const struct rb_augment_callbacks *augment)
{
	struct rb_node *child = node->rb_right;
	struct rb_node *tmp = node->rb_left;
	struct rb_node *parent, *rebalance;
	unsigned long pc;

	if (!tmp) {
		/*
		 * Case 1: node to erase has no more than 1 child (easy!)
		 *
		 * Note that if there is one child it must be red due to 5)
		 * and node must be black due to 4). We adjust colors locally
		 * so as to bypass __rb_erase_color() later on.
		 */
		pc = node->__rb_parent_color;
		parent = __rb_parent(pc);
		__rb_change_child(node, child, parent, root);
		if (child) {
			child->__rb_parent_color = pc;
			rebalance = NULL;
		} else
			rebalance = __rb_is_black(pc) ? parent : NULL;
		tmp = parent;
	} else if (!child) {
		/* Still case 1, but this time the child is node->rb_left */
		tmp->__rb_parent_color = pc = node->__rb_parent_color;
		parent = __rb_parent(pc);
		__rb_change_child(node, tmp, parent, root);
		rebalance = NULL;
		tmp = parent;
	} else {
		struct rb_node *successor = child, *child2;

		tmp = child->rb_left;
		if (!tmp) {
			/*
			 * Case 2: node's successor is its right child
			 *
			 *    (n)          (s)
			 *    / \          / \
			 *  (x) (s)  ->  (x) (c)
			 *        \
			 *        (c)
			 */
			parent = successor;
			child2 = successor->rb_right;

			augment->copy(node, successor);
		} else {
			/*
			 * Case 3: node's successor is leftmost under
			 * node's right child subtree
			 *
			 *    (n)          (s)
			 *    / \          / \
			 *  (x) (y)  ->  (x) (y)
			 *      /            /
			 *    (p)          (p)
			 *    /            /
			 *  (s)          (c)
			 *    \
			 *    (c)
			 */
			do {
				parent = successor;
				successor = tmp;
				tmp = tmp->rb_left;
			} while (tmp);
			child2 = successor->rb_right;
			WRITE_ONCE(parent->rb_left, child2);
			WRITE_ONCE(successor->rb_right, child);
			rb_set_parent(child, successor);

			augment->copy(node, successor);
			augment->propagate(parent, successor);
		}

		tmp = node->rb_left;
		WRITE_ONCE(successor->rb_left, tmp);
		rb_set_parent(tmp, successor);

		pc = node->__rb_parent_color;
		tmp = __rb_parent(pc);
		__rb_change_child(node, successor, tmp, root);

		if (child2) {
			rb_set_parent_color(child2, parent, RB_BLACK);
			rebalance = NULL;
		} else {
			rebalance = rb_is_black(successor) ? parent : NULL;
		}
		successor->__rb_parent_color = pc;
		tmp = successor;
	}

	augment->propagate(tmp, NULL);
	return rebalance;
}

static __always_inline void
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
		   const struct rb_augment_callbacks *augment)
{
	struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
	if (rebalance)
		__rb_erase_color(rebalance, root, augment->rotate);
}

static __always_inline void
rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root,
			  const struct rb_augment_callbacks *augment)
{
	if (root->rb_leftmost == node)
		root->rb_leftmost = rb_next(node);
	rb_erase_augmented(node, &root->rb_root, augment);
}
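
/*
 * A minimal usage sketch for the erase side (hypothetical, mirroring the
 * item_insert() sketch earlier in this file): unlike insertion, the
 * caller does not pre-update the augmented data here; the propagate(),
 * copy() and rotate() callbacks are invoked internally as the tree is
 * restructured.
 */
static inline void item_erase(struct item *victim, struct rb_root *root,
			      const struct rb_augment_callbacks *cb)
{
	rb_erase_augmented(&victim->rb, root, cb);
}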

#endif	/* _LINUX_RBTREE_AUGMENTED_H */