kcsan.h (2241B)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. Public interface and
 * data structures to set up the runtime. See kcsan-checks.h for explicit checks
 * and modifiers. For more info please see Documentation/dev-tools/kcsan.rst.
 *
 * Copyright (C) 2019, Google LLC.
 */

#ifndef _LINUX_KCSAN_H
#define _LINUX_KCSAN_H

#include <linux/kcsan-checks.h>
#include <linux/types.h>

#ifdef CONFIG_KCSAN

/*
 * Context for each thread of execution: for tasks, this is stored in
 * task_struct, and interrupts access internal per-CPU storage.
 */
struct kcsan_ctx {
	int disable_count;	/* disable counter */
	int disable_scoped;	/* disable scoped access counter */
	int atomic_next;	/* number of following atomic ops */

	/*
	 * We distinguish between: (a) nestable atomic regions that may contain
	 * other nestable regions; and (b) flat atomic regions that do not keep
	 * track of nesting. Both (a) and (b) are entirely independent of each
	 * other, and a flat region may be started in a nestable region or
	 * vice-versa.
	 *
	 * This is required because, for example, in the annotations for
	 * seqlocks, we declare seqlock writer critical sections as (a) nestable
	 * atomic regions and reader critical sections as (b) flat atomic
	 * regions; however, we have encountered cases where seqlock reader
	 * critical sections are contained within writer critical sections (the
	 * opposite may be possible, too).
	 *
	 * To support these cases, we independently track the depth of nesting
	 * for (a), and whether the leaf level is flat for (b).
	 */
	int atomic_nest_count;
	bool in_flat_atomic;

	/*
	 * Access mask for all accesses if non-zero.
	 */
	unsigned long access_mask;

	/* List of scoped accesses; likely to be empty. */
	struct list_head scoped_accesses;

#ifdef CONFIG_KCSAN_WEAK_MEMORY
	/*
	 * Scoped access for modeling access reordering to detect missing
	 * memory barriers; only keep 1 to keep fast-path complexity manageable.
	 */
	struct kcsan_scoped_access reorder_access;
#endif
};

/**
 * kcsan_init - initialize KCSAN runtime
 */
void kcsan_init(void);

#else /* CONFIG_KCSAN */

static inline void kcsan_init(void) { }

#endif /* CONFIG_KCSAN */

#endif /* _LINUX_KCSAN_H */
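
The comment on atomic_nest_count and in_flat_atomic above describes two independent kinds of atomic regions: nestable regions tracked by a depth counter, and flat regions tracked by a single flag. As a rough illustration only (a standalone sketch with made-up helper names, not the in-kernel KCSAN implementation; the real annotations are the kcsan_*_atomic_* helpers declared in <linux/kcsan-checks.h>), the interaction between the two fields could be modeled like this:

/*
 * Standalone sketch, for illustration only. Helper names below are
 * hypothetical and do not exist in the kernel.
 */
#include <stdbool.h>
#include <stdio.h>

struct ctx_sketch {
	int atomic_nest_count;	/* depth of (a) nestable atomic regions */
	bool in_flat_atomic;	/* whether the leaf level is a (b) flat region */
};

/* (a) nestable regions: entering/leaving only adjusts the depth counter. */
static void nestable_begin(struct ctx_sketch *ctx) { ctx->atomic_nest_count++; }
static void nestable_end(struct ctx_sketch *ctx)   { ctx->atomic_nest_count--; }

/* (b) flat regions: a single flag, no nesting is tracked. */
static void flat_begin(struct ctx_sketch *ctx) { ctx->in_flat_atomic = true; }
static void flat_end(struct ctx_sketch *ctx)   { ctx->in_flat_atomic = false; }

/* An access counts as atomic while either kind of region is active. */
static bool is_atomic(const struct ctx_sketch *ctx)
{
	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}

int main(void)
{
	struct ctx_sketch ctx = { 0 };

	/*
	 * The seqlock case from the comment: a flat reader critical section
	 * contained within a nestable writer critical section.
	 */
	nestable_begin(&ctx);					/* writer section */
	flat_begin(&ctx);					/* reader section inside it */
	printf("inside both: atomic=%d\n", is_atomic(&ctx));	/* prints 1 */
	flat_end(&ctx);
	printf("writer only: atomic=%d\n", is_atomic(&ctx));	/* prints 1 */
	nestable_end(&ctx);
	printf("outside:     atomic=%d\n", is_atomic(&ctx));	/* prints 0 */
	return 0;
}

Because the depth counter and the flag are updated independently, ending the flat reader region does not disturb the still-open nestable writer region, which is exactly the property the comment motivates.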