cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

Kconfig (43031B)


      1# SPDX-License-Identifier: GPL-2.0
      2#
      3# General architecture dependent options
      4#
      5
      6#
      7# Note: arch/$(SRCARCH)/Kconfig needs to be included first so that it can
      8# override the default values in this file.
      9#
     10source "arch/$(SRCARCH)/Kconfig"
     11
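        # Because this file is parsed after arch/$(SRCARCH)/Kconfig, an
        # architecture overrides a generic default simply by declaring its own
        # default first. Illustrative sketch only ("FOO" and its symbols are
        # hypothetical, not taken from any real arch Kconfig); PGTABLE_LEVELS
        # below otherwise falls back to its generic "default 2":
        #
        #	config PGTABLE_LEVELS
        #		int
        #		default 4 if FOO_4LEVEL_PAGE_TABLES
        #		default 3
        #
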
     12menu "General architecture-dependent options"
     13
     14config CRASH_CORE
     15	bool
     16
     17config KEXEC_CORE
     18	select CRASH_CORE
     19	bool
     20
     21config KEXEC_ELF
     22	bool
     23
     24config HAVE_IMA_KEXEC
     25	bool
     26
     27config ARCH_HAS_SUBPAGE_FAULTS
     28	bool
     29	help
     30	  Select if the architecture can check permissions at sub-page
     31	  granularity (e.g. arm64 MTE). The probe_user_*() functions
     32	  must be implemented.
     33
     34config HOTPLUG_SMT
     35	bool
     36
     37config GENERIC_ENTRY
      38	bool
     39
     40config KPROBES
     41	bool "Kprobes"
     42	depends on MODULES
     43	depends on HAVE_KPROBES
     44	select KALLSYMS
     45	select TASKS_RCU if PREEMPTION
     46	help
     47	  Kprobes allows you to trap at almost any kernel address and
     48	  execute a callback function.  register_kprobe() establishes
     49	  a probepoint and specifies the callback.  Kprobes is useful
     50	  for kernel debugging, non-intrusive instrumentation and testing.
     51	  If in doubt, say "N".
     52
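        # Kprobes (and the related options below) only become available once an
        # architecture advertises support. A minimal, hypothetical sketch of the
        # usual pattern in an arch Kconfig ("FOO" is made up):
        #
        #	config FOO
        #		def_bool y
        #		select HAVE_KPROBES
        #		select HAVE_KRETPROBES
        #		select HAVE_OPTPROBES
        #
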
     53config JUMP_LABEL
     54	bool "Optimize very unlikely/likely branches"
     55	depends on HAVE_ARCH_JUMP_LABEL
     56	depends on CC_HAS_ASM_GOTO
     57	select OBJTOOL if HAVE_JUMP_LABEL_HACK
     58	help
     59	 This option enables a transparent branch optimization that
     60	 makes certain almost-always-true or almost-always-false branch
     61	 conditions even cheaper to execute within the kernel.
     62
     63	 Certain performance-sensitive kernel code, such as trace points,
     64	 scheduler functionality, networking code and KVM have such
     65	 branches and include support for this optimization technique.
     66
     67	 If it is detected that the compiler has support for "asm goto",
     68	 the kernel will compile such branches with just a nop
     69	 instruction. When the condition flag is toggled to true, the
     70	 nop will be converted to a jump instruction to execute the
     71	 conditional block of instructions.
     72
     73	 This technique lowers overhead and stress on the branch prediction
     74	 of the processor and generally makes the kernel faster. The update
      75	 of the condition is slower, but such updates are always very rare.
     76
     77	 ( On 32-bit x86, the necessary options added to the compiler
     78	   flags may increase the size of the kernel slightly. )
     79
     80config STATIC_KEYS_SELFTEST
     81	bool "Static key selftest"
     82	depends on JUMP_LABEL
     83	help
     84	  Boot time self-test of the branch patching code.
     85
     86config STATIC_CALL_SELFTEST
     87	bool "Static call selftest"
     88	depends on HAVE_STATIC_CALL
     89	help
     90	  Boot time self-test of the call patching code.
     91
     92config OPTPROBES
     93	def_bool y
     94	depends on KPROBES && HAVE_OPTPROBES
     95	select TASKS_RCU if PREEMPTION
     96
     97config KPROBES_ON_FTRACE
     98	def_bool y
     99	depends on KPROBES && HAVE_KPROBES_ON_FTRACE
    100	depends on DYNAMIC_FTRACE_WITH_REGS
    101	help
     102	 If the function tracer is enabled and the arch supports full
    103	 passing of pt_regs to function tracing, then kprobes can
    104	 optimize on top of function tracing.
    105
    106config UPROBES
    107	def_bool n
    108	depends on ARCH_SUPPORTS_UPROBES
    109	help
    110	  Uprobes is the user-space counterpart to kprobes: they
    111	  enable instrumentation applications (such as 'perf probe')
    112	  to establish unintrusive probes in user-space binaries and
    113	  libraries, by executing handler functions when the probes
    114	  are hit by user-space applications.
    115
    116	  ( These probes come in the form of single-byte breakpoints,
    117	    managed by the kernel and kept transparent to the probed
    118	    application. )
    119
    120config HAVE_64BIT_ALIGNED_ACCESS
    121	def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
    122	help
    123	  Some architectures require 64 bit accesses to be 64 bit
    124	  aligned, which also requires structs containing 64 bit values
    125	  to be 64 bit aligned too. This includes some 32 bit
    126	  architectures which can do 64 bit accesses, as well as 64 bit
    127	  architectures without unaligned access.
    128
    129	  This symbol should be selected by an architecture if 64 bit
    130	  accesses are required to be 64 bit aligned in this way even
    131	  though it is not a 64 bit architecture.
    132
    133	  See Documentation/core-api/unaligned-memory-access.rst for
    134	  more information on the topic of unaligned memory accesses.
    135
    136config HAVE_EFFICIENT_UNALIGNED_ACCESS
    137	bool
    138	help
    139	  Some architectures are unable to perform unaligned accesses
    140	  without the use of get_unaligned/put_unaligned. Others are
    141	  unable to perform such accesses efficiently (e.g. trap on
    142	  unaligned access and require fixing it up in the exception
    143	  handler.)
    144
    145	  This symbol should be selected by an architecture if it can
    146	  perform unaligned accesses efficiently to allow different
    147	  code paths to be selected for these cases. Some network
    148	  drivers, for example, could opt to not fix up alignment
    149	  problems with received packets if doing so would not help
    150	  much.
    151
    152	  See Documentation/core-api/unaligned-memory-access.rst for more
    153	  information on the topic of unaligned memory accesses.
    154
    155config ARCH_USE_BUILTIN_BSWAP
    156	bool
    157	help
    158	 Modern versions of GCC (since 4.4) have builtin functions
    159	 for handling byte-swapping. Using these, instead of the old
    160	 inline assembler that the architecture code provides in the
    161	 __arch_bswapXX() macros, allows the compiler to see what's
    162	 happening and offers more opportunity for optimisation. In
    163	 particular, the compiler will be able to combine the byteswap
    164	 with a nearby load or store and use load-and-swap or
    165	 store-and-swap instructions if the architecture has them. It
    166	 should almost *never* result in code which is worse than the
    167	 hand-coded assembler in <asm/swab.h>.  But just in case it
    168	 does, the use of the builtins is optional.
    169
    170	 Any architecture with load-and-swap or store-and-swap
    171	 instructions should set this. And it shouldn't hurt to set it
    172	 on architectures that don't have such instructions.
    173
    174config KRETPROBES
    175	def_bool y
    176	depends on KPROBES && (HAVE_KRETPROBES || HAVE_RETHOOK)
    177
    178config KRETPROBE_ON_RETHOOK
    179	def_bool y
    180	depends on HAVE_RETHOOK
    181	depends on KRETPROBES
    182	select RETHOOK
    183
    184config USER_RETURN_NOTIFIER
    185	bool
    186	depends on HAVE_USER_RETURN_NOTIFIER
    187	help
    188	  Provide a kernel-internal notification when a cpu is about to
    189	  switch to user mode.
    190
    191config HAVE_IOREMAP_PROT
    192	bool
    193
    194config HAVE_KPROBES
    195	bool
    196
    197config HAVE_KRETPROBES
    198	bool
    199
    200config HAVE_OPTPROBES
    201	bool
    202
    203config HAVE_KPROBES_ON_FTRACE
    204	bool
    205
    206config ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
    207	bool
    208	help
     209	  Since kretprobes modifies the return address on the stack, the
     210	  stacktrace may see the kretprobe trampoline address instead
     211	  of the correct one. If the architecture stacktrace code and
    212	  unwinder can adjust such entries, select this configuration.
    213
    214config HAVE_FUNCTION_ERROR_INJECTION
    215	bool
    216
    217config HAVE_NMI
    218	bool
    219
    220config HAVE_FUNCTION_DESCRIPTORS
    221	bool
    222
    223config TRACE_IRQFLAGS_SUPPORT
    224	bool
    225
    226#
    227# An arch should select this if it provides all these things:
    228#
    229#	task_pt_regs()		in asm/processor.h or asm/ptrace.h
    230#	arch_has_single_step()	if there is hardware single-step support
    231#	arch_has_block_step()	if there is hardware block-step support
    232#	asm/syscall.h		supplying asm-generic/syscall.h interface
    233#	linux/regset.h		user_regset interfaces
    234#	CORE_DUMP_USE_REGSET	#define'd in linux/elf.h
    235#	TIF_SYSCALL_TRACE	calls ptrace_report_syscall_{entry,exit}
    236#	TIF_NOTIFY_RESUME	calls resume_user_mode_work()
    237#
    238config HAVE_ARCH_TRACEHOOK
    239	bool
    240
    241config HAVE_DMA_CONTIGUOUS
    242	bool
    243
    244config GENERIC_SMP_IDLE_THREAD
    245	bool
    246
    247config GENERIC_IDLE_POLL_SETUP
    248	bool
    249
    250config ARCH_HAS_FORTIFY_SOURCE
    251	bool
    252	help
    253	  An architecture should select this when it can successfully
    254	  build and run with CONFIG_FORTIFY_SOURCE.
    255
    256#
    257# Select if the arch provides a historic keepinit alias for the retain_initrd
    258# command line option
    259#
    260config ARCH_HAS_KEEPINITRD
    261	bool
    262
    263# Select if arch has all set_memory_ro/rw/x/nx() functions in asm/cacheflush.h
    264config ARCH_HAS_SET_MEMORY
    265	bool
    266
    267# Select if arch has all set_direct_map_invalid/default() functions
    268config ARCH_HAS_SET_DIRECT_MAP
    269	bool
    270
    271#
    272# Select if the architecture provides the arch_dma_set_uncached symbol to
    273# either provide an uncached segment alias for a DMA allocation, or
    274# to remap the page tables in place.
    275#
    276config ARCH_HAS_DMA_SET_UNCACHED
    277	bool
    278
    279#
     280# Select if the architecture provides the arch_dma_clear_uncached symbol
    281# to undo an in-place page table remap for uncached access.
    282#
    283config ARCH_HAS_DMA_CLEAR_UNCACHED
    284	bool
    285
    286# Select if arch init_task must go in the __init_task_data section
    287config ARCH_TASK_STRUCT_ON_STACK
    288	bool
    289
    290# Select if arch has its private alloc_task_struct() function
    291config ARCH_TASK_STRUCT_ALLOCATOR
    292	bool
    293
    294config HAVE_ARCH_THREAD_STRUCT_WHITELIST
    295	bool
    296	depends on !ARCH_TASK_STRUCT_ALLOCATOR
    297	help
    298	  An architecture should select this to provide hardened usercopy
    299	  knowledge about what region of the thread_struct should be
    300	  whitelisted for copying to userspace. Normally this is only the
    301	  FPU registers. Specifically, arch_thread_struct_whitelist()
    302	  should be implemented. Without this, the entire thread_struct
    303	  field in task_struct will be left whitelisted.
    304
    305# Select if arch has its private alloc_thread_stack() function
    306config ARCH_THREAD_STACK_ALLOCATOR
    307	bool
    308
    309# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
    310config ARCH_WANTS_DYNAMIC_TASK_STRUCT
    311	bool
    312
    313config ARCH_WANTS_NO_INSTR
    314	bool
    315	help
    316	  An architecture should select this if the noinstr macro is being used on
    317	  functions to denote that the toolchain should avoid instrumenting such
    318	  functions and is required for correctness.
    319
    320config ARCH_32BIT_OFF_T
    321	bool
    322	depends on !64BIT
    323	help
     324	  All new 32-bit architectures should have a 64-bit off_t type on
     325	  the userspace side, corresponding to the loff_t kernel type. This
     326	  is a requirement for modern ABIs. Some existing architectures
     327	  still support 32-bit off_t. This option is enabled explicitly for
     328	  all such architectures.
    329
    330# Selected by 64 bit architectures which have a 32 bit f_tinode in struct ustat
    331config ARCH_32BIT_USTAT_F_TINODE
    332	bool
    333
    334config HAVE_ASM_MODVERSIONS
    335	bool
    336	help
    337	  This symbol should be selected by an architecture if it provides
    338	  <asm/asm-prototypes.h> to support the module versioning for symbols
    339	  exported from assembly code.
    340
    341config HAVE_REGS_AND_STACK_ACCESS_API
    342	bool
    343	help
    344	  This symbol should be selected by an architecture if it supports
    345	  the API needed to access registers and stack entries from pt_regs,
     346	  declared in asm/ptrace.h.
     347	  For example, the kprobes-based event tracer needs this API.
    348
    349config HAVE_RSEQ
    350	bool
    351	depends on HAVE_REGS_AND_STACK_ACCESS_API
    352	help
    353	  This symbol should be selected by an architecture if it
    354	  supports an implementation of restartable sequences.
    355
    356config HAVE_FUNCTION_ARG_ACCESS_API
    357	bool
    358	help
    359	  This symbol should be selected by an architecture if it supports
    360	  the API needed to access function arguments from pt_regs,
     361	  declared in asm/ptrace.h.
    362
    363config HAVE_HW_BREAKPOINT
    364	bool
    365	depends on PERF_EVENTS
    366
    367config HAVE_MIXED_BREAKPOINTS_REGS
    368	bool
    369	depends on HAVE_HW_BREAKPOINT
    370	help
     371	  Depending on the arch implementation of hardware breakpoints,
     372	  some of them have separate registers for data and instruction
     373	  breakpoint addresses, while others have mixed registers that store
     374	  them but define the access type in a control register.
     375	  Select this option if your arch implements breakpoints in the
     376	  latter fashion.
    377
    378config HAVE_USER_RETURN_NOTIFIER
    379	bool
    380
    381config HAVE_PERF_EVENTS_NMI
    382	bool
    383	help
    384	  System hardware can generate an NMI using the perf event
     385	  subsystem.  It also supports calculating CPU cycle events
     386	  to determine how many clock cycles elapse in a given period.
    387
    388config HAVE_HARDLOCKUP_DETECTOR_PERF
    389	bool
    390	depends on HAVE_PERF_EVENTS_NMI
    391	help
    392	  The arch chooses to use the generic perf-NMI-based hardlockup
    393	  detector. Must define HAVE_PERF_EVENTS_NMI.
    394
    395config HAVE_NMI_WATCHDOG
    396	depends on HAVE_NMI
    397	bool
    398	help
    399	  The arch provides a low level NMI watchdog. It provides
    400	  asm/nmi.h, and defines its own arch_touch_nmi_watchdog().
    401
    402config HAVE_HARDLOCKUP_DETECTOR_ARCH
    403	bool
    404	select HAVE_NMI_WATCHDOG
    405	help
     406	  The arch chooses to provide its own hardlockup detector, which is
     407	  a superset of HAVE_NMI_WATCHDOG. It also conforms to the config
     408	  interfaces and parameters provided by the hardlockup detector subsystem.
    409
    410config HAVE_PERF_REGS
    411	bool
    412	help
    413	  Support selective register dumps for perf events. This includes
     414	  bit-mapping of each register and a unique architecture id.
    415
    416config HAVE_PERF_USER_STACK_DUMP
    417	bool
    418	help
    419	  Support user stack dumps for perf event samples. This needs
    420	  access to the user stack pointer which is not unified across
    421	  architectures.
    422
    423config HAVE_ARCH_JUMP_LABEL
    424	bool
    425
    426config HAVE_ARCH_JUMP_LABEL_RELATIVE
    427	bool
    428
    429config MMU_GATHER_TABLE_FREE
    430	bool
    431
    432config MMU_GATHER_RCU_TABLE_FREE
    433	bool
    434	select MMU_GATHER_TABLE_FREE
    435
    436config MMU_GATHER_PAGE_SIZE
    437	bool
    438
    439config MMU_GATHER_NO_RANGE
    440	bool
    441
    442config MMU_GATHER_NO_GATHER
    443	bool
    444	depends on MMU_GATHER_TABLE_FREE
    445
    446config ARCH_WANT_IRQS_OFF_ACTIVATE_MM
    447	bool
    448	help
    449	  Temporary select until all architectures can be converted to have
    450	  irqs disabled over activate_mm. Architectures that do IPI based TLB
    451	  shootdowns should enable this.
    452
    453config ARCH_HAVE_NMI_SAFE_CMPXCHG
    454	bool
    455
    456config HAVE_ALIGNED_STRUCT_PAGE
    457	bool
    458	help
    459	  This makes sure that struct pages are double word aligned and that
    460	  e.g. the SLUB allocator can perform double word atomic operations
    461	  on a struct page for better performance. However selecting this
    462	  might increase the size of a struct page by a word.
    463
    464config HAVE_CMPXCHG_LOCAL
    465	bool
    466
    467config HAVE_CMPXCHG_DOUBLE
    468	bool
    469
    470config ARCH_WEAK_RELEASE_ACQUIRE
    471	bool
    472
    473config ARCH_WANT_IPC_PARSE_VERSION
    474	bool
    475
    476config ARCH_WANT_COMPAT_IPC_PARSE_VERSION
    477	bool
    478
    479config ARCH_WANT_OLD_COMPAT_IPC
    480	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
    481	bool
    482
    483config HAVE_ARCH_SECCOMP
    484	bool
    485	help
    486	  An arch should select this symbol to support seccomp mode 1 (the fixed
     487	  syscall policy), and must provide an override for __NR_seccomp_sigreturn,
    488	  and compat syscalls if the asm-generic/seccomp.h defaults need adjustment:
    489	  - __NR_seccomp_read_32
    490	  - __NR_seccomp_write_32
    491	  - __NR_seccomp_exit_32
    492	  - __NR_seccomp_sigreturn_32
    493
    494config HAVE_ARCH_SECCOMP_FILTER
    495	bool
    496	select HAVE_ARCH_SECCOMP
    497	help
    498	  An arch should select this symbol if it provides all of these things:
    499	  - all the requirements for HAVE_ARCH_SECCOMP
    500	  - syscall_get_arch()
    501	  - syscall_get_arguments()
    502	  - syscall_rollback()
    503	  - syscall_set_return_value()
    504	  - SIGSYS siginfo_t support
    505	  - secure_computing is called from a ptrace_event()-safe context
    506	  - secure_computing return value is checked and a return value of -1
    507	    results in the system call being skipped immediately.
    508	  - seccomp syscall wired up
    509	  - if !HAVE_SPARSE_SYSCALL_NR, have SECCOMP_ARCH_NATIVE,
    510	    SECCOMP_ARCH_NATIVE_NR, SECCOMP_ARCH_NATIVE_NAME defined. If
    511	    COMPAT is supported, have the SECCOMP_ARCH_COMPAT* defines too.
    512
    513config SECCOMP
    514	prompt "Enable seccomp to safely execute untrusted bytecode"
    515	def_bool y
    516	depends on HAVE_ARCH_SECCOMP
    517	help
    518	  This kernel feature is useful for number crunching applications
    519	  that may need to handle untrusted bytecode during their
    520	  execution. By using pipes or other transports made available
    521	  to the process as file descriptors supporting the read/write
    522	  syscalls, it's possible to isolate those applications in their
    523	  own address space using seccomp. Once seccomp is enabled via
    524	  prctl(PR_SET_SECCOMP) or the seccomp() syscall, it cannot be
    525	  disabled and the task is only allowed to execute a few safe
    526	  syscalls defined by each seccomp mode.
    527
    528	  If unsure, say Y.
    529
    530config SECCOMP_FILTER
    531	def_bool y
    532	depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
    533	help
    534	  Enable tasks to build secure computing environments defined
    535	  in terms of Berkeley Packet Filter programs which implement
     536	  task-defined system call filtering policies.
    537
    538	  See Documentation/userspace-api/seccomp_filter.rst for details.
    539
    540config SECCOMP_CACHE_DEBUG
    541	bool "Show seccomp filter cache status in /proc/pid/seccomp_cache"
    542	depends on SECCOMP_FILTER && !HAVE_SPARSE_SYSCALL_NR
    543	depends on PROC_FS
    544	help
    545	  This enables the /proc/pid/seccomp_cache interface to monitor
    546	  seccomp cache data. The file format is subject to change. Reading
    547	  the file requires CAP_SYS_ADMIN.
    548
    549	  This option is for debugging only. Enabling presents the risk that
    550	  an adversary may be able to infer the seccomp filter logic.
    551
    552	  If unsure, say N.
    553
    554config HAVE_ARCH_STACKLEAK
    555	bool
    556	help
    557	  An architecture should select this if it has the code which
    558	  fills the used part of the kernel stack with the STACKLEAK_POISON
    559	  value before returning from system calls.
    560
    561config HAVE_STACKPROTECTOR
    562	bool
    563	help
    564	  An arch should select this symbol if:
    565	  - it has implemented a stack canary (e.g. __stack_chk_guard)
    566
    567config STACKPROTECTOR
    568	bool "Stack Protector buffer overflow detection"
    569	depends on HAVE_STACKPROTECTOR
    570	depends on $(cc-option,-fstack-protector)
    571	default y
    572	help
    573	  This option turns on the "stack-protector" GCC feature. This
    574	  feature puts, at the beginning of functions, a canary value on
    575	  the stack just before the return address, and validates
    576	  the value just before actually returning.  Stack based buffer
    577	  overflows (that need to overwrite this return address) now also
    578	  overwrite the canary, which gets detected and the attack is then
    579	  neutralized via a kernel panic.
    580
    581	  Functions will have the stack-protector canary logic added if they
    582	  have an 8-byte or larger character array on the stack.
    583
    584	  This feature requires gcc version 4.2 or above, or a distribution
    585	  gcc with the feature backported ("-fstack-protector").
    586
    587	  On an x86 "defconfig" build, this feature adds canary checks to
    588	  about 3% of all kernel functions, which increases kernel code size
    589	  by about 0.3%.
    590
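        # "$(cc-option,<flag>)" is a Kconfig preprocessor macro from
        # scripts/Kconfig.include that evaluates to y only when $(CC) accepts
        # the flag, so STACKPROTECTOR above (and STACKPROTECTOR_STRONG below)
        # are only offered with a capable compiler. The same idiom with a
        # hypothetical flag, as an illustrative sketch:
        #
        #	config CC_HAS_FOO_PROTECTION
        #		def_bool $(cc-option,-ffoo-protection)
        #
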
    591config STACKPROTECTOR_STRONG
    592	bool "Strong Stack Protector"
    593	depends on STACKPROTECTOR
    594	depends on $(cc-option,-fstack-protector-strong)
    595	default y
    596	help
    597	  Functions will have the stack-protector canary logic added in any
    598	  of the following conditions:
    599
    600	  - local variable's address used as part of the right hand side of an
    601	    assignment or function argument
    602	  - local variable is an array (or union containing an array),
    603	    regardless of array type or length
    604	  - uses register local variables
    605
    606	  This feature requires gcc version 4.9 or above, or a distribution
    607	  gcc with the feature backported ("-fstack-protector-strong").
    608
    609	  On an x86 "defconfig" build, this feature adds canary checks to
    610	  about 20% of all kernel functions, which increases the kernel code
    611	  size by about 2%.
    612
    613config ARCH_SUPPORTS_SHADOW_CALL_STACK
    614	bool
    615	help
    616	  An architecture should select this if it supports the compiler's
    617	  Shadow Call Stack and implements runtime support for shadow stack
    618	  switching.
    619
    620config SHADOW_CALL_STACK
    621	bool "Shadow Call Stack"
    622	depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
    623	depends on DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
    624	help
    625	  This option enables the compiler's Shadow Call Stack, which
    626	  uses a shadow stack to protect function return addresses from
    627	  being overwritten by an attacker. More information can be found
    628	  in the compiler's documentation:
    629
    630	  - Clang: https://clang.llvm.org/docs/ShadowCallStack.html
    631	  - GCC: https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html#Instrumentation-Options
    632
    633	  Note that security guarantees in the kernel differ from the
    634	  ones documented for user space. The kernel must store addresses
    635	  of shadow stacks in memory, which means an attacker capable of
    636	  reading and writing arbitrary memory may be able to locate them
    637	  and hijack control flow by modifying the stacks.
    638
    639config LTO
    640	bool
    641	help
    642	  Selected if the kernel will be built using the compiler's LTO feature.
    643
    644config LTO_CLANG
    645	bool
    646	select LTO
    647	help
    648	  Selected if the kernel will be built using Clang's LTO feature.
    649
    650config ARCH_SUPPORTS_LTO_CLANG
    651	bool
    652	help
    653	  An architecture should select this option if it supports:
    654	  - compiling with Clang,
    655	  - compiling inline assembly with Clang's integrated assembler,
    656	  - and linking with LLD.
    657
    658config ARCH_SUPPORTS_LTO_CLANG_THIN
    659	bool
    660	help
    661	  An architecture should select this option if it can support Clang's
    662	  ThinLTO mode.
    663
    664config HAS_LTO_CLANG
    665	def_bool y
    666	depends on CC_IS_CLANG && LD_IS_LLD && AS_IS_LLVM
    667	depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm)
    668	depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm)
    669	depends on ARCH_SUPPORTS_LTO_CLANG
    670	depends on !FTRACE_MCOUNT_USE_RECORDMCOUNT
    671	depends on !KASAN || KASAN_HW_TAGS
    672	depends on !GCOV_KERNEL
    673	help
    674	  The compiler and Kconfig options support building with Clang's
    675	  LTO.
    676
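        # "$(success,<command>)" (also from scripts/Kconfig.include) evaluates
        # to y when the command exits with status 0; above it is used to verify
        # that $(NM) and $(AR) are the LLVM tools. A hedged sketch of the same
        # idiom with a hypothetical symbol name:
        #
        #	config TOOLCHAIN_NM_IS_LLVM
        #		def_bool $(success,$(NM) --help | head -n 1 | grep -qi llvm)
        #
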
    677choice
    678	prompt "Link Time Optimization (LTO)"
    679	default LTO_NONE
    680	help
    681	  This option enables Link Time Optimization (LTO), which allows the
    682	  compiler to optimize binaries globally.
    683
    684	  If unsure, select LTO_NONE. Note that LTO is very resource-intensive
    685	  so it's disabled by default.
    686
    687config LTO_NONE
    688	bool "None"
    689	help
    690	  Build the kernel normally, without Link Time Optimization (LTO).
    691
    692config LTO_CLANG_FULL
    693	bool "Clang Full LTO (EXPERIMENTAL)"
    694	depends on HAS_LTO_CLANG
    695	depends on !COMPILE_TEST
    696	select LTO_CLANG
    697	help
     698	  This option enables Clang's full Link Time Optimization (LTO), which
     699	  allows the compiler to optimize the kernel globally. If you enable
     700	  this option, the compiler generates LLVM bitcode instead of ELF
     701	  object files, and the actual compilation from bitcode happens at
     702	  the LTO link step, which may take several minutes depending on the
     703	  kernel configuration. More information can be found from LLVM's
     704	  documentation:
    705
    706	    https://llvm.org/docs/LinkTimeOptimization.html
    707
    708	  During link time, this option can use a large amount of RAM, and
    709	  may take much longer than the ThinLTO option.
    710
    711config LTO_CLANG_THIN
    712	bool "Clang ThinLTO (EXPERIMENTAL)"
    713	depends on HAS_LTO_CLANG && ARCH_SUPPORTS_LTO_CLANG_THIN
    714	select LTO_CLANG
    715	help
    716	  This option enables Clang's ThinLTO, which allows for parallel
    717	  optimization and faster incremental compiles compared to the
    718	  CONFIG_LTO_CLANG_FULL option. More information can be found
    719	  from Clang's documentation:
    720
    721	    https://clang.llvm.org/docs/ThinLTO.html
    722
    723	  If unsure, say Y.
    724endchoice
    725
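        # Exactly one symbol from the LTO choice above is active in a given
        # configuration. A hedged sketch of picking ThinLTO from a .config or
        # defconfig fragment (assumes an LLVM toolchain, e.g. a build with
        # LLVM=1, so that HAS_LTO_CLANG is satisfied):
        #
        #	CONFIG_LTO_CLANG_THIN=y
        #	# CONFIG_LTO_NONE is not set
        #
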
    726config ARCH_SUPPORTS_CFI_CLANG
    727	bool
    728	help
    729	  An architecture should select this option if it can support Clang's
    730	  Control-Flow Integrity (CFI) checking.
    731
    732config CFI_CLANG
    733	bool "Use Clang's Control Flow Integrity (CFI)"
    734	depends on LTO_CLANG && ARCH_SUPPORTS_CFI_CLANG
    735	depends on CLANG_VERSION >= 140000
    736	select KALLSYMS
    737	help
    738	  This option enables Clang’s forward-edge Control Flow Integrity
    739	  (CFI) checking, where the compiler injects a runtime check to each
    740	  indirect function call to ensure the target is a valid function with
    741	  the correct static type. This restricts possible call targets and
    742	  makes it more difficult for an attacker to exploit bugs that allow
    743	  the modification of stored function pointers. More information can be
    744	  found from Clang's documentation:
    745
    746	    https://clang.llvm.org/docs/ControlFlowIntegrity.html
    747
    748config CFI_CLANG_SHADOW
    749	bool "Use CFI shadow to speed up cross-module checks"
    750	default y
    751	depends on CFI_CLANG && MODULES
    752	help
    753	  If you select this option, the kernel builds a fast look-up table of
    754	  CFI check functions in loaded modules to reduce performance overhead.
    755
    756	  If unsure, say Y.
    757
    758config CFI_PERMISSIVE
    759	bool "Use CFI in permissive mode"
    760	depends on CFI_CLANG
    761	help
    762	  When selected, Control Flow Integrity (CFI) violations result in a
    763	  warning instead of a kernel panic. This option should only be used
    764	  for finding indirect call type mismatches during development.
    765
    766	  If unsure, say N.
    767
    768config HAVE_ARCH_WITHIN_STACK_FRAMES
    769	bool
    770	help
    771	  An architecture should select this if it can walk the kernel stack
    772	  frames to determine if an object is part of either the arguments
    773	  or local variables (i.e. that it excludes saved return addresses,
    774	  and similar) by implementing an inline arch_within_stack_frames(),
    775	  which is used by CONFIG_HARDENED_USERCOPY.
    776
    777config HAVE_CONTEXT_TRACKING
    778	bool
    779	help
     780	  Provide the kernel/user boundary probes necessary for subsystems
     781	  that need them, such as userspace RCU extended quiescent state.
     782	  Syscalls need to be wrapped inside user_exit()-user_enter(), either
     783	  optimized behind a static key or through the slow path using the
     784	  TIF_NOHZ flag. Exception handlers must be wrapped as well. Irqs are
     785	  already protected inside rcu_irq_enter/rcu_irq_exit() but preemption
     786	  or signal handling on irq exit still needs to be protected.
    787
    788config HAVE_CONTEXT_TRACKING_OFFSTACK
    789	bool
    790	help
    791	  Architecture neither relies on exception_enter()/exception_exit()
    792	  nor on schedule_user(). Also preempt_schedule_notrace() and
    793	  preempt_schedule_irq() can't be called in a preemptible section
    794	  while context tracking is CONTEXT_USER. This feature reflects a sane
    795	  entry implementation where the following requirements are met on
     796	  critical entry code, i.e. before user_exit() or after user_enter():
    797
    798	  - Critical entry code isn't preemptible (or better yet:
    799	    not interruptible).
    800	  - No use of RCU read side critical sections, unless rcu_nmi_enter()
    801	    got called.
    802	  - No use of instrumentation, unless instrumentation_begin() got
    803	    called.
    804
    805config HAVE_TIF_NOHZ
    806	bool
    807	help
    808	  Arch relies on TIF_NOHZ and syscall slow path to implement context
    809	  tracking calls to user_enter()/user_exit().
    810
    811config HAVE_VIRT_CPU_ACCOUNTING
    812	bool
    813
    814config HAVE_VIRT_CPU_ACCOUNTING_IDLE
    815	bool
    816	help
    817	  Architecture has its own way to account idle CPU time and therefore
    818	  doesn't implement vtime_account_idle().
    819
    820config ARCH_HAS_SCALED_CPUTIME
    821	bool
    822
    823config HAVE_VIRT_CPU_ACCOUNTING_GEN
    824	bool
    825	default y if 64BIT
    826	help
    827	  With VIRT_CPU_ACCOUNTING_GEN, cputime_t becomes 64-bit.
    828	  Before enabling this option, arch code must be audited
    829	  to ensure there are no races in concurrent read/write of
    830	  cputime_t. For example, reading/writing 64-bit cputime_t on
    831	  some 32-bit arches may require multiple accesses, so proper
    832	  locking is needed to protect against concurrent accesses.
    833
    834config HAVE_IRQ_TIME_ACCOUNTING
    835	bool
    836	help
    837	  Archs need to ensure they use a high enough resolution clock to
    838	  support irq time accounting and then call enable_sched_clock_irqtime().
    839
    840config HAVE_MOVE_PUD
    841	bool
    842	help
    843	  Architectures that select this are able to move page tables at the
    844	  PUD level. If there are only 3 page table levels, the move effectively
    845	  happens at the PGD level.
    846
    847config HAVE_MOVE_PMD
    848	bool
    849	help
    850	  Archs that select this are able to move page tables at the PMD level.
    851
    852config HAVE_ARCH_TRANSPARENT_HUGEPAGE
    853	bool
    854
    855config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
    856	bool
    857
    858config HAVE_ARCH_HUGE_VMAP
    859	bool
    860
    861#
    862#  Archs that select this would be capable of PMD-sized vmaps (i.e.,
    863#  arch_vmap_pmd_supported() returns true). The VM_ALLOW_HUGE_VMAP flag
    864#  must be used to enable allocations to use hugepages.
    865#
    866config HAVE_ARCH_HUGE_VMALLOC
    867	depends on HAVE_ARCH_HUGE_VMAP
    868	bool
    869
    870config ARCH_WANT_HUGE_PMD_SHARE
    871	bool
    872
    873config HAVE_ARCH_SOFT_DIRTY
    874	bool
    875
    876config HAVE_MOD_ARCH_SPECIFIC
    877	bool
    878	help
    879	  The arch uses struct mod_arch_specific to store data.  Many arches
    880	  just need a simple module loader without arch specific data - those
    881	  should not enable this.
    882
    883config MODULES_USE_ELF_RELA
    884	bool
    885	help
    886	  Modules only use ELF RELA relocations.  Modules with ELF REL
    887	  relocations will give an error.
    888
    889config MODULES_USE_ELF_REL
    890	bool
    891	help
    892	  Modules only use ELF REL relocations.  Modules with ELF RELA
    893	  relocations will give an error.
    894
    895config ARCH_WANTS_MODULES_DATA_IN_VMALLOC
    896	bool
    897	help
    898	  For architectures like powerpc/32 which have constraints on module
     899	  allocation and need to allocate module data outside of the module area.
    900
    901config HAVE_IRQ_EXIT_ON_IRQ_STACK
    902	bool
    903	help
    904	  Architecture doesn't only execute the irq handler on the irq stack
    905	  but also irq_exit(). This way we can process softirqs on this irq
    906	  stack instead of switching to a new one when we call __do_softirq()
     907	  at the end of a hardirq.
    908	  This spares a stack switch and improves cache usage on softirq
    909	  processing.
    910
    911config HAVE_SOFTIRQ_ON_OWN_STACK
    912	bool
    913	help
    914	  Architecture provides a function to run __do_softirq() on a
    915	  separate stack.
    916
    917config ALTERNATE_USER_ADDRESS_SPACE
    918	bool
    919	help
    920	  Architectures set this when the CPU uses separate address
    921	  spaces for kernel and user space pointers. In this case, the
    922	  access_ok() check on a __user pointer is skipped.
    923
    924config PGTABLE_LEVELS
    925	int
    926	default 2
    927
    928config ARCH_HAS_ELF_RANDOMIZE
    929	bool
    930	help
    931	  An architecture supports choosing randomized locations for
    932	  stack, mmap, brk, and ET_DYN. Defined functions:
    933	  - arch_mmap_rnd()
    934	  - arch_randomize_brk()
    935
    936config HAVE_ARCH_MMAP_RND_BITS
    937	bool
    938	help
    939	  An arch should select this symbol if it supports setting a variable
    940	  number of bits for use in establishing the base address for mmap
    941	  allocations, has MMU enabled and provides values for both:
    942	  - ARCH_MMAP_RND_BITS_MIN
    943	  - ARCH_MMAP_RND_BITS_MAX
    944
    945config HAVE_EXIT_THREAD
    946	bool
    947	help
    948	  An architecture implements exit_thread.
    949
    950config ARCH_MMAP_RND_BITS_MIN
    951	int
    952
    953config ARCH_MMAP_RND_BITS_MAX
    954	int
    955
    956config ARCH_MMAP_RND_BITS_DEFAULT
    957	int
    958
    959config ARCH_MMAP_RND_BITS
    960	int "Number of bits to use for ASLR of mmap base address" if EXPERT
    961	range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
    962	default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
    963	default ARCH_MMAP_RND_BITS_MIN
    964	depends on HAVE_ARCH_MMAP_RND_BITS
    965	help
    966	  This value can be used to select the number of bits to use to
    967	  determine the random offset to the base address of vma regions
    968	  resulting from mmap allocations. This value will be bounded
    969	  by the architecture's minimum and maximum supported values.
    970
    971	  This value can be changed after boot using the
     972	  /proc/sys/vm/mmap_rnd_bits tunable.
    973
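        # A hypothetical sketch of the values an architecture would provide to
        # make the ARCH_MMAP_RND_BITS prompt above usable (the real numbers
        # depend on page size and address-space layout; the "int" type is
        # already declared here, so the arch only adds defaults):
        #
        #	config ARCH_MMAP_RND_BITS_MIN
        #		default 8
        #
        #	config ARCH_MMAP_RND_BITS_MAX
        #		default 24 if 64BIT
        #		default 16
        #
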
    974config HAVE_ARCH_MMAP_RND_COMPAT_BITS
    975	bool
    976	help
    977	  An arch should select this symbol if it supports running applications
    978	  in compatibility mode, supports setting a variable number of bits for
    979	  use in establishing the base address for mmap allocations, has MMU
    980	  enabled and provides values for both:
    981	  - ARCH_MMAP_RND_COMPAT_BITS_MIN
    982	  - ARCH_MMAP_RND_COMPAT_BITS_MAX
    983
    984config ARCH_MMAP_RND_COMPAT_BITS_MIN
    985	int
    986
    987config ARCH_MMAP_RND_COMPAT_BITS_MAX
    988	int
    989
    990config ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
    991	int
    992
    993config ARCH_MMAP_RND_COMPAT_BITS
    994	int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
    995	range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
    996	default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
    997	default ARCH_MMAP_RND_COMPAT_BITS_MIN
    998	depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
    999	help
   1000	  This value can be used to select the number of bits to use to
   1001	  determine the random offset to the base address of vma regions
    1002	  resulting from mmap allocations for compatible applications. This
   1003	  value will be bounded by the architecture's minimum and maximum
   1004	  supported values.
   1005
   1006	  This value can be changed after boot using the
    1007	  /proc/sys/vm/mmap_rnd_compat_bits tunable.
   1008
   1009config HAVE_ARCH_COMPAT_MMAP_BASES
   1010	bool
   1011	help
    1012	  This allows 64-bit applications to invoke the 32-bit mmap() syscall
    1013	  and, vice versa, 32-bit applications to call 64-bit mmap().
    1014	  Required for applications doing syscalls of different bitness.
   1015
   1016config PAGE_SIZE_LESS_THAN_64KB
   1017	def_bool y
   1018	depends on !ARM64_64K_PAGES
   1019	depends on !IA64_PAGE_SIZE_64KB
   1020	depends on !PAGE_SIZE_64KB
   1021	depends on !PARISC_PAGE_SIZE_64KB
   1022	depends on PAGE_SIZE_LESS_THAN_256KB
   1023
   1024config PAGE_SIZE_LESS_THAN_256KB
   1025	def_bool y
   1026	depends on !PAGE_SIZE_256KB
   1027
    1028# This allows using a set of generic functions to determine the mmap base
    1029# address by giving priority to the top-down scheme only if the process
   1030# is not in legacy mode (compat task, unlimited stack size or
   1031# sysctl_legacy_va_layout).
   1032# Architecture that selects this option can provide its own version of:
   1033# - STACK_RND_MASK
   1034config ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
   1035	bool
   1036	depends on MMU
   1037	select ARCH_HAS_ELF_RANDOMIZE
   1038
   1039config HAVE_OBJTOOL
   1040	bool
   1041
   1042config HAVE_JUMP_LABEL_HACK
   1043	bool
   1044
   1045config HAVE_NOINSTR_HACK
   1046	bool
   1047
   1048config HAVE_NOINSTR_VALIDATION
   1049	bool
   1050
   1051config HAVE_UACCESS_VALIDATION
   1052	bool
   1053	select OBJTOOL
   1054
   1055config HAVE_STACK_VALIDATION
   1056	bool
   1057	help
   1058	  Architecture supports objtool compile-time frame pointer rule
   1059	  validation.
   1060
   1061config HAVE_RELIABLE_STACKTRACE
   1062	bool
   1063	help
   1064	  Architecture has either save_stack_trace_tsk_reliable() or
   1065	  arch_stack_walk_reliable() function which only returns a stack trace
   1066	  if it can guarantee the trace is reliable.
   1067
   1068config HAVE_ARCH_HASH
   1069	bool
   1070	default n
   1071	help
   1072	  If this is set, the architecture provides an <asm/hash.h>
   1073	  file which provides platform-specific implementations of some
   1074	  functions in <linux/hash.h> or fs/namei.c.
   1075
   1076config HAVE_ARCH_NVRAM_OPS
   1077	bool
   1078
   1079config ISA_BUS_API
   1080	def_bool ISA
   1081
   1082#
   1083# ABI hall of shame
   1084#
   1085config CLONE_BACKWARDS
   1086	bool
   1087	help
   1088	  Architecture has tls passed as the 4th argument of clone(2),
   1089	  not the 5th one.
   1090
   1091config CLONE_BACKWARDS2
   1092	bool
   1093	help
   1094	  Architecture has the first two arguments of clone(2) swapped.
   1095
   1096config CLONE_BACKWARDS3
   1097	bool
   1098	help
   1099	  Architecture has tls passed as the 3rd argument of clone(2),
   1100	  not the 5th one.
   1101
   1102config ODD_RT_SIGACTION
   1103	bool
   1104	help
   1105	  Architecture has unusual rt_sigaction(2) arguments
   1106
   1107config OLD_SIGSUSPEND
   1108	bool
   1109	help
   1110	  Architecture has old sigsuspend(2) syscall, of one-argument variety
   1111
   1112config OLD_SIGSUSPEND3
   1113	bool
   1114	help
   1115	  Even weirder antique ABI - three-argument sigsuspend(2)
   1116
   1117config OLD_SIGACTION
   1118	bool
   1119	help
   1120	  Architecture has old sigaction(2) syscall.  Nope, not the same
   1121	  as OLD_SIGSUSPEND | OLD_SIGSUSPEND3 - alpha has sigsuspend(2),
   1122	  but fairly different variant of sigaction(2), thanks to OSF/1
   1123	  compatibility...
   1124
   1125config COMPAT_OLD_SIGACTION
   1126	bool
   1127
   1128config COMPAT_32BIT_TIME
   1129	bool "Provide system calls for 32-bit time_t"
   1130	default !64BIT || COMPAT
   1131	help
   1132	  This enables 32 bit time_t support in addition to 64 bit time_t support.
   1133	  This is relevant on all 32-bit architectures, and 64-bit architectures
   1134	  as part of compat syscall handling.
   1135
   1136config ARCH_NO_PREEMPT
   1137	bool
   1138
   1139config ARCH_EPHEMERAL_INODES
   1140	def_bool n
   1141	help
   1142	  An arch should select this symbol if it doesn't keep track of inode
   1143	  instances on its own, but instead relies on something else (e.g. the
    1144	  host kernel for a UML kernel).
   1145
   1146config ARCH_SUPPORTS_RT
   1147	bool
   1148
   1149config CPU_NO_EFFICIENT_FFS
   1150	def_bool n
   1151
   1152config HAVE_ARCH_VMAP_STACK
   1153	def_bool n
   1154	help
   1155	  An arch should select this symbol if it can support kernel stacks
   1156	  in vmalloc space.  This means:
   1157
   1158	  - vmalloc space must be large enough to hold many kernel stacks.
   1159	    This may rule out many 32-bit architectures.
   1160
   1161	  - Stacks in vmalloc space need to work reliably.  For example, if
   1162	    vmap page tables are created on demand, either this mechanism
   1163	    needs to work while the stack points to a virtual address with
   1164	    unpopulated page tables or arch code (switch_to() and switch_mm(),
   1165	    most likely) needs to ensure that the stack's page table entries
   1166	    are populated before running on a possibly unpopulated stack.
   1167
   1168	  - If the stack overflows into a guard page, something reasonable
   1169	    should happen.  The definition of "reasonable" is flexible, but
   1170	    instantly rebooting without logging anything would be unfriendly.
   1171
   1172config VMAP_STACK
   1173	default y
   1174	bool "Use a virtually-mapped stack"
   1175	depends on HAVE_ARCH_VMAP_STACK
   1176	depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC
   1177	help
    1178	  Enable this if you want to use virtually-mapped kernel stacks
   1179	  with guard pages.  This causes kernel stack overflows to be
   1180	  caught immediately rather than causing difficult-to-diagnose
   1181	  corruption.
   1182
   1183	  To use this with software KASAN modes, the architecture must support
   1184	  backing virtual mappings with real shadow memory, and KASAN_VMALLOC
   1185	  must be enabled.
   1186
   1187config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
   1188	def_bool n
   1189	help
   1190	  An arch should select this symbol if it can support kernel stack
   1191	  offset randomization with calls to add_random_kstack_offset()
   1192	  during syscall entry and choose_random_kstack_offset() during
   1193	  syscall exit. Careful removal of -fstack-protector-strong and
   1194	  -fstack-protector should also be applied to the entry code and
   1195	  closely examined, as the artificial stack bump looks like an array
   1196	  to the compiler, so it will attempt to add canary checks regardless
   1197	  of the static branch state.
   1198
   1199config RANDOMIZE_KSTACK_OFFSET
   1200	bool "Support for randomizing kernel stack offset on syscall entry" if EXPERT
   1201	default y
   1202	depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
   1203	depends on INIT_STACK_NONE || !CC_IS_CLANG || CLANG_VERSION >= 140000
   1204	help
   1205	  The kernel stack offset can be randomized (after pt_regs) by
   1206	  roughly 5 bits of entropy, frustrating memory corruption
   1207	  attacks that depend on stack address determinism or
   1208	  cross-syscall address exposures.
   1209
   1210	  The feature is controlled via the "randomize_kstack_offset=on/off"
   1211	  kernel boot param, and if turned off has zero overhead due to its use
   1212	  of static branches (see JUMP_LABEL).
   1213
   1214	  If unsure, say Y.
   1215
   1216config RANDOMIZE_KSTACK_OFFSET_DEFAULT
   1217	bool "Default state of kernel stack offset randomization"
   1218	depends on RANDOMIZE_KSTACK_OFFSET
   1219	help
   1220	  Kernel stack offset randomization is controlled by kernel boot param
   1221	  "randomize_kstack_offset=on/off", and this config chooses the default
   1222	  boot state.
   1223
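        # A hedged sketch of how the two knobs combine: the Kconfig default only
        # picks the boot-time starting state, and the command line can flip it
        # either way at boot:
        #
        #	CONFIG_RANDOMIZE_KSTACK_OFFSET=y
        #	CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
        #	# still overridable with randomize_kstack_offset=off on the
        #	# kernel command line (and =on for the opposite default)
        #
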
   1224config ARCH_OPTIONAL_KERNEL_RWX
   1225	def_bool n
   1226
   1227config ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
   1228	def_bool n
   1229
   1230config ARCH_HAS_STRICT_KERNEL_RWX
   1231	def_bool n
   1232
   1233config STRICT_KERNEL_RWX
   1234	bool "Make kernel text and rodata read-only" if ARCH_OPTIONAL_KERNEL_RWX
   1235	depends on ARCH_HAS_STRICT_KERNEL_RWX
   1236	default !ARCH_OPTIONAL_KERNEL_RWX || ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
   1237	help
   1238	  If this is set, kernel text and rodata memory will be made read-only,
   1239	  and non-text memory will be made non-executable. This provides
   1240	  protection against certain security exploits (e.g. executing the heap
    1241	  or modifying text).
   1242
   1243	  These features are considered standard security practice these days.
   1244	  You should say Y here in almost all cases.
   1245
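        # Note the conditional prompt: 'bool "..." if ARCH_OPTIONAL_KERNEL_RWX'
        # only exposes the question on architectures where strict RWX is
        # optional; everywhere else that supports it, the default expression
        # above forces the value to y. The same idiom as a hypothetical sketch:
        #
        #	config FOO_HARDENING
        #		bool "Enable foo hardening" if ARCH_FOO_OPTIONAL
        #		default y
        #
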
   1246config ARCH_HAS_STRICT_MODULE_RWX
   1247	def_bool n
   1248
   1249config STRICT_MODULE_RWX
   1250	bool "Set loadable kernel module data as NX and text as RO" if ARCH_OPTIONAL_KERNEL_RWX
   1251	depends on ARCH_HAS_STRICT_MODULE_RWX && MODULES
   1252	default !ARCH_OPTIONAL_KERNEL_RWX || ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
   1253	help
   1254	  If this is set, module text and rodata memory will be made read-only,
   1255	  and non-text memory will be made non-executable. This provides
    1256	  protection against certain security exploits (e.g. writing to text).
   1257
   1258# select if the architecture provides an asm/dma-direct.h header
   1259config ARCH_HAS_PHYS_TO_DMA
   1260	bool
   1261
   1262config HAVE_ARCH_COMPILER_H
   1263	bool
   1264	help
   1265	  An architecture can select this if it provides an
   1266	  asm/compiler.h header that should be included after
   1267	  linux/compiler-*.h in order to override macro definitions that those
   1268	  headers generally provide.
   1269
   1270config HAVE_ARCH_PREL32_RELOCATIONS
   1271	bool
   1272	help
   1273	  May be selected by an architecture if it supports place-relative
   1274	  32-bit relocations, both in the toolchain and in the module loader,
   1275	  in which case relative references can be used in special sections
   1276	  for PCI fixup, initcalls etc which are only half the size on 64 bit
   1277	  architectures, and don't require runtime relocation on relocatable
   1278	  kernels.
   1279
   1280config ARCH_USE_MEMREMAP_PROT
   1281	bool
   1282
   1283config LOCK_EVENT_COUNTS
   1284	bool "Locking event counts collection"
   1285	depends on DEBUG_FS
   1286	help
   1287	  Enable light-weight counting of various locking related events
   1288	  in the system with minimal performance impact. This reduces
   1289	  the chance of application behavior change because of timing
   1290	  differences. The counts are reported via debugfs.
   1291
   1292# Select if the architecture has support for applying RELR relocations.
   1293config ARCH_HAS_RELR
   1294	bool
   1295
   1296config RELR
   1297	bool "Use RELR relocation packing"
   1298	depends on ARCH_HAS_RELR && TOOLS_SUPPORT_RELR
   1299	default y
   1300	help
   1301	  Store the kernel's dynamic relocations in the RELR relocation packing
   1302	  format. Requires a compatible linker (LLD supports this feature), as
   1303	  well as compatible NM and OBJCOPY utilities (llvm-nm and llvm-objcopy
   1304	  are compatible).
   1305
   1306config ARCH_HAS_MEM_ENCRYPT
   1307	bool
   1308
   1309config ARCH_HAS_CC_PLATFORM
   1310	bool
   1311
   1312config HAVE_SPARSE_SYSCALL_NR
    1313	bool
    1314	help
    1315	  An architecture should select this if its syscall numbering is sparse
    1316	  to save space. For example, the MIPS architecture has a syscall array
    1317	  with entries at the 4000, 5000 and 6000 locations. This option turns
    1318	  on syscall-related optimizations for a given architecture.
   1319
   1320config ARCH_HAS_VDSO_DATA
   1321	bool
   1322
   1323config HAVE_STATIC_CALL
   1324	bool
   1325
   1326config HAVE_STATIC_CALL_INLINE
   1327	bool
   1328	depends on HAVE_STATIC_CALL
   1329	select OBJTOOL
   1330
   1331config HAVE_PREEMPT_DYNAMIC
   1332	bool
   1333
   1334config HAVE_PREEMPT_DYNAMIC_CALL
   1335	bool
   1336	depends on HAVE_STATIC_CALL
   1337	select HAVE_PREEMPT_DYNAMIC
   1338	help
   1339	   An architecture should select this if it can handle the preemption
   1340	   model being selected at boot time using static calls.
   1341
   1342	   Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
   1343	   preemption function will be patched directly.
   1344
   1345	   Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
   1346	   call to a preemption function will go through a trampoline, and the
   1347	   trampoline will be patched.
   1348
   1349	   It is strongly advised to support inline static call to avoid any
   1350	   overhead.
   1351
   1352config HAVE_PREEMPT_DYNAMIC_KEY
   1353	bool
   1354	depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO
   1355	select HAVE_PREEMPT_DYNAMIC
   1356	help
   1357	   An architecture should select this if it can handle the preemption
   1358	   model being selected at boot time using static keys.
   1359
   1360	   Each preemption function will be given an early return based on a
   1361	   static key. This should have slightly lower overhead than non-inline
   1362	   static calls, as this effectively inlines each trampoline into the
   1363	   start of its callee. This may avoid redundant work, and may
   1364	   integrate better with CFI schemes.
   1365
   1366	   This will have greater overhead than using inline static calls as
   1367	   the call to the preemption function cannot be entirely elided.
   1368
   1369config ARCH_WANT_LD_ORPHAN_WARN
   1370	bool
   1371	help
   1372	  An arch should select this symbol once all linker sections are explicitly
   1373	  included, size-asserted, or discarded in the linker scripts. This is
   1374	  important because we never want expected sections to be placed heuristically
   1375	  by the linker, since the locations of such sections can change between linker
   1376	  versions.
   1377
   1378config HAVE_ARCH_PFN_VALID
   1379	bool
   1380
   1381config ARCH_SUPPORTS_DEBUG_PAGEALLOC
   1382	bool
   1383
   1384config ARCH_SUPPORTS_PAGE_TABLE_CHECK
   1385	bool
   1386
   1387config ARCH_SPLIT_ARG64
   1388	bool
   1389	help
   1390	   If a 32-bit architecture requires 64-bit arguments to be split into
   1391	   pairs of 32-bit arguments, select this option.
   1392
   1393config ARCH_HAS_ELFCORE_COMPAT
   1394	bool
   1395
   1396config ARCH_HAS_PARANOID_L1D_FLUSH
   1397	bool
   1398
   1399config DYNAMIC_SIGFRAME
   1400	bool
   1401
   1402# Select, if arch has a named attribute group bound to NUMA device nodes.
   1403config HAVE_ARCH_NODE_DEV_GROUP
   1404	bool
   1405
   1406source "kernel/gcov/Kconfig"
   1407
   1408source "scripts/gcc-plugins/Kconfig"
   1409
   1410endmenu