cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

Kconfig (11819B)


# SPDX-License-Identifier: GPL-2.0-only
menu "Xen driver support"
	depends on XEN

config XEN_BALLOON
	bool "Xen memory balloon driver"
	default y
	help
	  The balloon driver allows the Xen domain to request more memory from
	  the system to expand the domain's memory allocation, or alternatively
	  return unneeded memory to the system.

config XEN_BALLOON_MEMORY_HOTPLUG
	bool "Memory hotplug support for Xen balloon driver"
	depends on XEN_BALLOON && MEMORY_HOTPLUG
	default y
	help
	  Memory hotplug support for the Xen balloon driver allows expanding
	  the memory available to the system above the limit declared at
	  system startup. It is very useful on critical systems which require
	  long uptimes without rebooting.

	  It is also very useful for non-PV domains to obtain unpopulated
	  physical memory ranges to use in order to map foreign memory or
	  grants.

	  Memory can be hotplugged in the following steps:

	    1) target domain: ensure that the memory auto-online policy is in
	       effect by checking the
	       /sys/devices/system/memory/auto_online_blocks file (it should
	       read 'online').

	    2) control domain: xl mem-max <target-domain> <maxmem>
	       where <maxmem> is >= the requested memory size.

	    3) control domain: xl mem-set <target-domain> <memory>
	       where <memory> is the requested memory size; alternatively,
	       memory can be added by writing the proper value to
	       /sys/devices/system/xen_memory/xen_memory0/target or
	       /sys/devices/system/xen_memory/xen_memory0/target_kb on the
	       target domain.

	  Alternatively, if memory auto-onlining was not requested in step 1,
	  the newly added memory can be onlined manually in the target domain
	  by doing the following:

		for i in /sys/devices/system/memory/memory*/state; do \
		  [ "`cat "$i"`" = offline ] && echo online > "$i"; done

	  or by adding the following line to the udev rules:

	  SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'"

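# The help text above lists the hotplug steps individually; the following is
# a consolidated sketch of the same flow, assuming a target domain named
# "guest" and a requested size of 8192m (the name, size, and unit suffix are
# placeholders):
#
#   # control domain: raise the maximum, then the current allocation
#   xl mem-max guest 8192m
#   xl mem-set guest 8192m
#
#   # target domain: online any blocks that were not auto-onlined
#   for i in /sys/devices/system/memory/memory*/state; do
#     [ "`cat "$i"`" = offline ] && echo online > "$i"
#   done
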
config XEN_MEMORY_HOTPLUG_LIMIT
	int "Hotplugged memory limit (in GiB) for a PV guest"
	default 512
	depends on XEN_HAVE_PVMMU
	depends on MEMORY_HOTPLUG
	help
	  Maximum amount of memory (in GiB) that a PV guest can be
	  expanded to when using memory hotplug.

	  A PV guest can have more memory than this limit if it is
	  started with a larger maximum.

	  This value is used to allocate enough space in internal
	  tables needed for physical memory administration.

config XEN_SCRUB_PAGES_DEFAULT
	bool "Scrub pages before returning them to system by default"
	depends on XEN_BALLOON
	default y
	help
	  Scrub pages before returning them to the system for reuse by
	  other domains.  This makes sure that any confidential data
	  is not accidentally visible to other domains.  It is more
	  secure, but slightly less efficient. This can be controlled
	  with the xen_scrub_pages=0 parameter and via
	  /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
	  This option only sets the default value.

	  If in doubt, say yes.

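# As noted above, this option only sets the default; a minimal sketch of
# overriding it, using the parameter and sysfs node named in the help text
# (the sysfs path is only present while the balloon driver is active):
#
#   # on the kernel command line, disable scrubbing:
#   xen_scrub_pages=0
#
#   # or toggle it at runtime:
#   echo 0 > /sys/devices/system/xen_memory/xen_memory0/scrub_pages
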
config XEN_DEV_EVTCHN
	tristate "Xen /dev/xen/evtchn device"
	default y
	help
	  The evtchn driver allows a userspace process to trigger event
	  channels and to receive notification of an event channel
	  firing.
	  If in doubt, say yes.

config XEN_BACKEND
	bool "Backend driver support"
	default XEN_DOM0
	help
	  Support for backend device drivers that provide I/O services
	  to other virtual machines.

config XENFS
	tristate "Xen filesystem"
	select XEN_PRIVCMD
	default y
	help
	  The xen filesystem provides a way for domains to share
	  information with each other and with the hypervisor.
	  For example, by reading and writing the "xenbus" file, guests
	  may pass arbitrary information to the initial domain.
	  If in doubt, say yes.

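# A common way to consume xenfs from userspace is to mount it on the
# compatibility path that the xenstore tools expect (see XEN_COMPAT_XENFS
# below); a minimal sketch, assuming a control domain:
#
#   mount -t xenfs xenfs /proc/xen
#   cat /proc/xen/capabilities    # prints "control_d" in the control domain
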
config XEN_COMPAT_XENFS
	bool "Create compatibility mount point /proc/xen"
	depends on XENFS
	default y
	help
	  The old xenstore userspace tools expect to find "xenbus"
	  under /proc/xen, but "xenbus" is now found at the root of the
	  xenfs filesystem.  Selecting this causes the kernel to create
	  the compatibility mount point /proc/xen if it is running on
	  a Xen platform.
	  If in doubt, say yes.

config XEN_SYS_HYPERVISOR
	bool "Create xen entries under /sys/hypervisor"
	depends on SYSFS
	select SYS_HYPERVISOR
	default y
	help
	  Create entries under /sys/hypervisor describing the Xen
	  hypervisor environment.  When running natively or in another
	  virtual environment, /sys/hypervisor will still be present,
	  but will have no Xen contents.

config XEN_XENBUS_FRONTEND
	tristate

config XEN_GNTDEV
	tristate "userspace grant access device driver"
	depends on XEN
	default m
	select MMU_NOTIFIER
	help
	  Allows userspace processes to use grants.

config XEN_GNTDEV_DMABUF
	bool "Add support for dma-buf grant access device driver extension"
	depends on XEN_GNTDEV && XEN_GRANT_DMA_ALLOC
	select DMA_SHARED_BUFFER
	help
	  Allows userspace processes and kernel modules to use the Xen-backed
	  dma-buf implementation. With this extension, grant references to
	  the pages of an imported dma-buf can be exported for use by another
	  domain, and grant references coming from a foreign domain can be
	  converted into a local dma-buf for local export.

config XEN_GRANT_DEV_ALLOC
	tristate "User-space grant reference allocator driver"
	depends on XEN
	default m
	help
	  Allows userspace processes to create pages with access granted
	  to other domains. This can be used to implement frontend drivers
	  or as part of an inter-domain shared memory channel.

config XEN_GRANT_DMA_ALLOC
	bool "Allow allocating DMA capable buffers with grant reference module"
	depends on XEN && HAS_DMA
	help
	  Extends the grant table module API to allow allocating DMA-capable
	  buffers and mapping foreign grant references on top of them.
	  The resulting buffer is similar to one allocated by the balloon
	  driver in that proper memory reservation is made by
	  {increase|decrease}_reservation and VA mappings are updated if
	  needed.
	  This is useful for sharing foreign buffers with HW drivers which
	  cannot work with scattered buffers provided by the balloon driver,
	  but require DMA-able memory instead.

config SWIOTLB_XEN
	def_bool y
	depends on XEN_PV || ARM || ARM64
	select DMA_OPS
	select SWIOTLB

config XEN_PCI_STUB
	bool

config XEN_PCIDEV_STUB
	tristate "Xen PCI-device stub driver"
	depends on PCI && !X86 && XEN
	depends on XEN_BACKEND
	select XEN_PCI_STUB
	default m
	help
	  The PCI device stub driver provides a limited version of the PCI
	  device backend driver without para-virtualized support for guests.
	  If you select this to be a module, you will need to make sure no
	  other driver has bound to the device(s) you want to make visible to
	  other guests.

	  The "hide" parameter (only applicable if the backend driver is
	  compiled into the kernel) allows you to bind the PCI devices to this
	  module instead of the default device drivers. The argument is the
	  list of PCI BDFs: xen-pciback.hide=(03:00.0)(04:00.0)

	  If in doubt, say m.

config XEN_PCIDEV_BACKEND
	tristate "Xen PCI-device backend driver"
	depends on PCI && X86 && XEN
	depends on XEN_BACKEND
	select XEN_PCI_STUB
	default m
	help
	  The PCI device backend driver allows the kernel to export arbitrary
	  PCI devices to other guests. If you select this to be a module, you
	  will need to make sure no other driver has bound to the device(s)
	  you want to make visible to other guests.

	  The "passthrough" parameter allows you to specify how you want the
	  PCI devices to appear in the guest. You can choose the default (0),
	  where the PCI topology starts at 00.00.0, or (1) for passthrough if
	  you want the PCI device topology to appear the same as in the host.

	  The "hide" parameter (only applicable if the backend driver is
	  compiled into the kernel) allows you to bind the PCI devices to this
	  module instead of the default device drivers. The argument is the
	  list of PCI BDFs: xen-pciback.hide=(03:00.0)(04:00.0)

	  If in doubt, say m.

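# A hedged sketch of handing a device to pciback when the driver is built as
# a module, using the xl toolstack (the BDF 03:00.0 and the domain name
# "guest" are placeholders):
#
#   modprobe xen-pciback
#   xl pci-assignable-add 03:00.0     # unbind from its driver, bind to pciback
#   xl pci-attach guest 03:00.0       # hot-plug the device into the guest
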
config XEN_PVCALLS_FRONTEND
	tristate "XEN PV Calls frontend driver"
	depends on INET && XEN
	select XEN_XENBUS_FRONTEND
	help
	  Experimental frontend for the Xen PV Calls protocol
	  (https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
	  sends a small set of POSIX calls to the backend, which
	  implements them.

config XEN_PVCALLS_BACKEND
	tristate "XEN PV Calls backend driver"
	depends on INET && XEN && XEN_BACKEND
	help
	  Experimental backend for the Xen PV Calls protocol
	  (https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
	  allows PV Calls frontends to send POSIX calls to the backend,
	  which implements them.

	  If in doubt, say n.

config XEN_SCSI_BACKEND
	tristate "XEN SCSI backend driver"
	depends on XEN && XEN_BACKEND && TARGET_CORE
	help
	  The SCSI backend driver allows the kernel to export its SCSI devices
	  to other guests via a high-performance shared-memory interface.
	  It is only needed for systems running as Xen driver domains (e.g.
	  Dom0) and only if guests need generic access to SCSI devices.

config XEN_PRIVCMD
	tristate "Xen hypercall passthrough driver"
	depends on XEN
	default m
	help
	  The hypercall passthrough driver allows privileged user programs to
	  perform Xen hypercalls. This driver is normally required for systems
	  running as Dom0 to perform privileged operations, but in some
	  disaggregated Xen setups this driver might be needed for other
	  domains, too.

config XEN_ACPI_PROCESSOR
	tristate "Xen ACPI processor"
	depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
	default m
	help
	  This ACPI processor driver uploads Power Management information to
	  the Xen hypervisor.

	  To do that, the driver parses the Power Management data and uploads
	  said information to the Xen hypervisor. Then the Xen hypervisor can
	  select the proper Cx and Pxx states. The driver also registers itself
	  as the SMM so that other drivers (such as the ACPI cpufreq scaling
	  driver) will not load.

	  To compile this driver as a module, choose M here: the module will be
	  called xen_acpi_processor. If you do not know what to choose, select
	  M here. If the CPUFREQ drivers are built in, select Y here.

config XEN_MCE_LOG
	bool "Xen platform mcelog"
	depends on XEN_PV_DOM0 && X86_MCE
	help
	  Allow the kernel to fetch MCE errors from the Xen platform and
	  convert them into the Linux mcelog format for the mcelog tools.

config XEN_HAVE_PVMMU
	bool

config XEN_EFI
	def_bool y
	depends on (ARM || ARM64 || X86_64) && EFI

config XEN_AUTO_XLATE
	def_bool y
	depends on ARM || ARM64 || XEN_PVHVM
	help
	  Support for auto-translated physmap guests.

config XEN_ACPI
	def_bool y
	depends on X86 && ACPI

config XEN_SYMS
	bool "Xen symbols"
	depends on X86 && XEN_DOM0 && XENFS
	default y if KALLSYMS
	help
	  Exports hypervisor symbols (along with their types and addresses) via
	  the /proc/xen/xensyms file, similar to /proc/kallsyms.

config XEN_HAVE_VPMU
	bool

config XEN_FRONT_PGDIR_SHBUF
	tristate

config XEN_UNPOPULATED_ALLOC
	bool "Use unpopulated memory ranges for guest mappings"
	depends on ZONE_DEVICE
	default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
	help
	  Use unpopulated memory ranges in order to create mappings for guest
	  memory regions, including grant maps and foreign pages. This avoids
	  having to balloon out RAM regions in order to obtain physical memory
	  space to create such mappings.

config XEN_GRANT_DMA_IOMMU
	bool
	select IOMMU_API

config XEN_GRANT_DMA_OPS
	bool
	select DMA_OPS

config XEN_VIRTIO
	bool "Xen virtio support"
	depends on VIRTIO
	select XEN_GRANT_DMA_OPS
	select XEN_GRANT_DMA_IOMMU if OF
	help
	  Enable virtio support for running as a Xen guest. Depending on the
	  guest type, this will require special support on the backend side
	  (qemu or kernel, depending on the virtio device types used).

	  If in doubt, say n.

endmenu