cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

page_track.c (7725B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
 * Support KVM guest page tracking
      4 *
      5 * This feature allows us to track page access in guest. Currently, only
      6 * write access is tracked.
      7 *
      8 * Copyright(C) 2015 Intel Corporation.
      9 *
     10 * Author:
     11 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
     12 */
     13
     14#include <linux/kvm_host.h>
     15#include <linux/rculist.h>
     16
     17#include <asm/kvm_page_track.h>
     18
     19#include "mmu.h"
     20#include "mmu_internal.h"
     21
     22#include "../cachepc/cachepc.h"
     23#include "../cachepc/track.h"
     24
     25bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
     26{
     27	return IS_ENABLED(CONFIG_KVM_EXTERNAL_WRITE_TRACKING) ||
     28	       !tdp_enabled || kvm_shadow_root_allocated(kvm);
     29}
     30
     31void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
     32{
     33	int i;
     34
     35	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
     36		kvfree(slot->arch.gfn_track[i]);
     37		slot->arch.gfn_track[i] = NULL;
     38	}
     39}
     40
     41int kvm_page_track_create_memslot(struct kvm *kvm,
     42				  struct kvm_memory_slot *slot,
     43				  unsigned long npages)
     44{
     45	int i;
     46
     47	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
     48		if (i == KVM_PAGE_TRACK_WRITE &&
     49		    !kvm_page_track_write_tracking_enabled(kvm))
     50			continue;
     51
     52		slot->arch.gfn_track[i] =
     53			__vcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
     54				  GFP_KERNEL_ACCOUNT);
     55		if (!slot->arch.gfn_track[i])
     56			goto track_free;
     57	}
     58
     59	return 0;
     60
     61track_free:
     62	kvm_page_track_free_memslot(slot);
     63	return -ENOMEM;
     64}
     65
     66static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
     67{
     68	if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
     69		return false;
     70
     71	return true;
     72}
     73
     74int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot)
     75{
     76	unsigned short *gfn_track;
     77
     78	if (slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE])
     79		return 0;
     80
     81	gfn_track = __vcalloc(slot->npages, sizeof(*gfn_track),
     82			      GFP_KERNEL_ACCOUNT);
     83	if (gfn_track == NULL)
     84		return -ENOMEM;
     85
     86	slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE] = gfn_track;
     87	return 0;
     88}
     89
     90static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
     91			     enum kvm_page_track_mode mode, short count)
     92{
     93	int index, val;
     94
     95	index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
     96
     97	val = slot->arch.gfn_track[mode][index];
     98
     99	if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
    100		return;
    101
    102	slot->arch.gfn_track[mode][index] += count;
    103}
    104
    105/*
    106 * add guest page to the tracking pool so that corresponding access on that
    107 * page will be intercepted.
    108 *
    109 * It should be called under the protection both of mmu-lock and kvm->srcu
    110 * or kvm->slots_lock.
    111 *
    112 * @kvm: the guest instance we are interested in.
    113 * @slot: the @gfn belongs to.
    114 * @gfn: the guest page.
    115 * @mode: tracking mode, currently only write track is supported.
    116 */
void kvm_slot_page_track_add_page(struct kvm *kvm,
				  struct kvm_memory_slot *slot, gfn_t gfn,
				  enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE &&
		    !kvm_page_track_write_tracking_enabled(kvm)))
		return;

	/* Bump the per-gfn refcount for this tracking mode. */
	update_gfn_track(slot, gfn, mode, 1);

	/*
	 * new track stops large page mapping for the
	 * tracked page.
	 */
	kvm_mmu_gfn_disallow_lpage(slot, gfn);

	/*
	 * CachePC hook: apply @mode-specific SPTE protection to the gfn
	 * at 4K granularity, flushing remote TLBs when the call reports
	 * a change.  NOTE(review): cpc_kvm_mmu_slot_gfn_protect() is
	 * declared in ../cachepc/track.h — confirm there that a nonzero
	 * return means "an SPTE was modified".
	 */
	if (cpc_kvm_mmu_slot_gfn_protect(kvm,
			slot, gfn, PG_LEVEL_4K, mode)) {
		kvm_flush_remote_tlbs(kvm);
	}
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
    142
    143/*
    144 * remove the guest page from the tracking pool which stops the interception
    145 * of corresponding access on that page. It is the opposed operation of
    146 * kvm_slot_page_track_add_page().
    147 *
    148 * It should be called under the protection both of mmu-lock and kvm->srcu
    149 * or kvm->slots_lock.
    150 *
    151 * @kvm: the guest instance we are interested in.
    152 * @slot: the @gfn belongs to.
    153 * @gfn: the guest page.
    154 * @mode: tracking mode, currently only write track is supported.
    155 */
    156void kvm_slot_page_track_remove_page(struct kvm *kvm,
    157				     struct kvm_memory_slot *slot, gfn_t gfn,
    158				     enum kvm_page_track_mode mode)
    159{
    160	if (WARN_ON(!page_track_mode_is_valid(mode)))
    161		return;
    162
    163	if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE &&
    164		    !kvm_page_track_write_tracking_enabled(kvm)))
    165		return;
    166
    167	update_gfn_track(slot, gfn, mode, -1);
    168
    169	/*
    170	 * allow large page mapping for the tracked page
    171	 * after the tracker is gone.
    172	 */
    173	kvm_mmu_gfn_allow_lpage(slot, gfn);
    174}
    175EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);
    176
    177/*
    178 * check if the corresponding access on the specified guest page is tracked.
    179 */
    180bool kvm_slot_page_track_is_active(struct kvm *kvm,
    181				   const struct kvm_memory_slot *slot,
    182				   gfn_t gfn, enum kvm_page_track_mode mode)
    183{
    184	int index;
    185
    186	if (WARN_ON(!page_track_mode_is_valid(mode)))
    187		return false;
    188
    189	if (!slot)
    190		return false;
    191
    192	if (mode == KVM_PAGE_TRACK_WRITE &&
    193	    !kvm_page_track_write_tracking_enabled(kvm))
    194		return false;
    195
    196	index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
    197	return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
    198}
    199
    200void kvm_page_track_cleanup(struct kvm *kvm)
    201{
    202	struct kvm_page_track_notifier_head *head;
    203
    204	head = &kvm->arch.track_notifier_head;
    205	cleanup_srcu_struct(&head->track_srcu);
    206}
    207
    208int kvm_page_track_init(struct kvm *kvm)
    209{
    210	struct kvm_page_track_notifier_head *head;
    211
    212	head = &kvm->arch.track_notifier_head;
    213	INIT_HLIST_HEAD(&head->track_notifier_list);
    214	return init_srcu_struct(&head->track_srcu);
    215}
    216
    217/*
    218 * register the notifier so that event interception for the tracked guest
    219 * pages can be received.
    220 */
    221void
    222kvm_page_track_register_notifier(struct kvm *kvm,
    223				 struct kvm_page_track_notifier_node *n)
    224{
    225	struct kvm_page_track_notifier_head *head;
    226
    227	head = &kvm->arch.track_notifier_head;
    228
    229	write_lock(&kvm->mmu_lock);
    230	hlist_add_head_rcu(&n->node, &head->track_notifier_list);
    231	write_unlock(&kvm->mmu_lock);
    232}
    233EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
    234
    235/*
    236 * stop receiving the event interception. It is the opposed operation of
    237 * kvm_page_track_register_notifier().
    238 */
    239void
    240kvm_page_track_unregister_notifier(struct kvm *kvm,
    241				   struct kvm_page_track_notifier_node *n)
    242{
    243	struct kvm_page_track_notifier_head *head;
    244
    245	head = &kvm->arch.track_notifier_head;
    246
    247	write_lock(&kvm->mmu_lock);
    248	hlist_del_rcu(&n->node);
    249	write_unlock(&kvm->mmu_lock);
    250	synchronize_srcu(&head->track_srcu);
    251}
    252EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);
    253
    254/*
    255 * Notify the node that write access is intercepted and write emulation is
    256 * finished at this time.
    257 *
    258 * The node should figure out if the written page is the one that node is
    259 * interested in by itself.
    260 */
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			  int bytes)
{
	struct kvm_page_track_notifier_head *head;
	struct kvm_page_track_notifier_node *n;
	int idx;

	head = &vcpu->kvm->arch.track_notifier_head;

	/* Fast path: nothing registered, nothing to notify. */
	if (hlist_empty(&head->track_notifier_list))
		return;

	/*
	 * SRCU read-side section: allows notifiers to be unregistered
	 * concurrently (unregister synchronizes on track_srcu).
	 */
	idx = srcu_read_lock(&head->track_srcu);
	hlist_for_each_entry_srcu(n, &head->track_notifier_list, node,
				srcu_read_lock_held(&head->track_srcu))
		if (n->track_write)
			n->track_write(vcpu, gpa, new, bytes, n);
	srcu_read_unlock(&head->track_srcu, idx);
}
    280
    281/*
    282 * Notify the node that memory slot is being removed or moved so that it can
    283 * drop write-protection for the pages in the memory slot.
    284 *
    285 * The node should figure out it has any write-protected pages in this slot
    286 * by itself.
    287 */
void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_page_track_notifier_head *head;
	struct kvm_page_track_notifier_node *n;
	int idx;

	head = &kvm->arch.track_notifier_head;

	/* Fast path: nothing registered, nothing to notify. */
	if (hlist_empty(&head->track_notifier_list))
		return;

	/*
	 * SRCU read-side section: same protection scheme as
	 * kvm_page_track_write() — unregister waits on track_srcu.
	 */
	idx = srcu_read_lock(&head->track_srcu);
	hlist_for_each_entry_srcu(n, &head->track_notifier_list, node,
				srcu_read_lock_held(&head->track_srcu))
		if (n->track_flush_slot)
			n->track_flush_slot(kvm, slot, n);
	srcu_read_unlock(&head->track_srcu, idx);
}