#include "../sevstep/sevstep.h"
#include "../sevstep/uspt.h"
static void
sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
	struct kvm_page_fault *fault)
{
	const int modes[] = {
		KVM_PAGE_TRACK_WRITE,
		KVM_PAGE_TRACK_ACCESS,
		KVM_PAGE_TRACK_EXEC
	};
	uint64_t current_rip = 0;
	bool was_tracked;
	int have_rip, i;
	int send_err;

	/* Untrack the faulting GFN in every mode it is currently tracked in. */
	was_tracked = false;
	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		if (kvm_slot_page_track_is_active(vcpu->kvm,
		    fault->slot, fault->gfn, modes[i])) {
			sevstep_untrack_single_page(vcpu, fault->gfn, modes[i]);
			was_tracked = true;
		}
	}

	if (was_tracked) {
		have_rip = false;
		if (sevstep_uspt_should_get_rip())
			have_rip = sevstep_get_rip_kvm_vcpu(vcpu, &current_rip) == 0;
		if (sevstep_uspt_batch_tracking_in_progress()) {
			send_err = sevstep_uspt_batch_tracking_save(fault->gfn << PAGE_SHIFT,
				fault->error_code, have_rip, current_rip);
			if (send_err) {
				printk_ratelimited(
					"sevstep_uspt_batch_tracking_save failed with %d\n"
					"##########################\n", send_err);
			}
			sevstep_uspt_batch_tracking_handle_retrack(vcpu, fault->gfn);
			sevstep_uspt_batch_tracking_inc_event_idx();
		} else {
			send_err = sevstep_uspt_send_and_block(fault->gfn << PAGE_SHIFT,
				fault->error_code, have_rip, current_rip);
			if (send_err) {
				printk("sevstep_uspt_send_and_block failed with %d\n"
					"##########################\n", send_err);
			}
		}
	}
}
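
/*
 * Restrict the permissions encoded in a single SPTE according to the given
 * page tracking mode. Returns true if the caller must flush the TLB for the
 * change to take effect.
 */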
bool
sevstep_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
{
	u64 spte = *sptep;
	bool shouldFlush = false;

	if (!is_writable_pte(spte) && !(pt_protect && is_mmu_writable_spte(spte)))
		return false;

	rmap_printk("spte %p %llx\n", sptep, *sptep);

	if (pt_protect)
		spte &= ~EPT_SPTE_MMU_WRITABLE;

	if (mode == KVM_PAGE_TRACK_WRITE) {
		spte = spte & ~PT_WRITABLE_MASK;
		shouldFlush = true;
	} else if (mode == KVM_PAGE_TRACK_RESET_ACCESSED) {
		spte = spte & ~PT_ACCESSED_MASK;
	} else if (mode == KVM_PAGE_TRACK_ACCESS) {
		/* Revoke present, write and user access and set the NX bit. */
		spte = spte & ~PT_PRESENT_MASK;
		spte = spte & ~PT_WRITABLE_MASK;
		spte = spte & ~PT_USER_MASK;
		spte = spte | (0x1ULL << PT64_NX_SHIFT);
		shouldFlush = true;
	} else if (mode == KVM_PAGE_TRACK_EXEC) {
		spte = spte | (0x1ULL << PT64_NX_SHIFT);
		shouldFlush = true;
	} else if (mode == KVM_PAGE_TRACK_RESET_EXEC) {
		spte = spte & ~(0x1ULL << PT64_NX_SHIFT);
		shouldFlush = true;
	} else {
		printk(KERN_WARNING "spte_protect was called with invalid mode "
			"parameter %d\n", mode);
	}
	shouldFlush |= mmu_spte_update(sptep, spte);
	return shouldFlush;
}
EXPORT_SYMBOL(sevstep_spte_protect);
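
/* Apply sevstep_spte_protect() to every SPTE on the given rmap chain. */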
bool sevstep_rmap_protect(struct kvm_rmap_head *rmap_head,
	bool pt_protect, enum kvm_page_track_mode mode)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep) {
		flush |= sevstep_spte_protect(sptep, pt_protect, mode);
	}

	return flush;
}
EXPORT_SYMBOL(sevstep_rmap_protect);
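
/*
 * Protect a single GFN of a memory slot at every page-table level from
 * min_level up to KVM_MAX_HUGEPAGE_LEVEL, covering both the shadow-MMU
 * rmaps and (via write protection only) the TDP MMU.
 */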
bool
sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
	uint64_t gfn, int min_level, enum kvm_page_track_mode mode)
{
	struct kvm_rmap_head *rmap_head;
	bool protected;
	int i;

	protected = false;

	if (kvm_memslots_have_rmaps(kvm)) {
		for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
			rmap_head = gfn_to_rmap(gfn, i, slot);
			protected |= sevstep_rmap_protect(rmap_head, true, mode);
		}
	}
	if (is_tdp_mmu_enabled(kvm)) {
		protected |= kvm_tdp_mmu_write_protect_gfn(kvm,
			slot, gfn, min_level);
	}

	return protected;
}
EXPORT_SYMBOL(sevstep_kvm_mmu_slot_gfn_protect);