commit dd1e29c4828b01b477a003679234097434f800d4
parent b66fb6b2cdf003de4b580002fdeeacdb93a6bf75
Author: Louis Burda <quent.burda@gmail.com>
Date: Wed, 5 Oct 2022 17:49:06 +0200
More minor refactors
Diffstat:
6 files changed, 145 insertions(+), 139 deletions(-)
diff --git a/cachepc/cachepc.c b/cachepc/cachepc.c
@@ -39,8 +39,8 @@ cachepc_init_pmc(uint8_t index, uint8_t event_no, uint8_t event_mask,
reg_addr = 0xc0010200 + index * 2;
event = event_no | (event_mask << 8);
event |= (1ULL << 22); /* enable performance counter */
- event |= (kernel_user * 1ULL) << 16;
- event |= (host_guest * 1ULL) << 40;
+ event |= ((kernel_user & 0b11) * 1ULL) << 16;
+ event |= ((host_guest & 0b11) * 1ULL) << 40;
printk(KERN_WARNING "CachePC: Initialized %i. PMC %02X:%02X\n",
index, event_no, event_mask);
asm volatile ("wrmsr" : : "c"(reg_addr), "a"(event), "d"(0x00));
diff --git a/cachepc/kvm.c b/cachepc/kvm.c
@@ -285,8 +285,8 @@ cachepc_kvm_init_pmc_ioctl(void *p)
event = *(uint32_t *)p;
index = (event & 0xFF000000) >> 24;
- host_guest = (event & 0x00F00000) >> 20;
- kernel_user = (event & 0x000F0000) >> 16;
+ host_guest = (event & 0x00300000) >> 20;
+ kernel_user = (event & 0x00030000) >> 16;
event_no = (event & 0x0000FF00) >> 8;
event_mask = (event & 0x000000FF) >> 0;
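For reference, the decode above fixes the 32-bit ioctl argument layout to: PMC index in bits 31:24, a 2-bit host/guest field in bits 21:20, a 2-bit kernel/user field in bits 17:16, event number in bits 15:8 and unit mask in bits 7:0. A minimal sketch of how a caller could pack such a value — the helper name is made up and not part of this patch:

#include <stdint.h>

/* Illustrative only: pack the fields that cachepc_kvm_init_pmc_ioctl()
 * unpacks above; the masks mirror the narrowed 2-bit fields. */
static inline uint32_t pack_init_pmc_arg(uint8_t index, uint8_t host_guest,
	uint8_t kernel_user, uint8_t event_no, uint8_t event_mask)
{
	return ((uint32_t)index << 24)
		| ((uint32_t)(host_guest & 0x3) << 20)
		| ((uint32_t)(kernel_user & 0x3) << 16)
		| ((uint32_t)event_no << 8)
		| (uint32_t)event_mask;
}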
diff --git a/sevstep/mmu.c b/sevstep/mmu.c
@@ -52,9 +52,10 @@ sevstep_uspt_page_fault_handle(struct kvm_vcpu *vcpu,
bool
sevstep_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
{
- u64 spte = *sptep;
- bool shouldFlush = false;
+ u64 spte;
+ bool flush;
+ spte = *sptep;
if (!is_writable_pte(spte) && !(pt_protect && is_mmu_writable_spte(spte)))
return false;
@@ -63,9 +64,10 @@ sevstep_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
if (pt_protect)
spte &= ~EPT_SPTE_MMU_WRITABLE;
+ flush = false;
if (mode == KVM_PAGE_TRACK_WRITE) {
spte = spte & ~PT_WRITABLE_MASK;
- shouldFlush = true;
+ flush = true;
} else if (mode == KVM_PAGE_TRACK_RESET_ACCESSED) {
spte = spte & ~PT_ACCESSED_MASK;
} else if (mode == KVM_PAGE_TRACK_ACCESS) {
@@ -73,19 +75,20 @@ sevstep_spte_protect(u64 *sptep, bool pt_protect, enum kvm_page_track_mode mode)
spte = spte & ~PT_WRITABLE_MASK;
spte = spte & ~PT_USER_MASK;
spte = spte | (0x1ULL << PT64_NX_SHIFT);
- shouldFlush = true;
+ flush = true;
} else if (mode == KVM_PAGE_TRACK_EXEC) {
spte = spte | (0x1ULL << PT64_NX_SHIFT);
- shouldFlush = true;
+ flush = true;
} else if (mode == KVM_PAGE_TRACK_RESET_EXEC) {
spte = spte & ~(0x1ULL << PT64_NX_SHIFT);
- shouldFlush = true;
+ flush = true;
} else {
printk(KERN_WARNING "spte_protect was called with invalid mode"
"parameter %d\n",mode);
}
- shouldFlush |= mmu_spte_update(sptep, spte);
- return shouldFlush;
+ flush |= mmu_spte_update(sptep, spte);
+
+ return flush;
}
EXPORT_SYMBOL(sevstep_spte_protect);
diff --git a/sevstep/sevstep.c b/sevstep/sevstep.c
@@ -44,19 +44,8 @@
struct kvm* main_vm;
EXPORT_SYMBOL(main_vm);
-void
-sevstep_setup_pmcs(void)
-{
- /* retired instructions */
- cachepc_init_pmc(0, 0xc0, 0x00, PMC_GUEST, PMC_KERNEL | PMC_USER);
-
- /* l2 data cache hit & miss */
- cachepc_init_pmc(1, 0x64, 0x70, PMC_HOST, PMC_KERNEL);
-}
-EXPORT_SYMBOL(sevstep_setup_pmcs);
-
bool
-sevstep_untrack_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+sevstep_track_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode)
{
int idx;
@@ -65,35 +54,41 @@ sevstep_untrack_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
ret = false;
idx = srcu_read_lock(&vcpu->kvm->srcu);
+
if (mode == KVM_PAGE_TRACK_ACCESS) {
- //printk("Removing gfn: %016llx from acess page track pool\n", gfn);
+ pr_warn("Adding gfn: %016llx to access page track pool\n", gfn);
}
+
if (mode == KVM_PAGE_TRACK_WRITE) {
- //printk("Removing gfn: %016llx from write page track pool\n", gfn);
+ pr_warn("Adding gfn: %016llx to write page track pool\n", gfn);
}
- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if (slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ if (slot != NULL && !kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
write_lock(&vcpu->kvm->mmu_lock);
- kvm_slot_page_track_remove_page(vcpu->kvm, slot, gfn, mode);
+ kvm_slot_page_track_add_page(vcpu->kvm, slot, gfn, mode);
write_unlock(&vcpu->kvm->mmu_lock);
ret = true;
} else {
- printk("Failed to untrack %016llx because ", gfn);
+ pr_warn("Failed to track %016llx because ", gfn);
if (slot == NULL) {
printk(KERN_CONT "slot was null");
- } else if (!kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
- printk(KERN_CONT "page track was not active");
+ }
+ if (kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
+ printk(KERN_CONT "page is already tracked");
}
printk(KERN_CONT "\n");
}
+
srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
return ret;
}
-EXPORT_SYMBOL(sevstep_untrack_single_page);
+EXPORT_SYMBOL(sevstep_track_single_page);
bool
-sevstep_reset_accessed_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+sevstep_untrack_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+ enum kvm_page_track_mode mode)
{
int idx;
bool ret;
@@ -101,23 +96,38 @@ sevstep_reset_accessed_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
ret = false;
idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+ if (mode == KVM_PAGE_TRACK_ACCESS) {
+ pr_warn("Removing gfn: %016llx from acess page track pool\n", gfn);
+ }
+ if (mode == KVM_PAGE_TRACK_WRITE) {
+ pr_warn("Removing gfn: %016llx from write page track pool\n", gfn);
+ }
+
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if( slot != NULL ) {
+ if (slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
write_lock(&vcpu->kvm->mmu_lock);
- //Vincent: The kvm mmu function now requires min_level
- //We want all pages to protected so we do PG_LEVEL_4K
- //https://patchwork.kernel.org/project/kvm/patch/20210416082511.2856-2-zhukeqian1@huawei.com/
- sevstep_kvm_mmu_slot_gfn_protect(vcpu->kvm,slot,gfn,PG_LEVEL_4K,KVM_PAGE_TRACK_RESET_ACCESSED);
+ kvm_slot_page_track_remove_page(vcpu->kvm, slot, gfn, mode);
write_unlock(&vcpu->kvm->mmu_lock);
ret = true;
+ } else {
+ pr_warn("Failed to untrack %016llx because ", gfn);
+ if (slot == NULL) {
+ printk(KERN_CONT "slot was null");
+ } else if (!kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
+ printk(KERN_CONT "page track was not active");
+ }
+ printk(KERN_CONT "\n");
}
+
srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
return ret;
}
-EXPORT_SYMBOL(sevstep_reset_accessed_on_page);
+EXPORT_SYMBOL(sevstep_untrack_single_page);
bool
-sevstep_clear_nx_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+sevstep_reset_accessed_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
int idx;
bool ret;
@@ -125,25 +135,27 @@ sevstep_clear_nx_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
ret = false;
idx = srcu_read_lock(&vcpu->kvm->srcu);
+
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if( slot != NULL ) {
+ if (slot != NULL) {
write_lock(&vcpu->kvm->mmu_lock);
- //Vincent: The kvm mmu function now requires min_level
- //We want all pages to protected so we do PG_LEVEL_4K
- //https://patchwork.kernel.org/project/kvm/patch/20210416082511.2856-2-zhukeqian1@huawei.com/
+ // Vincent: The kvm mmu function now requires min_level
+ // We want all pages to be protected so we do PG_LEVEL_4K
+ // https://patchwork.kernel.org/project/kvm/patch/20210416082511.2856-2-zhukeqian1@huawei.com/
sevstep_kvm_mmu_slot_gfn_protect(vcpu->kvm, slot, gfn,
- PG_LEVEL_4K, KVM_PAGE_TRACK_RESET_EXEC);
+ PG_LEVEL_4K, KVM_PAGE_TRACK_RESET_ACCESSED);
write_unlock(&vcpu->kvm->mmu_lock);
ret = true;
}
+
srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
return ret;
}
-EXPORT_SYMBOL(sevstep_clear_nx_on_page);
+EXPORT_SYMBOL(sevstep_reset_accessed_on_page);
bool
-sevstep_track_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
- enum kvm_page_track_mode mode)
+sevstep_clear_nx_on_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
int idx;
bool ret;
@@ -151,53 +163,41 @@ sevstep_track_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
ret = false;
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (mode == KVM_PAGE_TRACK_ACCESS) {
- //printk_ratelimited("Adding gfn: %016llx to acess page track pool\n", gfn);
- //printk("Adding gfn: %016llx to acess page track pool\n", gfn);
- }
- if (mode == KVM_PAGE_TRACK_WRITE) {
- //printk_ratelimited("Adding gfn: %016llx to write page track pool\n", gfn);
- }
- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- if (slot != NULL && !kvm_slot_page_track_is_active(vcpu->kvm,slot, gfn, mode)) {
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ if (slot != NULL) {
write_lock(&vcpu->kvm->mmu_lock);
- kvm_slot_page_track_add_page(vcpu->kvm, slot, gfn, mode);
+ // Vincent: The kvm mmu function now requires min_level
+ // We want all pages to be protected so we do PG_LEVEL_4K
+ // https://patchwork.kernel.org/project/kvm/patch/20210416082511.2856-2-zhukeqian1@huawei.com/
+ sevstep_kvm_mmu_slot_gfn_protect(vcpu->kvm, slot, gfn,
+ PG_LEVEL_4K, KVM_PAGE_TRACK_RESET_EXEC);
write_unlock(&vcpu->kvm->mmu_lock);
ret = true;
-
- } else {
-
- printk("Failed to track %016llx because ", gfn);
- if (slot == NULL) {
- printk(KERN_CONT "slot was null");
- }
- if (kvm_slot_page_track_is_active(vcpu->kvm, slot, gfn, mode)) {
- printk(KERN_CONT "page is already tracked");
- }
- printk(KERN_CONT "\n");
}
+
srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
return ret;
}
-EXPORT_SYMBOL(sevstep_track_single_page);
+EXPORT_SYMBOL(sevstep_clear_nx_on_page);
long
sevstep_start_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
{
- long count = 0;
- u64 iterator, iterat_max;
struct kvm_memory_slot *slot;
+ struct kvm_memory_slot *first_memslot;
+ struct rb_node *node;
+ u64 iterator, iterat_max;
+ long count = 0;
int idx;
- //Vincent: Memslots interface changed into a rb tree, see
- //here: https://lwn.net/Articles/856392/
- //and here: https://lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
- //Thus we use instead of
- //iterat_max = vcpu->kvm->memslots[0]->memslots[0].base_gfn
- // + vcpu->kvm->memslots[0]->memslots[0].npages;
- struct rb_node *node;
- struct kvm_memory_slot *first_memslot;
+ // Vincent: Memslots interface changed to an rb tree, see
+ // here: https://lwn.net/Articles/856392/
+ // and here: https://lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
+ // Thus we use the rb tree below instead of
+ // iterat_max = vcpu->kvm->memslots[0]->memslots[0].base_gfn
+ // + vcpu->kvm->memslots[0]->memslots[0].npages;
node = rb_last(&(vcpu->kvm->memslots[0]->gfn_tree));
first_memslot = container_of(node, struct kvm_memory_slot, gfn_node[0]);
iterat_max = first_memslot->base_gfn + first_memslot->npages;
@@ -221,20 +221,19 @@ EXPORT_SYMBOL(sevstep_start_tracking);
long
sevstep_stop_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
{
- long count = 0;
- u64 iterator, iterat_max;
struct kvm_memory_slot *slot;
+ struct kvm_memory_slot *first_memslot;
+ struct rb_node *node;
+ u64 iterator, iterat_max;
+ long count = 0;
int idx;
-
- //Vincent: Memslots interface changed into a rb tree, see
- //here: https://lwn.net/Articles/856392/
- //and here: https://lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
- //Thus we use instead of
- //iterat_max = vcpu->kvm->memslots[0]->memslots[0].base_gfn
- // + vcpu->kvm->memslots[0]->memslots[0].npages;
- struct rb_node *node;
- struct kvm_memory_slot *first_memslot;
+ // Vincent: Memslots interface changed to an rb tree, see
+ // here: https://lwn.net/Articles/856392/
+ // and here: https://lore.kernel.org/all/cover.1632171478.git.maciej.szmigiero@oracle.com/T/#u
+ // Thus we use the rb tree below instead of
+ // iterat_max = vcpu->kvm->memslots[0]->memslots[0].base_gfn
+ // + vcpu->kvm->memslots[0]->memslots[0].npages;
node = rb_last(&(vcpu->kvm->memslots[0]->gfn_tree));
first_memslot = container_of(node, struct kvm_memory_slot, gfn_node[0]);
iterat_max = first_memslot->base_gfn + first_memslot->npages;
@@ -242,12 +241,13 @@ sevstep_stop_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode)
{
idx = srcu_read_lock(&vcpu->kvm->srcu);
slot = kvm_vcpu_gfn_to_memslot(vcpu, iterator);
- //Vincent: I think see here https://patchwork.kernel.org/project/kvm/patch/20210924163152.289027-22-pbonzini@redhat.com/
- if ( slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, iterator, mode)) {
- write_lock(&vcpu->kvm->mmu_lock);
- kvm_slot_page_track_remove_page(vcpu->kvm, slot, iterator, mode);
- write_unlock(&vcpu->kvm->mmu_lock);
- count++;
+ // Vincent: I think the relevant change is
+ // https://patchwork.kernel.org/project/kvm/patch/20210924163152.289027-22-pbonzini@redhat.com/
+ if (slot != NULL && kvm_slot_page_track_is_active(vcpu->kvm, slot, iterator, mode)) {
+ write_lock(&vcpu->kvm->mmu_lock);
+ kvm_slot_page_track_remove_page(vcpu->kvm, slot, iterator, mode);
+ write_unlock(&vcpu->kvm->mmu_lock);
+ count++;
}
srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
diff --git a/sevstep/sevstep.h b/sevstep/sevstep.h
@@ -19,16 +19,14 @@ bool sevstep_rmap_protect(struct kvm_rmap_head *rmap_head,
bool sevstep_kvm_mmu_slot_gfn_protect(struct kvm *kvm, struct kvm_memory_slot *slot,
uint64_t gfn, int min_level, enum kvm_page_track_mode mode);
-bool sevstep_untrack_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
- enum kvm_page_track_mode mode);
bool sevstep_track_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
enum kvm_page_track_mode mode);
+bool sevstep_untrack_single_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+ enum kvm_page_track_mode mode);
bool sevstep_reset_accessed_on_page(struct kvm_vcpu *vcpu, gfn_t gfn);
bool sevstep_clear_nx_on_page(struct kvm_vcpu *vcpu, gfn_t gfn);
long sevstep_start_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
long sevstep_stop_tracking(struct kvm_vcpu *vcpu, enum kvm_page_track_mode mode);
-void sevstep_setup_pmcs(void);
-
int sevstep_get_rip_kvm_vcpu(struct kvm_vcpu *vcpu, uint64_t *rip);
diff --git a/sevstep/uspt.c b/sevstep/uspt.c
@@ -30,12 +30,19 @@ typedef struct {
bool error_occured;
} batch_track_state_t;
+typedef struct {
+ uint64_t idx_for_last_perf_reading;
+ uint64_t last_perf_reading;
+ uint64_t delta_valid_idx;
+ uint64_t delta;
+} perf_state_t;
+
// crude sync mechanism. don't know a good way to act on errors yet.
-uint64_t last_sent_event_id = 1;
-uint64_t last_acked_event_id = 1;
+static uint64_t last_sent_event_id = 1;
+static uint64_t last_acked_event_id = 1;
DEFINE_RWLOCK(event_lock);
-page_fault_event_t sent_event;
+static page_fault_event_t sent_event;
static int have_event = 0;
static bool get_rip = true;
@@ -45,14 +52,7 @@ static int inited = 0;
DEFINE_SPINLOCK(batch_track_state_lock);
static batch_track_state_t batch_track_state;
-typedef struct {
- uint64_t idx_for_last_perf_reading;
- uint64_t last_perf_reading;
- uint64_t delta_valid_idx;
- uint64_t delta;
-} perf_state_t;
-
-perf_state_t perf_state;
+static perf_state_t perf_state;
static uint64_t perf_state_update_and_get_delta(uint64_t current_event_idx);
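Not part of the patch, just for orientation: the perf_state_t fields moved above suggest delta bookkeeping roughly like the sketch below. This is an assumption about intent only — the real perf_state_update_and_get_delta() is defined later in this file and may differ; the counter value would come from the retired-instruction PMC programmed via cachepc_init_pmc().

/* Sketch only (not the real implementation): track the retired-instruction
 * delta between two consecutive page-fault events. */
static uint64_t example_perf_delta(perf_state_t *ps, uint64_t event_idx,
	uint64_t reading)
{
	/* deltas are only meaningful for two consecutive event indices */
	if (event_idx != ps->idx_for_last_perf_reading + 1)
		pr_warn("perf_state: queried for idx %llu, last was %llu\n",
			event_idx, ps->idx_for_last_perf_reading);

	ps->delta = reading - ps->last_perf_reading;
	ps->delta_valid_idx = event_idx;
	ps->last_perf_reading = reading;
	ps->idx_for_last_perf_reading = event_idx;

	return ps->delta;
}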
@@ -69,7 +69,7 @@ sevstep_uspt_clear(void)
}
int
-sevstep_uspt_initialize(int pid,bool should_get_rip)
+sevstep_uspt_initialize(int pid, bool should_get_rip)
{
write_lock(&event_lock);
inited = 1;
@@ -109,8 +109,7 @@ sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code,
read_lock(&event_lock);
if (!sevstep_uspt_is_initialiized()) {
- printk("userspace_page_track_signals: "
- "sevstep_uspt_send_and_block : ctx not initialized!\n");
+ pr_warn("sevstep_uspt_send_and_block: ctx not initialized!\n");
read_unlock(&event_lock);
return 1;
}
@@ -118,7 +117,8 @@ sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code,
write_lock(&event_lock);
if (last_sent_event_id != last_acked_event_id) {
- printk("event id_s out of sync, aborting. Fix this later\n");
+ pr_warn("sevstep_uspt_send_and_block: "
+ "event id_s out of sync, aborting. Fix this later\n");
write_unlock(&event_lock);
return 1;
} else {
@@ -136,7 +136,7 @@ sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code,
// for poll based system;
have_event = 1;
sent_event = message_for_user;
- // printk("sevstep_uspt_send_and_block sending event %llu\n",sent_event.id);
+ // printk("sevstep_uspt_send_and_block sending event %llu\n", sent_event.id);
write_unlock(&event_lock);
@@ -145,7 +145,9 @@ sevstep_uspt_send_and_block(uint64_t faulted_gpa, uint32_t error_code,
abort_after = ktime_get() + 1000000000ULL; // 1 sec in nanosecond
while (!sevstep_uspt_is_event_done(sent_event.id)) {
if (ktime_get() > abort_after) {
- printk("Waiting for ack of event %llu timed out, continuing\n",sent_event.id);
+ pr_warn("sevstep_uspt_send_and_block: "
+ "Waiting for ack of event %llu timed out, "
+ "continuing\n",sent_event.id);
return 3;
}
}
@@ -201,7 +203,8 @@ sevstep_uspt_handle_ack_event_ioctl(ack_event_t event)
last_acked_event_id = last_sent_event_id;
} else {
err = 1;
- printk("last sent event id is %llu but received ack for %llu\n",
+ pr_warn("sevstep_uspt_handle_ack_event_ioctl: "
+ "last sent event id is %llu but received ack for %llu\n",
last_sent_event_id, event.id);
}
write_unlock(&event_lock);
@@ -227,7 +230,7 @@ perf_state_update_and_get_delta(uint64_t current_event_idx)
/* otherwise update, but logic is only valid for two consecutive events */
if (current_event_idx != perf_state.idx_for_last_perf_reading+1) {
- printk_ratelimited(KERN_CRIT "perf_state_update_and_get_delta: "
+ pr_warn("perf_state_update_and_get_delta: "
"last reading was for idx %llu but was queried for %llu\n",
perf_state.idx_for_last_perf_reading, current_event_idx);
}
@@ -259,8 +262,8 @@ sevstep_uspt_batch_tracking_start(int tracking_type,uint64_t expected_events,
spin_lock(&batch_track_state_lock);
if (batch_track_state.is_active) {
- printk("userspace_page_track_signals: overwriting "
- "active batch track config!\n");
+ pr_warn("sevstep_uspt_batch_tracking_start: "
+ "overwriting active batch track config!\n");
if (batch_track_state.events != NULL ) {
vfree(batch_track_state.events);
}
@@ -269,11 +272,12 @@ sevstep_uspt_batch_tracking_start(int tracking_type,uint64_t expected_events,
spin_unlock(&batch_track_state_lock);
buffer_size = expected_events * sizeof(page_fault_event_t);
- printk("sevstep_uspt_batch_tracking_start trying to alloc %llu "
- "bytes buffer for events\n", buffer_size);
+ pr_warn("sevstep_uspt_batch_tracking_start: "
+ "trying to alloc %llu bytes buffer for events\n",
+ buffer_size);
events = vmalloc(buffer_size);
if (events == NULL) {
- printk("userspace_page_track_signals: "
+ pr_warn("sevstep_uspt_batch_tracking_start: "
"faperf_cpuiled to alloc %llu bytes for event buffer\n",
buffer_size);
return 1; // note: lock not held here
@@ -326,24 +330,23 @@ sevstep_uspt_batch_tracking_handle_retrack(struct kvm_vcpu* vcpu,
}
if (smp_processor_id() != batch_track_state.perf_cpu) {
- printk("sevstep_uspt_batch_tracking_handle_retrack: perf was "
+ pr_warn("sevstep_uspt_batch_tracking_handle_retrack: perf was "
"programmed on logical cpu %d but handler was called "
"on %d. Did you forget to pin the vcpu thread?\n",
batch_track_state.perf_cpu, smp_processor_id());
}
ret_instr_delta = perf_state_update_and_get_delta(batch_track_state.event_next_idx);
-
// the faulting instruction is probably the same as on the last fault;
// try to add the current fault to the retrack log and return.
// For the first event idx we do not have a valid ret_instr_delta.
// Retracking for the first time is fine; if we loop, we end up here
// again but with a valid delta on one of the next events
- if( (ret_instr_delta < 2) && ( batch_track_state.event_next_idx != 0) ) {
+ if ((ret_instr_delta < 2) && (batch_track_state.event_next_idx != 0)) {
next_idx = batch_track_state.gfn_retrack_backlog_next_idx;
if (next_idx >= ARRLEN(batch_track_state.gfn_retrack_backlog)) {
- printk("sevstep_uspt_batch_tracking_handle_retrack: retrack "
- "backlog full, dropping retrack for fault "
+ pr_warn("sevstep_uspt_batch_tracking_handle_retrack: "
+ "retrack backlog full, dropping retrack for fault "
"at 0x%llx\n", current_fault_gfn);
} else {
batch_track_state.gfn_retrack_backlog[next_idx] = current_fault_gfn;
@@ -379,7 +382,8 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
spin_lock(&batch_track_state_lock);
if (!batch_track_state.is_active) {
- printk_ratelimited("userspace_page_track_signals: got save but batch tracking is not active!\n");
+ pr_warn("sevstep_uspt_batch_tracking_save: "
+ "got save but batch tracking is not active!\n");
batch_track_state.error_occured = true;
spin_unlock(&batch_track_state_lock);
return 1;
@@ -387,14 +391,14 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
if (batch_track_state.event_next_idx >= batch_track_state.events_size) {
- printk_ratelimited("userspace_page_track_signals: events buffer is full!\n");
+ pr_warn("sevstep_uspt_batch_tracking_save: events buffer is full!\n");
batch_track_state.error_occured = true;
spin_unlock(&batch_track_state_lock);
return 1;
}
if (smp_processor_id() != batch_track_state.perf_cpu) {
- printk("sevstep_uspt_batch_tracking_handle_retrack: perf was "
+ pr_warn("sevstep_uspt_batch_tracking_save: perf was "
"programmed on logical cpu %d but handler was called "
"on %d. Did you forget to pin the vcpu thread?\n",
batch_track_state.perf_cpu, smp_processor_id());
@@ -403,7 +407,7 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
if (batch_track_state.events == NULL) {
- printk(KERN_CRIT "userspace_page_track_signals: events buf was "
+ pr_warn("sevstep_uspt_batch_tracking_save: events buf was "
"NULL but \"is_active\" was set! This should never happen!!!\n");
spin_unlock(&batch_track_state_lock);
return 1;
@@ -423,7 +427,7 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
if (batch_track_state.gfn_retrack_backlog_next_idx
> ARRLEN(batch_track_state.gfn_retrack_backlog)) {
- printk_ratelimited("userspace_page_track_signals: "
+ pr_warn("sevstep_uspt_batch_tracking_save: "
"gfn retrack backlog overflow!\n");
batch_track_state.error_occured = true;
spin_unlock(&batch_track_state_lock);
@@ -431,6 +435,7 @@ sevstep_uspt_batch_tracking_save(uint64_t faulted_gpa, uint32_t error_code,
}
spin_unlock(&batch_track_state_lock);
+
return 0;
}
@@ -439,7 +444,7 @@ sevstep_uspt_batch_tracking_stop(page_fault_event_t* results, uint64_t len, bool
{
spin_lock(&batch_track_state_lock);
if (!batch_track_state.is_active) {
- printk("userspace_page_track_signals: batch tracking not active\n");
+ pr_warn("sevstep_uspt: batch tracking not active\n");
spin_unlock(&batch_track_state_lock);
return 1;
@@ -447,8 +452,8 @@ sevstep_uspt_batch_tracking_stop(page_fault_event_t* results, uint64_t len, bool
batch_track_state.is_active = false;
if (len > batch_track_state.event_next_idx) {
- printk("userspace_page_track_signals: requested %llu "
- "events but got only %llu\n",
+ pr_warn("sevstep_uspt_batch_tracking_stop: "
+ "requested %llu events but got only %llu\n",
len, batch_track_state.event_next_idx);
spin_unlock(&batch_track_state_lock);
return 1;