cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kvm_onhyperv.c (2838B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM L1 hypervisor optimizations on Hyper-V.
 */

#include <linux/kvm_host.h>
#include <asm/mshyperv.h>

#include "hyperv.h"
#include "kvm_onhyperv.h"

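/*
 * Callback for hyperv_flush_guest_mapping_range(): record the GFN range
 * being flushed in the Hyper-V flush list.
 */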
static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
		void *data)
{
	struct kvm_tlb_range *range = data;

	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
			range->pages);
}

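/*
 * Flush a single root: issue the ranged flush hypercall when a GFN range
 * is given, otherwise flush all of the root's guest mappings.
 */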
static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
					   struct kvm_tlb_range *range)
{
	if (range)
		return hyperv_flush_guest_mapping_range(root_tdp,
				kvm_fill_hv_flush_list_func, (void *)range);
	else
		return hyperv_flush_guest_mapping(root_tdp);
}

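/*
 * Flush the remote TLBs for a VM.  If every vCPU has converged on a
 * single valid root (tracked in kvm_arch->hv_root_tdp), only that root
 * is flushed; otherwise each unique valid root is flushed in turn.
 */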
int hv_remote_flush_tlb_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	struct kvm_arch *kvm_arch = &kvm->arch;
	struct kvm_vcpu *vcpu;
	int ret = 0, nr_unique_valid_roots;
	unsigned long i;
	hpa_t root;

	spin_lock(&kvm_arch->hv_root_tdp_lock);

	if (!VALID_PAGE(kvm_arch->hv_root_tdp)) {
		nr_unique_valid_roots = 0;

		/*
		 * Flush all valid roots, and see if all vCPUs have converged
		 * on a common root, in which case future flushes can skip the
		 * loop and flush the common root.
		 */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			root = vcpu->arch.hv_root_tdp;
			if (!VALID_PAGE(root) || root == kvm_arch->hv_root_tdp)
				continue;

			/*
			 * Set the tracked root to the first valid root.  Keep
			 * this root for the entirety of the loop even if more
			 * roots are encountered as a low effort optimization
			 * to avoid flushing the same (first) root again.
			 */
			if (++nr_unique_valid_roots == 1)
				kvm_arch->hv_root_tdp = root;

			if (!ret)
				ret = hv_remote_flush_root_tdp(root, range);

			/*
			 * Stop processing roots if a failure occurred and
			 * multiple valid roots have already been detected.
			 */
			if (ret && nr_unique_valid_roots > 1)
				break;
		}

		/*
		 * The optimized flush of a single root can't be used if there
		 * are multiple valid roots (obviously).
		 */
		if (nr_unique_valid_roots > 1)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
	} else {
		ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
	}

	spin_unlock(&kvm_arch->hv_root_tdp_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hv_remote_flush_tlb_with_range);

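/* Full flush: no GFN range, flush all guest mappings for the VM's roots. */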
int hv_remote_flush_tlb(struct kvm *kvm)
{
	return hv_remote_flush_tlb_with_range(kvm, NULL);
}
EXPORT_SYMBOL_GPL(hv_remote_flush_tlb);

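/*
 * Record a vCPU's new root.  If the new root differs from the tracked
 * common root, the vCPUs' roots have diverged: invalidate the common
 * root so the optimized single-root flush is not used until the vCPUs
 * converge again.
 */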
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{
	struct kvm_arch *kvm_arch = &vcpu->kvm->arch;

	if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
		spin_lock(&kvm_arch->hv_root_tdp_lock);
		vcpu->arch.hv_root_tdp = root_tdp;
		if (root_tdp != kvm_arch->hv_root_tdp)
			kvm_arch->hv_root_tdp = INVALID_PAGE;
		spin_unlock(&kvm_arch->hv_root_tdp_lock);
	}
}
EXPORT_SYMBOL_GPL(hv_track_root_tdp);
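
These helpers only take effect once a vendor backend installs them as KVM's
remote-flush callbacks during hardware setup. A minimal sketch of that wiring,
modeled on the VMX path; the surrounding setup code is paraphrased for
illustration, not quoted from this tree:

#if IS_ENABLED(CONFIG_HYPERV)
	/*
	 * Sketch: during hardware setup, opt in to Hyper-V's enlightened
	 * guest-mapping flush when the hypervisor advertises support.
	 */
	if (enable_ept && (ms_hyperv.nested_features &
			   HV_X64_NESTED_GUEST_MAPPING_FLUSH)) {
		vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
		vmx_x86_ops.tlb_remote_flush_with_range =
				hv_remote_flush_tlb_with_range;
	}
#endif

With the callbacks installed, the backend's load_mmu_pgd hook is expected to
call hv_track_root_tdp() with each vCPU's new root so the common-root
optimization above can engage.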