From: Paolo Bonzini <pbonzini@redhat.com>
Date: Mon, 24 Jun 2019 13:06:21 +0200
Subject: KVM: x86: make FNAME(fetch) and __direct_map more similar
Git-commit: 3fcf2d1bdeb6a513523cb2c77012a6b047aa859c
Patch-mainline: v5.3-rc1
References: bsc#1117665

These two functions do essentially the same work through
kvm_mmu_get_page, link_shadow_page and mmu_set_spte; yet, for
historical reasons, their code looks very different.  Take the best
of each and make them very similar, so that changes that apply to
both of them are easy to follow.
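
Both walkers now share the same shape: walk down to the target
level, allocating and linking any missing intermediate shadow page
along the way, then install the leaf spte exactly once after the
walk.  In sketch form (simplified, argument lists elided; the hunks
below have the real code):

	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == level)
			break;		/* reached the target level */

		/* allocate and link a missing intermediate page table */
		if (/* *it.sptep is not present */) {
			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
					      it.level - 1, ...);
			/* link sp below it.sptep */
		}
	}

	/* install the leaf spte once, after the walk */
	mmu_set_spte(vcpu, it.sptep, ..., level, base_gfn, pfn, ...);

Since base_gfn is recomputed at each iteration, the value left in it
when the walk stops is already aligned to the mapping level, so it
can be passed straight to mmu_set_spte() instead of deriving a
pseudo_gfn/direct_gfn separately at each level.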

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Acked-by: Joerg Roedel <jroedel@suse.de>
---
 arch/x86/kvm/mmu.c         |   56 ++++++++++++++++++++-------------------------
 arch/x86/kvm/paging_tmpl.h |   27 ++++++++++-----------
 2 files changed, 38 insertions(+), 45 deletions(-)

--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2273,50 +2273,46 @@ static void direct_pte_prefetch(struct k
 	__direct_pte_prefetch(vcpu, sp, sptep);
 }
 
-static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			int map_writable, int level, gfn_t gfn, pfn_t pfn,
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
+			int map_writable, int level, pfn_t pfn,
 			bool prefault)
 {
-	struct kvm_shadow_walk_iterator iterator;
+	struct kvm_shadow_walk_iterator it;
 	struct kvm_mmu_page *sp;
-	int pt_write = 0;
-	gfn_t pseudo_gfn;
+	unsigned pte_access = ACC_ALL;
+	int ret = 0;
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	gfn_t base_gfn = gfn;
 
-	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
-		if (iterator.level == level) {
-			unsigned pte_access = ACC_ALL;
-
-			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
-				     0, write, 1, &pt_write,
-				     level, gfn, pfn, prefault, map_writable);
-			direct_pte_prefetch(vcpu, iterator.sptep);
-			++vcpu->stat.pf_fixed;
+	for_each_shadow_entry(vcpu, gpa, it) {
+		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+		if (it.level == level)
 			break;
-		}
 
-		drop_large_spte(vcpu, iterator.sptep);
-		if (*iterator.sptep == shadow_trap_nonpresent_pte) {
-			u64 base_addr = iterator.addr;
-
-			base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
-			pseudo_gfn = base_addr >> PAGE_SHIFT;
-			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
-					      iterator.level - 1,
-					      1, ACC_ALL, iterator.sptep);
+		drop_large_spte(vcpu, it.sptep);
+		if (*it.sptep == shadow_trap_nonpresent_pte) {
+			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
+					      it.level - 1, 1, ACC_ALL, it.sptep);
 			if (!sp) {
 				pgprintk("nonpaging_map: ENOMEM\n");
 				kvm_release_pfn_clean(pfn);
 				return -ENOMEM;
 			}
-
-			__set_spte(iterator.sptep,
+			__set_spte(it.sptep,
 				   __pa(sp->spt)
 				   | PT_PRESENT_MASK | PT_WRITABLE_MASK
 				   | shadow_user_mask | shadow_x_mask
 				   | shadow_accessed_mask);
 		}
 	}
-	return pt_write;
+
+	mmu_set_spte(vcpu, it.sptep, ACC_ALL, pte_access,
+		     0, write, 1, &ret,
+		     level, base_gfn, pfn, prefault, map_writable);
+	direct_pte_prefetch(vcpu, it.sptep);
+	++vcpu->stat.pf_fixed;
+
+	return ret;
 }
 
 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
@@ -2430,8 +2426,7 @@ static int nonpaging_map(struct kvm_vcpu
 	kvm_mmu_free_some_pages(vcpu);
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
-	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
-			 prefault);
+	r = __direct_map(vcpu, v, write, map_writable, level, pfn, prefault);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 
@@ -2802,8 +2797,7 @@ static int tdp_page_fault(struct kvm_vcp
 	kvm_mmu_free_some_pages(vcpu);
 	if (likely(!force_pt_level))
 		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
-	r = __direct_map(vcpu, gpa, write, map_writable,
-			 level, gfn, pfn, prefault);
+	r = __direct_map(vcpu, gpa, write, map_writable, level, pfn, prefault);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return r;
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -486,6 +486,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu
 	int top_level;
 	unsigned direct_access;
 	struct kvm_shadow_walk_iterator it;
+	gfn_t base_gfn;
 
 	if (!is_present_gpte(gw->ptes[gw->level - 1]))
 		return NULL;
@@ -532,28 +533,27 @@
 			link_shadow_page(it.sptep, sp);
 	}
 
-	for (;
-	     shadow_walk_okay(&it) && it.level > hlevel;
-	     shadow_walk_next(&it)) {
-		gfn_t direct_gfn;
+	base_gfn = gw->gfn;
+	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
+		base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+		if (it.level == hlevel)
+			break;
 
 		validate_direct_spte(vcpu, it.sptep, direct_access);
 
 		drop_large_spte(vcpu, it.sptep);
 
-		if (is_shadow_present_pte(*it.sptep))
-			continue;
-
-		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
-
-		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
-				      true, direct_access, it.sptep);
-		link_shadow_page(it.sptep, sp);
+		if (!is_shadow_present_pte(*it.sptep)) {
+			sp = kvm_mmu_get_page(vcpu, base_gfn, addr, it.level-1,
+					      true, direct_access, it.sptep);
+			link_shadow_page(it.sptep, sp);
+		}
 	}
 
 	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
 		     user_fault, write_fault, dirty, ptwrite, it.level,
-		     gw->gfn, pfn, prefault, map_writable);
+		     base_gfn, pfn, prefault, map_writable);
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
+	++vcpu->stat.pf_fixed;
 
 	return it.sptep;
@@ -655,7 +655,6 @@ static int FNAME(page_fault)(struct kvm_
 	if (!write_pt)
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 
-	++vcpu->stat.pf_fixed;
 	trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
