KVM: MMU: check rmap for every spte

Read-only sptes also have reverse mappings, so fix the code to check them as well,
and rename the functions to match what they now do.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Xiao Guangrong 2010-08-28 19:20:47 +08:00 committed by Avi Kivity
parent 9ad17b1001
commit 0beb8d6604

@@ -3644,40 +3644,38 @@ void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
         struct kvm_mmu_page *rev_sp;
         gfn_t gfn;
 
-        if (is_writable_pte(*sptep)) {
-                rev_sp = page_header(__pa(sptep));
-                gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
+        rev_sp = page_header(__pa(sptep));
+        gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
 
-                if (!gfn_to_memslot(kvm, gfn)) {
-                        if (!printk_ratelimit())
-                                return;
-                        printk(KERN_ERR "%s: no memslot for gfn %llx\n",
-                                        audit_msg, gfn);
-                        printk(KERN_ERR "%s: index %ld of sp (gfn=%llx)\n",
-                               audit_msg, (long int)(sptep - rev_sp->spt),
-                                        rev_sp->gfn);
-                        dump_stack();
+        if (!gfn_to_memslot(kvm, gfn)) {
+                if (!printk_ratelimit())
                         return;
-                }
+                printk(KERN_ERR "%s: no memslot for gfn %llx\n",
+                                audit_msg, gfn);
+                printk(KERN_ERR "%s: index %ld of sp (gfn=%llx)\n",
+                       audit_msg, (long int)(sptep - rev_sp->spt),
+                                rev_sp->gfn);
+                dump_stack();
+                return;
+        }
 
-                rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
-                if (!*rmapp) {
-                        if (!printk_ratelimit())
-                                return;
-                        printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
-                                        audit_msg, *sptep);
-                        dump_stack();
-                }
+        rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
+        if (!*rmapp) {
+                if (!printk_ratelimit())
+                        return;
+                printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
+                                audit_msg, *sptep);
+                dump_stack();
         }
 }
 
-void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
+void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu)
 {
         mmu_spte_walk(vcpu, inspect_spte_has_rmap);
 }
 
-static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
+static void check_mappings_rmap(struct kvm_vcpu *vcpu)
 {
         struct kvm_mmu_page *sp;
         int i;
@@ -3689,12 +3687,9 @@ static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
                         continue;
 
                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-                        u64 ent = pt[i];
-
-                        if (!(ent & PT_PRESENT_MASK))
-                                continue;
-                        if (!is_writable_pte(ent))
-                                continue;
+                        if (!is_rmap_spte(pt[i]))
+                                continue;
+
                         inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
                 }
         }
@@ -3703,7 +3698,7 @@ static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
 
 static void audit_rmap(struct kvm_vcpu *vcpu)
 {
-        check_writable_mappings_rmap(vcpu);
+        check_mappings_rmap(vcpu);
         count_rmaps(vcpu);
 }
 
@@ -3746,7 +3741,7 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
         audit_write_protection(vcpu);
         if (strcmp("pre pte write", audit_msg) != 0)
                 audit_mappings(vcpu);
-        audit_writable_sptes_have_rmaps(vcpu);
+        audit_sptes_have_rmaps(vcpu);
         dbg = olddbg;
 }
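The functional effect of the patch is that the rmap audit now visits every spte that can have a reverse mapping, not only writable ones, so a present but read-only spte with a missing rmap is no longer silently skipped. The stand-alone sketch below models that difference in user space; it is illustration only, not kernel code, and the bit layout and helper names (TOY_PRESENT, old_filter, new_filter) are assumptions made for this example.

```c
#include <stdint.h>
#include <stdio.h>

/* Toy spte model; the real KVM spte bit layout is different. */
#define TOY_PRESENT  (1ULL << 0)
#define TOY_WRITABLE (1ULL << 1)

/* Old audit filter: only present *and* writable sptes were inspected. */
static int old_filter(uint64_t spte)
{
	return (spte & TOY_PRESENT) && (spte & TOY_WRITABLE);
}

/* New audit filter: every spte that can have a reverse mapping is
 * inspected; in this toy model that is simply every present spte,
 * read-only entries included. */
static int new_filter(uint64_t spte)
{
	return (spte & TOY_PRESENT) != 0;
}

int main(void)
{
	/* A mix of non-present, read-only and writable entries. */
	uint64_t pt[] = {
		0,                          /* not present: skipped by both filters */
		TOY_PRESENT,                /* read-only: only the new filter sees it */
		TOY_PRESENT | TOY_WRITABLE, /* writable: both filters see it */
	};
	int old_hits = 0, new_hits = 0;

	for (unsigned i = 0; i < sizeof(pt) / sizeof(pt[0]); ++i) {
		old_hits += old_filter(pt[i]);
		new_hits += new_filter(pt[i]);
	}

	/* With this table: old filter inspects 1 entry, new filter inspects 2;
	 * the read-only spte's rmap was never checked before the patch. */
	printf("old filter: %d, new filter: %d\n", old_hits, new_hits);
	return 0;
}
```

In the real patch the broader predicate is `is_rmap_spte(pt[i])` in check_mappings_rmap(), and inspect_spte_has_rmap() drops its `is_writable_pte(*sptep)` guard, so the walk done by mmu_spte_walk() audits read-only sptes as well.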