diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index bc11402df83b..47131b92b990 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -66,8 +66,8 @@ void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 				struct kvm_mmu *context);
 void kvm_init_mmu(struct kvm_vcpu *vcpu);
 
-void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
-			     gpa_t nested_cr3);
+void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+			     unsigned long cr4, u64 efer, gpa_t nested_cr3);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty, gpa_t new_eptp);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index dffa9486e642..f3c4c6349ddc 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4659,8 +4659,8 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 }
 
 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
-				    u32 cr0, u32 cr4, u32 efer,
-				    union kvm_mmu_role new_role)
+				    unsigned long cr0, unsigned long cr4,
+				    u64 efer, union kvm_mmu_role new_role)
 {
 	if (!(cr0 & X86_CR0_PG))
 		nonpaging_init_context(vcpu, context);
@@ -4675,7 +4675,8 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *conte
 	reset_shadow_zero_bits_mask(vcpu, context);
 }
 
-static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
+static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+				unsigned long cr4, u64 efer)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_role new_role =
@@ -4697,8 +4698,8 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
 	return role;
 }
 
-void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
-			     gpa_t nested_cr3)
+void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
 {
 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
 	union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index dca20f949b63..9f0e7ed672b2 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1244,8 +1244,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 		&user_kvm_nested_state->data.svm[0];
 	struct vmcb_control_area *ctl;
 	struct vmcb_save_area *save;
+	unsigned long cr0;
 	int ret;
-	u32 cr0;
 
 	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
 		     KVM_STATE_NESTED_SVM_VMCB_SIZE);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c862783035b8..0b059698cd5c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9095,8 +9095,8 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 {
 	struct kvm_segment cs, ds;
 	struct desc_ptr dt;
+	unsigned long cr0;
 	char buf[512];
-	u32 cr0;
 
 	memset(buf, 0, 512);
 #ifdef CONFIG_X86_64
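
For readers skimming the patch: the VMCB save area and KVM's own state hold CR0/CR4 as full-width register values and EFER as a 64-bit MSR, so funneling them through u32 parameters or locals truncates bits 63:32 before the shadow-MMU setup sees them. Below is a minimal standalone sketch of that hazard, not code from the patch; the high CR0 bit used is hypothetical (current CPUs keep the upper halves of CR0/CR4 reserved), and it relies on unsigned long being 64 bits under the LP64 model used by x86-64 Linux.

/*
 * Illustration only: assigning a 64-bit control-register value to a u32
 * silently drops bits 63:32, which is what the switch to unsigned long/u64
 * parameters in this patch avoids.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical high bit set alongside the real PG (bit 31) and PE (bit 0) bits. */
	uint64_t vmcb_cr0 = (1ULL << 32) | 0x80000001ULL;

	uint32_t old_param = (uint32_t)vmcb_cr0;	/* old u32 prototype: bit 32 lost */
	unsigned long new_param = vmcb_cr0;		/* 64-bit on x86-64: value preserved */

	printf("vmcb12 cr0: %#llx\n", (unsigned long long)vmcb_cr0);
	printf("as u32:     %#llx (truncated)\n", (unsigned long long)old_param);
	printf("as ulong:   %#lx\n", new_param);
	return 0;
}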