commit f3b65bbaed
The .change_pte() MMU notifier callback was intended as an
optimization. The original point of it was that KSM could tell KVM to flip
its secondary PTE to a new location without having to first zap it. At
the time there was also an .invalidate_page() callback; both of them were
*not* bracketed by calls to mmu_notifier_invalidate_range_{start,end}(),
and .invalidate_page() also doubled as a fallback implementation of
.change_pte().
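For reference, the hook was a single-PTE notification; a minimal sketch of
its shape in mmu_notifier_ops (abbreviated, other callbacks omitted):

    struct mmu_notifier_ops {
            ...
            /* Notify that a host PTE was replaced; fired by
             * set_pte_at_notify() so KVM could update its sPTE in place. */
            void (*change_pte)(struct mmu_notifier *mn,
                               struct mm_struct *mm,
                               unsigned long address,
                               pte_t pte);
            ...
    };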
Later on, however, both callbacks were changed to occur within an
invalidate_range_start/end() block.
In the case of .change_pte(), commit 6bdb913f0a
("mm: wrap calls to
set_pte_at_notify with invalidate_range_start and invalidate_range_end",
2012-10-09) did so to remove the fallback from .invalidate_page() to
.change_pte() and allow sleepable .invalidate_page() hooks.
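A condensed sketch of the call pattern that commit introduced (range setup
omitted, shown with the current range-based API):

    mmu_notifier_invalidate_range_start(&range);
    ...
    set_pte_at_notify(mm, address, ptep, newpte); /* fires .change_pte() */
    ...
    mmu_notifier_invalidate_range_end(&range);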
This, however, made KVM's usage of the .change_pte() callback completely
moot, because KVM unmaps the sPTEs during .invalidate_range_start()
and therefore .change_pte() has no hope of finding a sPTE to change.
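Schematically, the order of events as KVM observes them (sketch; handler
names as in virt/kvm/kvm_main.c):

    kvm_mmu_notifier_invalidate_range_start()  /* zaps the sPTEs in range */
    kvm_mmu_notifier_change_pte()              /* kvm_set_spte_gfn(): nothing left */
    kvm_mmu_notifier_invalidate_range_end()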
Drop the generic KVM code that dispatches to kvm_set_spte_gfn(), as
well as all the architecture specific implementations.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Acked-by: Anup Patel <anup@brainfault.org>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Reviewed-by: Bibo Mao <maobibo@loongson.cn>
Message-ID: <20240405115815.3226315-2-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright IBM Corporation, 2013
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 */

#ifndef __POWERPC_KVM_BOOK3S_H__
#define __POWERPC_KVM_BOOK3S_H__

extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
					 struct kvm_memory_slot *memslot);
extern bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range);
extern bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
extern bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);

extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
				     unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
					int sprn, ulong spr_val);
extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
					int sprn, ulong *spr_val);
extern int kvmppc_book3s_init_pr(void);
void kvmppc_book3s_exit_pr(void);
extern int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val);
#else
static inline void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) {}
#endif

extern void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
extern void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);

#endif