add pmap_active_cpus()

For amd64, i386, arm, and riscv, i.e. all architectures except arm64,
a custom implementation is provided, since these pmaps already maintain
the bitmask of active CPUs (pm_active) anyway.

Arm64 uses a somewhat naive iteration over all CPUs, matching each CPU's
current vmspace pmap against the argument. It is not guaranteed that the
vmspace's pmap is the same as the CPU's active pmap, but the inaccuracy
should be tolerable.
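
As a consumer-side illustration only (not part of this commit), a kernel
caller could use the new interface roughly as sketched below; the helper
name pmap_active_cpu_count() and its surrounding context are hypothetical:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

/*
 * Hypothetical helper, not part of the commit: report how many CPUs
 * currently have the given process's pmap active, e.g. to decide whether
 * a cross-CPU notification is worth sending at all.
 */
static int
pmap_active_cpu_count(struct proc *p)
{
	cpuset_t active;

	pmap_active_cpus(vmspace_pmap(p->p_vmspace), &active);
	return (CPU_COUNT(&active));
}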

Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D32360
Author: Konstantin Belousov
Date:   2021-10-08 01:25:54 +03:00
Commit: 8882b7852a
Parent: c7f73a1588

8 changed files with 60 additions and 0 deletions


@@ -10256,6 +10256,12 @@ pmap_activate_boot(pmap_t pmap)
 	PCPU_SET(ucr3, PMAP_NO_CR3);
 }
 
+void
+pmap_active_cpus(pmap_t pmap, cpuset_t *res)
+{
+	*res = pmap->pm_active;
+}
+
 void
 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 {


@@ -6194,6 +6194,12 @@ pmap_activate(struct thread *td)
 	critical_exit();
 }
 
+void
+pmap_active_cpus(pmap_t pmap, cpuset_t *res)
+{
+	*res = pmap->pm_active;
+}
+
 /*
  * Perform the pmap work for mincore(2). If the page is not both referenced and
  * modified by this pmap, returns its physical address so that the caller can


@@ -129,6 +129,8 @@ extern struct pmap kernel_pmap_store;
 	    (uint64_t)(asid) << TTBR_ASID_SHIFT;	\
 })
 
+#define	PMAP_WANT_ACTIVE_CPUS_NAIVE
+
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;


@@ -944,6 +944,12 @@ pmap_kremove(vm_offset_t va)
 	pmap_methods_ptr->pm_kremove(va);
 }
 
+void
+pmap_active_cpus(pmap_t pmap, cpuset_t *res)
+{
+	*res = pmap->pm_active;
+}
+
 extern struct pmap_methods pmap_pae_methods, pmap_nopae_methods;
 int pae_mode;
 SYSCTL_INT(_vm_pmap, OID_AUTO, pae_mode, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,


@@ -253,3 +253,9 @@ pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
 		return (FALSE);
 	}
 }
+
+void
+pmap_active_cpus(pmap_t pmap, cpuset_t *res)
+{
+	*res = pmap->pm_active;
+}


@@ -4687,6 +4687,12 @@ pmap_activate_boot(pmap_t pmap)
 	PCPU_SET(curpmap, pmap);
 }
 
+void
+pmap_active_cpus(pmap_t pmap, cpuset_t *res)
+{
+	*res = pmap->pm_active;
+}
+
 void
 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
 {


@@ -90,6 +90,7 @@ typedef struct pmap_statistics *pmap_statistics_t;
 #include <machine/pmap.h>
 
 #ifdef _KERNEL
+#include <sys/_cpuset.h>
 
 struct thread;
 /*
@@ -118,6 +119,7 @@ extern vm_offset_t kernel_vm_end;
 #define	PMAP_TS_REFERENCED_MAX	5
 
 void		 pmap_activate(struct thread *td);
+void		 pmap_active_cpus(pmap_t pmap, cpuset_t *res);
 void		 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 		    int advice);
 void		 pmap_align_superpage(vm_object_t, vm_ooffset_t, vm_offset_t *,


@@ -78,6 +78,7 @@
 #include <sys/msan.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
+#include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <sys/vmem.h>
 #include <sys/vmmeter.h>
@@ -907,6 +908,31 @@ kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
 #endif
 }
 
+#ifdef PMAP_WANT_ACTIVE_CPUS_NAIVE
+void
+pmap_active_cpus(pmap_t pmap, cpuset_t *res)
+{
+	struct thread *td;
+	struct proc *p;
+	struct vmspace *vm;
+	int c;
+
+	CPU_ZERO(res);
+	CPU_FOREACH(c) {
+		td = cpuid_to_pcpu[c]->pc_curthread;
+		p = td->td_proc;
+		if (p == NULL)
+			continue;
+		vm = vmspace_acquire_ref(p);
+		if (vm == NULL)
+			continue;
+		if (pmap == vmspace_pmap(vm))
+			CPU_SET(c, res);
+		vmspace_free(vm);
+	}
+}
+#endif
+
 /*
  * Allow userspace to directly trigger the VM drain routine for testing
  * purposes.