oslib: allocate PROT_NONE pages on top of RAM
This inserts a read and write protected page between RAM and QEMU memory. This makes it harder to exploit QEMU bugs resulting from buffer overflows in devices using variants of cpu_physical_memory_map, dma_memory_map etc.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
parent c2dfc5ba3f
commit 9fac18f03a
1 changed file with 4 additions and 4 deletions
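To illustrate the hardening idea in isolation, here is a minimal standalone sketch (not QEMU code; guarded_alloc and guarded_free are invented names). It reserves one extra page and leaves it PROT_NONE, so a linear overflow off the end of the block faults immediately instead of corrupting adjacent QEMU memory. It uses mprotect() where qemu_anon_ram_alloc() does a second MAP_FIXED mmap(), and it omits the alignment handling visible in the diff below.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Invented helper: map size bytes of anonymous RAM followed by one
 * PROT_NONE guard page. size is assumed to be a multiple of the page
 * size, as RAM block sizes are in practice. */
static void *guarded_alloc(size_t size)
{
    size_t page = getpagesize();
    void *ptr = mmap(NULL, size + page, PROT_NONE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (ptr == MAP_FAILED) {
        return NULL;
    }
    /* Open up only the usable part; the last page stays inaccessible. */
    if (mprotect(ptr, size, PROT_READ | PROT_WRITE) != 0) {
        munmap(ptr, size + page);
        return NULL;
    }
    return ptr;
}

/* The mapping is one page larger than the usable size, exactly as in
 * qemu_anon_ram_free() after this commit. */
static void guarded_free(void *ptr, size_t size)
{
    munmap(ptr, size + getpagesize());
}

int main(void)
{
    size_t size = 4 * getpagesize();
    char *buf = guarded_alloc(size);
    if (!buf) {
        return 1;
    }
    memset(buf, 0, size);             /* fine: inside the block     */
    /* buf[size] = 0; */              /* would SIGSEGV on the guard */
    guarded_free(buf, size);
    printf("allocated and freed %zu bytes with a trailing guard page\n", size);
    return 0;
}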
@@ -128,7 +128,7 @@ void *qemu_memalign(size_t alignment, size_t size)
 void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment)
 {
     size_t align = QEMU_VMALLOC_ALIGN;
-    size_t total = size + align - getpagesize();
+    size_t total = size + align;
     void *ptr = mmap(0, total, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
     size_t offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr;
     void *ptr1;
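The only change in this hunk is that the reservation no longer subtracts a page: with total = size + align, rounding the block start up to QEMU_VMALLOC_ALIGN always leaves at least one spare page past the end of the block to serve as the guard. A worked example of the arithmetic with made-up values (4 KiB pages and a 2 MiB QEMU_VMALLOC_ALIGN, typical on x86_64 Linux; the mmap() address is invented):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t page  = 4096;                  /* assumed page size          */
    uint64_t align = 2 * 1024 * 1024;       /* assumed QEMU_VMALLOC_ALIGN */
    uint64_t size  = 512 * 1024 * 1024;     /* requested RAM block        */
    uint64_t total = size + align;          /* new formula from the hunk  */

    uint64_t ptr     = 0x7f0000001000ULL;   /* pretend mmap() result      */
    uint64_t aligned = (ptr + align - 1) & ~(align - 1);  /* QEMU_ALIGN_UP */
    uint64_t offset  = aligned - ptr;       /* 0x1ff000 in this example   */

    /* mmap() addresses are page aligned, so offset <= align - page and
     * aligned + size + page <= ptr + total: at least one page is always
     * left over after the aligned block. */
    printf("offset = 0x%" PRIx64 ", spare past block = 0x%" PRIx64 "\n",
           offset, ptr + total - (aligned + size));
    return 0;
}

With the old formula, total = size + align - getpagesize(), the worst case (offset equal to align minus one page) left no spare bytes at all, so a guard page could only have come out of the usable block.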
@@ -154,8 +154,8 @@ void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment)
     if (offset > 0) {
         munmap(ptr - offset, offset);
     }
-    if (total > size) {
-        munmap(ptr + size, total - size);
+    if (total > size + getpagesize()) {
+        munmap(ptr + size + getpagesize(), total - size - getpagesize());
     }
 
     trace_qemu_anon_ram_alloc(size, ptr);
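After the head trim, this hunk starts the tail trim one page later than before, so a single PROT_NONE page survives immediately after the usable block (at this point in the function ptr already refers to the aligned block, which is why the head is unmapped at ptr - offset). A standalone sketch of the same trimming outside QEMU, with an assumed 2 MiB alignment and invented sizes; QEMU_ALIGN_UP is open-coded and mprotect() stands in for QEMU's MAP_FIXED remapping:

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t page  = getpagesize();
    size_t align = 2 * 1024 * 1024;          /* stand-in for QEMU_VMALLOC_ALIGN */
    size_t size  = 8 * page;                 /* toy RAM block                   */
    size_t total = size + align;

    char *ptr = mmap(NULL, total, PROT_NONE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (ptr == MAP_FAILED) {
        return 1;
    }
    /* Bytes from ptr up to the next align boundary (0 if already aligned). */
    size_t offset = (size_t)(-(uintptr_t)ptr & (align - 1));
    char *block = ptr + offset;

    /* Make the block itself accessible; QEMU does this with a MAP_FIXED mmap. */
    mprotect(block, size, PROT_READ | PROT_WRITE);

    if (offset > 0) {
        munmap(ptr, offset);                 /* drop the unaligned head        */
    }
    size_t tail = total - offset - size - page;
    if (tail > 0) {
        munmap(block + size + page, tail);   /* drop everything past the guard */
    }
    printf("block at %p, PROT_NONE guard at %p\n",
           (void *)block, (void *)(block + size));
    munmap(block, size + page);              /* block plus guard, as in the free path */
    return 0;
}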
@@ -172,7 +172,7 @@ void qemu_anon_ram_free(void *ptr, size_t size)
 {
     trace_qemu_anon_ram_free(ptr, size);
     if (ptr) {
-        munmap(ptr, size);
+        munmap(ptr, size + getpagesize());
     }
 }
 