Clean up /dev/mem now that pmap handles illegal aliases properly. Don't
allow access to device memory through /dev/mem, or try to make modifying
kernel text through /dev/mem safe (it is not).
Jake Burkholder 2003-03-17 18:53:02 +00:00
parent 5262ab2453
commit 56a6b03a6d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=112349
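
For context on the "illegal aliases" in the commit message: the UltraSPARC
data cache is virtually indexed and physically tagged, so two virtual
mappings of the same physical page that differ in cache color can hold
incoherent copies of the data. The rewritten mmrw() below avoids creating
such an alias by mapping each page into a DCACHE_COLORS-page virtual window
at the slot whose color matches the page's recorded color. A minimal
standalone sketch of that address selection, using a hypothetical color_va()
helper and hard-coding the sparc64 8 KB base page size:

	#include <stdint.h>

	#define	PAGE_SIZE	8192	/* sparc64 base page size */

	/*
	 * Hypothetical helper mirroring the selection done inline in
	 * mmrw(): ova is the base of a PAGE_SIZE * DCACHE_COLORS virtual
	 * window, color is the page's dcache color, or -1 if the page has
	 * no established color yet.
	 */
	static uintptr_t
	color_va(uintptr_t ova, int color)
	{

		/* An uncolored page can be mapped at any slot. */
		if (color == -1)
			return (ova);
		/* Otherwise pick the window slot with the matching color. */
		return (ova + (uintptr_t)color * PAGE_SIZE);
	}

In the diff itself this logic appears inline in the mem case, keyed off
m->md.color.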


@@ -64,11 +64,15 @@
 
 #include <vm/vm.h>
 #include <vm/vm_param.h>
+#include <vm/vm_page.h>
+#include <vm/vm_kern.h>
 #include <vm/pmap.h>
 #include <vm/vm_extern.h>
 
 #include <machine/cache.h>
+#include <machine/md_var.h>
 #include <machine/pmap.h>
+#include <machine/tlb.h>
 #include <machine/upa.h>
 
 static dev_t memdev, kmemdev;
@@ -115,19 +119,26 @@ mmopen(dev_t dev, int flags, int fmt, struct thread *td)
 	return (0);
 }
 
-#define	IOSTART		UPA_MEMSTART
-
 /*ARGSUSED*/
 static int
 mmrw(dev_t dev, struct uio *uio, int flags)
 {
 	struct iovec *iov;
-	int error = 0;
-	vm_offset_t addr, eaddr, o, v = 0;
+	vm_offset_t eva;
+	vm_offset_t off;
+	vm_offset_t ova;
+	vm_offset_t pa;
+	vm_offset_t va;
 	vm_prot_t prot;
-	vm_size_t c = 0;
-	u_long asi;
-	char *buf = NULL;
+	vm_size_t cnt;
+	vm_page_t m;
+	int color;
+	int error;
+	int i;
+
+	cnt = 0;
+	error = 0;
+	ova = 0;
 
 	GIANT_REQUIRED;
 
@@ -143,69 +154,74 @@ mmrw(dev_t dev, struct uio *uio, int flags)
 		switch (minor(dev)) {
 		case 0:
 			/* mem (physical memory) */
-			if (buf == NULL) {
-				buf = malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK);
-				if (buf == NULL) {
-					error = ENOMEM;
-					break;
-				}
+			pa = uio->uio_offset & ~PAGE_MASK;
+			if (!is_physical_memory(pa)) {
+				error = EFAULT;
+				break;
 			}
-			v = uio->uio_offset;
-			asi = ASI_PHYS_USE_EC;
-			/* Access device memory noncacheable. */
-			if (v >= IOSTART)
-				asi = ASI_PHYS_BYPASS_EC_WITH_EBIT;
-			o = v & PAGE_MASK;
-			c = ulmin(iov->iov_len, PAGE_SIZE - o);
-			/*
-			 * This double copy could be avoided, at the cost of
-			 * inlining a version of uiomove. Since this is not
-			 * performance-critical, it is probably not worth it.
-			 */
-			if (uio->uio_rw == UIO_READ)
-				ascopyfrom(asi, v, buf, c);
-			error = uiomove(buf, c, uio);
-			if (error == 0 && uio->uio_rw == UIO_WRITE)
-				ascopyto(buf, asi, v, c);
-			/*
-			 * If a write was evil enough to change kernel code,
-			 * I$ must be flushed. Also, D$ must be flushed if there
-			 * is a chance that there is a cacheable mapping to
-			 * avoid working with stale data.
-			 */
-			if (v < IOSTART && uio->uio_rw == UIO_WRITE) {
-				icache_inval_phys(v, v + c);
-				dcache_inval_phys(v, v + c);
+
+			off = uio->uio_offset & PAGE_MASK;
+			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
+			    PAGE_MASK);
+			cnt = min(cnt, PAGE_SIZE - off);
+			cnt = min(cnt, iov->iov_len);
+
+			m = NULL;
+			for (i = 0; phys_avail[i] != 0; i += 2) {
+				if (pa >= phys_avail[i] &&
+				    pa < phys_avail[i + 1]) {
+					m = PHYS_TO_VM_PAGE(pa);
+					break;
+				}
+			}
+
+			if (m != NULL) {
+				if (ova == 0) {
+					ova = kmem_alloc_wait(kernel_map,
+					    PAGE_SIZE * DCACHE_COLORS);
+				}
+				if ((color = m->md.color) == -1)
+					va = ova;
+				else
+					va = ova + color * PAGE_SIZE;
+				pmap_qenter(va, &m, 1);
+				error = uiomove((void *)(va + off), cnt,
+				    uio);
+				pmap_qremove(va, 1);
+			} else {
+				va = TLB_PHYS_TO_DIRECT(pa);
+				error = uiomove((void *)(va + off), cnt,
+				    uio);
 			}
 			break;
 		case 1:
 			/* kmem (kernel memory) */
-			c = iov->iov_len;
+			va = trunc_page(uio->uio_offset);
+			eva = round_page(uio->uio_offset + iov->iov_len);
 
 			/*
-			 * Make sure that all of the pages are currently resident so
-			 * that we don't create any zero-fill pages.
+			 * Make sure that all of the pages are currently
+			 * resident so we don't create any zero fill pages.
 			 */
-			addr = trunc_page(uio->uio_offset);
-			eaddr = round_page(uio->uio_offset + c);
-
-			for (; addr < eaddr; addr += PAGE_SIZE)
-				if (pmap_extract(kernel_pmap, addr) == 0)
-					return EFAULT;
+			for (; va < eva; va += PAGE_SIZE)
+				if (pmap_kextract(va) == 0)
+					return (EFAULT);
 
 			prot = (uio->uio_rw == UIO_READ) ? VM_PROT_READ :
 			    VM_PROT_WRITE;
-			v = uio->uio_offset;
-			if (v < VM_MIN_DIRECT_ADDRESS &&
-			    kernacc((caddr_t)v, c, prot) == FALSE)
+			va = uio->uio_offset;
+			if (va < VM_MIN_DIRECT_ADDRESS &&
+			    kernacc((void *)va, iov->iov_len, prot) == FALSE)
 				return (EFAULT);
-			error = uiomove((caddr_t)v, c, uio);
-			if (uio->uio_rw == UIO_WRITE)
-				icache_flush(v, v + c);
+
+			error = uiomove((void *)va, iov->iov_len, uio);
+			break;
+		default:
+			return (ENODEV);
 		}
 	}
-	if (buf != NULL)
-		free(buf, M_DEVBUF);
+	if (ova != 0)
+		kmem_free_wakeup(kernel_map, ova, PAGE_SIZE * DCACHE_COLORS);
 	return (error);
 }
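
A note on the phys_avail[] walk in the new mem case: it decides whether the
target physical page belongs to a VM-managed RAM range (and therefore has a
vm_page whose color must be honored by the temporary pmap_qenter() mapping)
or is other physical memory, such as kernel text, that is reached through
the TLB_PHYS_TO_DIRECT() direct mapping instead. A self-contained sketch of
the same zero-terminated range walk; the table contents here are made-up
examples, as the real phys_avail[] is filled in by machine-dependent
startup code:

	#include <stdbool.h>
	#include <stdint.h>

	/* phys_avail-style table: {start, end} pairs, zero-terminated. */
	static const uint64_t phys_avail[] = {
		0x0002000, 0x1000000,	/* example RAM range */
		0x2000000, 0x4000000,	/* example RAM range */
		0
	};

	/* Same loop shape as the one added to mmrw(). */
	static bool
	pa_is_managed(uint64_t pa)
	{
		int i;

		for (i = 0; phys_avail[i] != 0; i += 2)
			if (pa >= phys_avail[i] && pa < phys_avail[i + 1])
				return (true);
		return (false);
	}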