Make uio_yield() a global. Call uio_yield() between chunks

in vn_rdwr_inchunks(), allowing other processes to gain an exclusive
lock on the vnode.  Specifically: directory scanning (to avoid a race to
the root directory), and multiple child processes coring simultaneously
(so they can figure out that some other core'ing child already holds an
exclusive advisory lock and just exit instead).

This completely fixes performance problems when large programs core.  You
can have hundreds of copies (forked children) of the same binary core all
at once and not notice.
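
For illustration only, here is the advisory-lock behavior described above
translated into a hedged userland analog.  The helper name
try_become_dumper() is hypothetical, and the kernel coredump path takes the
advisory lock on the vnode rather than via flock(2), but the logic is the
same: the first child takes the exclusive lock and writes the core; later
children see the lock held and exit instead of queueing up behind it.

#include <fcntl.h>
#include <unistd.h>
#include <sys/file.h>

/*
 * Userland analog (hypothetical helper): the first process to get the
 * exclusive advisory lock writes the core; everyone else bails out
 * immediately instead of blocking behind the writer.
 */
static int
try_become_dumper(const char *corepath)
{
	int fd;

	fd = open(corepath, O_WRONLY | O_CREAT, 0600);
	if (fd < 0)
		return (-1);
	if (flock(fd, LOCK_EX | LOCK_NB) != 0) {
		/* Some other core'ing child already holds the lock. */
		close(fd);
		return (-1);
	}
	return (fd);	/* caller writes the core, then closes/unlocks */
}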

MFC after:	3 days
Commit:	3418ebebfe (parent: c36b0a4edb)
Author:	Matthew Dillon
Date:	2001-09-26 06:54:32 +00:00
Notes:	svn2git 2020-12-20 02:59:44 +00:00; svn path=/head/; revision=83959
5 changed files with 10 additions and 8 deletions

sys/kern/imgact_aout.c

@@ -269,14 +269,14 @@ aout_coredump(td, vp, limit)
 	error = vn_rdwr(UIO_WRITE, vp, vm->vm_daddr,
 	    (int)ctob(vm->vm_dsize),
 	    (off_t)ctob(UAREA_PAGES + KSTACK_PAGES), UIO_USERSPACE,
-	    IO_UNIT, cred, (int *) NULL, td);
+	    IO_UNIT | IO_DIRECT, cred, (int *) NULL, td);
 	if (error == 0)
 		error = vn_rdwr_inchunks(UIO_WRITE, vp,
 		    (caddr_t) trunc_page(USRSTACK - ctob(vm->vm_ssize)),
 		    round_page(ctob(vm->vm_ssize)),
 		    (off_t)ctob(UAREA_PAGES + KSTACK_PAGES) +
 		    ctob(vm->vm_dsize), UIO_USERSPACE,
-		    IO_UNIT, cred, (int *) NULL, td);
+		    IO_UNIT | IO_DIRECT, cred, (int *) NULL, td);
 	return (error);
 }

sys/kern/imgact_elf.c

@@ -802,7 +802,7 @@ elf_coredump(td, vp, limit)
 			error = vn_rdwr_inchunks(UIO_WRITE, vp,
 			    (caddr_t)php->p_vaddr,
 			    php->p_filesz, offset, UIO_USERSPACE,
-			    IO_UNIT, cred, (int *)NULL, curthread); /* XXXKSE */
+			    IO_UNIT | IO_DIRECT, cred, (int *)NULL, curthread); /* XXXKSE */
 			if (error != 0)
 				break;
 			offset += php->p_filesz;
@@ -966,7 +966,7 @@ elf_corehdr(td, vp, cred, numsegs, hdr, hdrsize)
 	/* Write it to the core file. */
 	return vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
-	    UIO_SYSSPACE, IO_UNIT, cred, NULL, td); /* XXXKSE */
+	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NULL, td); /* XXXKSE */
 }
 
 static void

sys/kern/kern_subr.c

@@ -55,8 +55,6 @@
 #include <vm/vm_page.h>
 #include <vm/vm_map.h>
 
-static void uio_yield __P((void));
-
 SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
 	"Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
@@ -381,7 +379,7 @@ phashinit(elements, type, nentries)
 	return (hashtbl);
 }
 
-static void
+void
 uio_yield()
 {
 	struct thread *td;
sys/kern/vfs_vnops.c

@@ -338,7 +338,9 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
  * Package up an I/O request on a vnode into a uio and do it.  The I/O
  * request is split up into smaller chunks and we try to avoid saturating
  * the buffer cache while potentially holding a vnode locked, so we
- * check bwillwrite() before calling vn_rdwr()
+ * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
+ * to give other processes a chance to lock the vnode (either other processes
+ * core'ing the same binary, or unrelated processes scanning the directory).
  */
 int
 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
@@ -367,6 +369,7 @@ vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
 			break;
 		offset += chunk;
 		base += chunk;
+		uio_yield();
 	} while (len);
 	if (aresid)
 		*aresid += len;
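
To make the pattern concrete, here is a minimal userland sketch of the
same chunking-plus-yield structure, using pwrite(2) and sched_yield(2).
This is an assumption-labeled illustration, not the kernel code: the
kernel loop calls vn_rdwr() and checks bwillwrite() instead, and CHUNK
here is an arbitrary size chosen for the sketch.

#include <sched.h>
#include <unistd.h>

#define CHUNK	(64 * 1024)	/* arbitrary chunk size for the sketch */

/*
 * Write len bytes at offset in CHUNK-sized pieces, yielding between
 * chunks so other processes get a chance at any lock we hold.
 */
static int
write_inchunks(int fd, const char *base, size_t len, off_t offset)
{
	while (len > 0) {
		size_t chunk = len < CHUNK ? len : CHUNK;
		ssize_t n = pwrite(fd, base, chunk, offset);

		if (n < 0)
			return (-1);
		offset += n;
		base += n;
		len -= (size_t)n;
		(void)sched_yield();	/* analog of uio_yield() */
	}
	return (0);
}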

sys/sys/uio.h

@@ -84,6 +84,7 @@ struct uio {
 struct vm_object;
 
+void	uio_yield __P((void));
 int	uiomove __P((caddr_t, int, struct uio *));
 int	uiomoveco __P((caddr_t, int, struct uio *, struct vm_object *));
 int	uioread __P((int, struct uio *, struct vm_object *, int *));