The last bits of the alloca -> mmap fix. IA64 and SPARC64 (current only).

Untested (the testing request went unanswered), but sparc64 is not expected to
cause problems.  IA64 is likewise not expected to cause problems, but that
patch was slightly more complex, so the possibility exists.

Approved by:    jdp
This commit is contained in:
Matthew Dillon 2002-06-22 18:36:21 +00:00
parent e0cf55e42a
commit b6801e6b54
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=98629
2 changed files with 58 additions and 21 deletions

View file

@@ -190,24 +190,35 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
const Elf_Rela *rela;
SymCache *cache;
struct fptr **fptrs;
int bytes = obj->nchains * sizeof(SymCache);
int fbytes = obj->nchains * sizeof(struct fptr *);
int r = -1;
cache = (SymCache *)alloca(obj->nchains * sizeof(SymCache));
/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().
*/
cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
if (cache == MAP_FAILED)
cache = NULL;
if (cache != NULL)
memset(cache, 0, obj->nchains * sizeof(SymCache));
memset(cache, 0, bytes);
/*
* When relocating rtld itself, we need to avoid using malloc.
*/
if (obj == obj_rtld)
fptrs = (struct fptr **)
alloca(obj->nchains * sizeof(struct fptr *));
else
if (obj == obj_rtld) {
fptrs = mmap(NULL, fbytes, PROT_READ|PROT_WRITE,
MAP_ANON, -1, 0);
if (fptrs == MAP_FAILED)
fptrs = NULL;
} else {
fptrs = (struct fptr **)
malloc(obj->nchains * sizeof(struct fptr *));
}
if (fptrs == NULL)
return -1;
memset(fptrs, 0, obj->nchains * sizeof(struct fptr *));
goto done;
memset(fptrs, 0, fbytes);
/* Perform relocations without addend if there are any: */
rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize);
@@ -218,28 +229,43 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
locrela.r_offset = rel->r_offset;
locrela.r_addend = 0;
if (reloc_non_plt_obj(obj_rtld, obj, &locrela, cache, fptrs))
return -1;
goto done;
}
/* Perform relocations with addend if there are any: */
relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
for (rela = obj->rela; obj->rela != NULL && rela < relalim; rela++) {
if (reloc_non_plt_obj(obj_rtld, obj, rela, cache, fptrs))
return -1;
goto done;
}
/*
* Remember the fptrs in case of later calls to dlsym(). Don't
* bother for rtld - we will lazily create a table in
* make_function_pointer(). At this point we still can't risk
* calling malloc().
* make_function_pointer(). We still can't risk calling malloc()
* in the rtld case.
*
* When remembering fptrs, NULL out our local fptrs variable so we
* do not free it.
*/
if (obj != obj_rtld)
obj->priv = fptrs;
else
if (obj == obj_rtld) {
obj->priv = NULL;
} else {
obj->priv = fptrs;
fptrs = NULL;
}
return 0;
r = 0;
done:
if (cache)
munmap(cache, bytes);
if (fptrs) {
if (obj == obj_rtld)
munmap(fptrs, fbytes);
else
free(fptrs);
}
return (r);
}
/* Process the PLT relocations. */

View file

@@ -249,18 +249,29 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
const Elf_Rela *relalim;
const Elf_Rela *rela;
SymCache *cache;
int bytes = obj->nchains * sizeof(SymCache);
int r = -1;
cache = (SymCache *)alloca(obj->nchains * sizeof(SymCache));
/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().
*/
cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
if (cache == MAP_FAILED)
cache = NULL;
if (cache != NULL)
memset(cache, 0, obj->nchains * sizeof(SymCache));
relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
for (rela = obj->rela; rela < relalim; rela++) {
if (reloc_nonplt_object(obj, rela, cache) < 0)
return (-1);
goto done;
}
return (0);
r = 0;
done:
if (cache)
munmap(cache, bytes);
return (r);
}
static int