Define memmove and make bcopy alt entry point

Make a memmove entry point just before bcopy and have it swap its args
before falling through into the body of bcopy. Adjust the returns so that
both entry points return dst (the original %o0, swapped into %o1); bcopy
callers will simply ignore the value. Since the return moves sit in the
branch delay slot, they should take no additional time. I save dst in %o6
rather than just moving %o1 back to %o0 at the end, since my sparc64
assembler knowledge is weak. Also eliminate the wrapper call from memmove
to bcopy by removing libkern/memmove.c from the sparc64 build.
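
For reference, the machine-independent wrapper being removed amounted to
the following minimal C sketch (the actual libkern/memmove.c may differ in
style). It exists only because memmove takes its arguments in the opposite
order from bcopy and must return dst, which bcopy does not:

	#include <sys/types.h>

	void	bcopy(const void *src, void *dst, size_t len);

	void *
	memmove(void *dst, const void *src, size_t len)
	{
		/* bcopy takes (src, dst, len); memmove takes (dst, src, len). */
		bcopy(src, dst, len);
		return (dst);
	}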

Differential Revision: https://reviews.freebsd.org/D15374
Warner Losh 2018-05-24 21:11:28 +00:00
parent 49ab568eff
commit 78a76e3628
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=334182
2 changed files with 12 additions and 5 deletions

--- a/sys/conf/files.sparc64
+++ b/sys/conf/files.sparc64
@@ -71,7 +71,6 @@ libkern/ffsll.c standard
 libkern/fls.c standard
 libkern/flsl.c standard
 libkern/flsll.c standard
-libkern/memmove.c standard
 sparc64/central/central.c optional central
 sparc64/ebus/ebus.c optional ebus
 sparc64/ebus/epic.c optional epic ebus

--- a/sys/sparc64/sparc64/support.S
+++ b/sys/sparc64/sparc64/support.S
@@ -265,9 +265,17 @@ ENTRY(bcmp)
 END(bcmp)
 
 /*
+ * void *memmove(void *dst, const void *src, size_t len)
  * void bcopy(const void *src, void *dst, size_t len)
  */
-ENTRY(bcopy)
+ENTRY(memmove)
+	/*
+	 * Swap src/dst for memmove/bcopy differences
+	 */
+	mov	%o0, %o6
+	mov	%o1, %o0
+	mov	%o6, %o1
+ALTENTRY(bcopy)
 	/*
 	 * Check for overlap, and copy backwards if so.
 	 */
@@ -290,15 +298,15 @@ ENTRY(bcopy)
 	ba	%xcc, 1b
 	 stb	%g1, [%o1]
 2:	retl
-	 nop
+	 mov	%o6, %o0
 	/*
 	 * Do the fast version.
 	 */
 3:	_MEMCPY(%o1, %o0, %o2, EMPTY, EMPTY, EMPTY, EMPTY)
 	retl
-	 nop
-END(bcopy)
+	 mov	%o6, %o0
+END(memmove)
 
 /*
  * void bzero(void *b, size_t len)
  */
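
For readers who don't speak SPARC assembly, the overlap test the shared
bcopy body performs ("copy backwards if so") corresponds roughly to the
following C sketch. This is an illustration under the same dst-returning
contract, not the kernel's actual implementation, and memmove_sketch is a
hypothetical name:

	#include <stddef.h>

	static void *
	memmove_sketch(void *dst, const void *src, size_t len)
	{
		const char *s = src;
		char *d = dst;

		if (d > s && d < s + len) {
			/* dst overlaps the tail of src: copy backwards. */
			while (len-- != 0)
				d[len] = s[len];
		} else {
			/* No harmful overlap: copy forwards. */
			while (len != 0) {
				*d++ = *s++;
				len--;
			}
		}
		return (dst);	/* memmove returns dst; bcopy callers ignore it. */
	}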