linux/tools/include/nolibc/arch-x86_64.h
Ammar Faizi 12108aa8c1 tools/nolibc: x86-64: Use rep stosb for memset()
Simplify memset() on the x86-64 arch.

The x86-64 arch has a 'rep stosb' instruction, which can perform
memset() using only a single instruction, given:

    %al  = value (just like the second argument of memset())
    %rdi = destination
    %rcx = length

Before this patch:
```
  00000000000010c9 <memset>:
    10c9: 48 89 f8              mov    %rdi,%rax
    10cc: 48 85 d2              test   %rdx,%rdx
    10cf: 74 0e                 je     10df <memset+0x16>
    10d1: 31 c9                 xor    %ecx,%ecx
    10d3: 40 88 34 08           mov    %sil,(%rax,%rcx,1)
    10d7: 48 ff c1              inc    %rcx
    10da: 48 39 ca              cmp    %rcx,%rdx
    10dd: 75 f4                 jne    10d3 <memset+0xa>
    10df: c3                    ret
```

After this patch:
```
  0000000000001511 <memset>:
    1511: 96                    xchg   %eax,%esi
    1512: 48 89 d1              mov    %rdx,%rcx
    1515: 57                    push   %rdi
    1516: f3 aa                 rep stos %al,%es:(%rdi)
    1518: 58                    pop    %rax
    1519: c3                    ret
```
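
The new sequence maps onto memset()'s contract as follows: 'xchg %eax, %esi' puts the
fill byte into %al, the length in %rdx is moved into %rcx, and the destination in %rdi
is saved and later popped into %rax because memset() must return its first argument.
A rough C-level sketch of the same operation (illustrative only; the actual nolibc
memset() is written directly in assembly, see the header below):
```c
#include <stddef.h>

void *memset(void *dst, int c, size_t len)
{
	void *ret = dst;                 /* mirrors pushq %rdi / popq %rax */

	__asm__ volatile ("rep stosb"    /* store %al to (%rdi), %rcx times */
			  : "+D"(dst), "+c"(len)
			  : "a"(c)       /* fill byte in %al, like xchg %eax, %esi */
			  : "memory");
	return ret;
}
```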

v2:
  - Use pushq %rdi / popq %rax (Alviro).
  - Use xchg %eax, %esi (Willy).

Link: https://lore.kernel.org/lkml/ZO9e6h2jjVIMpBJP@1wt.eu
Suggested-by: Alviro Iskandar Setiawan <alviro.iskandar@gnuweeb.org>
Suggested-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: Ammar Faizi <ammarfaizi2@gnuweeb.org>
Reviewed-by: Alviro Iskandar Setiawan <alviro.iskandar@gnuweeb.org>
Signed-off-by: Willy Tarreau <w@1wt.eu>
Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
2023-10-12 21:14:00 +02:00

/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * x86_64 specific definitions for NOLIBC
 * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
 */

#ifndef _NOLIBC_ARCH_X86_64_H
#define _NOLIBC_ARCH_X86_64_H

#include "compiler.h"
#include "crt.h"
/* Syscalls for x86_64 :
 *   - registers are 64-bit
 *   - syscall number is passed in rax
 *   - arguments are in rdi, rsi, rdx, r10, r8, r9 respectively
 *   - the system call is performed by calling the syscall instruction
 *   - syscall return comes in rax
 *   - rcx and r11 are clobbered, others are preserved.
 *   - the arguments are cast to long and assigned into the target registers
 *     which are then simply passed as registers to the asm code, so that we
 *     don't have to experience issues with register constraints.
 *   - the syscall number is always specified last in order to allow to force
 *     some registers before (gcc refuses a %-register at the last position).
 *   - see also x86-64 ABI section A.2 AMD64 Linux Kernel Conventions, A.2.1
 *     Calling Conventions.
 *
 * Link x86-64 ABI: https://gitlab.com/x86-psABIs/x86-64-ABI/-/wikis/home
 *
 */
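/*
 * Example (illustrative only): the syscall wrappers in nolibc's sys.h are
 * expected to use these macros roughly as follows, with __NR_write coming
 * from the kernel headers:
 *
 *   static ssize_t sys_write(int fd, const void *buf, size_t count)
 *   {
 *           return my_syscall3(__NR_write, fd, buf, count);
 *   }
 */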
#define my_syscall0(num) \
({ \
        long _ret; \
        register long _num __asm__ ("rax") = (num); \
        \
        __asm__ volatile ( \
                "syscall\n" \
                : "=a"(_ret) \
                : "0"(_num) \
                : "rcx", "r11", "memory", "cc" \
        ); \
        _ret; \
})
#define my_syscall1(num, arg1) \
({ \
        long _ret; \
        register long _num __asm__ ("rax") = (num); \
        register long _arg1 __asm__ ("rdi") = (long)(arg1); \
        \
        __asm__ volatile ( \
                "syscall\n" \
                : "=a"(_ret) \
                : "r"(_arg1), \
                  "0"(_num) \
                : "rcx", "r11", "memory", "cc" \
        ); \
        _ret; \
})
#define my_syscall2(num, arg1, arg2) \
({ \
        long _ret; \
        register long _num __asm__ ("rax") = (num); \
        register long _arg1 __asm__ ("rdi") = (long)(arg1); \
        register long _arg2 __asm__ ("rsi") = (long)(arg2); \
        \
        __asm__ volatile ( \
                "syscall\n" \
                : "=a"(_ret) \
                : "r"(_arg1), "r"(_arg2), \
                  "0"(_num) \
                : "rcx", "r11", "memory", "cc" \
        ); \
        _ret; \
})
#define my_syscall3(num, arg1, arg2, arg3) \
({ \
        long _ret; \
        register long _num __asm__ ("rax") = (num); \
        register long _arg1 __asm__ ("rdi") = (long)(arg1); \
        register long _arg2 __asm__ ("rsi") = (long)(arg2); \
        register long _arg3 __asm__ ("rdx") = (long)(arg3); \
        \
        __asm__ volatile ( \
                "syscall\n" \
                : "=a"(_ret) \
                : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
                  "0"(_num) \
                : "rcx", "r11", "memory", "cc" \
        ); \
        _ret; \
})
#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
        long _ret; \
        register long _num __asm__ ("rax") = (num); \
        register long _arg1 __asm__ ("rdi") = (long)(arg1); \
        register long _arg2 __asm__ ("rsi") = (long)(arg2); \
        register long _arg3 __asm__ ("rdx") = (long)(arg3); \
        register long _arg4 __asm__ ("r10") = (long)(arg4); \
        \
        __asm__ volatile ( \
                "syscall\n" \
                : "=a"(_ret) \
                : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
                  "0"(_num) \
                : "rcx", "r11", "memory", "cc" \
        ); \
        _ret; \
})
#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
        long _ret; \
        register long _num __asm__ ("rax") = (num); \
        register long _arg1 __asm__ ("rdi") = (long)(arg1); \
        register long _arg2 __asm__ ("rsi") = (long)(arg2); \
        register long _arg3 __asm__ ("rdx") = (long)(arg3); \
        register long _arg4 __asm__ ("r10") = (long)(arg4); \
        register long _arg5 __asm__ ("r8") = (long)(arg5); \
        \
        __asm__ volatile ( \
                "syscall\n" \
                : "=a"(_ret) \
                : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
                  "0"(_num) \
                : "rcx", "r11", "memory", "cc" \
        ); \
        _ret; \
})
#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
        long _ret; \
        register long _num __asm__ ("rax") = (num); \
        register long _arg1 __asm__ ("rdi") = (long)(arg1); \
        register long _arg2 __asm__ ("rsi") = (long)(arg2); \
        register long _arg3 __asm__ ("rdx") = (long)(arg3); \
        register long _arg4 __asm__ ("r10") = (long)(arg4); \
        register long _arg5 __asm__ ("r8") = (long)(arg5); \
        register long _arg6 __asm__ ("r9") = (long)(arg6); \
        \
        __asm__ volatile ( \
                "syscall\n" \
                : "=a"(_ret) \
                : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
                  "r"(_arg6), "0"(_num) \
                : "rcx", "r11", "memory", "cc" \
        ); \
        _ret; \
})
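/*
 * Illustrative six-argument example (roughly how nolibc's sys.h is expected
 * to wrap mmap() on 64-bit, where no argument packing is needed; __NR_mmap
 * comes from the kernel headers):
 *
 *   static void *sys_mmap(void *addr, size_t length, int prot, int flags,
 *                         int fd, off_t offset)
 *   {
 *           return (void *)my_syscall6(__NR_mmap, addr, length, prot,
 *                                      flags, fd, offset);
 *   }
 */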
/* startup code */
/*
 * x86-64 System V ABI mandates:
 * 1) %rsp must be 16-byte aligned right before the function call.
 * 2) The deepest stack frame should be zero (the %rbp).
 *
 */
void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
{
        __asm__ volatile (
                "xor %ebp, %ebp\n"   /* zero the stack frame */
                "mov %rsp, %rdi\n"   /* save stack pointer to %rdi, as arg1 of _start_c */
                "and $-16, %rsp\n"   /* %rsp must be 16-byte aligned before call */
                "call _start_c\n"    /* transfer to c runtime */
                "hlt\n"              /* ensure it does not return */
        );
        __builtin_unreachable();
}
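/*
 * Illustrative usage (not part of this header): a nolibc program is
 * typically built without the regular C runtime, e.g. roughly:
 *
 *   $ gcc -Os -static -nostdlib -include nolibc.h -o hello hello.c -lgcc
 *
 * _start above then hands the initial stack pointer to _start_c() from
 * crt.h, which extracts argc/argv/envp from it and calls main().
 */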
#define NOLIBC_ARCH_HAS_MEMMOVE
void *memmove(void *dst, const void *src, size_t len);

#define NOLIBC_ARCH_HAS_MEMCPY
void *memcpy(void *dst, const void *src, size_t len);

#define NOLIBC_ARCH_HAS_MEMSET
void *memset(void *dst, int c, size_t len);

__asm__ (
".section .text.nolibc_memmove_memcpy\n"
".weak memmove\n"
".weak memcpy\n"
"memmove:\n"
"memcpy:\n"
        "movq %rdx, %rcx\n\t"
        "movq %rdi, %rax\n\t"
        "movq %rdi, %rdx\n\t"
        "subq %rsi, %rdx\n\t"
        "cmpq %rcx, %rdx\n\t"
        "jb .Lbackward_copy\n\t"
        "rep movsb\n\t"
        "retq\n"
".Lbackward_copy:"
        "leaq -1(%rdi, %rcx, 1), %rdi\n\t"
        "leaq -1(%rsi, %rcx, 1), %rsi\n\t"
        "std\n\t"
        "rep movsb\n\t"
        "cld\n\t"
        "retq\n"

".section .text.nolibc_memset\n"
".weak memset\n"
"memset:\n"
        "xchgl %eax, %esi\n\t"
        "movq %rdx, %rcx\n\t"
        "pushq %rdi\n\t"
        "rep stosb\n\t"
        "popq %rax\n\t"
        "retq\n"
);
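/*
 * Rough C equivalent of the memmove/memcpy assembly above, to document the
 * overlap check (illustrative only; the real symbols are the weak assembly
 * ones defined above):
 *
 *   void *memmove(void *dst, const void *src, size_t len)
 *   {
 *           char *d = dst;
 *           const char *s = src;
 *
 *           if ((size_t)(d - s) >= len) {
 *                   // destination does not overlap past the source:
 *                   // forward copy ("rep movsb" with %rdi/%rsi ascending)
 *                   while (len--)
 *                           *d++ = *s++;
 *           } else {
 *                   // destination starts inside the source range:
 *                   // backward copy ("std; rep movsb; cld" path)
 *                   while (len--)
 *                           d[len] = s[len];
 *           }
 *           return dst;
 *   }
 */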
#endif /* _NOLIBC_ARCH_X86_64_H */