linux-user pull request for June 2016

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIVAwUAV1gdMrRIkN7ePJvAAQhLcg/+Kby99taEuewItrA1yDs75jxOlLqaJopd
 cVzo4LFRFPhIn4UEKqRQS0CGoIeU/DYOmObvuUzJxs2LyUoHoqmQOwEm5obC2a85
 JrHo/NOppYBbyvvIEAAXzZDCZo0KZKVclrlT+AX5obpOSNSvAnKvEuLWq1aQ9WGN
 n4AzHuFEl885cd4nFd8VK/xth89bqz6U/z8CjgIuw3mczp1XNrK5IJJwAy5epHay
 GCBr9XHooW3SU971WS20RTRS0D33tKPHgCU3ZeZ3rKh4g3JNj6/ixdVgzi9NqFsQ
 5DzAj/iBGhN1LtCOednRS6tUt32Bhy8G/g4O3GiXdejagAmNe2wz31cveNJ8S3W5
 DK8SZAnJlz06zN5uIpOVQgDOqfXZkCp7ndq779QJoHOAnuOjJBcUbhw1myz2R3eR
 6208tStWl3R0+ATEK8CZ7ejg1cUHvdzyqGJA+1nC2HaFUrBWipxN8jf2fz9vO/wG
 G7zNbahvVgyJWO7bPNK4mxkb6qkWCETnCnLJsq2ZbmtPEMcINjD8vNWLNvFGVG8b
 2HbinDrzh0Z9Zik5gLZfiVyP5HFaWSrJn9QRVIgaUjuIH9n3/25sl9OvW/JLjxJ+
 h2P17CLnAK6dhUYc4R3wQTx2X/N2FvO4DD8iMYOcgDY6fhZ2b6EEyE9yBgQrIDbF
 gU1AlC/CX+M=
 =AXqa
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/riku/tags/pull-linux-user-20160608' into staging

linux-user pull request for June 2016

# gpg: Signature made Wed 08 Jun 2016 14:27:14 BST
# gpg:                using RSA key 0xB44890DEDE3C9BC0
# gpg: Good signature from "Riku Voipio <riku.voipio@iki.fi>"
# gpg:                 aka "Riku Voipio <riku.voipio@linaro.org>"

* remotes/riku/tags/pull-linux-user-20160608: (44 commits)
  linux-user: In fork_end(), remove correct CPUs from CPU list
  linux-user: Special-case ERESTARTSYS in target_strerror()
  linux-user: Make target_strerror() return 'const char *'
  linux-user: Correct signedness of target_flock l_start and l_len fields
  linux-user: Use safe_syscall wrapper for ioctl
  linux-user: Use safe_syscall wrapper for accept and accept4 syscalls
  linux-user: Use safe_syscall wrapper for semop
  linux-user: Use safe_syscall wrapper for epoll_wait syscalls
  linux-user: Use safe_syscall wrapper for poll and ppoll syscalls
  linux-user: Use safe_syscall wrapper for sleep syscalls
  linux-user: Use safe_syscall wrapper for rt_sigtimedwait syscall
  linux-user: Use safe_syscall wrapper for flock
  linux-user: Use safe_syscall wrapper for mq_timedsend and mq_timedreceive
  linux-user: Use safe_syscall wrapper for msgsnd and msgrcv
  linux-user: Use safe_syscall wrapper for send* and recv* syscalls
  linux-user: Use safe_syscall wrapper for connect syscall
  linux-user: Use safe_syscall wrapper for readv and writev syscalls
  linux-user: Fix error conversion in 64-bit fadvise syscall
  linux-user: Fix NR_fadvise64 and NR_fadvise64_64 for 32-bit guests
  linux-user: Fix handling of arm_fadvise64_64 syscall
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

Conflicts:
	configure
	scripts/qemu-binfmt-conf.sh
This commit is contained in:
Peter Maydell 2016-06-08 18:34:32 +01:00
commit b66e10e4c9
11 changed files with 1698 additions and 518 deletions

38
configure vendored
View file

@@ -3800,8 +3800,8 @@ if compile_prog "" "" ; then
epoll=yes epoll=yes
fi fi
# epoll_create1 and epoll_pwait are later additions # epoll_create1 is a later addition
# so we must check separately for their presence # so we must check separately for its presence
epoll_create1=no epoll_create1=no
cat > $TMPC << EOF cat > $TMPC << EOF
#include <sys/epoll.h> #include <sys/epoll.h>
@@ -3823,20 +3823,6 @@ if compile_prog "" "" ; then
epoll_create1=yes epoll_create1=yes
fi fi
epoll_pwait=no
cat > $TMPC << EOF
#include <sys/epoll.h>
int main(void)
{
epoll_pwait(0, 0, 0, 0, 0);
return 0;
}
EOF
if compile_prog "" "" ; then
epoll_pwait=yes
fi
# check for sendfile support # check for sendfile support
sendfile=no sendfile=no
cat > $TMPC << EOF cat > $TMPC << EOF
@@ -4528,6 +4514,19 @@ if compile_prog "" "" ; then
have_fsxattr=yes have_fsxattr=yes
fi fi
##########################################
# check if rtnetlink.h exists and is useful
have_rtnetlink=no
cat > $TMPC << EOF
#include <linux/rtnetlink.h>
int main(void) {
return IFLA_PROTO_DOWN;
}
EOF
if compile_prog "" "" ; then
have_rtnetlink=yes
fi
################################################# #################################################
# Sparc implicitly links with --relax, which is # Sparc implicitly links with --relax, which is
# incompatible with -r, so --no-relax should be # incompatible with -r, so --no-relax should be
@@ -5135,9 +5134,6 @@ fi
if test "$epoll_create1" = "yes" ; then if test "$epoll_create1" = "yes" ; then
echo "CONFIG_EPOLL_CREATE1=y" >> $config_host_mak echo "CONFIG_EPOLL_CREATE1=y" >> $config_host_mak
fi fi
if test "$epoll_pwait" = "yes" ; then
echo "CONFIG_EPOLL_PWAIT=y" >> $config_host_mak
fi
if test "$sendfile" = "yes" ; then if test "$sendfile" = "yes" ; then
echo "CONFIG_SENDFILE=y" >> $config_host_mak echo "CONFIG_SENDFILE=y" >> $config_host_mak
fi fi
@@ -5482,6 +5478,10 @@ if test "$rdma" = "yes" ; then
echo "CONFIG_RDMA=y" >> $config_host_mak echo "CONFIG_RDMA=y" >> $config_host_mak
fi fi
if test "$have_rtnetlink" = "yes" ; then
echo "CONFIG_RTNETLINK=y" >> $config_host_mak
fi
# Hold two types of flag: # Hold two types of flag:
# CONFIG_THREAD_SETNAME_BYTHREAD - we've got a way of setting the name on # CONFIG_THREAD_SETNAME_BYTHREAD - we've got a way of setting the name on
# a thread we have a handle to # a thread we have a handle to

View file

@@ -1493,19 +1493,6 @@ void gdb_exit(CPUArchState *env, int code)
} }
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
int
gdb_queuesig (void)
{
GDBState *s;
s = gdbserver_state;
if (gdbserver_fd < 0 || s->fd < 0)
return 0;
else
return 1;
}
int int
gdb_handlesig(CPUState *cpu, int sig) gdb_handlesig(CPUState *cpu, int sig)
{ {

View file

@@ -48,7 +48,6 @@ int use_gdb_syscalls(void);
void gdb_set_stop_cpu(CPUState *cpu); void gdb_set_stop_cpu(CPUState *cpu);
void gdb_exit(CPUArchState *, int); void gdb_exit(CPUArchState *, int);
#ifdef CONFIG_USER_ONLY #ifdef CONFIG_USER_ONLY
int gdb_queuesig (void);
int gdb_handlesig(CPUState *, int); int gdb_handlesig(CPUState *, int);
void gdb_signalled(CPUArchState *, int); void gdb_signalled(CPUArchState *, int);
void gdbserver_fork(CPUState *); void gdbserver_fork(CPUState *);

View file

@@ -24,6 +24,7 @@
* -1-and-errno-set convention is done by the calling wrapper. * -1-and-errno-set convention is done by the calling wrapper.
*/ */
safe_syscall_base: safe_syscall_base:
.cfi_startproc
/* This saves a frame pointer and aligns the stack for the syscall. /* This saves a frame pointer and aligns the stack for the syscall.
* (It's unclear if the syscall ABI has the same stack alignment * (It's unclear if the syscall ABI has the same stack alignment
* requirements as the userspace function call ABI, but better safe than * requirements as the userspace function call ABI, but better safe than
@@ -31,6 +32,8 @@ safe_syscall_base:
* does not list any ABI differences regarding stack alignment.) * does not list any ABI differences regarding stack alignment.)
*/ */
push %rbp push %rbp
.cfi_adjust_cfa_offset 8
.cfi_rel_offset rbp, 0
/* The syscall calling convention isn't the same as the /* The syscall calling convention isn't the same as the
* C one: * C one:
@@ -70,12 +73,19 @@ safe_syscall_start:
safe_syscall_end: safe_syscall_end:
/* code path for having successfully executed the syscall */ /* code path for having successfully executed the syscall */
pop %rbp pop %rbp
.cfi_remember_state
.cfi_def_cfa_offset 8
.cfi_restore rbp
ret ret
return_ERESTARTSYS: return_ERESTARTSYS:
/* code path when we didn't execute the syscall */ /* code path when we didn't execute the syscall */
.cfi_restore_state
mov $-TARGET_ERESTARTSYS, %rax mov $-TARGET_ERESTARTSYS, %rax
pop %rbp pop %rbp
.cfi_def_cfa_offset 8
.cfi_restore rbp
ret ret
.cfi_endproc
.size safe_syscall_base, .-safe_syscall_base .size safe_syscall_base, .-safe_syscall_base

View file

@@ -131,7 +131,7 @@ void fork_end(int child)
Discard information about the parent threads. */ Discard information about the parent threads. */
CPU_FOREACH_SAFE(cpu, next_cpu) { CPU_FOREACH_SAFE(cpu, next_cpu) {
if (cpu != thread_cpu) { if (cpu != thread_cpu) {
QTAILQ_REMOVE(&cpus, thread_cpu, node); QTAILQ_REMOVE(&cpus, cpu, node);
} }
} }
pending_cpus = 0; pending_cpus = 0;
@@ -3795,14 +3795,7 @@ void stop_all_tasks(void)
/* Assumes contents are already zeroed. */ /* Assumes contents are already zeroed. */
void init_task_state(TaskState *ts) void init_task_state(TaskState *ts)
{ {
int i;
ts->used = 1; ts->used = 1;
ts->first_free = ts->sigqueue_table;
for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) {
ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1];
}
ts->sigqueue_table[i].next = NULL;
} }
CPUArchState *cpu_copy(CPUArchState *env) CPUArchState *cpu_copy(CPUArchState *env)

View file

@@ -78,16 +78,9 @@ struct vm86_saved_state {
#define MAX_SIGQUEUE_SIZE 1024 #define MAX_SIGQUEUE_SIZE 1024
struct sigqueue {
struct sigqueue *next;
target_siginfo_t info;
};
struct emulated_sigtable { struct emulated_sigtable {
int pending; /* true if signal is pending */ int pending; /* true if signal is pending */
struct sigqueue *first; target_siginfo_t info;
struct sigqueue info; /* in order to always have memory for the
first signal, we put it here */
}; };
/* NOTE: we force a big alignment so that the stack stored after is /* NOTE: we force a big alignment so that the stack stored after is
@@ -123,14 +116,32 @@ typedef struct TaskState {
#endif #endif
uint32_t stack_base; uint32_t stack_base;
int used; /* non zero if used */ int used; /* non zero if used */
bool sigsegv_blocked; /* SIGSEGV blocked by guest */
struct image_info *info; struct image_info *info;
struct linux_binprm *bprm; struct linux_binprm *bprm;
struct emulated_sigtable sync_signal;
struct emulated_sigtable sigtab[TARGET_NSIG]; struct emulated_sigtable sigtab[TARGET_NSIG];
struct sigqueue sigqueue_table[MAX_SIGQUEUE_SIZE]; /* siginfo queue */ /* This thread's signal mask, as requested by the guest program.
struct sigqueue *first_free; /* first free siginfo queue entry */ * The actual signal mask of this thread may differ:
int signal_pending; /* non zero if a signal may be pending */ * + we don't let SIGSEGV and SIGBUS be blocked while running guest code
* + sometimes we block all signals to avoid races
*/
sigset_t signal_mask;
/* The signal mask imposed by a guest sigsuspend syscall, if we are
* currently in the middle of such a syscall
*/
sigset_t sigsuspend_mask;
/* Nonzero if we're leaving a sigsuspend and sigsuspend_mask is valid. */
int in_sigsuspend;
/* Nonzero if process_pending_signals() needs to do something (either
* handle a pending signal or unblock signals).
* This flag is written from a signal handler so should be accessed via
* the atomic_read() and atomic_write() functions. (It is not accessed
* from multiple threads.)
*/
int signal_pending;
} __attribute__((aligned(16))) TaskState; } __attribute__((aligned(16))) TaskState;
extern char *exec_path; extern char *exec_path;
@@ -184,7 +195,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2); void gemu_log(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
extern THREAD CPUState *thread_cpu; extern THREAD CPUState *thread_cpu;
void cpu_loop(CPUArchState *env); void cpu_loop(CPUArchState *env);
char *target_strerror(int err); const char *target_strerror(int err);
int get_osversion(void); int get_osversion(void);
void init_qemu_uname_release(void); void init_qemu_uname_release(void);
void fork_start(void); void fork_start(void);
@@ -235,6 +246,12 @@ unsigned long init_guest_space(unsigned long host_start,
* It's also OK to implement these with safe_syscall, though it will be * It's also OK to implement these with safe_syscall, though it will be
* a little less efficient if a signal is delivered at the 'wrong' moment. * a little less efficient if a signal is delivered at the 'wrong' moment.
* *
* Some non-interruptible syscalls need to be handled using block_signals()
* to block signals for the duration of the syscall. This mainly applies
* to code which needs to modify the data structures used by the
* host_signal_handler() function and the functions it calls, including
* all syscalls which change the thread's signal mask.
*
* (2) Interruptible syscalls * (2) Interruptible syscalls
* *
* These are guest syscalls that can be interrupted by signals and * These are guest syscalls that can be interrupted by signals and
@@ -266,6 +283,8 @@ unsigned long init_guest_space(unsigned long host_start,
* you make in the implementation returns either -TARGET_ERESTARTSYS or * you make in the implementation returns either -TARGET_ERESTARTSYS or
* EINTR though.) * EINTR though.)
* *
* block_signals() cannot be used for interruptible syscalls.
*
* *
* How and why the safe_syscall implementation works: * How and why the safe_syscall implementation works:
* *
@@ -352,6 +371,25 @@ long do_sigreturn(CPUArchState *env);
long do_rt_sigreturn(CPUArchState *env); long do_rt_sigreturn(CPUArchState *env);
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp); abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp);
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset); int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset);
/**
* block_signals: block all signals while handling this guest syscall
*
* Block all signals, and arrange that the signal mask is returned to
* its correct value for the guest before we resume execution of guest code.
* If this function returns non-zero, then the caller should immediately
* return -TARGET_ERESTARTSYS to the main loop, which will take the pending
* signal and restart execution of the syscall.
* If block_signals() returns zero, then the caller can continue with
* emulation of the system call knowing that no signals can be taken
* (and therefore that no race conditions will result).
* This should only be called once, because if it is called a second time
* it will always return non-zero. (Think of it like a mutex that can't
* be recursively locked.)
* Signals will be unblocked again by process_pending_signals().
*
* Return value: non-zero if there was a pending signal, zero if not.
*/
int block_signals(void); /* Returns non zero if signal pending */
#ifdef TARGET_I386 #ifdef TARGET_I386
/* vm86.c */ /* vm86.c */

View file

@@ -17,6 +17,7 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>. * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/ */
#include "qemu/osdep.h" #include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h> #include <sys/ucontext.h>
#include <sys/resource.h> #include <sys/resource.h>
@@ -190,125 +191,213 @@ void target_to_host_old_sigset(sigset_t *sigset,
target_to_host_sigset(sigset, &d); target_to_host_sigset(sigset, &d);
} }
int block_signals(void)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
sigset_t set;
int pending;
/* It's OK to block everything including SIGSEGV, because we won't
* run any further guest code before unblocking signals in
* process_pending_signals().
*/
sigfillset(&set);
sigprocmask(SIG_SETMASK, &set, 0);
pending = atomic_xchg(&ts->signal_pending, 1);
return pending;
}
/* Wrapper for sigprocmask function /* Wrapper for sigprocmask function
* Emulates a sigprocmask in a safe way for the guest. Note that set and oldset * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
* are host signal set, not guest ones. This wraps the sigprocmask host calls * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
* that should be protected (calls originated from guest) * a signal was already pending and the syscall must be restarted, or
* 0 on success.
* If set is NULL, this is guaranteed not to fail.
*/ */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{ {
int ret; TaskState *ts = (TaskState *)thread_cpu->opaque;
sigset_t val;
sigset_t *temp = NULL; if (oldset) {
CPUState *cpu = thread_cpu; *oldset = ts->signal_mask;
TaskState *ts = (TaskState *)cpu->opaque; }
bool segv_was_blocked = ts->sigsegv_blocked;
if (set) { if (set) {
bool has_sigsegv = sigismember(set, SIGSEGV); int i;
val = *set;
temp = &val;
sigdelset(temp, SIGSEGV); if (block_signals()) {
return -TARGET_ERESTARTSYS;
}
switch (how) { switch (how) {
case SIG_BLOCK: case SIG_BLOCK:
if (has_sigsegv) { sigorset(&ts->signal_mask, &ts->signal_mask, set);
ts->sigsegv_blocked = true;
}
break; break;
case SIG_UNBLOCK: case SIG_UNBLOCK:
if (has_sigsegv) { for (i = 1; i <= NSIG; ++i) {
ts->sigsegv_blocked = false; if (sigismember(set, i)) {
sigdelset(&ts->signal_mask, i);
}
} }
break; break;
case SIG_SETMASK: case SIG_SETMASK:
ts->sigsegv_blocked = has_sigsegv; ts->signal_mask = *set;
break; break;
default: default:
g_assert_not_reached(); g_assert_not_reached();
} }
/* Silently ignore attempts to change blocking status of KILL or STOP */
sigdelset(&ts->signal_mask, SIGKILL);
sigdelset(&ts->signal_mask, SIGSTOP);
} }
return 0;
ret = sigprocmask(how, temp, oldset);
if (oldset && segv_was_blocked) {
sigaddset(oldset, SIGSEGV);
}
return ret;
} }
#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
!defined(TARGET_X86_64)
/* Just set the guest's signal mask to the specified value; the
* caller is assumed to have called block_signals() already.
*/
static void set_sigmask(const sigset_t *set)
{
TaskState *ts = (TaskState *)thread_cpu->opaque;
ts->signal_mask = *set;
}
#endif
/* siginfo conversion */ /* siginfo conversion */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo, static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
const siginfo_t *info) const siginfo_t *info)
{ {
int sig = host_to_target_signal(info->si_signo); int sig = host_to_target_signal(info->si_signo);
int si_code = info->si_code;
int si_type;
tinfo->si_signo = sig; tinfo->si_signo = sig;
tinfo->si_errno = 0; tinfo->si_errno = 0;
tinfo->si_code = info->si_code; tinfo->si_code = info->si_code;
if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV /* This is awkward, because we have to use a combination of
|| sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) { * the si_code and si_signo to figure out which of the union's
/* Should never come here, but who knows. The information for * members are valid. (Within the host kernel it is always possible
the target is irrelevant. */ * to tell, but the kernel carefully avoids giving userspace the
tinfo->_sifields._sigfault._addr = 0; * high 16 bits of si_code, so we don't have the information to
} else if (sig == TARGET_SIGIO) { * do this the easy way...) We therefore make our best guess,
tinfo->_sifields._sigpoll._band = info->si_band; * bearing in mind that a guest can spoof most of the si_codes
tinfo->_sifields._sigpoll._fd = info->si_fd; * via rt_sigqueueinfo() if it likes.
} else if (sig == TARGET_SIGCHLD) { *
tinfo->_sifields._sigchld._pid = info->si_pid; * Once we have made our guess, we record it in the top 16 bits of
tinfo->_sifields._sigchld._uid = info->si_uid; * the si_code, so that tswap_siginfo() later can use it.
tinfo->_sifields._sigchld._status * tswap_siginfo() will strip these top bits out before writing
* si_code to the guest (sign-extending the lower bits).
*/
switch (si_code) {
case SI_USER:
case SI_TKILL:
case SI_KERNEL:
/* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
* These are the only unspoofable si_code values.
*/
tinfo->_sifields._kill._pid = info->si_pid;
tinfo->_sifields._kill._uid = info->si_uid;
si_type = QEMU_SI_KILL;
break;
default:
/* Everything else is spoofable. Make best guess based on signal */
switch (sig) {
case TARGET_SIGCHLD:
tinfo->_sifields._sigchld._pid = info->si_pid;
tinfo->_sifields._sigchld._uid = info->si_uid;
tinfo->_sifields._sigchld._status
= host_to_target_waitstatus(info->si_status); = host_to_target_waitstatus(info->si_status);
tinfo->_sifields._sigchld._utime = info->si_utime; tinfo->_sifields._sigchld._utime = info->si_utime;
tinfo->_sifields._sigchld._stime = info->si_stime; tinfo->_sifields._sigchld._stime = info->si_stime;
} else if (sig >= TARGET_SIGRTMIN) { si_type = QEMU_SI_CHLD;
tinfo->_sifields._rt._pid = info->si_pid; break;
tinfo->_sifields._rt._uid = info->si_uid; case TARGET_SIGIO:
/* XXX: potential problem if 64 bit */ tinfo->_sifields._sigpoll._band = info->si_band;
tinfo->_sifields._rt._sigval.sival_ptr tinfo->_sifields._sigpoll._fd = info->si_fd;
si_type = QEMU_SI_POLL;
break;
default:
/* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
tinfo->_sifields._rt._pid = info->si_pid;
tinfo->_sifields._rt._uid = info->si_uid;
/* XXX: potential problem if 64 bit */
tinfo->_sifields._rt._sigval.sival_ptr
= (abi_ulong)(unsigned long)info->si_value.sival_ptr; = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
si_type = QEMU_SI_RT;
break;
}
break;
} }
tinfo->si_code = deposit32(si_code, 16, 16, si_type);
} }
static void tswap_siginfo(target_siginfo_t *tinfo, static void tswap_siginfo(target_siginfo_t *tinfo,
const target_siginfo_t *info) const target_siginfo_t *info)
{ {
int sig = info->si_signo; int si_type = extract32(info->si_code, 16, 16);
tinfo->si_signo = tswap32(sig); int si_code = sextract32(info->si_code, 0, 16);
tinfo->si_errno = tswap32(info->si_errno);
tinfo->si_code = tswap32(info->si_code);
if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV __put_user(info->si_signo, &tinfo->si_signo);
|| sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) { __put_user(info->si_errno, &tinfo->si_errno);
tinfo->_sifields._sigfault._addr __put_user(si_code, &tinfo->si_code);
= tswapal(info->_sifields._sigfault._addr);
} else if (sig == TARGET_SIGIO) { /* We can use our internal marker of which fields in the structure
tinfo->_sifields._sigpoll._band * are valid, rather than duplicating the guesswork of
= tswap32(info->_sifields._sigpoll._band); * host_to_target_siginfo_noswap() here.
tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd); */
} else if (sig == TARGET_SIGCHLD) { switch (si_type) {
tinfo->_sifields._sigchld._pid case QEMU_SI_KILL:
= tswap32(info->_sifields._sigchld._pid); __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
tinfo->_sifields._sigchld._uid __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
= tswap32(info->_sifields._sigchld._uid); break;
tinfo->_sifields._sigchld._status case QEMU_SI_TIMER:
= tswap32(info->_sifields._sigchld._status); __put_user(info->_sifields._timer._timer1,
tinfo->_sifields._sigchld._utime &tinfo->_sifields._timer._timer1);
= tswapal(info->_sifields._sigchld._utime); __put_user(info->_sifields._timer._timer2,
tinfo->_sifields._sigchld._stime &tinfo->_sifields._timer._timer2);
= tswapal(info->_sifields._sigchld._stime); break;
} else if (sig >= TARGET_SIGRTMIN) { case QEMU_SI_POLL:
tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid); __put_user(info->_sifields._sigpoll._band,
tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid); &tinfo->_sifields._sigpoll._band);
tinfo->_sifields._rt._sigval.sival_ptr __put_user(info->_sifields._sigpoll._fd,
= tswapal(info->_sifields._rt._sigval.sival_ptr); &tinfo->_sifields._sigpoll._fd);
break;
case QEMU_SI_FAULT:
__put_user(info->_sifields._sigfault._addr,
&tinfo->_sifields._sigfault._addr);
break;
case QEMU_SI_CHLD:
__put_user(info->_sifields._sigchld._pid,
&tinfo->_sifields._sigchld._pid);
__put_user(info->_sifields._sigchld._uid,
&tinfo->_sifields._sigchld._uid);
__put_user(info->_sifields._sigchld._status,
&tinfo->_sifields._sigchld._status);
__put_user(info->_sifields._sigchld._utime,
&tinfo->_sifields._sigchld._utime);
__put_user(info->_sifields._sigchld._stime,
&tinfo->_sifields._sigchld._stime);
break;
case QEMU_SI_RT:
__put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
__put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
__put_user(info->_sifields._rt._sigval.sival_ptr,
&tinfo->_sifields._rt._sigval.sival_ptr);
break;
default:
g_assert_not_reached();
} }
} }
void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info) void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{ {
host_to_target_siginfo_noswap(tinfo, info); host_to_target_siginfo_noswap(tinfo, info);
@@ -319,13 +408,18 @@ void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
/* XXX: find a solution for 64 bit (additional malloced data is needed) */ /* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo) void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{ {
info->si_signo = tswap32(tinfo->si_signo); /* This conversion is used only for the rt_sigqueueinfo syscall,
info->si_errno = tswap32(tinfo->si_errno); * and so we know that the _rt fields are the valid ones.
info->si_code = tswap32(tinfo->si_code); */
info->si_pid = tswap32(tinfo->_sifields._rt._pid); abi_ulong sival_ptr;
info->si_uid = tswap32(tinfo->_sifields._rt._uid);
info->si_value.sival_ptr = __get_user(info->si_signo, &tinfo->si_signo);
(void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr); __get_user(info->si_errno, &tinfo->si_errno);
__get_user(info->si_code, &tinfo->si_code);
__get_user(info->si_pid, &tinfo->_sifields._rt._pid);
__get_user(info->si_uid, &tinfo->_sifields._rt._uid);
__get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
info->si_value.sival_ptr = (void *)(long)sival_ptr;
} }
static int fatal_signal (int sig) static int fatal_signal (int sig)
@@ -367,6 +461,7 @@ static int core_dump_signal(int sig)
void signal_init(void) void signal_init(void)
{ {
TaskState *ts = (TaskState *)thread_cpu->opaque;
struct sigaction act; struct sigaction act;
struct sigaction oact; struct sigaction oact;
int i, j; int i, j;
@@ -382,6 +477,9 @@ void signal_init(void)
target_to_host_signal_table[j] = i; target_to_host_signal_table[j] = i;
} }
/* Set the signal mask from the host mask. */
sigprocmask(0, 0, &ts->signal_mask);
/* set all host signal handlers. ALL signals are blocked during /* set all host signal handlers. ALL signals are blocked during
the handlers to serialize them. */ the handlers to serialize them. */
memset(sigact_table, 0, sizeof(sigact_table)); memset(sigact_table, 0, sizeof(sigact_table));
@@ -408,27 +506,6 @@ void signal_init(void)
} }
} }
/* signal queue handling */
static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
{
CPUState *cpu = ENV_GET_CPU(env);
TaskState *ts = cpu->opaque;
struct sigqueue *q = ts->first_free;
if (!q)
return NULL;
ts->first_free = q->next;
return q;
}
static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
{
CPUState *cpu = ENV_GET_CPU(env);
TaskState *ts = cpu->opaque;
q->next = ts->first_free;
ts->first_free = q;
}
/* abort execution with signal */ /* abort execution with signal */
static void QEMU_NORETURN force_sig(int target_sig) static void QEMU_NORETURN force_sig(int target_sig)
@@ -490,75 +567,21 @@ int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
{ {
CPUState *cpu = ENV_GET_CPU(env); CPUState *cpu = ENV_GET_CPU(env);
TaskState *ts = cpu->opaque; TaskState *ts = cpu->opaque;
struct emulated_sigtable *k;
struct sigqueue *q, **pq;
abi_ulong handler;
int queue;
trace_user_queue_signal(env, sig); trace_user_queue_signal(env, sig);
k = &ts->sigtab[sig - 1];
queue = gdb_queuesig ();
handler = sigact_table[sig - 1]._sa_handler;
if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) { /* Currently all callers define siginfo structures which
/* Guest has blocked SIGSEGV but we got one anyway. Assume this * use the _sifields._sigfault union member, so we can
* is a forced SIGSEGV (ie one the kernel handles via force_sig_info * set the type here. If that changes we should push this
* because it got a real MMU fault). A blocked SIGSEGV in that * out so the si_type is passed in by callers.
* situation is treated as if using the default handler. This is */
* not correct if some other process has randomly sent us a SIGSEGV info->si_code = deposit32(info->si_code, 16, 16, QEMU_SI_FAULT);
* via kill(), but that is not easy to distinguish at this point,
* so we assume it doesn't happen.
*/
handler = TARGET_SIG_DFL;
}
if (!queue && handler == TARGET_SIG_DFL) { ts->sync_signal.info = *info;
if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) { ts->sync_signal.pending = sig;
kill(getpid(),SIGSTOP); /* signal that a new signal is pending */
return 0; atomic_set(&ts->signal_pending, 1);
} else return 1; /* indicates that the signal was queued */
/* default handler : ignore some signal. The other are fatal */
if (sig != TARGET_SIGCHLD &&
sig != TARGET_SIGURG &&
sig != TARGET_SIGWINCH &&
sig != TARGET_SIGCONT) {
force_sig(sig);
} else {
return 0; /* indicate ignored */
}
} else if (!queue && handler == TARGET_SIG_IGN) {
/* ignore signal */
return 0;
} else if (!queue && handler == TARGET_SIG_ERR) {
force_sig(sig);
} else {
pq = &k->first;
if (sig < TARGET_SIGRTMIN) {
/* if non real time signal, we queue exactly one signal */
if (!k->pending)
q = &k->info;
else
return 0;
} else {
if (!k->pending) {
/* first signal */
q = &k->info;
} else {
q = alloc_sigqueue(env);
if (!q)
return -EAGAIN;
while (*pq != NULL)
pq = &(*pq)->next;
}
}
*pq = q;
q->info = *info;
q->next = NULL;
k->pending = 1;
/* signal that a new signal is pending */
ts->signal_pending = 1;
return 1; /* indicates that the signal was queued */
}
} }
#ifndef HAVE_SAFE_SYSCALL #ifndef HAVE_SAFE_SYSCALL
@@ -572,8 +595,13 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
void *puc) void *puc)
{ {
CPUArchState *env = thread_cpu->env_ptr; CPUArchState *env = thread_cpu->env_ptr;
CPUState *cpu = ENV_GET_CPU(env);
TaskState *ts = cpu->opaque;
int sig; int sig;
target_siginfo_t tinfo; target_siginfo_t tinfo;
ucontext_t *uc = puc;
struct emulated_sigtable *k;
/* the CPU emulator uses some host signals to detect exceptions, /* the CPU emulator uses some host signals to detect exceptions,
we forward to it some signals */ we forward to it some signals */
@@ -592,10 +620,23 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
rewind_if_in_safe_syscall(puc); rewind_if_in_safe_syscall(puc);
host_to_target_siginfo_noswap(&tinfo, info); host_to_target_siginfo_noswap(&tinfo, info);
if (queue_signal(env, sig, &tinfo) == 1) { k = &ts->sigtab[sig - 1];
/* interrupt the virtual CPU as soon as possible */ k->info = tinfo;
cpu_exit(thread_cpu); k->pending = sig;
} ts->signal_pending = 1;
/* Block host signals until target signal handler entered. We
* can't block SIGSEGV or SIGBUS while we're executing guest
* code in case the guest code provokes one in the window between
* now and it getting out to the main loop. Signals will be
* unblocked again in process_pending_signals().
*/
sigfillset(&uc->uc_sigmask);
sigdelset(&uc->uc_sigmask, SIGSEGV);
sigdelset(&uc->uc_sigmask, SIGBUS);
/* interrupt the virtual CPU as soon as possible */
cpu_exit(thread_cpu);
} }
/* do_sigaltstack() returns target values and errnos. */ /* do_sigaltstack() returns target values and errnos. */
@@ -671,7 +712,7 @@ out:
return ret; return ret;
} }
/* do_sigaction() return host values and errnos */ /* do_sigaction() return target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act, int do_sigaction(int sig, const struct target_sigaction *act,
struct target_sigaction *oact) struct target_sigaction *oact)
{ {
@@ -680,8 +721,14 @@ int do_sigaction(int sig, const struct target_sigaction *act,
int host_sig; int host_sig;
int ret = 0; int ret = 0;
if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
return -EINVAL; return -TARGET_EINVAL;
}
if (block_signals()) {
return -TARGET_ERESTARTSYS;
}
k = &sigact_table[sig - 1]; k = &sigact_table[sig - 1];
if (oact) { if (oact) {
__put_user(k->_sa_handler, &oact->_sa_handler); __put_user(k->_sa_handler, &oact->_sa_handler);
@ -1093,7 +1140,7 @@ long do_sigreturn(CPUX86State *env)
} }
target_to_host_sigset_internal(&set, &target_set); target_to_host_sigset_internal(&set, &target_set);
do_sigprocmask(SIG_SETMASK, &set, NULL); set_sigmask(&set);
/* restore registers */ /* restore registers */
if (restore_sigcontext(env, &frame->sc)) if (restore_sigcontext(env, &frame->sc))
@ -1118,7 +1165,7 @@ long do_rt_sigreturn(CPUX86State *env)
if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
goto badframe; goto badframe;
target_to_host_sigset(&set, &frame->uc.tuc_sigmask); target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
do_sigprocmask(SIG_SETMASK, &set, NULL); set_sigmask(&set);
if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
goto badframe; goto badframe;
@ -1258,7 +1305,7 @@ static int target_restore_sigframe(CPUARMState *env,
uint64_t pstate; uint64_t pstate;
target_to_host_sigset(&set, &sf->uc.tuc_sigmask); target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
do_sigprocmask(SIG_SETMASK, &set, NULL); set_sigmask(&set);
for (i = 0; i < 31; i++) { for (i = 0; i < 31; i++) {
__get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]); __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
@ -1900,7 +1947,7 @@ static long do_sigreturn_v1(CPUARMState *env)
} }
target_to_host_sigset_internal(&host_set, &set); target_to_host_sigset_internal(&host_set, &set);
do_sigprocmask(SIG_SETMASK, &host_set, NULL); set_sigmask(&host_set);
if (restore_sigcontext(env, &frame->sc)) { if (restore_sigcontext(env, &frame->sc)) {
goto badframe; goto badframe;
@ -1981,7 +2028,7 @@ static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
abi_ulong *regspace; abi_ulong *regspace;
target_to_host_sigset(&host_set, &uc->tuc_sigmask); target_to_host_sigset(&host_set, &uc->tuc_sigmask);
do_sigprocmask(SIG_SETMASK, &host_set, NULL); set_sigmask(&host_set);
if (restore_sigcontext(env, &uc->tuc_mcontext)) if (restore_sigcontext(env, &uc->tuc_mcontext))
return 1; return 1;
@ -2077,7 +2124,7 @@ static long do_rt_sigreturn_v1(CPUARMState *env)
} }
target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
do_sigprocmask(SIG_SETMASK, &host_set, NULL); set_sigmask(&host_set);
if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
goto badframe; goto badframe;
@ -2453,7 +2500,7 @@ long do_sigreturn(CPUSPARCState *env)
} }
target_to_host_sigset_internal(&host_set, &set); target_to_host_sigset_internal(&host_set, &set);
do_sigprocmask(SIG_SETMASK, &host_set, NULL); set_sigmask(&host_set);
if (err) { if (err) {
goto segv_and_exit; goto segv_and_exit;
@ -2576,7 +2623,7 @@ void sparc64_set_context(CPUSPARCState *env)
} }
} }
target_to_host_sigset_internal(&set, &target_set); target_to_host_sigset_internal(&set, &target_set);
do_sigprocmask(SIG_SETMASK, &set, NULL); set_sigmask(&set);
} }
env->pc = pc; env->pc = pc;
env->npc = npc; env->npc = npc;
@ -2664,9 +2711,13 @@ void sparc64_get_context(CPUSPARCState *env)
env->pc = env->npc; env->pc = env->npc;
env->npc += 4; env->npc += 4;
err = 0; /* If we're only reading the signal mask then do_sigprocmask()
* is guaranteed not to fail, which is important because we don't
do_sigprocmask(0, NULL, &set); * have any way to signal a failure or restart this operation since
* this is not a normal syscall.
*/
err = do_sigprocmask(0, NULL, &set);
assert(err == 0);
host_to_target_sigset_internal(&target_set, &set); host_to_target_sigset_internal(&target_set, &set);
if (TARGET_NSIG_WORDS == 1) { if (TARGET_NSIG_WORDS == 1) {
__put_user(target_set.sig[0], __put_user(target_set.sig[0],
@ -2993,7 +3044,7 @@ long do_sigreturn(CPUMIPSState *regs)
} }
target_to_host_sigset_internal(&blocked, &target_set); target_to_host_sigset_internal(&blocked, &target_set);
do_sigprocmask(SIG_SETMASK, &blocked, NULL); set_sigmask(&blocked);
restore_sigcontext(regs, &frame->sf_sc); restore_sigcontext(regs, &frame->sf_sc);
@ -3097,7 +3148,7 @@ long do_rt_sigreturn(CPUMIPSState *env)
} }
target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
do_sigprocmask(SIG_SETMASK, &blocked, NULL); set_sigmask(&blocked);
restore_sigcontext(env, &frame->rs_uc.tuc_mcontext); restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
@ -3371,7 +3422,7 @@ long do_sigreturn(CPUSH4State *regs)
goto badframe; goto badframe;
target_to_host_sigset_internal(&blocked, &target_set); target_to_host_sigset_internal(&blocked, &target_set);
do_sigprocmask(SIG_SETMASK, &blocked, NULL); set_sigmask(&blocked);
restore_sigcontext(regs, &frame->sc); restore_sigcontext(regs, &frame->sc);
@ -3397,7 +3448,7 @@ long do_rt_sigreturn(CPUSH4State *regs)
} }
target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask); target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
do_sigprocmask(SIG_SETMASK, &blocked, NULL); set_sigmask(&blocked);
restore_sigcontext(regs, &frame->uc.tuc_mcontext); restore_sigcontext(regs, &frame->uc.tuc_mcontext);
@ -3621,7 +3672,7 @@ long do_sigreturn(CPUMBState *env)
__get_user(target_set.sig[i], &frame->extramask[i - 1]); __get_user(target_set.sig[i], &frame->extramask[i - 1]);
} }
target_to_host_sigset_internal(&set, &target_set); target_to_host_sigset_internal(&set, &target_set);
do_sigprocmask(SIG_SETMASK, &set, NULL); set_sigmask(&set);
restore_sigcontext(&frame->uc.tuc_mcontext, env); restore_sigcontext(&frame->uc.tuc_mcontext, env);
/* We got here through a sigreturn syscall, our path back is via an /* We got here through a sigreturn syscall, our path back is via an
@ -3792,7 +3843,7 @@ long do_sigreturn(CPUCRISState *env)
__get_user(target_set.sig[i], &frame->extramask[i - 1]); __get_user(target_set.sig[i], &frame->extramask[i - 1]);
} }
target_to_host_sigset_internal(&set, &target_set); target_to_host_sigset_internal(&set, &target_set);
do_sigprocmask(SIG_SETMASK, &set, NULL); set_sigmask(&set);
restore_sigcontext(&frame->sc, env); restore_sigcontext(&frame->sc, env);
unlock_user_struct(frame, frame_addr, 0); unlock_user_struct(frame, frame_addr, 0);
@ -4284,7 +4335,7 @@ long do_sigreturn(CPUS390XState *env)
__get_user(target_set.sig[0], &frame->sc.oldmask[0]); __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
target_to_host_sigset_internal(&set, &target_set); target_to_host_sigset_internal(&set, &target_set);
do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */ set_sigmask(&set); /* ~_BLOCKABLE? */
if (restore_sigregs(env, &frame->sregs)) { if (restore_sigregs(env, &frame->sregs)) {
goto badframe; goto badframe;
@ -4310,7 +4361,7 @@ long do_rt_sigreturn(CPUS390XState *env)
} }
target_to_host_sigset(&set, &frame->uc.tuc_sigmask); target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */ set_sigmask(&set); /* ~_BLOCKABLE? */
if (restore_sigregs(env, &frame->uc.tuc_mcontext)) { if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
goto badframe; goto badframe;
@ -4872,7 +4923,7 @@ long do_sigreturn(CPUPPCState *env)
__get_user(set.sig[1], &sc->_unused[3]); __get_user(set.sig[1], &sc->_unused[3]);
#endif #endif
target_to_host_sigset_internal(&blocked, &set); target_to_host_sigset_internal(&blocked, &set);
do_sigprocmask(SIG_SETMASK, &blocked, NULL); set_sigmask(&blocked);
__get_user(sr_addr, &sc->regs); __get_user(sr_addr, &sc->regs);
if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
@ -4913,7 +4964,7 @@ static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
return 1; return 1;
target_to_host_sigset_internal(&blocked, &set); target_to_host_sigset_internal(&blocked, &set);
do_sigprocmask(SIG_SETMASK, &blocked, NULL); set_sigmask(&blocked);
restore_user_regs(env, mcp, sig); restore_user_regs(env, mcp, sig);
unlock_user_struct(mcp, mcp_addr, 1); unlock_user_struct(mcp, mcp_addr, 1);
@ -5261,7 +5312,7 @@ long do_sigreturn(CPUM68KState *env)
} }
target_to_host_sigset_internal(&set, &target_set); target_to_host_sigset_internal(&set, &target_set);
do_sigprocmask(SIG_SETMASK, &set, NULL); set_sigmask(&set);
/* restore registers */ /* restore registers */
@ -5287,7 +5338,7 @@ long do_rt_sigreturn(CPUM68KState *env)
goto badframe; goto badframe;
target_to_host_sigset_internal(&set, &target_set); target_to_host_sigset_internal(&set, &target_set);
do_sigprocmask(SIG_SETMASK, &set, NULL); set_sigmask(&set);
/* restore registers */ /* restore registers */
@ -5530,7 +5581,7 @@ long do_sigreturn(CPUAlphaState *env)
__get_user(target_set.sig[0], &sc->sc_mask); __get_user(target_set.sig[0], &sc->sc_mask);
target_to_host_sigset_internal(&set, &target_set); target_to_host_sigset_internal(&set, &target_set);
do_sigprocmask(SIG_SETMASK, &set, NULL); set_sigmask(&set);
restore_sigcontext(env, sc); restore_sigcontext(env, sc);
unlock_user_struct(sc, sc_addr, 0); unlock_user_struct(sc, sc_addr, 0);
@ -5551,7 +5602,7 @@ long do_rt_sigreturn(CPUAlphaState *env)
goto badframe; goto badframe;
} }
target_to_host_sigset(&set, &frame->uc.tuc_sigmask); target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
do_sigprocmask(SIG_SETMASK, &set, NULL); set_sigmask(&set);
restore_sigcontext(env, &frame->uc.tuc_mcontext); restore_sigcontext(env, &frame->uc.tuc_mcontext);
if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
@ -5718,7 +5769,7 @@ long do_rt_sigreturn(CPUTLGState *env)
goto badframe; goto badframe;
} }
target_to_host_sigset(&set, &frame->uc.tuc_sigmask); target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
do_sigprocmask(SIG_SETMASK, &set, NULL); set_sigmask(&set);
restore_sigcontext(env, &frame->uc.tuc_mcontext); restore_sigcontext(env, &frame->uc.tuc_mcontext);
if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
@ -5765,39 +5816,19 @@ long do_rt_sigreturn(CPUArchState *env)
#endif #endif
void process_pending_signals(CPUArchState *cpu_env) static void handle_pending_signal(CPUArchState *cpu_env, int sig)
{ {
CPUState *cpu = ENV_GET_CPU(cpu_env); CPUState *cpu = ENV_GET_CPU(cpu_env);
int sig;
abi_ulong handler; abi_ulong handler;
sigset_t set, old_set; sigset_t set;
target_sigset_t target_old_set; target_sigset_t target_old_set;
struct emulated_sigtable *k;
struct target_sigaction *sa; struct target_sigaction *sa;
struct sigqueue *q;
TaskState *ts = cpu->opaque; TaskState *ts = cpu->opaque;
struct emulated_sigtable *k = &ts->sigtab[sig - 1];
if (!ts->signal_pending)
return;
/* FIXME: This is not threadsafe. */
k = ts->sigtab;
for(sig = 1; sig <= TARGET_NSIG; sig++) {
if (k->pending)
goto handle_signal;
k++;
}
/* if no signal is pending, just return */
ts->signal_pending = 0;
return;
handle_signal:
trace_user_handle_signal(cpu_env, sig); trace_user_handle_signal(cpu_env, sig);
/* dequeue signal */ /* dequeue signal */
q = k->first; k->pending = 0;
k->first = q->next;
if (!k->first)
k->pending = 0;
sig = gdb_handlesig(cpu, sig); sig = gdb_handlesig(cpu, sig);
if (!sig) { if (!sig) {
@ -5808,14 +5839,6 @@ void process_pending_signals(CPUArchState *cpu_env)
handler = sa->_sa_handler; handler = sa->_sa_handler;
} }
if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
/* Guest has blocked SIGSEGV but we got one anyway. Assume this
* is a forced SIGSEGV (ie one the kernel handles via force_sig_info
* because it got a real MMU fault), and treat as if default handler.
*/
handler = TARGET_SIG_DFL;
}
if (handler == TARGET_SIG_DFL) { if (handler == TARGET_SIG_DFL) {
/* default handler : ignore some signal. The other are job control or fatal */ /* default handler : ignore some signal. The other are job control or fatal */
if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) { if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
@ -5832,17 +5855,23 @@ void process_pending_signals(CPUArchState *cpu_env)
force_sig(sig); force_sig(sig);
} else { } else {
/* compute the blocked signals during the handler execution */ /* compute the blocked signals during the handler execution */
sigset_t *blocked_set;
target_to_host_sigset(&set, &sa->sa_mask); target_to_host_sigset(&set, &sa->sa_mask);
/* SA_NODEFER indicates that the current signal should not be /* SA_NODEFER indicates that the current signal should not be
blocked during the handler */ blocked during the handler */
if (!(sa->sa_flags & TARGET_SA_NODEFER)) if (!(sa->sa_flags & TARGET_SA_NODEFER))
sigaddset(&set, target_to_host_signal(sig)); sigaddset(&set, target_to_host_signal(sig));
/* block signals in the handler using Linux */
do_sigprocmask(SIG_BLOCK, &set, &old_set);
/* save the previous blocked signal state to restore it at the /* save the previous blocked signal state to restore it at the
end of the signal execution (see do_sigreturn) */ end of the signal execution (see do_sigreturn) */
host_to_target_sigset_internal(&target_old_set, &old_set); host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
/* block signals in the handler */
blocked_set = ts->in_sigsuspend ?
&ts->sigsuspend_mask : &ts->signal_mask;
sigorset(&ts->signal_mask, blocked_set, &set);
ts->in_sigsuspend = 0;
/* if the CPU is in VM86 mode, we restore the 32 bit values */ /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64) #if defined(TARGET_I386) && !defined(TARGET_X86_64)
@ -5856,16 +5885,74 @@ void process_pending_signals(CPUArchState *cpu_env)
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \ #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
|| defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
/* These targets do not have traditional signals. */ /* These targets do not have traditional signals. */
setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env); setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else #else
if (sa->sa_flags & TARGET_SA_SIGINFO) if (sa->sa_flags & TARGET_SA_SIGINFO)
setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env); setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
else else
setup_frame(sig, sa, &target_old_set, cpu_env); setup_frame(sig, sa, &target_old_set, cpu_env);
#endif #endif
if (sa->sa_flags & TARGET_SA_RESETHAND) if (sa->sa_flags & TARGET_SA_RESETHAND) {
sa->_sa_handler = TARGET_SIG_DFL; sa->_sa_handler = TARGET_SIG_DFL;
}
} }
if (q != &k->info) }
free_sigqueue(cpu_env, q);
void process_pending_signals(CPUArchState *cpu_env)
{
CPUState *cpu = ENV_GET_CPU(cpu_env);
int sig;
TaskState *ts = cpu->opaque;
sigset_t set;
sigset_t *blocked_set;
while (atomic_read(&ts->signal_pending)) {
/* FIXME: This is not threadsafe. */
sigfillset(&set);
sigprocmask(SIG_SETMASK, &set, 0);
sig = ts->sync_signal.pending;
if (sig) {
/* Synchronous signals are forced,
* see force_sig_info() and callers in Linux
* Note that not all of our queue_signal() calls in QEMU correspond
* to force_sig_info() calls in Linux (some are send_sig_info()).
* However it seems like a kernel bug to me to allow the process
* to block a synchronous signal since it could then just end up
* looping round and round indefinitely.
*/
if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
|| sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
}
handle_pending_signal(cpu_env, sig);
}
for (sig = 1; sig <= TARGET_NSIG; sig++) {
blocked_set = ts->in_sigsuspend ?
&ts->sigsuspend_mask : &ts->signal_mask;
if (ts->sigtab[sig - 1].pending &&
(!sigismember(blocked_set,
target_to_host_signal_table[sig]))) {
handle_pending_signal(cpu_env, sig);
/* Restart scan from the beginning */
sig = 1;
}
}
/* if no signal is pending, unblock signals and recheck (the act
* of unblocking might cause us to take another host signal which
* will set signal_pending again).
*/
atomic_set(&ts->signal_pending, 0);
ts->in_sigsuspend = 0;
set = ts->signal_mask;
sigdelset(&set, SIGSEGV);
sigdelset(&set, SIGBUS);
sigprocmask(SIG_SETMASK, &set, 0);
}
ts->in_sigsuspend = 0;
} }

View file

@ -281,7 +281,7 @@ print_ipc(const struct syscallname *name,
static void static void
print_syscall_ret_addr(const struct syscallname *name, abi_long ret) print_syscall_ret_addr(const struct syscallname *name, abi_long ret)
{ {
char *errstr = NULL; const char *errstr = NULL;
if (ret < 0) { if (ret < 0) {
errstr = target_strerror(-ret); errstr = target_strerror(-ret);
@ -1594,7 +1594,7 @@ void
print_syscall_ret(int num, abi_long ret) print_syscall_ret(int num, abi_long ret)
{ {
int i; int i;
char *errstr = NULL; const char *errstr = NULL;
for(i=0;i<nsyscalls;i++) for(i=0;i<nsyscalls;i++)
if( scnames[i].nr == num ) { if( scnames[i].nr == num ) {

File diff suppressed because it is too large Load diff

View file

@ -673,6 +673,21 @@ typedef struct {
#define TARGET_SI_PAD_SIZE ((TARGET_SI_MAX_SIZE - TARGET_SI_PREAMBLE_SIZE) / sizeof(int)) #define TARGET_SI_PAD_SIZE ((TARGET_SI_MAX_SIZE - TARGET_SI_PREAMBLE_SIZE) / sizeof(int))
/* Within QEMU the top 16 bits of si_code indicate which of the parts of
* the union in target_siginfo is valid. This only applies between
* host_to_target_siginfo_noswap() and tswap_siginfo(); it does not
* appear either within host siginfo_t or in target_siginfo structures
* which we get from the guest userspace program. (The Linux kernel
* does a similar thing with using the top bits for its own internal
* purposes but not letting them be visible to userspace.)
*/
#define QEMU_SI_KILL 0
#define QEMU_SI_TIMER 1
#define QEMU_SI_POLL 2
#define QEMU_SI_FAULT 3
#define QEMU_SI_CHLD 4
#define QEMU_SI_RT 5
typedef struct target_siginfo { typedef struct target_siginfo {
#ifdef TARGET_MIPS #ifdef TARGET_MIPS
int si_signo; int si_signo;
@ -2274,34 +2289,34 @@ struct target_statfs64 {
#endif #endif
struct target_flock { struct target_flock {
short l_type; short l_type;
short l_whence; short l_whence;
abi_ulong l_start; abi_long l_start;
abi_ulong l_len; abi_long l_len;
int l_pid; int l_pid;
}; };
struct target_flock64 { struct target_flock64 {
short l_type; short l_type;
short l_whence; short l_whence;
#if defined(TARGET_PPC) || defined(TARGET_X86_64) || defined(TARGET_MIPS) \ #if defined(TARGET_PPC) || defined(TARGET_X86_64) || defined(TARGET_MIPS) \
|| defined(TARGET_SPARC) || defined(TARGET_HPPA) \ || defined(TARGET_SPARC) || defined(TARGET_HPPA) \
|| defined(TARGET_MICROBLAZE) || defined(TARGET_TILEGX) || defined(TARGET_MICROBLAZE) || defined(TARGET_TILEGX)
int __pad; int __pad;
#endif #endif
unsigned long long l_start; abi_llong l_start;
unsigned long long l_len; abi_llong l_len;
int l_pid; int l_pid;
} QEMU_PACKED; } QEMU_PACKED;
#ifdef TARGET_ARM #ifdef TARGET_ARM
struct target_eabi_flock64 { struct target_eabi_flock64 {
short l_type; short l_type;
short l_whence; short l_whence;
int __pad; int __pad;
unsigned long long l_start; abi_llong l_start;
unsigned long long l_len; abi_llong l_len;
int l_pid; int l_pid;
} QEMU_PACKED; } QEMU_PACKED;
#endif #endif

385
scripts/qemu-binfmt-conf.sh Normal file → Executable file
View file

@ -1,72 +1,323 @@
#!/bin/sh #!/bin/sh
# enable automatic i386/ARM/M68K/MIPS/SPARC/PPC/s390 program execution by the kernel # enable automatic i386/ARM/M68K/MIPS/SPARC/PPC/s390 program execution by the kernel
# load the binfmt_misc module qemu_target_list="i386 i486 alpha arm sparc32plus ppc ppc64 ppc64le m68k \
if [ ! -d /proc/sys/fs/binfmt_misc ]; then mips mipsel mipsn32 mipsn32el mips64 mips64el \
/sbin/modprobe binfmt_misc sh4 sh4eb s390x aarch64"
fi
if [ ! -f /proc/sys/fs/binfmt_misc/register ]; then
mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc
fi
# probe cpu type i386_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x03\x00'
cpu=$(uname -m) i386_mask='\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
case "$cpu" in i386_family=i386
i386|i486|i586|i686|i86pc|BePC|x86_64)
cpu="i386"
;;
m68k)
cpu="m68k"
;;
mips*)
cpu="mips"
;;
"Power Macintosh"|ppc|ppc64)
cpu="ppc"
;;
armv[4-9]*)
cpu="arm"
;;
esac
# register the interpreter for each cpu except for the native one i486_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x06\x00'
if [ $cpu != "i386" ] ; then i486_mask='\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
echo ':i386:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x03\x00:\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-i386:' > /proc/sys/fs/binfmt_misc/register i486_family=i386
echo ':i486:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x06\x00:\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-i386:' > /proc/sys/fs/binfmt_misc/register
fi alpha_magic='\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x26\x90'
if [ $cpu != "alpha" ] ; then alpha_mask='\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
echo ':alpha:M::\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x26\x90:\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-alpha:' > /proc/sys/fs/binfmt_misc/register alpha_family=alpha
fi
if [ $cpu != "arm" ] ; then arm_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28\x00'
echo ':arm:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-arm:' > /proc/sys/fs/binfmt_misc/register arm_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
echo ':armeb:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-armeb:' > /proc/sys/fs/binfmt_misc/register arm_family=arm
fi
if [ $cpu != "aarch64" ] ; then armeb_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28'
echo ':aarch64:M::\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-aarch64:' > /proc/sys/fs/binfmt_misc/register armeb_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'
fi armeb_family=arm
if [ $cpu != "sparc" ] ; then
echo ':sparc:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x02:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-sparc:' > /proc/sys/fs/binfmt_misc/register sparc_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x02'
fi sparc_mask='\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'
if [ $cpu != "ppc" ] ; then sparc_family=sparc
echo ':ppc:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x14:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-ppc:' > /proc/sys/fs/binfmt_misc/register
fi sparc32plus_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x12'
if [ $cpu != "m68k" ] ; then sparc32plus_mask='\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'
echo 'Please check cpu value and header information for m68k!' sparc32plus_family=sparc
echo ':m68k:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x04:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-m68k:' > /proc/sys/fs/binfmt_misc/register
fi ppc_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x14'
if [ $cpu != "mips" ] ; then ppc_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'
# FIXME: We could use the other endianness on a MIPS host. ppc_family=ppc
echo ':mips:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-mips:' > /proc/sys/fs/binfmt_misc/register
echo ':mipsel:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-mipsel:' > /proc/sys/fs/binfmt_misc/register ppc64_magic='\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x15'
echo ':mipsn32:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-mipsn32:' > /proc/sys/fs/binfmt_misc/register ppc64_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'
echo ':mipsn32el:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-mipsn32el:' > /proc/sys/fs/binfmt_misc/register ppc64_family=ppc
echo ':mips64:M::\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-mips64:' > /proc/sys/fs/binfmt_misc/register
echo ':mips64el:M::\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-mips64el:' > /proc/sys/fs/binfmt_misc/register ppc64le_magic='\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x15\x00'
fi ppc64le_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\x00'
if [ $cpu != "sh" ] ; then ppc64le_family=ppcle
echo ':sh4:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a\x00:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/local/bin/qemu-sh4:' > /proc/sys/fs/binfmt_misc/register
echo ':sh4eb:M::\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-sh4eb:' > /proc/sys/fs/binfmt_misc/register m68k_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x04'
fi m68k_mask='\xff\xff\xff\xff\xff\xff\xfe\xfe\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'
if [ $cpu != "s390x" ] ; then m68k_family=m68k
echo ':s390x:M::\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x16:\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff:/usr/local/bin/qemu-s390x:' > /proc/sys/fs/binfmt_misc/register
fi # FIXME: We could use the other endianness on a MIPS host.
mips_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08'
mips_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'
mips_family=mips
mipsel_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00'
mipsel_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
mipsel_family=mips
mipsn32_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08'
mipsn32_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'
mipsn32_family=mips
mipsn32el_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00'
mipsn32el_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
mipsn32el_family=mips
mips64_magic='\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08'
mips64_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'
mips64_family=mips
mips64el_magic='\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00'
mips64el_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
mips64el_family=mips
sh4_magic='\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a\x00'
sh4_mask='\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
sh4_family=sh4
sh4eb_magic='\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x2a'
sh4eb_mask='\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'
sh4eb_family=sh4
s390x_magic='\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x16'
s390x_mask='\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'
s390x_family=s390x
aarch64_magic='\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7\x00'
aarch64_mask='\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
aarch64_family=arm
qemu_get_family() {
    # Map the host CPU name (HOST_ARCH overrides `uname -m`) onto the
    # family names used by the per-target *_family variables, so that
    # we can skip registering an interpreter for the native family.
    cpu=${HOST_ARCH:-$(uname -m)}
    case "$cpu" in
    amd64|i386|i486|i586|i686|i86pc|BePC|x86_64)
        printf '%s\n' i386
        ;;
    mips*)
        printf '%s\n' mips
        ;;
    "Power Macintosh"|ppc64|powerpc|ppc)
        printf '%s\n' ppc
        ;;
    ppc64el|ppc64le)
        printf '%s\n' ppcle
        ;;
    arm|armel|armhf|arm64|armv[4-9]*)
        printf '%s\n' arm
        ;;
    sparc*)
        printf '%s\n' sparc
        ;;
    *)
        # Unknown host: the CPU name is its own family.
        printf '%s\n' "$cpu"
        ;;
    esac
}
usage() {
    # Print the help text to stdout.  Note the unquoted heredoc delimiter:
    # $QEMU_PATH, $SYSTEMDDIR, $DEBIANDIR and $EXPORTDIR are expanded at
    # call time, so the message reflects any overrides already parsed.
    cat <<EOF
Usage: qemu-binfmt-conf.sh [--qemu-path PATH][--debian][--systemd CPU]
                           [--help][--credential yes|no][--exportdir PATH]

       Configure binfmt_misc to use qemu interpreter

       --help:       display this usage
       --qemu-path:  set path to qemu interpreter ($QEMU_PATH)
       --debian:     don't write into /proc,
                     instead generate update-binfmts templates
       --systemd:    don't write into /proc,
                     instead generate file for systemd-binfmt.service
                     for the given CPU
       --exportdir:  define where to write configuration files
                     (default: $SYSTEMDDIR or $DEBIANDIR)
       --credential: if yes, credential and security tokens are
                     calculated according to the binary to interpret

    To import templates with update-binfmts, use :

        sudo update-binfmts --importdir ${EXPORTDIR:-$DEBIANDIR} --import qemu-CPU

    To remove interpreter, use :

        sudo update-binfmts --package qemu-CPU --remove qemu-CPU $QEMU_PATH

    With systemd, binfmt files are loaded by systemd-binfmt.service

    The environment variable HOST_ARCH allows to override 'uname' to generate
    configuration files for a different architecture than the current one.

    where CPU is one of:

        $qemu_target_list

EOF
}
qemu_check_access() {
    # Abort the whole script unless the path in $1 is writable.
    [ -w "$1" ] && return
    echo "ERROR: cannot write to $1" 1>&2
    exit 1
}
qemu_check_bintfmt_misc() {
    # Ensure binfmt_misc is loaded and mounted, then verify that the
    # register file is writable (we need root for that).
    if [ ! -d /proc/sys/fs/binfmt_misc ]; then
        /sbin/modprobe binfmt_misc || exit 1
    fi
    if [ ! -f /proc/sys/fs/binfmt_misc/register ]; then
        mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc || exit 1
    fi

    qemu_check_access /proc/sys/fs/binfmt_misc/register
}
installed_dpkg() {
    # Succeed (exit 0) iff the Debian package named in $1 is installed.
    dpkg --status "$1" > /dev/null 2>&1
}
qemu_check_debian() {
    # Sanity checks for --debian mode: warn (but continue) when the host
    # doesn't look like Debian or lacks binfmt-support, then make sure we
    # can write the update-binfmts templates.
    if [ ! -e /etc/debian_version ] ; then
        echo "WARNING: your system is not a Debian based distro" 1>&2
    else
        if ! installed_dpkg binfmt-support ; then
            echo "WARNING: package binfmt-support is needed" 1>&2
        fi
    fi
    qemu_check_access "$EXPORTDIR"
}
qemu_check_systemd() {
    # Sanity checks for --systemd mode: warn (but continue) when the
    # binfmt service is absent, then make sure the export dir is writable.
    systemctl -q is-enabled systemd-binfmt.service \
        || echo "WARNING: systemd-binfmt.service is missing or disabled" 1>&2
    qemu_check_access "$EXPORTDIR"
}
qemu_generate_register() {
    # Emit one binfmt_misc registration line for the current $cpu.
    # Use printf rather than echo: $magic and $mask contain backslash
    # escapes (\x7f, \xff, ...) which the echo builtin of some /bin/sh
    # implementations (e.g. dash) would expand; the kernel's binfmt_misc
    # parser must receive them literally and do the decoding itself.
    printf '%s\n' ":qemu-$cpu:M::$magic:$mask:$qemu:$FLAGS"
}
qemu_register_interpreter() {
    # Native mode: write the registration line straight into the kernel's
    # binfmt_misc register file (requires the checks done earlier).
    echo "Setting $qemu as binfmt interpreter for $cpu"
    qemu_generate_register > /proc/sys/fs/binfmt_misc/register
}
qemu_generate_systemd() {
    # systemd mode: drop the registration line into a .conf file that
    # systemd-binfmt.service will load at boot.
    echo "Setting $qemu as binfmt interpreter for $cpu for systemd-binfmt.service"
    qemu_generate_register > "$EXPORTDIR/qemu-$cpu.conf"
}
qemu_generate_debian() {
    # Debian mode: write an update-binfmts template for $cpu.
    # The unquoted heredoc delimiter is intentional — $cpu, $qemu,
    # $magic and $mask are expanded into the generated file.
    cat > "$EXPORTDIR/qemu-$cpu" <<EOF
package qemu-$cpu
interpreter $qemu
magic $magic
mask $mask
EOF
    # "OC" (set by --credential yes) maps to update-binfmts' own
    # "credentials yes" keyword rather than a binfmt_misc flag string.
    if [ "$FLAGS" = "OC" ] ; then
        echo "credentials yes" >> "$EXPORTDIR/qemu-$cpu"
    fi
}
qemu_set_binfmts() {
    # Register (via $BINFMT_SET) an interpreter for every target in
    # qemu_target_list whose CPU family differs from the host's.

    # probe cpu type
    host_family=$(qemu_get_family)

    for cpu in ${qemu_target_list} ; do
        # Indirect lookup of the per-target variables.  Assign directly
        # with eval instead of $(eval echo ...): the values hold backslash
        # escapes (\x7f...) which echo mangles on shells whose echo
        # expands escapes (e.g. dash as /bin/sh), and this also avoids a
        # subshell per lookup.
        eval magic="\$${cpu}_magic"
        eval mask="\$${cpu}_mask"
        eval family="\$${cpu}_family"

        if [ "$magic" = "" ] || [ "$mask" = "" ] || [ "$family" = "" ] ; then
            echo "INTERNAL ERROR: unknown cpu $cpu" 1>&2
            continue
        fi

        qemu="$QEMU_PATH/qemu-$cpu"
        if [ "$cpu" = "i486" ] ; then
            # i486 binaries are handled by the qemu-i386 interpreter
            qemu="$QEMU_PATH/qemu-i386"
        fi

        if [ "$host_family" != "$family" ] ; then
            $BINFMT_SET
        fi
    done
}
# Default mode: write directly into /proc (native binfmt_misc).
# --debian/--systemd below switch these to the template generators.
CHECK=qemu_check_bintfmt_misc
BINFMT_SET=qemu_register_interpreter

# Default export directories for --systemd and --debian modes.
SYSTEMDDIR="/etc/binfmt.d"
DEBIANDIR="/usr/share/binfmts"

# Default interpreter location; override with --qemu-path.
QEMU_PATH=/usr/local/bin
# binfmt_misc flags; "OC" (credentials) is set by --credential yes.
FLAGS=""
# Parse command-line options with util-linux getopt (needed for the
# long-option names).  Bail out with usage on a parse error instead of
# silently continuing with garbage, as the unchecked status used to do.
options=$(getopt -o ds:Q:e:hc: -l debian,systemd:,qemu-path:,exportdir:,help,credential: -- "$@") || {
    usage
    exit 1
}
eval set -- "$options"

while true ; do
    case "$1" in
    -d|--debian)
        CHECK=qemu_check_debian
        BINFMT_SET=qemu_generate_debian
        EXPORTDIR=${EXPORTDIR:-$DEBIANDIR}
        ;;
    -s|--systemd)
        CHECK=qemu_check_systemd
        BINFMT_SET=qemu_generate_systemd
        EXPORTDIR=${EXPORTDIR:-$SYSTEMDDIR}
        shift
        # check given cpu is in the supported CPU list
        # NB: use POSIX '=' here, not '=='; the script runs under
        # /bin/sh where '==' is an unknown operator (e.g. dash).
        for cpu in ${qemu_target_list} ; do
            if [ "$cpu" = "$1" ] ; then
                break
            fi
        done
        if [ "$cpu" = "$1" ] ; then
            qemu_target_list="$1"
        else
            echo "ERROR: unknown CPU \"$1\"" 1>&2
            usage
            exit 1
        fi
        ;;
    -Q|--qemu-path)
        shift
        QEMU_PATH="$1"
        ;;
    -e|--exportdir)
        shift
        EXPORTDIR="$1"
        ;;
    -h|--help)
        usage
        exit 1
        ;;
    -c|--credential)
        shift
        if [ "$1" = "yes" ] ; then
            FLAGS="OC"
        else
            FLAGS=""
        fi
        ;;
    *)
        break
        ;;
    esac
    shift
done

# Run the mode-specific sanity checks, then do the registrations.
$CHECK
qemu_set_binfmts