Commit 9e3e4656, authored by Dmitry Safonov, committed by Andrei Vagin

ia32/futex: restore compat_robust_list

The same as for Checkpointing - we need to call 32-bit syscall
for compatible tasks here to correctly restore compat_robust_list,
not robust_list.

Note: I check here restorer's *task* arg for compatible mode, not
restorer *thread* arg. As changing an application's mode during runtime
is a very rare thing in itself, an application that runs threads of
different bitness most likely does not exist at all.
If we ever meet such application, this could be improved.
Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
parent 9a282cbb
...@@ -64,7 +64,13 @@ static inline void restore_tls(tls_t *ptls) ...@@ -64,7 +64,13 @@ static inline void restore_tls(tls_t *ptls)
static inline void *alloc_compat_syscall_stack(void) { return NULL; } static inline void *alloc_compat_syscall_stack(void) { return NULL; }
static inline void free_compat_syscall_stack(void *stack32) { } static inline void free_compat_syscall_stack(void *stack32) { }
static inline int static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
arch_compat_rt_sigaction(void *stack, int sig, void *act) { return -1; } {
return -1;
}
/*
 * !CONFIG_COMPAT stub: restoring a compat robust futex list is not
 * supported on this build, so always report failure (-1).
 */
static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
return -1;
}
#endif #endif
...@@ -77,7 +77,13 @@ static inline void restore_tls(tls_t *ptls) { ...@@ -77,7 +77,13 @@ static inline void restore_tls(tls_t *ptls) {
static inline void *alloc_compat_syscall_stack(void) { return NULL; } static inline void *alloc_compat_syscall_stack(void) { return NULL; }
static inline void free_compat_syscall_stack(void *stack32) { } static inline void free_compat_syscall_stack(void *stack32) { }
static inline int static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
arch_compat_rt_sigaction(void *stack, int sig, void *act) { return -1; } {
return -1;
}
/*
 * !CONFIG_COMPAT stub: restoring a compat robust futex list is not
 * supported on this build, so always report failure (-1).
 */
static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
return -1;
}
#endif #endif
...@@ -63,7 +63,13 @@ unsigned long sys_shmat(int shmid, const void *shmaddr, int shmflg); ...@@ -63,7 +63,13 @@ unsigned long sys_shmat(int shmid, const void *shmaddr, int shmflg);
static inline void *alloc_compat_syscall_stack(void) { return NULL; } static inline void *alloc_compat_syscall_stack(void) { return NULL; }
static inline void free_compat_syscall_stack(void *stack32) { } static inline void free_compat_syscall_stack(void *stack32) { }
static inline int static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
arch_compat_rt_sigaction(void *stack, int sig, void *act) { return -1; } {
return -1;
}
/*
 * !CONFIG_COMPAT stub: restoring a compat robust futex list is not
 * supported on this build, so always report failure (-1).
 */
static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
return -1;
}
#endif /*__CR_ASM_RESTORER_H__*/ #endif /*__CR_ASM_RESTORER_H__*/
...@@ -438,21 +438,6 @@ int restore_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r) ...@@ -438,21 +438,6 @@ int restore_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r)
return 0; return 0;
} }
/* Register image for a 32-bit int $0x80 syscall: number plus six args. */
struct syscall_args32 {
uint32_t nr, arg0, arg1, arg2, arg3, arg4, arg5;
};
/*
 * Invoke a 32-bit (compat) syscall from 64-bit code via int $0x80.
 * i386 ABI per the constraints below: nr in eax, args in ebx, ecx, edx,
 * esi, edi, ebp; the return value comes back in eax and is stored into
 * args->nr. r8-r11 are declared clobbered across the trap.
 */
static void do_full_int80(struct syscall_args32 *args)
{
/* ebp cannot be named in the constraint list, so pin it explicitly. */
register unsigned long bp asm("bp") = args->arg5;
asm volatile ("int $0x80"
: "+a" (args->nr),
"+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2),
"+S" (args->arg3), "+D" (args->arg4), "+r" (bp)
: : "r8", "r9", "r10", "r11");
args->arg5 = bp;
}
static int get_robust_list32(pid_t pid, uintptr_t head, uintptr_t len) static int get_robust_list32(pid_t pid, uintptr_t head, uintptr_t len)
{ {
struct syscall_args32 s = { struct syscall_args32 s = {
......
...@@ -34,6 +34,22 @@ static inline void free_compat_syscall_stack(void *mem) ...@@ -34,6 +34,22 @@ static inline void free_compat_syscall_stack(void *mem)
mem, ret); mem, ret);
} }
/* Register image for a 32-bit int $0x80 syscall: number plus six args. */
struct syscall_args32 {
uint32_t nr, arg0, arg1, arg2, arg3, arg4, arg5;
};
/*
 * Invoke a 32-bit (compat) syscall from 64-bit code via int $0x80.
 * i386 ABI per the constraints below: nr in eax, args in ebx, ecx, edx,
 * esi, edi, ebp; the return value comes back in eax and is stored into
 * args->nr. r8-r11 are declared clobbered across the trap.
 */
static inline void do_full_int80(struct syscall_args32 *args)
{
/* ebp cannot be named in the constraint list, so pin it explicitly. */
register unsigned long bp asm("bp") = args->arg5;
asm volatile ("int $0x80"
: "+a" (args->nr),
"+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2),
"+S" (args->arg3), "+D" (args->arg4), "+r" (bp)
: : "r8", "r9", "r10", "r11");
args->arg5 = bp;
}
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
extern unsigned long call32_from_64(void *stack, void *func); extern unsigned long call32_from_64(void *stack, void *func);
#endif #endif
......
...@@ -12,10 +12,17 @@ ...@@ -12,10 +12,17 @@
extern void restore_tls(tls_t *ptls); extern void restore_tls(tls_t *ptls);
extern int arch_compat_rt_sigaction(void *stack32, int sig, extern int arch_compat_rt_sigaction(void *stack32, int sig,
rt_sigaction_t_compat *act); rt_sigaction_t_compat *act);
extern int set_compat_robust_list(uint32_t head_ptr, uint32_t len);
#else /* CONFIG_COMPAT */ #else /* CONFIG_COMPAT */
static inline void restore_tls(tls_t *ptls) { } static inline void restore_tls(tls_t *ptls) { }
static inline int static inline int arch_compat_rt_sigaction(void *stack, int sig, void *act)
arch_compat_rt_sigaction(void *stack, int sig, void *act) { return -1; } {
return -1;
}
/*
 * !CONFIG_COMPAT stub: restoring a compat robust futex list is not
 * supported on this build, so always report failure (-1).
 */
static inline int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
return -1;
}
#endif /* !CONFIG_COMPAT */ #endif /* !CONFIG_COMPAT */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \ #define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
......
...@@ -3,9 +3,11 @@ ...@@ -3,9 +3,11 @@
#include "types.h" #include "types.h"
#include "restorer.h" #include "restorer.h"
#include "asm/compat.h"
#include "asm/restorer.h" #include "asm/restorer.h"
#include <compel/asm/fpu.h> #include <compel/asm/fpu.h>
#include <compel/plugins/std/syscall-codes.h>
#include <compel/plugins/std/string.h> #include <compel/plugins/std/string.h>
#include <compel/plugins/std/syscall.h> #include <compel/plugins/std/syscall.h>
#include "log.h" #include "log.h"
...@@ -34,6 +36,18 @@ int restore_nonsigframe_gpregs(UserX86RegsEntry *r) ...@@ -34,6 +36,18 @@ int restore_nonsigframe_gpregs(UserX86RegsEntry *r)
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
/*
 * Register a robust futex list for a compat (ia32) task by issuing the
 * 32-bit set_robust_list syscall through int $0x80, so the kernel keeps
 * it as compat_robust_list rather than a native robust_list.
 * Returns the syscall's return value (taken from eax via s.nr).
 */
int set_compat_robust_list(uint32_t head_ptr, uint32_t len)
{
struct syscall_args32 s = {
.nr = __NR32_set_robust_list,
.arg0 = head_ptr,
.arg1 = len,
};
do_full_int80(&s);
return (int)s.nr;
}
static int prepare_stack32(void **stack32) static int prepare_stack32(void **stack32)
{ {
if (*stack32) if (*stack32)
......
...@@ -422,21 +422,41 @@ die: ...@@ -422,21 +422,41 @@ die:
return -1; return -1;
} }
static int restore_thread_common(struct thread_restore_args *args) static int restore_robust_futex(struct thread_restore_args *args)
{ {
sys_set_tid_address((int *)decode_pointer(args->clear_tid_addr)); uint32_t futex_len = args->futex_rla_len;
int ret;
if (args->futex_rla_len) { if (!args->futex_rla_len)
int ret; return 0;
ret = sys_set_robust_list(decode_pointer(args->futex_rla), /*
args->futex_rla_len); * XXX: We check here *task's* mode, not *thread's*.
if (ret) { * But it's possible to write an application with mixed
pr_err("Failed to recover futex robust list: %d\n", ret); * threads (on x86): some in 32-bit mode, some in 64-bit.
return -1; * Quite unlikely that such application exists at all.
} */
if (args->ta->compatible_mode) {
uint32_t futex = (uint32_t)args->futex_rla;
ret = set_compat_robust_list(futex, futex_len);
} else {
void *futex = decode_pointer(args->futex_rla);
ret = sys_set_robust_list(futex, futex_len);
} }
if (ret)
pr_err("Failed to recover futex robust list: %d\n", ret);
return ret;
}
static int restore_thread_common(struct thread_restore_args *args)
{
sys_set_tid_address((int *)decode_pointer(args->clear_tid_addr));
if (restore_robust_futex(args))
return -1;
restore_sched_info(&args->sp); restore_sched_info(&args->sp);
if (restore_nonsigframe_gpregs(&args->gpregs)) if (restore_nonsigframe_gpregs(&args->gpregs))
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment