Commit fba12ae9 authored by Cyrill Gorcunov's avatar Cyrill Gorcunov Committed by Pavel Emelyanov

arch: x86-32 -- Be able to build 32bit CRIU

After this patch one can run ARCH="ia32" make to build a
32bit version of CRIU on a 64bit host. Note that only the
build procedure is tuned up; CRIU itself is not
yet ready to perform a checkpoint/restore cycle -- a lot
of additional code is needed, and here we rather put in
stubs simply to make the build procedure run.
Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
parent e04f683d
...@@ -59,6 +59,16 @@ ifeq ($(ARCH),x86_64) ...@@ -59,6 +59,16 @@ ifeq ($(ARCH),x86_64)
LDARCH := i386:x86-64 LDARCH := i386:x86-64
VDSO := y VDSO := y
endif endif
# ia32: cross-build a 32bit CRIU on an x86-64 host (invoked as "ARCH=ia32 make").
ifeq ($(ARCH),ia32)
# Sources are shared with x86-64; CONFIG_X86_32 selects the 32bit code paths.
SRCARCH := x86
DEFINES := -DCONFIG_X86_32
# Link and compile for 32bit i386.
LDARCH := i386
ldflags-y += -m elf_i386
VDSO := y
USERCFLAGS += -m32
# NOTE(review): PROTOUFIX presumably adjusts protobuf fixed-width integer
# handling for the 32bit build -- confirm where it is consumed.
PROTOUFIX := y
export PROTOUFIX ldflags-y
endif
ifeq ($(shell echo $(ARCH) | sed -e 's/arm.*/arm/'),arm) ifeq ($(shell echo $(ARCH) | sed -e 's/arm.*/arm/'),arm)
ARMV := $(shell echo $(ARCH) | sed -nr 's/armv([[:digit:]]).*/\1/p; t; i7') ARMV := $(shell echo $(ARCH) | sed -nr 's/armv([[:digit:]]).*/\1/p; t; i7')
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "protobuf/core.pb-c.h" #include "protobuf/core.pb-c.h"
#ifdef CONFIG_X86_64
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \ #define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \ task_args) \
asm volatile( \ asm volatile( \
...@@ -18,6 +19,14 @@ ...@@ -18,6 +19,14 @@
"g"(restore_task_exec_start), \ "g"(restore_task_exec_start), \
"g"(task_args) \ "g"(task_args) \
: "rsp", "rdi", "rsi", "rbx", "rax", "memory") : "rsp", "rdi", "rsi", "rbx", "rax", "memory")
#else /* CONFIG_X86_64 */
/*
 * ia32 stub: jumping into the restorer blob is not implemented yet
 * (only the build procedure is wired up at this point).  Wrapped in
 * do { } while (0) so the macro behaves as a single statement; the
 * previous expansion produced several bare statements plus a stray
 * ";", which breaks an unbraced if/else around the call site.  The
 * (void) casts only silence unused-argument warnings.
 */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start,	\
			      task_args)			\
	do {							\
		(void)(new_sp);					\
		(void)(restore_task_exec_start);		\
		(void)(task_args);				\
	} while (0)
#endif /* CONFIG_X86_64 */
#define core_get_tls(pcore, ptls) #define core_get_tls(pcore, ptls)
......
...@@ -72,7 +72,7 @@ struct rt_sigframe { ...@@ -72,7 +72,7 @@ struct rt_sigframe {
fpu_state_t fpu_state; fpu_state_t fpu_state;
}; };
#ifdef CONFIG_X86_64
#define ARCH_RT_SIGRETURN(new_sp) \ #define ARCH_RT_SIGRETURN(new_sp) \
asm volatile( \ asm volatile( \
"movq %0, %%rax \n" \ "movq %0, %%rax \n" \
...@@ -129,6 +129,35 @@ struct rt_sigframe { ...@@ -129,6 +129,35 @@ struct rt_sigframe {
: \ : \
: "r"(ret) \ : "r"(ret) \
: "memory") : "memory")
#else /* CONFIG_X86_64 */
/*
 * Enter the kernel through rt_sigreturn with the signal frame at
 * new_sp: %esp := new_sp, %eax := __NR_rt_sigreturn, then "int $0x80"
 * (the i386 syscall entry).  On success the kernel restores the
 * context from the frame and control never returns here.
 * NOTE(review): naming "esp" in the clobber list is rejected by newer
 * GCC (listing the stack pointer as a clobber is an error there) --
 * confirm against the supported toolchains.
 */
#define ARCH_RT_SIGRETURN(new_sp) \
asm volatile( \
"movl %0, %%eax \n" \
"movl %%eax, %%esp \n" \
"movl $"__stringify(__NR_rt_sigreturn)", %%eax \n" \
"int $0x80 \n" \
: \
: "r"(new_sp) \
: "eax","esp","memory")
/*
 * ia32 stub: restoring threads via clone is not implemented yet, so
 * this does nothing and leaves "ret" untouched.  do { } while (0)
 * makes the macro expand to a single statement (the previous
 * expansion was several bare statements plus a stray ";", unsafe
 * under an unbraced if/else); the (void) casts keep the arguments
 * "used" so the compiler stays quiet.
 */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid,	\
			     thread_args, clone_restore_fn)		\
	do {								\
		(void)(ret);						\
		(void)(clone_flags);					\
		(void)(new_sp);						\
		(void)(parent_tid);					\
		(void)(thread_args);					\
		(void)(clone_restore_fn);				\
	} while (0)
/*
 * Deliberately crash when core restore fails: switch to the stack
 * held in "ret" (a variable expected in the invoking scope -- it is
 * not a macro argument), zero %eax and jump through it, i.e. jump to
 * address 0 so the failure is immediately fatal and easy to spot in
 * a debugger.
 */
#define ARCH_FAIL_CORE_RESTORE \
asm volatile( \
"movl %0, %%esp \n" \
"xorl %%eax, %%eax \n" \
"jmp *%%eax \n" \
: \
: "r"(ret) \
: "memory")
#endif /* CONFIG_X86_64 */
#define RT_SIGFRAME_UC(rt_sigframe) rt_sigframe->uc #define RT_SIGFRAME_UC(rt_sigframe) rt_sigframe->uc
#define RT_SIGFRAME_REGIP(rt_sigframe) (rt_sigframe)->uc.uc_mcontext.rip #define RT_SIGFRAME_REGIP(rt_sigframe) (rt_sigframe)->uc.uc_mcontext.rip
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
int restore_nonsigframe_gpregs(UserX86RegsEntry *r) int restore_nonsigframe_gpregs(UserX86RegsEntry *r)
{ {
#ifdef CONFIG_X86_64
long ret; long ret;
unsigned long fsgs_base; unsigned long fsgs_base;
...@@ -27,6 +28,6 @@ int restore_nonsigframe_gpregs(UserX86RegsEntry *r) ...@@ -27,6 +28,6 @@ int restore_nonsigframe_gpregs(UserX86RegsEntry *r)
pr_info("SET_GS fail %ld\n", ret); pr_info("SET_GS fail %ld\n", ret);
return -1; return -1;
} }
#endif
return 0; return 0;
} }
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#endif #endif
#define LOG_PREFIX "vdso: " #define LOG_PREFIX "vdso: "
#ifdef CONFIG_X86_64
typedef struct { typedef struct {
u16 movabs; u16 movabs;
u64 imm64; u64 imm64;
...@@ -438,3 +439,31 @@ int vdso_proxify(char *who, struct vdso_symtable *sym_rt, ...@@ -438,3 +439,31 @@ int vdso_proxify(char *who, struct vdso_symtable *sym_rt,
sys_mprotect((void *)vdso_rt_parked_at, vdso_vma_size(sym_rt), VDSO_PROT); sys_mprotect((void *)vdso_rt_parked_at, vdso_vma_size(sym_rt), VDSO_PROT);
return 0; return 0;
} }
#else /* CONFIG_X86_64 */
/* ia32 stub: vDSO call redirection is x86-64 only for now -- report success. */
int vdso_redirect_calls(void *base_to, void *base_from,
			struct vdso_symtable *to,
			struct vdso_symtable *from)
{
	(void)base_to;
	(void)base_from;
	(void)to;
	(void)from;

	return 0;
}
/* ia32 stub: no vDSO symbol table is parsed yet -- pretend it worked. */
int vdso_fill_symtable(char *mem, size_t size, struct vdso_symtable *t)
{
	(void)mem;
	(void)size;
	(void)t;

	return 0;
}
/* ia32 stub: vDSO parking is not needed/implemented on 32bit yet. */
int vdso_do_park(struct vdso_symtable *sym_rt, unsigned long park_at, unsigned long park_size)
{
	(void)sym_rt;
	(void)park_at;
	(void)park_size;

	return 0;
}
/* ia32 stub: vDSO proxification is x86-64 only for now -- report success. */
int vdso_proxify(char *who, struct vdso_symtable *sym_rt,
		 unsigned long vdso_rt_parked_at, size_t index,
		 VmaEntry *vmas, size_t nr_vmas)
{
	(void)who;
	(void)sym_rt;
	(void)vdso_rt_parked_at;
	(void)index;
	(void)vmas;
	(void)nr_vmas;

	return 0;
}
#endif /* CONFIG_X86_64 */
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment