Commit c58e1bbc authored by Cyrill Gorcunov, committed by Andrei Vagin

criu: arch,x86 -- Drop native ia32 pieces

Acked-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Acked-by: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Cyrill Gorcunov <gorcunov@openvz.org>
Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
parent b729c187
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include "images/core.pb-c.h" #include "images/core.pb-c.h"
#include "images/creds.pb-c.h" #include "images/creds.pb-c.h"
#ifdef CONFIG_X86_64
int kdat_compat_sigreturn_test(void) int kdat_compat_sigreturn_test(void)
{ {
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
...@@ -49,7 +48,6 @@ int kdat_compat_sigreturn_test(void) ...@@ -49,7 +48,6 @@ int kdat_compat_sigreturn_test(void)
#endif #endif
return 0; return 0;
} }
#endif /* CONFIG_X86_64 */
int save_task_regs(void *x, user_regs_struct_t *regs, user_fpregs_struct_t *fpregs) int save_task_regs(void *x, user_regs_struct_t *regs, user_fpregs_struct_t *fpregs)
{ {
...@@ -369,11 +367,7 @@ int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core) ...@@ -369,11 +367,7 @@ int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core)
return 0; return 0;
} }
#ifdef CONFIG_X86_64
#define CPREG32(d) f->compat.uc.uc_mcontext.d = r->d #define CPREG32(d) f->compat.uc.uc_mcontext.d = r->d
#else
#define CPREG32(d) f->uc.uc_mcontext.d = r->d
#endif
static void restore_compat_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r) static void restore_compat_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r)
{ {
CPREG32(gs); CPREG32(gs);
...@@ -387,13 +381,10 @@ static void restore_compat_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r) ...@@ -387,13 +381,10 @@ static void restore_compat_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r)
CPREG32(ss); CPREG32(ss);
CPREG32(flags); CPREG32(flags);
#ifdef CONFIG_X86_64
f->is_native = false; f->is_native = false;
#endif
} }
#undef CPREG32 #undef CPREG32
#ifdef CONFIG_X86_64
#define CPREG64(d, s) f->native.uc.uc_mcontext.d = r->s #define CPREG64(d, s) f->native.uc.uc_mcontext.d = r->s
static void restore_native_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r) static void restore_native_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r)
{ {
...@@ -439,11 +430,3 @@ int restore_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r) ...@@ -439,11 +430,3 @@ int restore_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r)
} }
return 0; return 0;
} }
#else /* !CONFIG_X86_64 */
int restore_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r)
{
restore_compat_gpregs(f, r);
return 0;
}
#endif
...@@ -5,10 +5,6 @@ ...@@ -5,10 +5,6 @@
#include <compel/plugins/std/syscall-codes.h> #include <compel/plugins/std/syscall-codes.h>
#include "asm/compat.h" #include "asm/compat.h"
#ifdef CONFIG_X86_32
# define __parasite_entry __attribute__((regparm(3)))
#endif
static int arch_get_user_desc(user_desc_t *desc) static int arch_get_user_desc(user_desc_t *desc)
{ {
int ret = __NR32_get_thread_area; int ret = __NR32_get_thread_area;
......
...@@ -5,7 +5,6 @@ ...@@ -5,7 +5,6 @@
#include "images/core.pb-c.h" #include "images/core.pb-c.h"
#ifdef CONFIG_X86_64
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \ #define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \ task_args) \
asm volatile( \ asm volatile( \
...@@ -19,14 +18,6 @@ ...@@ -19,14 +18,6 @@
"g"(restore_task_exec_start), \ "g"(restore_task_exec_start), \
"g"(task_args) \ "g"(task_args) \
: "rsp", "rdi", "rsi", "rbx", "rax", "memory") : "rsp", "rdi", "rsi", "rbx", "rax", "memory")
#else /* CONFIG_X86_64 */
#define JUMP_TO_RESTORER_BLOB(new_sp, restore_task_exec_start, \
task_args) \
(void)new_sp; \
(void)restore_task_exec_start; \
(void)task_args; \
;
#endif /* CONFIG_X86_64 */
static inline void core_get_tls(CoreEntry *pcore, tls_t *ptls) static inline void core_get_tls(CoreEntry *pcore, tls_t *ptls)
{ {
......
...@@ -17,7 +17,7 @@ static inline void restore_tls(tls_t *ptls) { } ...@@ -17,7 +17,7 @@ static inline void restore_tls(tls_t *ptls) { }
static inline int static inline int
arch_compat_rt_sigaction(void *stack, int sig, void *act) { return -1; } arch_compat_rt_sigaction(void *stack, int sig, void *act) { return -1; }
#endif /* !CONFIG_COMPAT */ #endif /* !CONFIG_COMPAT */
#ifdef CONFIG_X86_64
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \ #define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \ thread_args, clone_restore_fn) \
asm volatile( \ asm volatile( \
...@@ -70,27 +70,6 @@ arch_compat_rt_sigaction(void *stack, int sig, void *act) { return -1; } ...@@ -70,27 +70,6 @@ arch_compat_rt_sigaction(void *stack, int sig, void *act) { return -1; }
#endif #endif
extern int kdat_compat_sigreturn_test(void); extern int kdat_compat_sigreturn_test(void);
#else /* CONFIG_X86_64 */
#define RUN_CLONE_RESTORE_FN(ret, clone_flags, new_sp, parent_tid, \
thread_args, clone_restore_fn) \
(void)ret; \
(void)clone_flags; \
(void)new_sp; \
(void)parent_tid; \
(void)thread_args; \
(void)clone_restore_fn; \
;
#define ARCH_FAIL_CORE_RESTORE \
asm volatile( \
"movl %0, %%esp \n" \
"xorl %%eax, %%eax \n" \
"jmp *%%eax \n" \
: \
: "r"(ret) \
: "memory")
#define kdat_compat_sigreturn_test() 0
#endif /* CONFIG_X86_64 */
static inline void static inline void
__setup_sas_compat(struct ucontext_ia32* uc, ThreadSasEntry *sas) __setup_sas_compat(struct ucontext_ia32* uc, ThreadSasEntry *sas)
...@@ -103,7 +82,6 @@ __setup_sas_compat(struct ucontext_ia32* uc, ThreadSasEntry *sas) ...@@ -103,7 +82,6 @@ __setup_sas_compat(struct ucontext_ia32* uc, ThreadSasEntry *sas)
static inline void static inline void
__setup_sas(struct rt_sigframe* sigframe, ThreadSasEntry *sas) __setup_sas(struct rt_sigframe* sigframe, ThreadSasEntry *sas)
{ {
#ifdef CONFIG_X86_64
if (sigframe->is_native) { if (sigframe->is_native) {
struct rt_ucontext *uc = &sigframe->native.uc; struct rt_ucontext *uc = &sigframe->native.uc;
...@@ -113,9 +91,6 @@ __setup_sas(struct rt_sigframe* sigframe, ThreadSasEntry *sas) ...@@ -113,9 +91,6 @@ __setup_sas(struct rt_sigframe* sigframe, ThreadSasEntry *sas)
} else { } else {
__setup_sas_compat(&sigframe->compat.uc, sas); __setup_sas_compat(&sigframe->compat.uc, sas);
} }
#else
__setup_sas_compat(&sigframe->uc, sas);
#endif
} }
static inline void _setup_sas(struct rt_sigframe* sigframe, ThreadSasEntry *sas) static inline void _setup_sas(struct rt_sigframe* sigframe, ThreadSasEntry *sas)
......
...@@ -12,7 +12,6 @@ ...@@ -12,7 +12,6 @@
#include "images/core.pb-c.h" #include "images/core.pb-c.h"
#ifdef CONFIG_X86_64
static inline int core_is_compat(CoreEntry *c) static inline int core_is_compat(CoreEntry *c)
{ {
switch (c->thread_info->gpregs->mode) { switch (c->thread_info->gpregs->mode) {
...@@ -24,9 +23,6 @@ static inline int core_is_compat(CoreEntry *c) ...@@ -24,9 +23,6 @@ static inline int core_is_compat(CoreEntry *c)
return -1; return -1;
} }
} }
#else /* CONFIG_X86_64 */
static inline int core_is_compat(CoreEntry *c) { return 0; }
#endif /* CONFIG_X86_64 */
#define CORE_ENTRY__MARCH CORE_ENTRY__MARCH__X86_64 #define CORE_ENTRY__MARCH CORE_ENTRY__MARCH__X86_64
......
...@@ -49,7 +49,7 @@ struct vdso_symtable { ...@@ -49,7 +49,7 @@ struct vdso_symtable {
}, \ }, \
} }
#if defined(CONFIG_X86_32) || defined(CONFIG_VDSO_32) #ifdef CONFIG_VDSO_32
#define Ehdr_t Elf32_Ehdr #define Ehdr_t Elf32_Ehdr
#define Sym_t Elf32_Sym #define Sym_t Elf32_Sym
...@@ -60,7 +60,7 @@ struct vdso_symtable { ...@@ -60,7 +60,7 @@ struct vdso_symtable {
#define ELF_ST_TYPE ELF32_ST_TYPE #define ELF_ST_TYPE ELF32_ST_TYPE
#define ELF_ST_BIND ELF32_ST_BIND #define ELF_ST_BIND ELF32_ST_BIND
#else /* !CONFIG_X86_32 */ #else /* CONFIG_VDSO_32 */
#define Ehdr_t Elf64_Ehdr #define Ehdr_t Elf64_Ehdr
#define Sym_t Elf64_Sym #define Sym_t Elf64_Sym
...@@ -75,7 +75,7 @@ struct vdso_symtable { ...@@ -75,7 +75,7 @@ struct vdso_symtable {
#define ELF_ST_BIND ELF64_ST_BIND #define ELF_ST_BIND ELF64_ST_BIND
#endif #endif
#endif /* !CONFIG_X86_32 */ #endif /* CONFIG_VDSO_32 */
/* Size of VMA associated with vdso */ /* Size of VMA associated with vdso */
static inline unsigned long vdso_vma_size(struct vdso_symtable *t) static inline unsigned long vdso_vma_size(struct vdso_symtable *t)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment