Commit 684dbef1 authored by Dmitry Safonov, committed by Pavel Emelyanov

x86/32: rectify compatible jump trampolines

Rework this code a little to make it more readable.
Drop the macros under the CONFIG_X86_64 define and just use the
boolean `compat_vdso' to check whether to insert a 64- or 32-bit jmp.
Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
parent e56c642e
@@ -13,8 +13,8 @@
 #define LOG_PREFIX "vdso: "
 
 int vdso_redirect_calls(unsigned long base_to, unsigned long base_from,
-			struct vdso_symtable *to,
-			struct vdso_symtable *from)
+			struct vdso_symtable *to, struct vdso_symtable *from,
+			bool __always_unused compat_vdso)
 {
 	unsigned int i;
 ...
@@ -125,10 +125,9 @@ static inline void put_trampoline_call(unsigned long at, unsigned long to,
 	invalidate_caches(at);
 }
 
-int vdso_redirect_calls(unsigned long base_to,
-			unsigned long base_from,
-			struct vdso_symtable *to,
-			struct vdso_symtable *from)
+int vdso_redirect_calls(unsigned long base_to, unsigned long base_from,
+			struct vdso_symtable *to, struct vdso_symtable *from,
+			bool __always_unused compat_vdso)
 {
 	unsigned int i;
 	unsigned long trampoline;
 ...
@@ -13,53 +13,63 @@
 #endif
 
 #define LOG_PREFIX "vdso: "
 
-#ifdef CONFIG_X86_64
-typedef struct {
-	u16	movabs;
-	u64	imm64;
-	u16	jmp_rax;
-	u32	guards;
-} __packed jmp_t;
-#define IMMEDIATE(j)	(j.imm64)
-
-jmp_t jmp = {
-	.movabs		= 0xb848,
-	.jmp_rax	= 0xe0ff,
-	.guards		= 0xcccccccc,
-};
-#else /* CONFIG_X86_64 */
-typedef struct {
-	u8	movl;
-	u32	imm32;
-	u16	jmp_eax;
-	u32	guards;
-} __packed jmp_t;
-#define IMMEDIATE(j)	(j.imm32)
-
-jmp_t jmp = {
-	.movl		= 0xb8,
-	.jmp_eax	= 0xe0ff,
-	.guards		= 0xcccccccc,
-};
-#endif /* CONFIG_X86_64 */
+static void insert_trampoline32(uintptr_t from, uintptr_t to)
+{
+	struct {
+		u8	movl;
+		u32	imm32;
+		u16	jmp_eax;
+		u32	guards;
+	} __packed jmp = {
+		.movl		= 0xb8,
+		.imm32		= (uint32_t)to,
+		.jmp_eax	= 0xe0ff,
+		.guards		= 0xcccccccc,
+	};
+
+	memcpy((void *)from, &jmp, sizeof(jmp));
+}
+
+static void insert_trampoline64(uintptr_t from, uintptr_t to)
+{
+	struct {
+		u16	movabs;
+		u64	imm64;
+		u16	jmp_rax;
+		u32	guards;
+	} __packed jmp = {
+		.movabs		= 0xb848,
+		.imm64		= to,
+		.jmp_rax	= 0xe0ff,
+		.guards		= 0xcccccccc,
+	};
+
+	memcpy((void *)from, &jmp, sizeof(jmp));
+}
 
 int vdso_redirect_calls(unsigned long base_to, unsigned long base_from,
-			struct vdso_symtable *to,
-			struct vdso_symtable *from)
+			struct vdso_symtable *sto, struct vdso_symtable *sfrom,
+			bool compat_vdso)
 {
 	unsigned int i;
 
-	for (i = 0; i < ARRAY_SIZE(to->symbols); i++) {
-		if (vdso_symbol_empty(&from->symbols[i]))
+	for (i = 0; i < ARRAY_SIZE(sto->symbols); i++) {
+		uintptr_t from, to;
+
+		if (vdso_symbol_empty(&sfrom->symbols[i]))
 			continue;
 
 		pr_debug("jmp: %lx/%lx -> %lx/%lx (index %d)\n",
-			 base_from, from->symbols[i].offset,
-			 base_to, to->symbols[i].offset, i);
+			 base_from, sfrom->symbols[i].offset,
+			 base_to, sto->symbols[i].offset, i);
+
+		from = base_from + sfrom->symbols[i].offset;
+		to = base_to + sto->symbols[i].offset;
 
-		IMMEDIATE(jmp) = base_to + to->symbols[i].offset;
-		memcpy((void *)(base_from + from->symbols[i].offset), &jmp, sizeof(jmp));
+		if (!compat_vdso)
+			insert_trampoline64(from, to);
+		else
+			insert_trampoline32(from, to);
 	}
 
 	return 0;
 ...
@@ -86,7 +86,8 @@ extern int vdso_proxify(struct vdso_symtable *sym_rt,
 			VmaEntry *vmas, size_t nr_vmas,
 			bool compat_vdso, bool force_trampolines);
 extern int vdso_redirect_calls(unsigned long base_to, unsigned long base_from,
-			       struct vdso_symtable *to, struct vdso_symtable *from);
+			       struct vdso_symtable *to, struct vdso_symtable *from,
+			       bool compat_vdso);
 
 #else /* CONFIG_VDSO */
 
 #define vdso_do_park(sym_rt, park_at, park_size)		(0)
 ...
@@ -242,7 +242,7 @@ int vdso_proxify(struct vdso_symtable *sym_rt, unsigned long vdso_rt_parked_at,
 		if (vdso_redirect_calls(vdso_rt_parked_at,
 					vma_vdso->start,
-					sym_rt, &s)) {
+					sym_rt, &s, compat_vdso)) {
 			pr_err("Failed to proxify dumpee contents\n");
 			return -1;
 		}
 ...
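
Note (not part of the patch): the two trampoline layouts above are just packed byte images of a tiny absolute jump. The standalone sketch below is illustrative only; it uses <stdint.h> types and __attribute__((packed)) in place of CRIU's u8/u16/u32/u64 and __packed, plus made-up sample target addresses, and prints the raw bytes each layout produces so the opcode fields in the diff can be read off directly: 0x48 0xb8 is movabs $imm64, %rax; 0xb8 is mov $imm32, %eax; 0xff 0xe0 is jmp *%rax (or jmp *%eax); 0xcc is an int3 guard.

	/*
	 * Illustrative sketch only -- mirrors the packed trampoline layouts
	 * from vdso_redirect_calls() with plain <stdint.h> types and sample
	 * target addresses, and dumps the bytes for inspection.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct jmp64 {			/* movabs $to, %rax; jmp *%rax; int3 guards */
		uint16_t movabs;	/* 0xb848 -> bytes 48 b8 (little-endian) */
		uint64_t imm64;		/* absolute 64-bit target address */
		uint16_t jmp_rax;	/* 0xe0ff -> bytes ff e0 */
		uint32_t guards;	/* 0xcccccccc -> four int3 bytes */
	} __attribute__((packed));

	struct jmp32 {			/* mov $to, %eax; jmp *%eax; int3 guards */
		uint8_t  movl;		/* 0xb8 */
		uint32_t imm32;		/* absolute 32-bit target (the compat vdso sits below 4G) */
		uint16_t jmp_eax;	/* 0xe0ff -> bytes ff e0 */
		uint32_t guards;	/* 0xcccccccc */
	} __attribute__((packed));

	static void dump(const char *tag, const void *buf, size_t len)
	{
		const unsigned char *b = buf;
		size_t i;

		printf("%s:", tag);
		for (i = 0; i < len; i++)
			printf(" %02x", b[i]);
		printf("\n");
	}

	int main(void)
	{
		struct jmp64 j64 = {
			.movabs  = 0xb848,
			.imm64   = 0x00007fff00001234ull,	/* sample target */
			.jmp_rax = 0xe0ff,
			.guards  = 0xcccccccc,
		};
		struct jmp32 j32 = {
			.movl    = 0xb8,
			.imm32   = 0xf7701234u,			/* sample target */
			.jmp_eax = 0xe0ff,
			.guards  = 0xcccccccc,
		};

		dump("64-bit trampoline", &j64, sizeof(j64));	/* 48 b8 <imm64> ff e0 cc cc cc cc */
		dump("32-bit trampoline", &j32, sizeof(j32));	/* b8 <imm32> ff e0 cc cc cc cc */
		return 0;
	}

Because the patch sets the jump immediate per call instead of in a file-scope jmp_t picked at build time, one CRIU binary can write either trampoline at run time; the compat_vdso argument threaded through vdso_redirect_calls() is what selects between them.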