Commit 684dbef1 authored by Dmitry Safonov's avatar Dmitry Safonov Committed by Pavel Emelyanov

x86/32: rectify compatible jump trampolines

Reworked this code a little and it becomes more readable.
Drop those macros under the CONFIG_X86_64 define and just use the
boolean `compat_vdso' to check whether to insert a 64- or 32-bit jmp.
Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
parent e56c642e
......@@ -13,8 +13,8 @@
#define LOG_PREFIX "vdso: "
int vdso_redirect_calls(unsigned long base_to, unsigned long base_from,
struct vdso_symtable *to,
struct vdso_symtable *from)
struct vdso_symtable *to, struct vdso_symtable *from,
bool __always_unused compat_vdso)
{
unsigned int i;
......
......@@ -125,10 +125,9 @@ static inline void put_trampoline_call(unsigned long at, unsigned long to,
invalidate_caches(at);
}
int vdso_redirect_calls(unsigned long base_to,
unsigned long base_from,
struct vdso_symtable *to,
struct vdso_symtable *from)
int vdso_redirect_calls(unsigned long base_to, unsigned long base_from,
struct vdso_symtable *to, struct vdso_symtable *from,
bool __always_unused compat_vdso)
{
unsigned int i;
unsigned long trampoline;
......
......@@ -13,53 +13,63 @@
#endif
#define LOG_PREFIX "vdso: "
#ifdef CONFIG_X86_64
typedef struct {
u16 movabs;
u64 imm64;
u16 jmp_rax;
u32 guards;
} __packed jmp_t;
#define IMMEDIATE(j) (j.imm64)
static void insert_trampoline32(uintptr_t from, uintptr_t to)
{
struct {
u8 movl;
u32 imm32;
u16 jmp_eax;
u32 guards;
} __packed jmp = {
.movl = 0xb8,
.imm32 = (uint32_t)to,
.jmp_eax = 0xe0ff,
.guards = 0xcccccccc,
};
jmp_t jmp = {
.movabs = 0xb848,
.jmp_rax = 0xe0ff,
.guards = 0xcccccccc,
};
memcpy((void *)from, &jmp, sizeof(jmp));
}
/*
 * Write a native 64-bit jump trampoline at @from that transfers
 * control to @to:
 *	movabs $to, %rax	(0x48 0xb8 imm64)
 *	jmp    *%rax		(0xff 0xe0)
 * The trailing 0xcc (int3) guard bytes trap any stray fall-through
 * past the trampoline.
 */
static void insert_trampoline64(uintptr_t from, uintptr_t to)
{
	struct {
		u16 movabs;
		u64 imm64;
		u16 jmp_rax;
		u32 guards;
	} __packed jmp = {
		.movabs = 0xb848,
		.imm64 = to,
		.jmp_rax = 0xe0ff,
		.guards = 0xcccccccc,
	};

	memcpy((void *)from, &jmp, sizeof(jmp));
}
int vdso_redirect_calls(unsigned long base_to, unsigned long base_from,
struct vdso_symtable *to,
struct vdso_symtable *from)
struct vdso_symtable *sto, struct vdso_symtable *sfrom,
bool compat_vdso)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(to->symbols); i++) {
if (vdso_symbol_empty(&from->symbols[i]))
for (i = 0; i < ARRAY_SIZE(sto->symbols); i++) {
uintptr_t from, to;
if (vdso_symbol_empty(&sfrom->symbols[i]))
continue;
pr_debug("jmp: %lx/%lx -> %lx/%lx (index %d)\n",
base_from, from->symbols[i].offset,
base_to, to->symbols[i].offset, i);
base_from, sfrom->symbols[i].offset,
base_to, sto->symbols[i].offset, i);
from = base_from + sfrom->symbols[i].offset;
to = base_to + sto->symbols[i].offset;
IMMEDIATE(jmp) = base_to + to->symbols[i].offset;
memcpy((void *)(base_from + from->symbols[i].offset), &jmp, sizeof(jmp));
if (!compat_vdso)
insert_trampoline64(from, to);
else
insert_trampoline32(from, to);
}
return 0;
......
......@@ -86,7 +86,8 @@ extern int vdso_proxify(struct vdso_symtable *sym_rt,
VmaEntry *vmas, size_t nr_vmas,
bool compat_vdso, bool force_trampolines);
extern int vdso_redirect_calls(unsigned long base_to, unsigned long base_from,
struct vdso_symtable *to, struct vdso_symtable *from);
struct vdso_symtable *to, struct vdso_symtable *from,
bool compat_vdso);
#else /* CONFIG_VDSO */
#define vdso_do_park(sym_rt, park_at, park_size) (0)
......
......@@ -242,7 +242,7 @@ int vdso_proxify(struct vdso_symtable *sym_rt, unsigned long vdso_rt_parked_at,
if (vdso_redirect_calls(vdso_rt_parked_at,
vma_vdso->start,
sym_rt, &s)) {
sym_rt, &s, compat_vdso)) {
pr_err("Failed to proxify dumpee contents\n");
return -1;
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment