Commit 1fdaed0a authored by Dmitry Safonov, committed by Andrei Vagin

vdso: Exclude {vdso, vvar}_start from vdso_symtable

Preparation for saving vdso_symtable in kdat, which will
allow skipping the parsing of the native and compat symtables -
at the very least, that saves us from running the compat helper
on each criu launch.

As {vvar,vdso}_start are randomized by ASLR, there is no point
in saving them into kdat. We'll still need to re-read them from
/proc/self/maps for a while.
Reviewed-by: Cyrill Gorcunov <gorcunov@openvz.org>
Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
parent 06f846b7
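
This commit splits the per-process vdso bookkeeping in two: struct vdso_maps keeps the ASLR-dependent start addresses, while struct vdso_symtable is reduced to sizes, symbol offsets and blob ordering, i.e. the part that is stable across runs and can later be cached in kdat. A minimal, self-contained sketch of that idea (not CRIU code: the structs are simplified and refresh_vdso_starts() is a hypothetical helper):

```c
#include <stdio.h>
#include <string.h>

/* Simplified mirror of the new split: sizes and symbol data are
 * ASLR-independent and could be cached, start addresses are not. */
struct vdso_symtable_s { unsigned long vdso_size, vvar_size; };
struct vdso_maps_s {
	unsigned long vdso_start, vvar_start;	/* randomized, re-read every run */
	struct vdso_symtable_s sym;		/* cacheable part */
};

/* Hypothetical helper: refresh [vdso]/[vvar] placement from /proc/self/maps. */
static int refresh_vdso_starts(struct vdso_maps_s *m)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return -1;

	while (fgets(line, sizeof(line), f)) {
		unsigned long start, end;

		if (sscanf(line, "%lx-%lx", &start, &end) != 2)
			continue;
		if (strstr(line, "[vdso]")) {
			m->vdso_start = start;
			m->sym.vdso_size = end - start;
		} else if (strstr(line, "[vvar]")) {
			m->vvar_start = start;
			m->sym.vvar_size = end - start;
		}
	}
	fclose(f);
	return 0;
}
```

Only the two start addresses differ from launch to launch; everything reachable through .sym could be filled once and reused.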
@@ -3037,7 +3037,7 @@ static int sigreturn_restore(pid_t pid, struct task_restore_args *task_args, uns
 	struct restore_mem_zone *mz;
 #ifdef CONFIG_VDSO
-	struct vdso_symtable vdso_symtab_rt;
+	struct vdso_maps vdso_maps_rt;
 	unsigned long vdso_rt_size = 0;
 #endif
@@ -3083,15 +3083,15 @@ static int sigreturn_restore(pid_t pid, struct task_restore_args *task_args, uns
 #ifdef CONFIG_VDSO
 	if (core_is_compat(core))
-		vdso_symtab_rt = vdso_compat_rt;
+		vdso_maps_rt = vdso_maps_compat;
 	else
-		vdso_symtab_rt = vdso_sym_rt;
+		vdso_maps_rt = vdso_maps;
 	/*
 	 * Figure out how much memory runtime vdso and vvar will need.
 	 */
-	vdso_rt_size = vdso_symtab_rt.vdso_size;
-	if (vdso_rt_size && vdso_symtab_rt.vvar_size)
-		vdso_rt_size += ALIGN(vdso_symtab_rt.vvar_size, PAGE_SIZE);
+	vdso_rt_size = vdso_maps_rt.sym.vdso_size;
+	if (vdso_rt_size && vdso_maps_rt.sym.vvar_size)
+		vdso_rt_size += ALIGN(vdso_maps_rt.sym.vvar_size, PAGE_SIZE);
 	task_args->bootstrap_len += vdso_rt_size;
 #endif
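
The parking area reserved in the restorer bootstrap region is therefore the vdso blob followed by the vvar blob rounded up to a page. A quick illustration with made-up sizes (PAGE_SIZE and ALIGN are redefined locally just for this example):

```c
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Made-up sizes: an 8 KiB vdso and a 12 KiB vvar blob. */
	unsigned long vdso_size = 8192, vvar_size = 12288;
	unsigned long vdso_rt_size = vdso_size + ALIGN(vvar_size, PAGE_SIZE);

	printf("parking area: %lu bytes\n", vdso_rt_size);	/* prints 20480 */
	return 0;
}
```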
@@ -3308,7 +3308,7 @@ static int sigreturn_restore(pid_t pid, struct task_restore_args *task_args, uns
 	 */
 	mem += rst_mem_size;
 	task_args->vdso_rt_parked_at = (unsigned long)mem;
-	task_args->vdso_sym_rt = vdso_symtab_rt;
+	task_args->vdso_maps_rt = vdso_maps_rt;
 	task_args->vdso_rt_size = vdso_rt_size;
 #endif
......
@@ -79,7 +79,8 @@ static inline bool is_vdso_mark(void *addr)
 	return false;
 }
-extern int vdso_do_park(struct vdso_symtable *sym_rt, unsigned long park_at, unsigned long park_size);
+extern int vdso_do_park(struct vdso_maps *rt, unsigned long park_at,
+			unsigned long park_size);
 extern int vdso_map_compat(unsigned long map_at);
 extern int vdso_proxify(struct vdso_symtable *sym_rt,
 			unsigned long vdso_rt_parked_at,
......
@@ -193,7 +193,7 @@ struct task_restore_args {
 #ifdef CONFIG_VDSO
 	unsigned long vdso_rt_size;
-	struct vdso_symtable vdso_sym_rt;	/* runtime vdso symbols */
+	struct vdso_maps vdso_maps_rt;		/* runtime vdso symbols */
 	unsigned long vdso_rt_parked_at;	/* safe place to keep vdso */
 #endif
 	void **breakpoint;
......
@@ -28,21 +28,23 @@ struct vdso_symbol {
 };
 struct vdso_symtable {
-	unsigned long vdso_start;
 	unsigned long vdso_size;
-	unsigned long vvar_start;
 	unsigned long vvar_size;
 	struct vdso_symbol symbols[VDSO_SYMBOL_MAX];
 	bool vdso_before_vvar; /* order of vdso/vvar pair */
 };
+struct vdso_maps {
+	unsigned long vdso_start;
+	unsigned long vvar_start;
+	struct vdso_symtable sym;
+};
 #define VDSO_SYMBOL_INIT { .offset = VDSO_BAD_ADDR, }
 #define VDSO_SYMTABLE_INIT \
 	{ \
-	.vdso_start = VDSO_BAD_ADDR, \
 	.vdso_size = VDSO_BAD_SIZE, \
-	.vvar_start = VVAR_BAD_ADDR, \
 	.vvar_size = VVAR_BAD_SIZE, \
 	.symbols = { \
 		[0 ... VDSO_SYMBOL_MAX - 1] = \
@@ -51,6 +53,13 @@ struct vdso_symtable {
 	.vdso_before_vvar = false, \
 	}
+#define VDSO_MAPS_INIT \
+	{ \
+	.vdso_start = VDSO_BAD_ADDR, \
+	.vvar_start = VVAR_BAD_ADDR, \
+	.sym = VDSO_SYMTABLE_INIT, \
+	}
 #ifdef CONFIG_VDSO_32
 #define Ehdr_t Elf32_Ehdr
......
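With the new layout, call sites keep the randomized placement in the outer struct vdso_maps and everything worth caching under .sym, and VDSO_MAPS_INIT marks both halves as "not found yet". A small standalone usage sketch (structs are simplified and the sentinel values are assumed to be -1UL here):

```c
#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for the definitions above; the real vdso_symtable
 * also carries the symbol array. Sentinels assumed to be -1UL. */
#define VDSO_BAD_ADDR	((unsigned long)-1)
#define VVAR_BAD_ADDR	((unsigned long)-1)
#define VDSO_BAD_SIZE	((unsigned long)-1)
#define VVAR_BAD_SIZE	((unsigned long)-1)

struct vdso_symtable {
	unsigned long vdso_size;
	unsigned long vvar_size;
	bool vdso_before_vvar;
};

struct vdso_maps {
	unsigned long vdso_start;	/* randomized, never cached */
	unsigned long vvar_start;
	struct vdso_symtable sym;	/* stable, candidate for kdat */
};

#define VDSO_MAPS_INIT { .vdso_start = VDSO_BAD_ADDR, .vvar_start = VVAR_BAD_ADDR, \
			 .sym = { .vdso_size = VDSO_BAD_SIZE, .vvar_size = VVAR_BAD_SIZE, }, }

int main(void)
{
	struct vdso_maps m = VDSO_MAPS_INIT;

	/* Pretend /proc/self/maps parsing found both blobs. */
	m.vdso_start = 0x7ffff7ffa000UL;
	m.sym.vdso_size = 0x2000;
	m.vvar_start = 0x7ffff7ff7000UL;
	m.sym.vvar_size = 0x3000;
	m.sym.vdso_before_vvar = m.vdso_start < m.vvar_start;

	/* "Is there a vvar blob?" can now be answered from the cacheable half. */
	if (m.sym.vvar_size != VVAR_BAD_SIZE)
		printf("[vvar] %lx-%lx\n", m.vvar_start, m.vvar_start + m.sym.vvar_size);
	printf("[vdso] %lx-%lx\n", m.vdso_start, m.vdso_start + m.sym.vdso_size);
	return 0;
}
```

This is also why later hunks switch presence checks from vvar_start to vvar_size: after this change the restorer-side proxifying helpers receive only the symtable half, which no longer carries the start addresses.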
@@ -10,8 +10,8 @@
 #include "util-vdso.h"
-extern struct vdso_symtable vdso_sym_rt;
-extern struct vdso_symtable vdso_compat_rt;
+extern struct vdso_maps vdso_maps;
+extern struct vdso_maps vdso_maps_compat;
 extern int vdso_init(void);
@@ -19,7 +19,7 @@ extern int parasite_fixup_vdso(struct parasite_ctl *ctl, pid_t pid,
 			       struct vm_area_list *vma_area_list);
 #ifdef CONFIG_COMPAT
-extern void compat_vdso_helper(struct vdso_symtable *native, int pipe_fd,
+extern void compat_vdso_helper(struct vdso_maps *native, int pipe_fd,
 			       int err_fd, void *vdso_buf, size_t buf_size);
 #endif
......
@@ -42,29 +42,29 @@ static int vdso_remap(char *who, unsigned long from, unsigned long to, size_t si
 }
 /* Park runtime vDSO in some safe place where it can be accessible from restorer */
-int vdso_do_park(struct vdso_symtable *sym_rt, unsigned long park_at, unsigned long park_size)
+int vdso_do_park(struct vdso_maps *rt, unsigned long park_at, unsigned long park_size)
 {
 	int ret;
-	BUG_ON((sym_rt->vdso_size + sym_rt->vvar_size) < park_size);
+	BUG_ON((rt->sym.vdso_size + rt->sym.vvar_size) < park_size);
-	if (sym_rt->vvar_start != VVAR_BAD_ADDR) {
-		if (sym_rt->vdso_before_vvar) {
-			ret = vdso_remap("rt-vdso", sym_rt->vdso_start,
-					 park_at, sym_rt->vdso_size);
-			park_at += sym_rt->vdso_size;
-			ret |= vdso_remap("rt-vvar", sym_rt->vvar_start,
-					  park_at, sym_rt->vvar_size);
+	if (rt->vvar_start != VVAR_BAD_ADDR) {
+		if (rt->sym.vdso_before_vvar) {
+			ret = vdso_remap("rt-vdso", rt->vdso_start,
+					 park_at, rt->sym.vdso_size);
+			park_at += rt->sym.vdso_size;
+			ret |= vdso_remap("rt-vvar", rt->vvar_start,
+					  park_at, rt->sym.vvar_size);
 		} else {
-			ret = vdso_remap("rt-vvar", sym_rt->vvar_start,
-					 park_at, sym_rt->vvar_size);
-			park_at += sym_rt->vvar_size;
-			ret |= vdso_remap("rt-vdso", sym_rt->vdso_start,
-					  park_at, sym_rt->vdso_size);
+			ret = vdso_remap("rt-vvar", rt->vvar_start,
+					 park_at, rt->sym.vvar_size);
+			park_at += rt->sym.vvar_size;
+			ret |= vdso_remap("rt-vdso", rt->vdso_start,
+					  park_at, rt->sym.vdso_size);
 		}
 	} else
-		ret = vdso_remap("rt-vdso", sym_rt->vdso_start,
-				 park_at, sym_rt->vdso_size);
+		ret = vdso_remap("rt-vdso", rt->vdso_start,
+				 park_at, rt->sym.vdso_size);
 	return ret;
 }
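
The parking logic above lays the two blobs out back to back at park_at, preserving whichever order they had in the original mapping. An illustrative sketch of where each blob ends up when both are present (simplified structs, not part of the commit):

```c
#include <stdbool.h>

/* Simplified stand-ins; see the struct definitions earlier in the diff. */
struct vdso_symtable { unsigned long vdso_size, vvar_size; bool vdso_before_vvar; };
struct vdso_maps { unsigned long vdso_start, vvar_start; struct vdso_symtable sym; };

/* Only meaningful when a vvar blob exists; without vvar the vdso is parked at park_at. */
static inline unsigned long parked_vdso_at(const struct vdso_maps *rt, unsigned long park_at)
{
	return rt->sym.vdso_before_vvar ? park_at : park_at + rt->sym.vvar_size;
}

static inline unsigned long parked_vvar_at(const struct vdso_maps *rt, unsigned long park_at)
{
	return rt->sym.vdso_before_vvar ? park_at + rt->sym.vdso_size : park_at;
}
```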
@@ -126,7 +126,7 @@ static bool blobs_matches(VmaEntry *vdso_img, VmaEntry *vvar_img,
 		return false;
 	}
-	if (vvar_img && sym_rt->vvar_start != VVAR_BAD_ADDR) {
+	if (vvar_img && sym_rt->vvar_size != VVAR_BAD_SIZE) {
 		bool vdso_firstly = (vvar_img->start > vdso_img->start);
 		if (sym_rt->vvar_size != vma_entry_len(vvar_img))
@@ -237,7 +237,7 @@ int vdso_proxify(struct vdso_symtable *sym_rt, unsigned long vdso_rt_parked_at,
 	/*
 	 * Don't forget to shift if vvar is before vdso.
 	 */
-	if (sym_rt->vvar_start != VDSO_BAD_ADDR && !sym_rt->vdso_before_vvar)
+	if (sym_rt->vvar_size != VDSO_BAD_SIZE && !sym_rt->vdso_before_vvar)
 		vdso_rt_parked_at += sym_rt->vvar_size;
 	if (vdso_redirect_calls(vdso_rt_parked_at,
......
@@ -1212,7 +1212,7 @@ long __export_restore_task(struct task_restore_args *args)
 	}
 	if (vdso_needs_parking(args)) {
-		if (vdso_do_park(&args->vdso_sym_rt,
+		if (vdso_do_park(&args->vdso_maps_rt,
 				 args->vdso_rt_parked_at, vdso_rt_size))
 			goto core_restore_end;
 	}
@@ -1345,7 +1345,7 @@ long __export_restore_task(struct task_restore_args *args)
 	/*
 	 * Proxify vDSO.
 	 */
-	if (vdso_proxify(&args->vdso_sym_rt, args->vdso_rt_parked_at,
+	if (vdso_proxify(&args->vdso_maps_rt.sym, args->vdso_rt_parked_at,
 			 args->vmas, args->vmas_n, args->compatible_mode,
 			 fault_injected(FI_VDSO_TRAMPOLINES)))
 		goto core_restore_end;
......
@@ -33,7 +33,7 @@ static void exit_on(int ret, int err_fd, char *reason)
  * WARN: This helper shouldn't call pr_err() or any syscall with
  * Glibc's wrapper function - it may very likely blow up.
  */
-void compat_vdso_helper(struct vdso_symtable *native, int pipe_fd,
+void compat_vdso_helper(struct vdso_maps *native, int pipe_fd,
 			int err_fd, void *vdso_buf, size_t buf_size)
 {
 	void *vdso_addr;
@@ -41,12 +41,14 @@ void compat_vdso_helper(struct vdso_maps *native, int pipe_fd,
 	long ret;
 	if (native->vdso_start != VDSO_BAD_ADDR) {
-		ret = syscall(__NR_munmap, native->vdso_start, native->vdso_size);
+		ret = syscall(__NR_munmap,
+			      native->vdso_start, native->sym.vdso_size);
 		exit_on(ret, err_fd, "Error: Failed to unmap native vdso\n");
 	}
 	if (native->vvar_start != VVAR_BAD_ADDR) {
-		ret = syscall(__NR_munmap, native->vvar_start, native->vvar_size);
+		ret = syscall(__NR_munmap,
+			      native->vvar_start, native->sym.vvar_size);
 		exit_on(ret, err_fd, "Error: Failed to unmap native vvar\n");
 	}
......
@@ -29,8 +29,8 @@
 #define LOG_PREFIX "vdso: "
 u64 vdso_pfn = VDSO_BAD_PFN;
-struct vdso_symtable vdso_sym_rt = VDSO_SYMTABLE_INIT;
-struct vdso_symtable vdso_compat_rt = VDSO_SYMTABLE_INIT;
+struct vdso_maps vdso_maps = VDSO_MAPS_INIT;
+struct vdso_maps vdso_maps_compat = VDSO_MAPS_INIT;
 /*
  * The VMAs list might have proxy vdso/vvar areas left
@@ -227,13 +227,13 @@ err:
 	return exit_code;
 }
-static int vdso_parse_maps(pid_t pid, struct vdso_symtable *s)
+static int vdso_parse_maps(pid_t pid, struct vdso_maps *s)
 {
 	int exit_code = -1;
 	char *buf;
 	struct bfd f;
-	*s = (struct vdso_symtable)VDSO_SYMTABLE_INIT;
+	*s = (struct vdso_maps)VDSO_MAPS_INIT;
 	f.fd = open_proc(pid, "maps");
 	if (f.fd < 0)
@@ -272,19 +272,19 @@ static int vdso_parse_maps(pid_t pid, struct vdso_symtable *s)
 				goto err;
 			}
 			s->vdso_start = start;
-			s->vdso_size = end - start;
+			s->sym.vdso_size = end - start;
 		} else {
 			if (s->vvar_start != VVAR_BAD_ADDR) {
 				pr_err("Got second VVAR entry\n");
 				goto err;
 			}
 			s->vvar_start = start;
-			s->vvar_size = end - start;
+			s->sym.vvar_size = end - start;
 		}
 	}
 	if (s->vdso_start != VDSO_BAD_ADDR && s->vvar_start != VVAR_BAD_ADDR)
-		s->vdso_before_vvar = (s->vdso_start < s->vvar_start);
+		s->sym.vdso_before_vvar = (s->vdso_start < s->vvar_start);
 	exit_code = 0;
 err:
@@ -292,10 +292,10 @@ err:
 	return exit_code;
 }
-static int validate_vdso_addr(struct vdso_symtable *s)
+static int validate_vdso_addr(struct vdso_maps *s)
 {
-	unsigned long vdso_end = s->vdso_start + s->vdso_size;
-	unsigned long vvar_end = s->vvar_start + s->vvar_size;
+	unsigned long vdso_end = s->vdso_start + s->sym.vdso_size;
+	unsigned long vvar_end = s->vvar_start + s->sym.vvar_size;
 	/*
 	 * Validate its structure -- for new vDSO format the
 	 * structure must be like
@@ -325,28 +325,28 @@ static int validate_vdso_addr(struct vdso_maps *s)
 	return 0;
 }
-static int vdso_fill_self_symtable(struct vdso_symtable *s)
+static int vdso_fill_self_symtable(struct vdso_maps *s)
 {
 	if (vdso_parse_maps(PROC_SELF, s))
 		return -1;
-	if (vdso_fill_symtable(s->vdso_start, s->vdso_size, s))
+	if (vdso_fill_symtable(s->vdso_start, s->sym.vdso_size, &s->sym))
 		return -1;
 	if (validate_vdso_addr(s))
 		return -1;
 	pr_debug("rt [vdso] %lx-%lx [vvar] %lx-%lx\n",
-		 s->vdso_start, s->vdso_start + s->vdso_size,
-		 s->vvar_start, s->vvar_start + s->vvar_size);
+		 s->vdso_start, s->vdso_start + s->sym.vdso_size,
+		 s->vvar_start, s->vvar_start + s->sym.vvar_size);
 	return 0;
 }
 #ifdef CONFIG_COMPAT
-static int vdso_mmap_compat(struct vdso_symtable *native,
-		struct vdso_symtable *compat, void *vdso_buf, size_t buf_size)
+static int vdso_mmap_compat(struct vdso_maps *native,
+		struct vdso_maps *compat, void *vdso_buf, size_t buf_size)
 {
 	pid_t pid;
 	int status, ret = -1;
@@ -421,8 +421,8 @@
 }
 #define COMPAT_VDSO_BUF_SZ (PAGE_SIZE*2)
-static int vdso_fill_compat_symtable(struct vdso_symtable *native,
-		struct vdso_symtable *compat)
+static int vdso_fill_compat_symtable(struct vdso_maps *native,
+		struct vdso_maps *compat)
 {
 	void *vdso_mmap;
 	int ret = -1;
@@ -443,14 +443,14 @@ static int vdso_fill_compat_symtable(struct vdso_symtable *native,
 	}
 	if (vdso_fill_symtable_compat((uintptr_t)vdso_mmap,
-			compat->vdso_size, compat)) {
+			compat->sym.vdso_size, &compat->sym)) {
 		pr_err("Failed to parse mmaped compatible vdso blob\n");
 		goto out_unmap;
 	}
 	pr_debug("compat [vdso] %lx-%lx [vvar] %lx-%lx\n",
-		 compat->vdso_start, compat->vdso_start + compat->vdso_size,
-		 compat->vvar_start, compat->vvar_start + compat->vvar_size);
+		 compat->vdso_start, compat->vdso_start + compat->sym.vdso_size,
+		 compat->vvar_start, compat->vvar_start + compat->sym.vvar_size);
 	ret = 0;
 out_unmap:
@@ -460,8 +460,8 @@
 }
 #else /* CONFIG_COMPAT */
-static int vdso_fill_compat_symtable(struct vdso_symtable *native,
-		struct vdso_symtable *compat)
+static int vdso_fill_compat_symtable(struct vdso_maps *native,
+		struct vdso_maps *compat)
 {
 	return 0;
 }
@@ -469,19 +469,19 @@ static int vdso_fill_compat_symtable(struct vdso_symtable *native,
 int vdso_init(void)
 {
-	if (vdso_fill_self_symtable(&vdso_sym_rt)) {
+	if (vdso_fill_self_symtable(&vdso_maps)) {
 		pr_err("Failed to fill self vdso symtable\n");
 		return -1;
 	}
-	if (vdso_fill_compat_symtable(&vdso_sym_rt, &vdso_compat_rt)) {
+	if (vdso_fill_compat_symtable(&vdso_maps, &vdso_maps_compat)) {
 		pr_err("Failed to fill compat vdso symtable\n");
 		return -1;
 	}
 	if (kdat.pmap != PM_FULL)
 		pr_info("VDSO detection turned off\n");
-	else if (vaddr_to_pfn(vdso_sym_rt.vdso_start, &vdso_pfn))
+	else if (vaddr_to_pfn(vdso_maps.vdso_start, &vdso_pfn))
 		return -1;
 	return 0;
......