Commit 3b8071ba authored by Dmitry Safonov's avatar Dmitry Safonov Committed by Andrei Vagin

vdso: Introduce vdso mark v3

We need to place @rt_vvar_addr into vdso mark, as we don't know
the position of rt-vvar to be dropped on the following dumps.
I've renamed proxy_*_addr to orig_*_addr, as it is more
descriptive.
orig_*_addr we need for marking those VMAs as accordingly,
so restorer would know what to do with them. Otherwise, it'll
think they are just regular vmas.
Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
parent 91c81e5f
......@@ -18,64 +18,69 @@ static inline bool vdso_symbol_empty(struct vdso_symbol *s)
}
/*
 * Special mark which allows to identify runtime vdso (rt-vdso) where
 * calls from proxy (original) vdso are redirected. This mark is usually
 * placed at the start of the vdso area where the Elf header lives.
 * Since such runtime vdso is solely used by the proxy and
 * nobody else is supposed to access it, it's more or less
 * safe to screw the Elf header with @signature and the
 * vvar/vdso addresses for the next dumping.
 *
 * The @orig_addr deserves a few comments. When we redirect the calls
 * from the original vdso to the runtime vdso, on the next checkpoint it
 * won't be possible to find the original vdso/vvar pair, thus we save
 * their addresses in the member.
 *
 * As on the following dumps we need to drop the rt-{vvar,vdso} pair
 * from the list of VMAs to save in images, we save the rt-vvar address
 * also.
 */
/*
 * Mark written over the Elf header of the runtime vdso (v3 format).
 * Records the original (proxied) vdso/vvar addresses and the runtime
 * vvar address so a following dump can recognize and classify the VMAs.
 */
struct vdso_mark {
	u64		signature;	/* one of VDSO_MARK_SIGNATURE_V* */
	unsigned long	orig_vdso_addr;	/* start of the original vdso VMA */
	unsigned long	version;	/* mark format version (VDSO_MARK_CUR_VERSION) */
	/*
	 * In case of new vDSO format the VVAR area address is
	 * needed for easier discovering where it lives without
	 * relying on procfs output.
	 */
	unsigned long	orig_vvar_addr;	/* start of the original vvar VMA, or VVAR_BAD_ADDR */
	unsigned long	rt_vvar_addr;	/* start of the runtime vvar VMA, or VVAR_BAD_ADDR */
};
/*
 * Magic numbers identifying a vdso mark, one per mark format version.
 * The bytes spell the version out: "criuvdso", "criuvDSO", "CRIUvDSO".
 */
#define VDSO_MARK_SIGNATURE_V1	(0x6f73647675697263ULL)	/* Magic number (criuvdso) */
#define VDSO_MARK_SIGNATURE_V2	(0x4f53447675697263ULL)	/* Magic number (criuvDSO) */
#define VDSO_MARK_SIGNATURE_V3	(0x4f53447655495243ULL)	/* Magic number (CRIUvDSO) */
#define VDSO_MARK_CUR_VERSION	(3)
static inline void vdso_put_mark(void *where, unsigned long proxy_vdso_addr, unsigned long proxy_vvar_addr)
static inline void vdso_put_mark(void *where, unsigned long rt_vvar_addr,
unsigned long orig_vdso_addr, unsigned long orig_vvar_addr)
{
struct vdso_mark *m = where;
m->signature = VDSO_MARK_SIGNATURE_V2;
m->proxy_vdso_addr = proxy_vdso_addr;
m->signature = VDSO_MARK_SIGNATURE_V3;
m->orig_vdso_addr = orig_vdso_addr;
m->version = VDSO_MARK_CUR_VERSION;
m->proxy_vvar_addr = proxy_vvar_addr;
m->orig_vvar_addr = orig_vvar_addr;
m->rt_vvar_addr = rt_vvar_addr;
}
/*
 * Check whether @addr carries a vdso mark of any supported version.
 * Marks of older formats are upgraded in place to the current (v3)
 * format, filling the fields the old format lacked with VVAR_BAD_ADDR.
 */
static inline bool is_vdso_mark(void *addr)
{
	struct vdso_mark *m = addr;

	switch (m->signature) {
	case VDSO_MARK_SIGNATURE_V3:
		return true;
	/*
	 * Old formats -- simply extend the mark up
	 * to the version we support.
	 */
	case VDSO_MARK_SIGNATURE_V2:
		/* v2 has no rt_vvar_addr */
		vdso_put_mark(m, VVAR_BAD_ADDR,
				m->orig_vdso_addr, m->orig_vvar_addr);
		return true;
	case VDSO_MARK_SIGNATURE_V1:
		/* v1 has neither rt_vvar_addr nor orig_vvar_addr */
		vdso_put_mark(m, VVAR_BAD_ADDR,
				m->orig_vdso_addr, VVAR_BAD_ADDR);
		return true;
	}
	return false;
}
......
......@@ -193,15 +193,26 @@ static int remap_rt_vdso(VmaEntry *vma_vdso, VmaEntry *vma_vvar,
*/
static int add_vdso_proxy(VmaEntry *vma_vdso, VmaEntry *vma_vvar,
struct vdso_symtable *sym_img, struct vdso_symtable *sym_rt,
unsigned long rt_vdso_addr, bool compat_vdso)
unsigned long vdso_rt_parked_at, bool compat_vdso)
{
unsigned long rt_vvar_addr = vdso_rt_parked_at;
unsigned long rt_vdso_addr = vdso_rt_parked_at;
unsigned long orig_vvar_addr =
vma_vvar ? vma_vvar->start : VVAR_BAD_ADDR;
pr_info("Runtime vdso mismatches dumpee, generate proxy\n");
/*
* Don't forget to shift if vvar is before vdso.
*/
if (sym_rt->vvar_size != VDSO_BAD_SIZE && !sym_rt->vdso_before_vvar)
rt_vdso_addr += sym_rt->vvar_size;
if (sym_rt->vvar_size == VVAR_BAD_SIZE) {
rt_vvar_addr = VVAR_BAD_ADDR;
} else {
if (sym_rt->vdso_before_vvar)
rt_vvar_addr += sym_rt->vdso_size;
else
rt_vdso_addr += sym_rt->vvar_size;
}
if (vdso_redirect_calls(rt_vdso_addr, vma_vdso->start,
sym_rt, sym_img, compat_vdso)) {
......@@ -215,8 +226,8 @@ static int add_vdso_proxy(VmaEntry *vma_vdso, VmaEntry *vma_vvar,
* it's auto-generated every new session if proxy required.
*/
sys_mprotect((void *)rt_vdso_addr, sym_rt->vdso_size, PROT_WRITE);
vdso_put_mark((void *)rt_vdso_addr, vma_vdso->start,
vma_vvar ? vma_vvar->start : VVAR_BAD_ADDR);
vdso_put_mark((void *)rt_vdso_addr, rt_vvar_addr,
vma_vdso->start, orig_vvar_addr);
sys_mprotect((void *)rt_vdso_addr, sym_rt->vdso_size, VDSO_PROT);
return 0;
......
......@@ -555,8 +555,8 @@ static int parasite_check_vdso_mark(struct parasite_vdso_vma_entry *args)
return -EINVAL;
}
args->is_marked = 1;
args->proxy_vdso_addr = m->proxy_vdso_addr;
args->proxy_vvar_addr = m->proxy_vvar_addr;
args->proxy_vdso_addr = m->orig_vdso_addr;
args->proxy_vvar_addr = m->orig_vvar_addr;
} else {
args->is_marked = 0;
args->proxy_vdso_addr = VDSO_BAD_ADDR;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment