Commit 631dd827 authored by Pavel Emelyanov

restore: Prepare on-restorer vmas earlier

Signed-off-by: Pavel Emelyanov <xemul@virtuozzo.com>
Reviewed-by: Cyrill Gorcunov <gorcunov@openvz.org>
parent 85adb786
cr-restore.c

@@ -505,7 +505,7 @@ static int restore_one_alive_task(int pid, CoreEntry *core)
 	if (prepare_file_locks(pid))
 		return -1;
 
-	if (open_vmas(current))
+	if (prepare_vmas(current, ta))
 		return -1;
 
 	if (fixup_sysv_shmems())
@@ -2665,9 +2665,6 @@ static int sigreturn_restore(pid_t pid, unsigned long ta_cp, CoreEntry *core)
 	struct thread_restore_args *thread_args;
 	struct restore_mem_zone *mz;
 
-	struct vma_area *vma;
-	unsigned long tgt_vmas;
-
 #ifdef CONFIG_VDSO
 	unsigned long vdso_rt_size = 0;
 #endif
@@ -2692,25 +2689,6 @@ static int sigreturn_restore(pid_t pid, unsigned long ta_cp, CoreEntry *core)
 	BUILD_BUG_ON(sizeof(struct task_restore_args) & 1);
 	BUILD_BUG_ON(sizeof(struct thread_restore_args) & 1);
 
-	/*
-	 * Copy VMAs to private rst memory so that it's able to
-	 * walk them and m(un|re)map.
-	 */
-	tgt_vmas = rst_mem_align_cpos(RM_PRIVATE);
-	list_for_each_entry(vma, &vmas->h, list) {
-		VmaEntry *vme;
-
-		vme = rst_mem_alloc(sizeof(*vme), RM_PRIVATE);
-		if (!vme)
-			goto err_nv;
-
-		*vme = *vma->e;
-
-		if (vma_area_is_private(vma, kdat.task_size))
-			vma_premmaped_start(vme) = vma->premmaped_addr;
-	}
-
 	/*
 	 * Put info about AIO rings, they will get remapped
 	 */
@@ -2868,12 +2846,13 @@ static int sigreturn_restore(pid_t pid, unsigned long ta_cp, CoreEntry *core)
 	task_args->task_size = kdat.task_size;
 
+	task_args->vmas = rst_mem_remap_ptr((unsigned long)task_args->vmas, RM_PRIVATE);
+
 #define remap_array(name, nr, cpos)	do {				\
 		task_args->name##_n = nr;				\
 		task_args->name = rst_mem_remap_ptr(cpos, RM_PRIVATE);	\
 	} while (0)
 
-	remap_array(vmas,         vmas->nr, tgt_vmas);
 	remap_array(posix_timers, posix_timers_nr, posix_timers_cpos);
 	remap_array(timerfd,      rst_timerfd_nr, rst_timerfd_cpos);
 	remap_array(siginfo,      siginfo_nr, siginfo_cpos);
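
The hunk above is the consumer side of the rst-mem convention this commit leans on: while restore data is being collected, the buffer behind RM_PRIVATE may still grow and move, so producers record only an offset into it (a "cpos"), and rst_mem_remap_ptr() turns that offset into a real pointer once the buffer has settled at its final address. Below is a minimal standalone sketch of the idiom, assuming nothing from CRIU; all toy_* names are invented for illustration, and realloc() stands in for the mremap()-grown arena.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *arena;			/* may move while allocations happen */
static size_t arena_used;

static size_t toy_cpos(void)		/* like rst_mem_align_cpos() */
{
	return arena_used;
}

static void *toy_alloc(size_t size)	/* like rst_mem_alloc() */
{
	char *new = realloc(arena, arena_used + size);

	if (!new)
		return NULL;
	arena = new;			/* the whole arena may have moved */
	memset(arena + arena_used, 0, size);
	arena_used += size;
	return arena + arena_used - size;
}

static void *toy_remap_ptr(size_t cpos)	/* like rst_mem_remap_ptr() */
{
	return arena + cpos;		/* safe once the arena stops moving */
}

int main(void)
{
	size_t cpos = toy_cpos();	/* remember the offset, not a pointer */
	int i, *p;

	for (i = 0; i < 4; i++) {
		p = toy_alloc(sizeof(*p));
		if (!p)
			return 1;
		*p = i;			/* pointers from earlier rounds may be stale */
	}

	p = toy_remap_ptr(cpos);	/* convert once, after the last alloc */
	for (i = 0; i < 4; i++)
		printf("%d\n", p[i]);
	free(arena);
	return 0;
}

Seen through this lens, the new task_args->vmas line simply lets the vmas array join the same remap step that posix_timers and friends already go through.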
mem.h

@@ -24,7 +24,8 @@ extern int parasite_dump_pages_seized(struct parasite_ctl *ctl,
 #define PME_PFRAME_MASK		((1ULL << PME_PSHIFT_OFFSET) - 1)
 #define PME_PFRAME(x)		((x) & PME_PFRAME_MASK)
 
-int open_vmas(struct pstree_item *t);
+struct task_restore_args;
+int prepare_vmas(struct pstree_item *t, struct task_restore_args *ta);
 int unmap_guard_pages(struct pstree_item *t);
 int prepare_mappings(struct pstree_item *t);
 
 #endif /* __CR_MEM_H__ */
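
Note how the header avoids pulling in a new include: prepare_vmas() only takes a pointer, so a forward declaration of struct task_restore_args as an incomplete type is enough. A tiny illustration of the idiom, with invented widget names:

/* header: a pointer to an incomplete type is legal in a prototype */
struct widget;				/* forward declaration only */
int widget_frob(struct widget *w);

/* one .c file supplies the definition alongside the implementation */
struct widget { int state; };
int widget_frob(struct widget *w) { return w->state; }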
mem.c

@@ -18,6 +18,7 @@
 #include "shmem.h"
 #include "pstree.h"
 #include "restorer.h"
+#include "rst-malloc.h"
 #include "bitmap.h"
 #include "sk-packet.h"
 #include "files-reg.h"
@@ -503,7 +504,7 @@ static int map_private_vma(struct pstree_item *t,
 			return -1;
 		}
 
-		vma->vm_open = NULL; /* prevent from 2nd open in open_vmas */
+		vma->vm_open = NULL; /* prevent from 2nd open in prepare_vmas */
 	}
 
 	nr_pages = vma_entry_len(vma->e) / PAGE_SIZE;
@@ -878,16 +879,19 @@ int unmap_guard_pages(struct pstree_item *t)
 	return 0;
 }
 
-int open_vmas(struct pstree_item *t)
+int prepare_vmas(struct pstree_item *t, struct task_restore_args *ta)
 {
 	int pid = t->pid.virt;
 	struct vma_area *vma;
-	struct list_head *vmas = &rsti(t)->vmas.h;
+	struct vm_area_list *vmas = &rsti(t)->vmas;
 
-	list_for_each_entry(vma, vmas, list) {
-		if (!(vma_area_is(vma, VMA_AREA_REGULAR)))
-			continue;
+	ta->vmas = (VmaEntry *)rst_mem_align_cpos(RM_PRIVATE);
+	ta->vmas_n = vmas->nr;
+
+	list_for_each_entry(vma, &vmas->h, list) {
+		VmaEntry *vme;
 
+		if (vma_area_is(vma, VMA_AREA_REGULAR)) {
 			pr_info("Opening 0x%016"PRIx64"-0x%016"PRIx64" 0x%016"PRIx64" (%x) vma\n",
 				vma->e->start, vma->e->end,
 				vma->e->pgoff, vma->e->status);
@@ -898,6 +902,20 @@ int open_vmas(struct pstree_item *t)
 				return -1;
 			}
 		}
 
+		vme = rst_mem_alloc(sizeof(*vme), RM_PRIVATE);
+		if (!vme)
+			return -1;
+
+		/*
+		 * Copy VMAs to private rst memory so that it's able to
+		 * walk them and m(un|re)map.
+		 */
+		*vme = *vma->e;
+
+		if (vma_area_is_private(vma, kdat.task_size))
+			vma_premmaped_start(vme) = vma->premmaped_addr;
 	}
 
 	return 0;
 }
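
With this hunk prepare_vmas() both opens the regular areas and flattens the VMA list into an array of VmaEntry records in RM_PRIVATE memory, publishing its cpos through ta->vmas; the sigreturn_restore() hunk above converts that cpos into a pointer just before control passes to the restorer blob. The moved comment states why the flat copy exists: the restorer runs after the old address space is dismantled, so it can only walk self-contained records it carries along, not a linked list threaded through memory that is about to vanish. A rough sketch of such a walk, using invented stand-ins (vma_entry, VMA_PRIV) rather than CRIU's real types:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for CRIU's VmaEntry and its status flags. */
struct vma_entry {
	uint64_t start, end;
	uint32_t status;
};
#define VMA_PRIV 0x1

/*
 * The pattern the flat array enables on the restorer side: a plain
 * indexed walk over self-contained records, with no pointers into
 * memory that has already been unmapped.
 */
static void walk_vmas(struct vma_entry *vmas, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (vmas[i].status & VMA_PRIV)
			printf("would mremap 0x%llx-0x%llx into place\n",
			       (unsigned long long)vmas[i].start,
			       (unsigned long long)vmas[i].end);
}

int main(void)
{
	struct vma_entry vmas[] = {
		{ 0x400000, 0x401000, VMA_PRIV },
		{ 0x7f0000000000ULL, 0x7f0000021000ULL, 0 },
	};

	walk_vmas(vmas, sizeof(vmas) / sizeof(vmas[0]));
	return 0;
}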
shmem.c

@@ -247,7 +247,7 @@ static int open_shmem_sysv(int pid, struct vma_area *vma)
 	/*
 	 * Value that doesn't (shouldn't) match with any real
 	 * sysv shmem ID (thus it cannot be 0, as shmem id can)
-	 * and still is not negative to prevent open_vmas() from
+	 * and still is not negative to prevent prepare_vmas() from
 	 * treating it as error.
 	 */
 	ret_fd = SYSV_SHMEM_SKIP_FD;