Commit 3b35d053 authored by Pavel Emelyanov

rst: Use collected target vmas list in restorer_get_vma_hint

It uses the vma image right now, but the image is already sucked in.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
parent d7961998
...@@ -1321,14 +1321,11 @@ static int restore_all_tasks(pid_t pid, struct cr_options *opts) ...@@ -1321,14 +1321,11 @@ static int restore_all_tasks(pid_t pid, struct cr_options *opts)
return restore_root_task(pstree_fd, opts); return restore_root_task(pstree_fd, opts);
} }
static long restorer_get_vma_hint(pid_t pid, struct list_head *self_vma_list, long vma_len) static long restorer_get_vma_hint(pid_t pid, struct list_head *tgt_vma_list,
struct list_head *self_vma_list, long vma_len)
{ {
struct vma_area *vma_area; struct vma_area *t_vma;
long prev_vma_end, hint; long prev_vma_end = 0;
struct vma_entry vma;
int fd = -1, ret;
hint = -1;
/* /*
* Here we need some heuristics -- the VMA which restorer will * Here we need some heuristics -- the VMA which restorer will
...@@ -1340,49 +1337,24 @@ static long restorer_get_vma_hint(pid_t pid, struct list_head *self_vma_list, lo ...@@ -1340,49 +1337,24 @@ static long restorer_get_vma_hint(pid_t pid, struct list_head *self_vma_list, lo
* better to stick with it. * better to stick with it.
*/ */
fd = open_image_ro_nocheck(FMT_FNAME_VMAS, pid); list_for_each_entry(t_vma, tgt_vma_list, list) {
if (fd < 0) if (prev_vma_end && ((t_vma->vma.start - prev_vma_end) > vma_len)) {
return -1; struct vma_area *s_vma;
prev_vma_end = 0;
lseek(fd, MAGIC_OFFSET, SEEK_SET);
while (1) {
ret = read(fd, &vma, sizeof(vma));
if (ret && ret != sizeof(vma)) {
pr_perror("Can't read vma entry from core-%d", pid);
break;
}
if (!prev_vma_end) {
prev_vma_end = vma.end;
continue;
}
if ((vma.start - prev_vma_end) > vma_len) {
unsigned long prev_vma_end2 = 0; unsigned long prev_vma_end2 = 0;
list_for_each_entry(vma_area, self_vma_list, list) { list_for_each_entry(s_vma, self_vma_list, list) {
if (!prev_vma_end2) { if (prev_vma_end2 && (prev_vma_end2 >= prev_vma_end) &&
prev_vma_end2 = vma_area->vma.end; ((s_vma->vma.start - prev_vma_end2) > vma_len))
continue; return prev_vma_end2;
}
if ((prev_vma_end2 >= prev_vma_end) && prev_vma_end2 = s_vma->vma.end;
(vma_area->vma.start - prev_vma_end2) > vma_len) {
hint = prev_vma_end2;
goto found;
}
prev_vma_end2 = vma_area->vma.end;
} }
} }
prev_vma_end = vma.end; prev_vma_end = t_vma->vma.end;
} }
found:
close_safe(&fd); return -1;
return hint;
} }
#define USEC_PER_SEC 1000000L #define USEC_PER_SEC 1000000L
...@@ -1577,7 +1549,7 @@ static int sigreturn_restore(pid_t pid, struct list_head *tgt_vmas, int nr_vmas) ...@@ -1577,7 +1549,7 @@ static int sigreturn_restore(pid_t pid, struct list_head *tgt_vmas, int nr_vmas)
restore_thread_vma_len = round_up(restore_thread_vma_len, PAGE_SIZE); restore_thread_vma_len = round_up(restore_thread_vma_len, PAGE_SIZE);
exec_mem_hint = restorer_get_vma_hint(pid, &self_vma_list, exec_mem_hint = restorer_get_vma_hint(pid, tgt_vmas, &self_vma_list,
restore_task_vma_len + restore_task_vma_len +
restore_thread_vma_len + restore_thread_vma_len +
self_vmas_len + self_vmas_len +
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment