Commit c2666441 authored by Andrey Vagin's avatar Andrey Vagin

tasks: synchronize restoration of tasks (v2)

This patch prepares the code for error handling. In the near future
we will handle SIGCHLD. If the restore of one task fails, we will
send a signal to the other tasks so they can complete.

For this we need the ability to wait until all tasks have been
restored. This patch implements that.

v2: Don't wait children.
Signed-off-by: Andrey Vagin <avagin@openvz.org>
Acked-by: Pavel Emelyanov <xemul@parallels.com>
parent fb177e03
......@@ -75,6 +75,22 @@ struct pipe_list_entry {
off_t offset;
};
static struct task_entries *task_entries;
static void task_add_entry(int pid)
{
int *nr = &task_entries->nr;
struct task_entry *e = &task_entries->entries[*nr];
(*nr)++;
BUG_ON((*nr) * sizeof(struct task_entry) +
sizeof(struct task_entries) > TASK_ENTRIES_SIZE);
e->pid = pid;
e->done = 0;
}
static struct shmem_id *shmem_ids;
static struct shmems *shmems;
......@@ -372,6 +388,14 @@ static int prepare_shared(int ps_fd)
shmems->nr_shmems = 0;
task_entries = mmap(NULL, TASK_ENTRIES_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, 0, 0);
if (task_entries == MAP_FAILED) {
pr_perror("Can't map shmem\n");
return -1;
}
task_entries->nr = 0;
task_entries->start = 0;
pipes = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, 0, 0);
if (pipes == MAP_FAILED) {
pr_perror("Can't map pipes\n");
......@@ -403,6 +427,8 @@ static int prepare_shared(int ps_fd)
if (prepare_fd_pid(e.pid))
return -1;
task_add_entry(e.pid);
lseek(ps_fd, e.nr_children * sizeof(u32) + e.nr_threads * sizeof(u32), SEEK_CUR);
}
......@@ -1189,7 +1215,7 @@ static int restore_task_with_children(int my_pid)
static int restore_root_task(int fd)
{
struct pstree_entry e;
int ret;
int ret, i;
ret = read(fd, &e, sizeof(e));
if (ret != sizeof(e)) {
......@@ -1204,6 +1230,15 @@ static int restore_root_task(int fd)
if (ret < 0)
return -1;
for (i = 0; i < task_entries->nr; i++) {
pr_info("Wait while the task %d restored\n",
task_entries->entries[i].pid);
cr_wait_while(&task_entries->entries[i].done, 0);
}
pr_info("Go on!!!\n");
cr_wait_set(&task_entries->start, 1);
wait(NULL);
return 0;
}
......@@ -1330,6 +1365,7 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
BUILD_BUG_ON(sizeof(struct task_restore_core_args) & 1);
BUILD_BUG_ON(sizeof(struct thread_restore_args) & 1);
BUILD_BUG_ON(SHMEMS_SIZE % PAGE_SIZE);
BUILD_BUG_ON(TASK_ENTRIES_SIZE % PAGE_SIZE);
fd_pstree = open_image_ro_nocheck(FMT_FNAME_PSTREE, pstree_pid);
if (fd_pstree < 0)
......@@ -1421,7 +1457,7 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
exec_mem_hint = restorer_get_vma_hint(pid, &self_vma_list,
restore_task_vma_len +
restore_thread_vma_len +
SHMEMS_SIZE);
SHMEMS_SIZE + TASK_ENTRIES_SIZE);
if (exec_mem_hint == -1) {
pr_err("No suitable area for task_restore bootstrap (%dK)\n",
restore_task_vma_len + restore_thread_vma_len);
......@@ -1483,6 +1519,15 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
goto err;
task_args->shmems = shmems_ref;
shmems_ref = (struct shmems *)(exec_mem_hint +
restore_task_vma_len +
restore_thread_vma_len +
SHMEMS_SIZE);
ret = shmem_remap(task_entries, shmems_ref, TASK_ENTRIES_SIZE);
if (ret < 0)
goto err;
task_args->task_entries = shmems_ref;
/*
* Arguments for task restoration.
*/
......
......@@ -75,6 +75,7 @@ struct task_restore_core_args {
thread_restore_fcall_t clone_restore_fn; /* helper address for clone() call */
struct thread_restore_args *thread_args; /* array of thread arguments */
struct shmems *shmems;
struct task_entries *task_entries;
} __aligned(sizeof(long));
struct pt_regs {
......@@ -301,6 +302,20 @@ struct shmems {
struct shmem_info entries[0];
};
/* Size of the shared anonymous mapping that holds struct task_entries. */
#define TASK_ENTRIES_SIZE 4096
/* Per-task restore-progress slot, shared between crtools and the restorer blob. */
struct task_entry {
int pid; /* pid of the task being restored */
u32 done; // futex: the task sets it to 1 once its restore is finished
};
/* Header of the shared area; followed in memory by nr task_entry slots. */
struct task_entries {
int nr; /* number of valid slots in entries[] */
u32 start; //futex: root task sets it to 1 to let all tasks proceed
struct task_entry entries[0]; /* trailing array of nr entries */
};
static always_inline struct shmem_info *
find_shmem_by_pid(struct shmems *shmems, unsigned long start, int pid)
{
......@@ -318,6 +333,17 @@ find_shmem_by_pid(struct shmems *shmems, unsigned long start, int pid)
return NULL;
}
static always_inline struct task_entry *
task_get_entry(struct task_entries *base, int pid)
{
int i;
for (i = 0; i < base->nr; i++)
if (base->entries[i].pid == pid)
return &base->entries[i];
return NULL;
}
/* We need own handler */
......
......@@ -148,6 +148,7 @@ self_len_start:
long restore_task(long cmd, struct task_restore_core_args *args)
{
long ret = -1;
struct task_entry *task_entry;
switch (cmd) {
......@@ -546,6 +547,19 @@ self_len_end:
sys_close(fd);
}
write_num_n(__LINE__);
task_entry = task_get_entry(args->task_entries, my_pid);
cr_wait_set(&task_entry->done, 1);
cr_wait_while(&args->task_entries->start, 0);
write_num_n(__LINE__);
ret = sys_munmap(args->task_entries, TASK_ENTRIES_SIZE);
if (ret < 0) {
write_num_n(__LINE__);
write_num_n(ret);
goto core_restore_end;
}
/*
* Sigframe stack.
*/
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment