Commit 01f8f8f4 authored by Cyrill Gorcunov

restore: Bring trivial locker back

Threads are better restored in a serialized way;
otherwise, if some error happens, the error
message gets garbled.
Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
parent a0956172
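The change below serializes thread restoration with a one-word handshake: the task leader raises task_args->rst_lock before cloning each thread and waits for the freshly restored thread to drop it again. A minimal, self-contained sketch of the same pattern follows; it is illustrative only, with pthreads, C11 atomics and printf() standing in for the restorer's clone(), raw fence/pause spinning, and the actual restoration work; none of those substitutions come from the commit itself.

/*
 * Sketch of the rst_lock handshake: the "leader" starts workers one at a
 * time and spins until each one signals completion, so their output can
 * never interleave.  Names mirror the diff; the program is hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_long rst_lock_flag;	/* plays the role of task_args->rst_lock */

static void rst_lock(atomic_long *v)
{
	/* Only the leader ever takes the lock, so test-then-set is enough. */
	while (atomic_load(v))
		;
	atomic_fetch_add(v, 1);
}

static void rst_unlock(atomic_long *v)
{
	atomic_fetch_sub(v, 1);
}

static void rst_wait_unlock(atomic_long *v)
{
	while (atomic_load(v))
		;
}

static void *restore_thread(void *arg)
{
	/* The leader holds the lock for us: nobody else prints right now. */
	printf("thread %ld: restored\n", (long)arg);
	rst_unlock(&rst_lock_flag);
	return NULL;
}

int main(void)
{
	pthread_t tid[4];
	long i;

	for (i = 0; i < 4; i++) {
		rst_lock(&rst_lock_flag);	/* serialize: one thread at a time */
		pthread_create(&tid[i], NULL, restore_thread, (void *)i);
		rst_wait_unlock(&rst_lock_flag);/* block until that thread is done */
	}
	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Run this way, a thread's output (and, in the real restorer, any error report) cannot mix with another thread's.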
@@ -1453,6 +1453,7 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
	task_args->pid = pid;
	task_args->fd_core = fd_core;
	task_args->fd_self_vmas = fd_self_vmas;
	task_args->rst_lock = 0;
	if (pstree_entry.nr_threads) {
		int i;
@@ -1479,6 +1480,8 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
				goto err;
			}
			thread_args[i].rst_lock = &task_args->rst_lock;
			pr_info("Thread %4d stack %8p heap %8p rt_sigframe %8p\n",
				i, (long)thread_args[i].mem_zone.stack,
				thread_args[i].mem_zone.heap,
@@ -54,6 +54,7 @@ struct thread_restore_args {
	int pid;
	int fd_core;
	long *rst_lock;
} __aligned(sizeof(long));
struct task_restore_core_args {
@@ -63,6 +64,7 @@ struct task_restore_core_args {
	int fd_core;		/* opened core file */
	int fd_self_vmas;	/* opened file with running VMAs to unmap */
	bool restore_threads;	/* if to restore threads */
	long rst_lock;
	/* threads restoration */
	int nr_threads;		/* number of threads */
@@ -222,4 +224,29 @@ static void always_inline write_hex_n(unsigned long num)
	sys_write(1, &c, 1);
}
static always_inline void rst_lock(long *v)
{
	while (*v) {
		asm volatile("lfence");
		asm volatile("pause");
	}
	(*v)++;
	asm volatile("sfence");
}
static always_inline void rst_unlock(long *v)
{
	(*v)--;
	asm volatile("sfence");
}
static always_inline void rst_wait_unlock(long *v)
{
	while (*v) {
		asm volatile("lfence");
		asm volatile("pause");
	}
}
#endif /* CR_RESTORER_H__ */
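A note on the helpers above: this is a deliberately trivial lock, a plain long that, judging by the call sites below, is only ever raised by the task leader (rst_lock) and dropped by the thread it just cloned (rst_unlock), with the leader spinning in rst_wait_unlock in between. With a single potential taker there is no need for an atomic read-modify-write; the pause instruction is the usual x86 spin-wait hint, and the lfence/sfence barriers order the flag accesses around the busy loops.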
@@ -86,6 +86,8 @@ long restore_thread(long cmd, struct thread_restore_args *args)
		goto core_restore_end;
	}
	rst_unlock(args->rst_lock);
	new_sp = (long)rt_sigframe + 8;
	asm volatile(
		"movq %0, %%rax \n"
@@ -428,6 +430,8 @@ self_len_end:
			if (thread_args[i].pid == args->pid)
				continue;
			rst_lock(&args->rst_lock);
			new_sp =
				RESTORE_ALIGN_STACK((long)thread_args[i].mem_zone.stack,
						    sizeof(thread_args[i].mem_zone.stack));
@@ -478,7 +482,7 @@ self_len_end:
			 "g"(&thread_args[i])
			 : "rax", "rdi", "rsi", "rdx", "r10", "memory");
			//r_wait_unlock(args->lock);
			rst_wait_unlock(&args->rst_lock);
		}
	}
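Taken together, the call sites form the handshake: the leader takes rst_lock before cloning a thread, the restored thread drops it with rst_unlock just before jumping back through its rt_sigframe, and the leader spins in rst_wait_unlock so the next thread is not started (and cannot mix its output) until the previous one has finished.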