Commit d9aa7f02 authored by Cyrill Gorcunov

restore: Add mutexes via futex syscall and order threads creation

Signed-off-by: Cyrill Gorcunov <gorcunov@gmail.com>
parent 45871747
@@ -1453,7 +1453,8 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
 	task_args->pid = pid;
 	task_args->fd_core = fd_core;
 	task_args->fd_self_vmas = fd_self_vmas;
-	task_args->rst_lock = 0;
+
+	rst_mutex_init(&task_args->rst_lock);
 
 	if (pstree_entry.nr_threads) {
 		int i;
......
+#ifndef ATOMIC_H__
+#define ATOMIC_H__
+
+#define atomic_set(mem, v)				\
+({							\
+	asm volatile ("lock xchg %0, %1\n"		\
+		      : "+r" (v), "+m" (*mem)		\
+		      :					\
+		      : "cc", "memory");		\
+})
+
+#define atomic_get(mem)					\
+({							\
+	u32 ret__ = 0;					\
+	asm volatile ("lock xadd %0, %1\n"		\
+		      : "+r" (ret__), "+m" (*mem)	\
+		      :					\
+		      : "cc", "memory");		\
+	ret__;						\
+})
+
+#define atomic_inc(mem)					\
+({							\
+	u32 ret__ = 1;					\
+	asm volatile ("lock xadd %0, %1\n"		\
+		      : "+r" (ret__), "+m" (*mem)	\
+		      :					\
+		      : "cc", "memory");		\
+	ret__;						\
+})
+
+#define atomic_dec(mem)					\
+({							\
+	u32 ret__ = -1;					\
+	asm volatile ("lock xadd %0, %1\n"		\
+		      : "+r" (ret__), "+m" (*mem)	\
+		      :					\
+		      : "cc", "memory");		\
+	ret__;						\
+})
+
+#endif /* ATOMIC_H__ */
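Worth noting for the code below: lock xadd is an exchange-and-add, so atomic_inc() and atomic_dec() hand back the value the word held before the update (the property rst_mutex_lock() relies on), and atomic_get() reads the word by adding 0 under the same lock prefix. A small illustration, assuming the macros above and the u32 type are in scope:

	u32 word, five = 5, old;

	atomic_set(&word, five);	/* xchg needs an lvalue, hence the variable */
	old = atomic_inc(&word);	/* xadd returns the old value: old == 5, word == 6 */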
@@ -46,7 +46,6 @@ static void clear_bit(int nr, volatile unsigned long *addr)
 	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
 
 #else /* CONFIG_X86_64 */
-# error x86-32 is not implemented yet
 #endif /* CONFIG_X86_64 */
......
@@ -48,13 +48,15 @@ struct restore_mem_zone {
 #define first_on_heap(ptr, heap)	((typeof(ptr))heap)
 #define next_on_heap(ptr, prev)	((typeof(ptr))((long)(prev) + sizeof(*(prev))))
 
+typedef u32 rst_mutex_t;
+
 /* Make sure it's pow2 in size */
 struct thread_restore_args {
 	struct restore_mem_zone mem_zone;
 	int pid;
 	int fd_core;
-	long *rst_lock;
+	rst_mutex_t *rst_lock;
 } __aligned(sizeof(long));
 
 struct task_restore_core_args {
@@ -64,7 +66,7 @@ struct task_restore_core_args {
 	int fd_core;		/* opened core file */
 	int fd_self_vmas;	/* opened file with running VMAs to unmap */
 	bool restore_threads;	/* if to restore threads */
-	long rst_lock;
+	rst_mutex_t rst_lock;
 
 	/* threads restoration */
 	int nr_threads;		/* number of threads */
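Note the switch from long to u32 here is forced by the kernel interface, not style: futex(2) waits on a 32-bit integer at a 4-byte-aligned address, so the lock word threads sleep on must be exactly that size on both 32-bit and 64-bit builds.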
@@ -224,29 +226,27 @@ static void always_inline write_hex_n(unsigned long num)
 	sys_write(1, &c, 1);
 }
 
-static always_inline void rst_lock(long *v)
-{
-	while (*v) {
-		asm volatile("lfence");
-		asm volatile("pause");
-	}
-	(*v)++;
-	asm volatile("sfence");
-}
+#define FUTEX_WAIT	0
+#define FUTEX_WAKE	1
+
+static void always_inline rst_mutex_init(rst_mutex_t *mutex)
+{
+	u32 c = 0;
+	atomic_set(mutex, c);
+}
 
-static always_inline void rst_unlock(long *v)
+static void always_inline rst_mutex_lock(rst_mutex_t *mutex)
 {
-	(*v)--;
-	asm volatile("sfence");
+	u32 c;
+	while ((c = atomic_inc(mutex)))
+		sys_futex(mutex, FUTEX_WAIT, c + 1, NULL, NULL, 0);
 }
 
-static always_inline void rst_wait_unlock(long *v)
+static void always_inline rst_mutex_unlock(rst_mutex_t *mutex)
 {
-	while (*v) {
-		asm volatile("lfence");
-		asm volatile("pause");
-	}
+	u32 c = 0;
+	atomic_set(mutex, c);
+	sys_futex(mutex, FUTEX_WAKE, 1, NULL, NULL, 0);
 }
 
 #endif /* CR_RESTORER_H__ */
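Concretely, with one owner A and one contender B: the word starts at 0; A's atomic_inc() returns 0 (word now 1), so A holds the lock; B's atomic_inc() returns c = 1 (word now 2) and B sleeps in FUTEX_WAIT with expected value c + 1 = 2, i.e. the value its own increment just produced; A's unlock stores 0 and FUTEX_WAKEs one waiter; B retries, atomic_inc() returns 0, and B holds the lock. Passing c + 1 rather than c is what makes the sleep safe: if the word no longer equals the value the caller produced, the state changed between the increment and the syscall, FUTEX_WAIT returns immediately with EAGAIN, and the while loop simply retries.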
@@ -5,6 +5,7 @@
 #include <stdbool.h>
 
 #include "bitops.h"
+#include "atomic.h"
 
 /* prctl */
 #define ARCH_SET_GS 0x1001
......
@@ -18,7 +18,8 @@
 #include "restorer.h"
 
 /*
- * Threads restoration via sigreturn.
+ * Threads restoration via sigreturn. Note this routine is
+ * entered with the restore lock held and unlocks it at the end.
 */
long restore_thread(long cmd, struct thread_restore_args *args)
{
@@ -86,7 +87,7 @@ long restore_thread(long cmd, struct thread_restore_args *args)
 		goto core_restore_end;
 	}
 
-	rst_unlock(args->rst_lock);
+	rst_mutex_unlock(args->rst_lock);
 
 	new_sp = (long)rt_sigframe + 8;
 	asm volatile(
@@ -430,7 +431,7 @@ self_len_end:
 			if (thread_args[i].pid == args->pid)
 				continue;
 
-			rst_lock(&args->rst_lock);
+			rst_mutex_lock(&args->rst_lock);
 
 			new_sp =
 				RESTORE_ALIGN_STACK((long)thread_args[i].mem_zone.stack,
@@ -482,7 +483,7 @@ self_len_end:
 				  "g"(&thread_args[i])
 				: "rax", "rdi", "rsi", "rdx", "r10", "memory");
 
-			rst_wait_unlock(&args->rst_lock);
+			rst_mutex_lock(&args->rst_lock);
 		}
 	}
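Taken together, the last two hunks are the "order threads creation" part of the commit message: the parent acquires the mutex before cloning each thread, the thread releases it at the end of restore_thread(), and the parent's second rst_mutex_lock() does not return until that release, so at most one thread is being set up at a time. A pseudo-C sketch of the handshake (clone_thread() is a hypothetical stand-in for the inline clone asm, not a real helper):

	/* Parent, once per thread to restore: */
	rst_mutex_lock(&args->rst_lock);		/* thread will start with the lock held */
	clone_thread(restore_thread, &thread_args[i]);	/* hypothetical stand-in for the clone asm */
	rst_mutex_lock(&args->rst_lock);		/* blocks until the new thread unlocks */

	/* Thread, at the end of restore_thread(): */
	rst_mutex_unlock(args->rst_lock);		/* signal the parent to continue */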
......