Commit 01484182 authored by Andrey Vagin

restorer: isolate restorer like parasite (v4)

Before this patch the restorer's code was linked into crtools and the
needed functions were copied out of it one by one. With that scheme
every function had to be inlined and global variables could not be used.

I suggest making it work like the parasite: the restorer's code is
isolated in its own file and copied as a whole. It is compiled as
position-independent code, so we can use ordinary functions and global
variables (e.g. to store the descriptor for log messages), as sketched
below.

v2: correct indentation in a separate patch
v3: introduce a variable restore_task_exec_start, symmetrical to
    restore_thread_exec_start
v4: don't pass a command to restore_thread()
Signed-off-by: Andrey Vagin <avagin@openvz.org>
Acked-by: Pavel Emelyanov <xemul@parallels.com>
parent f05e2dcc
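
The scheme mirrors the parasite blob: restorer.c is compiled with -fpic,
linked by restorer.lds.S into a raw binary, and gen-offsets.sh turns that
binary into restorer-blob.h, which exports the blob bytes plus the offsets
of restore_task() and restore_thread(). The restore path then only has to
copy the blob into executable memory once and compute the entry points from
the offsets. A minimal sketch of that consumer side (map_restorer_blob() is
a hypothetical helper; the real sigreturn_restore() below additionally
reserves the argument pages, picks an mmap address hint and does proper
error handling):

#include <string.h>
#include <sys/mman.h>

#include "restorer.h"        /* task_restore_fcall_t and the args structs */
#include "restorer-blob.h"   /* restorer_blob[] and restorer_blob_offset__* */

static task_restore_fcall_t map_restorer_blob(void)
{
	void *mem;

	mem = mmap(NULL, sizeof(restorer_blob),
		   PROT_READ | PROT_WRITE | PROT_EXEC,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED)
		return NULL;

	/* The whole blob is copied once; no per-function copying any more. */
	memcpy(mem, restorer_blob, sizeof(restorer_blob));

	/* Entry points are plain offsets inside the blob. */
	return (task_restore_fcall_t)((char *)mem +
				      restorer_blob_offset__restore_task);
}
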
@@ -70,7 +70,6 @@ OBJS += cr-restore.o
OBJS += cr-show.o
OBJS += util.o
OBJS += ptrace.o
OBJS += restorer.o
OBJS += log.o
OBJS += libnetlink.o
OBJS += sockets.o
@@ -108,7 +107,35 @@ $(HEAD-BLOB-GEN): $(HEAD-BIN) $(DEPS-BLOB)
$(HEAD-BIN) > parasite-blob.h
$(Q) sync
$(OBJS): $(DEPS) $(HEAD-BLOB-GEN)
ROBJS-BLOB = restorer.o
RDEPS-BLOB += $(patsubst %.o,%.d,$(ROBJS-BLOB))
RSRCS-BLOB += $(patsubst %.o,%.c,$(ROBJS-BLOB))
RHEAD-BLOB-GEN := $(patsubst %.o,%-blob.h,$(ROBJS-BLOB))
RHEAD-BIN := $(patsubst %.o,%.bin,$(ROBJS-BLOB))
RHEAD-LDS := $(patsubst %.o,%.lds.S,$(ROBJS-BLOB))
RHEAD-IDS := $(patsubst %.h,%_h__,$(subst -,_,$(RHEAD-BLOB)))
$(ROBJS-BLOB): $(RSRCS-BLOB)
$(E) " CC " $@
$(Q) $(CC) -c $(CFLAGS) -fpic $< -o $@
$(RHEAD-BIN): $(ROBJS-BLOB) $(RHEAD-LDS)
$(E) " GEN " $@
$(Q) $(LD) -T $(patsubst %.bin,%.lds.S,$@) $< -o $@
$(RHEAD-BLOB-GEN): $(RHEAD-BIN) $(RDEPS-BLOB)
$(E) " GEN " $@
$(Q) $(SH) gen-offsets.sh \
restorer_h__ \
restorer_blob_offset__ \
restorer_blob \
$(ROBJS-BLOB) \
$(RHEAD-BIN) > restorer-blob.h
$(Q) sync
$(OBJS): $(DEPS) $(HEAD-BLOB-GEN) $(RHEAD-BLOB-GEN)
%.o: %.c
$(E) " CC " $@
$(Q) $(CC) -c $(CFLAGS) $< -o $@
@@ -117,11 +144,11 @@ $(PROGRAM): $(OBJS)
$(E) " LINK " $@
$(Q) $(CC) $(CFLAGS) $(OBJS) $(LIBS) -o $@
$(DEPS): $(HEAD-BLOB-GEN) $(HEADERS)
$(DEPS): $(HEAD-BLOB-GEN) $(HEADERS) $(RHEAD-BLOB-GEN) $(RHEADERS)
%.d: %.c
$(Q) $(CC) -M -MT $(patsubst %.d,%.o,$@) $(CFLAGS) $< -o $@
$(DEPS-BLOB): $(SRCS-BLOB)
$(DEPS-BLOB) $(RDEPS-BLOB): $(SRCS-BLOB) $(RSRCS-BLOB)
$(Q) $(CC) -M -MT $(patsubst %.d,%.o,$@) $(CFLAGS) $< -o $@
test:
@@ -145,7 +172,7 @@ clean:
$(Q) $(RM) -f ./tags
$(Q) $(RM) -f ./cscope*
$(Q) $(RM) -f ./$(PROGRAM)
$(Q) $(RM) -f ./$(HEAD-BLOB-GEN)
$(Q) $(RM) -f ./$(HEAD-BLOB-GEN) ./$(RHEAD-BLOB-GEN)
$(Q) $(MAKE) -C test clean
.PHONY: clean
......
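
gen-offsets.sh itself is not part of this diff; judging from the arguments
passed to it above (restorer_h__, restorer_blob_offset__, restorer_blob) and
the identifiers used in cr-restore.c, the generated restorer-blob.h
presumably has roughly this shape (the byte values and offsets below are
invented placeholders):

/* Presumed shape of the generated restorer-blob.h; bytes and offsets are fake. */
#ifndef restorer_h__
#define restorer_h__

static char restorer_blob[] = {
	0x55, 0x48, 0x89, 0xe5, /* ... raw contents of restorer.bin ... */
};

#define restorer_blob_offset__restore_task	0x0000
#define restorer_blob_offset__restore_thread	0x0240

#endif /* restorer_h__ */
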
@@ -34,6 +34,7 @@
#include "lock.h"
#include "files.h"
#include "proc_parse.h"
#include "restorer-blob.h"
#include "crtools.h"
/*
@@ -1317,12 +1318,13 @@ err_or_found:
static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
{
long restore_task_code_len, restore_task_vma_len;
long restore_thread_code_len, restore_thread_vma_len;
long restore_code_len, restore_task_vma_len;
long restore_thread_vma_len;
void *exec_mem = MAP_FAILED;
void *restore_thread_exec_start;
void *restore_task_exec_start;
void *restore_code_start;
void *shmems_ref;
long new_sp, exec_mem_hint;
@@ -1346,9 +1348,8 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
pr_info("%d: Restore via sigreturn\n", pid);
restore_task_code_len = 0;
restore_code_len = 0;
restore_task_vma_len = 0;
restore_thread_code_len = 0;
restore_thread_vma_len = 0;
pid_dir = open_pid_proc(pid);
@@ -1404,10 +1405,10 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
free_mappings(&self_vma_list);
restore_task_code_len = restore_task(RESTORE_CMD__GET_SELF_LEN, NULL) - (long)restore_task;
restore_task_code_len = round_up(restore_task_code_len, 16);
restore_code_len = sizeof(restorer_blob);
restore_code_len = round_up(restore_code_len, 16);
restore_task_vma_len = round_up(restore_task_code_len + sizeof(*task_args), PAGE_SIZE);
restore_task_vma_len = round_up(restore_code_len + sizeof(*task_args), PAGE_SIZE);
/*
* Thread statistics
@@ -1438,14 +1439,9 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
* per thread.
*/
restore_thread_code_len = restore_thread(RESTORE_CMD__GET_SELF_LEN, NULL) - (long)restore_thread;
restore_thread_code_len = round_up(restore_thread_code_len, 16);
restore_thread_vma_len = sizeof(*thread_args) * pstree_entry.nr_threads;
restore_thread_vma_len = round_up(restore_thread_vma_len, 16);
restore_thread_vma_len+= restore_thread_code_len;
pr_info("%d: %d threads require %dK of memory\n",
pid, pstree_entry.nr_threads,
KBYTES(restore_thread_vma_len));
@@ -1482,10 +1478,11 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
* Prepare a memory map for restorer. Note a thread space
* might be completely unused so it's here just for convenience.
*/
restore_task_exec_start = exec_mem;
restore_thread_exec_start = restore_task_exec_start + restore_task_vma_len;
task_args = restore_task_exec_start + restore_task_code_len;
thread_args = restore_thread_exec_start + restore_thread_code_len;
restore_code_start = exec_mem;
restore_thread_exec_start = restore_code_start + restorer_blob_offset__restore_thread;
restore_task_exec_start = restore_code_start + restorer_blob_offset__restore_task;
task_args = restore_code_start + restore_code_len;
thread_args = restore_thread_exec_start;
memzero_p(task_args);
memzero_p(thread_args);
@@ -1493,8 +1490,7 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
/*
* Code at a new place.
*/
memcpy(restore_task_exec_start, &restore_task, restore_task_code_len);
memcpy(restore_thread_exec_start, &restore_thread, restore_thread_code_len);
memcpy(restore_code_start, &restorer_blob, sizeof(restorer_blob));
/*
* Adjust stack.
@@ -1599,8 +1595,7 @@ static void sigreturn_restore(pid_t pstree_pid, pid_t pid)
asm volatile(
"movq %0, %%rbx \n"
"movq %1, %%rax \n"
"movq %2, %%rsi \n"
"movl $"__stringify(RESTORE_CMD__RESTORE_CORE)", %%edi \n"
"movq %2, %%rdi \n"
"movq %%rbx, %%rsp \n"
"callq *%%rax \n"
:
......
@@ -18,11 +18,11 @@
struct task_restore_core_args;
struct thread_restore_args;
extern long restore_task(long cmd, struct task_restore_core_args *args);
extern long restore_thread(long cmd, struct thread_restore_args *args);
extern long restore_task(struct task_restore_core_args *args);
extern long restore_thread(struct thread_restore_args *args);
typedef long (*task_restore_fcall_t) (long cmd, struct task_restore_core_args *args);
typedef long (*thread_restore_fcall_t) (long cmd, struct thread_restore_args *args);
typedef long (*task_restore_fcall_t) (struct task_restore_core_args *args);
typedef long (*thread_restore_fcall_t) (struct thread_restore_args *args);
#define RESTORE_CMD__NONE 0
#define RESTORE_CMD__GET_SELF_LEN 1
......
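
Dropping the cmd argument works because the blob now exposes a separate
offset per entry point, so no run-time command multiplexing is needed; the
single argument structure travels in %rdi per the x86-64 SysV ABI, which is
what the shortened inline asm in cr-restore.c above sets up. Schematically
(a sketch, not the project's code; the real call is made from hand-written
asm after switching to the restorer's stack):

/* Old: one entry, dispatching on a command. */
restore_task(RESTORE_CMD__RESTORE_CORE, task_args);

/* New: one entry point per function, args only (first argument -> %rdi). */
task_restore_fcall_t fn = (task_restore_fcall_t)restore_task_exec_start;
fn(task_args);
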
@@ -23,13 +23,9 @@
* Threads restoration via sigreturn. Note it's locked
* routine and calls for unlock at the end.
*/
long restore_thread(long cmd, struct thread_restore_args *args)
long restore_thread(struct thread_restore_args *args)
{
long ret = -1;
switch (cmd) {
case RESTORE_CMD__RESTORE_THREAD:
{
struct core_entry *core_entry;
struct rt_sigframe *rt_sigframe;
unsigned long new_sp, fsgs_base;
@@ -114,58 +110,19 @@ core_restore_end:
for (;;)
local_sleep(5);
sys_exit(0);
}
break;
case RESTORE_CMD__GET_SELF_LEN:
goto self_len_start;
self_len_end:
break;
default:
goto core_restore_end;
break;
}
return ret;
self_len_start:
asm volatile(
".align 64 \n"
"self_thread: \n"
"leaq self_thread(%%rip), %%rax \n"
"addq $64, %%rax \n"
"andq $~63, %%rax \n"
"movq %%rax, %0 \n"
: "=r"(ret)
:
: "memory");
goto self_len_end;
}
/*
* The main routine to restore task via sigreturn.
*/
long restore_task(long cmd, struct task_restore_core_args *args)
{
long ret = -1;
struct task_entry *task_entry;
switch (cmd) {
case RESTORE_CMD__GET_SELF_LEN:
goto self_len_start;
self_len_end:
break;
/*
* This one is very special, we never return there
* but use sigreturn facility to restore core registers
* and jump execution to some predefined ip read from
* core file.
*/
case RESTORE_CMD__RESTORE_CORE:
{
long restore_task(struct task_restore_core_args *args)
{
long ret = -1;
struct task_entry *task_entry;
struct core_entry *core_entry;
struct vma_entry *vma_entry;
u64 va;
@@ -500,9 +457,7 @@ self_len_end:
asm volatile(
"clone_emul: \n"
"movq %2, %%rsi \n"
"subq $24, %%rsi \n"
"movq %7, %%rdi \n"
"movq %%rdi,16(%%rsi) \n"
"subq $16, %%rsi \n"
"movq %6, %%rdi \n"
"movq %%rdi, 8(%%rsi) \n"
"movq %5, %%rdi \n"
@@ -523,7 +478,6 @@ self_len_end:
"xorq %%rbp, %%rbp \n" /* clear ABI frame pointer */
"popq %%rax \n" /* clone_restore_fn -- restore_thread */
"popq %%rdi \n" /* arguments */
"popq %%rsi \n"
"callq *%%rax \n"
"clone_end: \n"
@@ -533,7 +487,6 @@ self_len_end:
"g"(&parent_tid),
"g"(&thread_args[i].pid),
"g"(args->clone_restore_fn),
"g"(RESTORE_CMD__RESTORE_THREAD),
"g"(&thread_args[i])
: "rax", "rdi", "rsi", "rdx", "r10", "memory");
}
@@ -586,26 +539,4 @@ core_restore_end:
for (;;)
local_sleep(5);
sys_exit(0);
}
break;
default:
goto core_restore_end;
break;
}
return ret;
self_len_start:
asm volatile(
".align 64 \n"
"self: \n"
"leaq self(%%rip), %%rax \n"
"addq $64, %%rax \n"
"andq $~63, %%rax \n"
"movq %%rax, %0 \n"
: "=r"(ret)
:
: "memory");
goto self_len_end;
}
OUTPUT_FORMAT("binary")
OUTPUT_ARCH(i386:x86-64)
SECTIONS
{
. = 0;
.text : {
*(.restorer.head.text)
*(.text)
. = ALIGN(8);
}
.data : {
*(.data)
*(.rodata)
*(.bss)
*(.restorer.stack)
. = ALIGN(8);
}
}
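
OUTPUT_FORMAT("binary") makes ld emit a raw memory image with no ELF
headers, so offset 0 of restorer.bin is the first byte of
.restorer.head.text and the symbol offsets taken from restorer.o line up
with offsets into the copied blob. How the .restorer.head.text input
section gets populated is not visible in this diff; a generic way to do it
(the project's actual attribute or macro, if any, may differ) would be:

/* Generic illustration only: code marked this way lands at the start of the
 * raw binary, giving it a fixed, predictable offset in restorer_blob[]. */
#define __restorer_head \
	__attribute__((__used__, __section__(".restorer.head.text")))

static void __restorer_head restorer_head_stub(void)
{
	/* hypothetical placeholder; the real restorer entry code would go here */
}
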