Commit 74e23867 authored by Andrey Vagin

restorer: correct indentation

The previous patch doesn't change it, to simplify review.
Signed-off-by: Andrey Vagin <avagin@openvz.org>
Acked-by: Pavel Emelyanov <xemul@parallels.com>
parent 01484182
@@ -52,8 +52,8 @@ long restore_thread(struct thread_restore_args *args)

	rt_sigframe = (void *)args->mem_zone.rt_sigframe + 8;

#define CPREGT1(d)	rt_sigframe->uc.uc_mcontext.d = core_entry->u.arch.gpregs.d
#define CPREGT2(d,s)	rt_sigframe->uc.uc_mcontext.d = core_entry->u.arch.gpregs.s

	CPREGT1(r8);
	CPREGT1(r9);
@@ -104,7 +104,7 @@ long restore_thread(struct thread_restore_args *args)
		:
		: "r"(new_sp)
		: "rax","rsp","memory");

core_restore_end:
	write_num_n(__LINE__);
	write_num_n(sys_getpid());
	for (;;)
@@ -123,196 +123,196 @@ long restore_task(struct task_restore_core_args *args)
{
	long ret = -1;
	struct task_entry *task_entry;
	struct core_entry *core_entry;
	struct vma_entry *vma_entry;
	u64 va;

	struct rt_sigframe *rt_sigframe;
	unsigned long new_sp, fsgs_base;
	pid_t my_pid = sys_getpid();

	core_entry = first_on_heap(core_entry, args->mem_zone.heap);
	vma_entry = next_on_heap(vma_entry, core_entry);
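
	/*
	 * first_on_heap()/next_on_heap() presumably just carve these
	 * entries sequentially off the private heap in args->mem_zone;
	 * the restorer cannot rely on a real allocator at this point.
	 */
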
#if 0
	write_hex_n((long)args);
	write_hex_n((long)args->mem_zone.heap);
	write_hex_n((long)core_entry);
	write_hex_n((long)vma_entry);
#endif

	sys_lseek(args->fd_core, MAGIC_OFFSET, SEEK_SET);
	ret = sys_read(args->fd_core, core_entry, sizeof(*core_entry));
	if (ret != sizeof(*core_entry)) {
		write_num_n(__LINE__);
		goto core_restore_end;
	}

	/* Note no magic constant on fd_self_vmas */
	ret = sys_lseek(args->fd_self_vmas, 0, SEEK_SET);
	while (1) {
		ret = sys_read(args->fd_self_vmas, vma_entry, sizeof(*vma_entry));
		if (!ret)
			break;
		if (ret != sizeof(*vma_entry)) {
			write_num_n(__LINE__);
			write_num_n(ret);
			goto core_restore_end;
		}

		if (!vma_entry_is(vma_entry, VMA_AREA_REGULAR))
			continue;

		if (sys_munmap((void *)vma_entry->start, vma_entry_len(vma_entry))) {
			write_num_n(__LINE__);
			goto core_restore_end;
		}
	}

	sys_close(args->fd_self_vmas);
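
	/*
	 * Note that only VMA_AREA_REGULAR areas are dropped above: the
	 * restorer's own code, stack and heap are (presumably) tagged
	 * with other VMA_* kinds, so they survive while the rest of the
	 * address space is cleared for the mappings recreated below.
	 */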

	/*
	 * OK, lets try to map new one.
	 */
	sys_lseek(args->fd_core, GET_FILE_OFF_AFTER(struct core_entry), SEEK_SET);
	while (1) {
		int prot;

		ret = sys_read(args->fd_core, vma_entry, sizeof(*vma_entry));
		if (!ret)
			break;
		if (ret != sizeof(*vma_entry)) {
			write_num_n(__LINE__);
			write_num_n(ret);
			goto core_restore_end;
		}

		if (final_vma_entry(vma_entry))
			break;

		if (!vma_entry_is(vma_entry, VMA_AREA_REGULAR))
			continue;

		/*
		 * Restore or shared mappings are tricky, since
		 * we open anonymous mapping via map_files/
		 * MAP_ANONYMOUS should be eliminated so fd would
		 * be taken into account by a kernel.
		 */
		if (vma_entry_is(vma_entry, VMA_ANON_SHARED)) {
			if (vma_entry->fd != -1UL)
				vma_entry->flags &= ~MAP_ANONYMOUS;
		}

		prot = vma_entry->prot;

		/* A mapping of file with MAP_SHARED is up to date */
		if (vma_entry->fd == -1 || !(vma_entry->flags & MAP_SHARED))
			prot |= PROT_WRITE;

		/*
		 * Should map memory here. Note we map them as
		 * writable since we're going to restore page
		 * contents.
		 */
		va = sys_mmap((void *)vma_entry->start,
			      vma_entry_len(vma_entry),
			      prot,
			      vma_entry->flags | MAP_FIXED,
			      vma_entry->fd,
			      vma_entry->pgoff);

		if (va != vma_entry->start) {
			write_num_n(__LINE__);
			write_hex_n(vma_entry->start);
			write_hex_n(vma_entry->end);
			write_hex_n(vma_entry->prot);
			write_hex_n(vma_entry->flags);
			write_hex_n(vma_entry->fd);
			write_hex_n(vma_entry->pgoff);
			write_hex_n(va);
			goto core_restore_end;
		}

		if (vma_entry->fd != -1UL)
			sys_close(vma_entry->fd);
	}

	/*
	 * Read page contents.
	 */
	while (1) {
		ret = sys_read(args->fd_core, &va, sizeof(va));
		if (!ret)
			break;
		if (ret != sizeof(va)) {
			write_num_n(__LINE__);
			write_num_n(ret);
			goto core_restore_end;
		}

		if (final_page_va(va))
			break;

		ret = sys_read(args->fd_core, (void *)va, PAGE_SIZE);
		if (ret != PAGE_SIZE) {
			write_num_n(__LINE__);
			write_num_n(ret);
			goto core_restore_end;
		}
	}

	/*
	 * Walk though all VMAs again to drop PROT_WRITE
	 * if it was not there.
	 */
	sys_lseek(args->fd_core, GET_FILE_OFF_AFTER(struct core_entry), SEEK_SET);
	while (1) {
		ret = sys_read(args->fd_core, vma_entry, sizeof(*vma_entry));
		if (!ret)
			break;
		if (ret != sizeof(*vma_entry)) {
			write_num_n(__LINE__);
			write_num_n(ret);
			goto core_restore_end;
		}

		if (final_vma_entry(vma_entry))
			break;

		if (!(vma_entry_is(vma_entry, VMA_AREA_REGULAR)))
			continue;

		if (vma_entry_is(vma_entry, VMA_ANON_SHARED)) {
			struct shmem_info *entry;

			entry = find_shmem_by_pid(args->shmems,
						  vma_entry->start,
						  my_pid);
			if (entry)
				cr_wait_set(&entry->lock, 1);
		}

		if (vma_entry->prot & PROT_WRITE)
			continue;

		sys_mprotect(vma_entry->start,
			     vma_entry_len(vma_entry),
			     vma_entry->prot);
	}

	sys_close(args->fd_core);

	ret = sys_munmap(args->shmems, SHMEMS_SIZE);
	if (ret < 0) {
		write_num_n(__LINE__);
		write_num_n(ret);
		goto core_restore_end;
	}

	/*
	 * Tune up the task fields.
	 */
#define sys_prctl_safe(opcode, val1, val2)				\
	do {								\
@@ -323,220 +323,220 @@ long restore_task(struct task_restore_core_args *args)
		}							\
	} while (0)

	sys_prctl_safe(PR_SET_NAME, (long)core_entry->task_comm, 0);
	sys_prctl_safe(PR_SET_MM, PR_SET_MM_START_CODE, (long)core_entry->mm_start_code);
	sys_prctl_safe(PR_SET_MM, PR_SET_MM_END_CODE, (long)core_entry->mm_end_code);
	sys_prctl_safe(PR_SET_MM, PR_SET_MM_START_DATA, (long)core_entry->mm_start_data);
	sys_prctl_safe(PR_SET_MM, PR_SET_MM_END_DATA, (long)core_entry->mm_end_data);
	sys_prctl_safe(PR_SET_MM, PR_SET_MM_START_STACK,(long)core_entry->mm_start_stack);
	sys_prctl_safe(PR_SET_MM, PR_SET_MM_START_BRK, (long)core_entry->mm_start_brk);
	sys_prctl_safe(PR_SET_MM, PR_SET_MM_BRK, (long)core_entry->mm_brk);

	/*
	 * We need to prepare a valid sigframe here, so
	 * after sigreturn the kernel will pick up the
	 * registers from the frame, set them up and
	 * finally pass execution to the new IP.
	 */
	rt_sigframe = (void *)args->mem_zone.rt_sigframe + 8;

#define CPREG1(d)	rt_sigframe->uc.uc_mcontext.d = core_entry->u.arch.gpregs.d
#define CPREG2(d,s)	rt_sigframe->uc.uc_mcontext.d = core_entry->u.arch.gpregs.s

	CPREG1(r8);
	CPREG1(r9);
	CPREG1(r10);
	CPREG1(r11);
	CPREG1(r12);
	CPREG1(r13);
	CPREG1(r14);
	CPREG1(r15);
	CPREG2(rdi, di);
	CPREG2(rsi, si);
	CPREG2(rbp, bp);
	CPREG2(rbx, bx);
	CPREG2(rdx, dx);
	CPREG2(rax, ax);
	CPREG2(rcx, cx);
	CPREG2(rsp, sp);
	CPREG2(rip, ip);
	CPREG2(eflags, flags);
	CPREG1(cs);
	CPREG1(gs);
	CPREG1(fs);

	fsgs_base = core_entry->u.arch.gpregs.fs_base;
	ret = sys_arch_prctl(ARCH_SET_FS, (void *)fsgs_base);
	if (ret) {
		write_num_n(__LINE__);
		write_num_n(ret);
		goto core_restore_end;
	}

	fsgs_base = core_entry->u.arch.gpregs.gs_base;
	ret = sys_arch_prctl(ARCH_SET_GS, (void *)fsgs_base);
	if (ret) {
		write_num_n(__LINE__);
		write_num_n(ret);
		goto core_restore_end;
	}
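
	/*
	 * The 64-bit fs/gs base addresses are not among the sigframe's
	 * general registers restored by sigreturn, which is presumably
	 * why they are set explicitly via arch_prctl() above.
	 */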

	/*
	 * Blocked signals.
	 */
	rt_sigframe->uc.uc_sigmask.sig[0] = core_entry->task_sigset;

	/*
	 * Threads restoration. This requires some more comments. This
	 * restorer routine and thread restorer routine has the following
	 * memory map, prepared by a caller code.
	 *
	 * | <-- low addresses                                          high addresses --> |
	 * +-------------------------------------------------------+-----------------------+
	 * | this proc body | own stack | heap | rt_sigframe space | thread restore zone   |
	 * +-------------------------------------------------------+-----------------------+
	 *
	 * where each thread restore zone is the following
	 *
	 * | <-- low addresses                                     high addresses --> |
	 * +--------------------------------------------------------------------------+
	 * | thread restore proc | thread1 stack | thread1 heap | thread1 rt_sigframe |
	 * +--------------------------------------------------------------------------+
	 */

	if (args->nr_threads) {
		struct thread_restore_args *thread_args = args->thread_args;
		long clone_flags = CLONE_VM | CLONE_FILES | CLONE_SIGHAND |
				   CLONE_THREAD | CLONE_SYSVSEM;
		long last_pid_len;
		long parent_tid;
		int i, fd;

		fd = sys_open(args->ns_last_pid_path, O_RDWR, LAST_PID_PERM);
		if (fd < 0) {
			write_num_n(__LINE__);
			write_num_n(fd);
			goto core_restore_end;
		}

		ret = sys_flock(fd, LOCK_EX);
		if (ret) {
			write_num_n(__LINE__);
			write_num_n(ret);
			goto core_restore_end;
		}

		for (i = 0; i < args->nr_threads; i++) {
			char last_pid_buf[16];

			/* skip self */
			if (thread_args[i].pid == args->pid)
				continue;

			cr_mutex_lock(&args->rst_lock);

			new_sp =
				RESTORE_ALIGN_STACK((long)thread_args[i].mem_zone.stack,
						    sizeof(thread_args[i].mem_zone.stack));

			last_pid_len = vprint_num(last_pid_buf, thread_args[i].pid - 1);
			ret = sys_write(fd, last_pid_buf, last_pid_len - 1);
			if (ret < 0) {
				write_num_n(__LINE__);
				write_num_n(ret);
				write_string_n(last_pid_buf);
				goto core_restore_end;
			}
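
			/*
			 * Writing pid - 1 into ns_last_pid should make
			 * the kernel hand out exactly thread_args[i].pid
			 * to the clone() below; the flock taken above
			 * presumably guards against concurrent pid
			 * allocation racing for that value.
			 */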

			/*
			 * To achieve functionality like libc's clone()
			 * we need a pure assembly here, because clone()'ed
			 * thread will run with own stack and we must not
			 * have any additional instructions... oh, dear...
			 */
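			/*
			 * The child's stack is seeded with two values:
			 * clone_restore_fn at the lower slot and
			 * &thread_args[i] right above it. After the
			 * syscall the new thread pops them (popq %rax,
			 * popq %rdi) and calls the thread restorer with
			 * its argument.
			 */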
			asm volatile(
				"clone_emul:				\n"
				"movq %2, %%rsi				\n"
				"subq $16, %%rsi			\n"
				"movq %6, %%rdi				\n"
				"movq %%rdi, 8(%%rsi)			\n"
				"movq %5, %%rdi				\n"
				"movq %%rdi, 0(%%rsi)			\n"
				"movq %1, %%rdi				\n"
				"movq %3, %%rdx				\n"
				"movq %4, %%r10				\n"
				"movl $"__stringify(__NR_clone)", %%eax	\n"
				"syscall				\n"
				"testq %%rax,%%rax			\n"
				"jz thread_run				\n"
				"movq %%rax, %0				\n"
				"jmp clone_end				\n"
				"thread_run:				\n"	/* new stack here */
				"xorq %%rbp, %%rbp			\n"	/* clear ABI frame pointer */
				"popq %%rax				\n"	/* clone_restore_fn -- restore_thread */
				"popq %%rdi				\n"	/* arguments */
				"callq *%%rax				\n"
				"clone_end:				\n"
				: "=r"(ret)
				: "g"(clone_flags),
				  "g"(new_sp),
				  "g"(&parent_tid),
				  "g"(&thread_args[i].pid),
				  "g"(args->clone_restore_fn),
				  "g"(&thread_args[i])
				: "rax", "rdi", "rsi", "rdx", "r10", "memory");
		}

		ret = sys_flock(fd, LOCK_UN);
		if (ret) {
			write_num_n(__LINE__);
			write_num_n(ret);
			goto core_restore_end;
		}

		sys_close(fd);
	}

	write_num_n(__LINE__);
	task_entry = task_get_entry(args->task_entries, my_pid);
	cr_wait_set(&task_entry->done, 1);
	cr_wait_while(&args->task_entries->start, 0);
	write_num_n(__LINE__);

	ret = sys_munmap(args->task_entries, TASK_ENTRIES_SIZE);
	if (ret < 0) {
		write_num_n(__LINE__);
		write_num_n(ret);
		goto core_restore_end;
	}

	/*
	 * Sigframe stack.
	 */
	new_sp = (long)rt_sigframe + 8;
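
	/*
	 * The extra 8 bytes presumably match how the kernel's
	 * rt_sigreturn locates the frame (at sp - sizeof(long) on
	 * x86-64), so the frame prepared above sits exactly where
	 * it will be looked up.
	 */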

	/*
	 * Prepare the stack and call for sigreturn,
	 * pure assembly since we don't need any additional
	 * code insns from gcc.
	 */
	asm volatile(
		"movq %0, %%rax			\n"
		"movq %%rax, %%rsp		\n"
		"movl $"__stringify(__NR_rt_sigreturn)", %%eax	\n"
		"syscall			\n"
		:
		: "r"(new_sp)
		: "rax","rsp","memory");

core_restore_end:
	write_num_n(__LINE__);
	write_num_n(sys_getpid());
	for (;;)
		local_sleep(5);
	sys_exit(0);
}