Commit 78ac904c authored by Laurent Dufour, committed by Pavel Emelyanov

ppc64: Extract copy to protobuf functions

Since the transactional memory state will contain VSX, VMX and FP
registers, extract the common code that copies this data to protobuf
buffers into separate functions.
Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Reviewed-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Signed-off-by: Pavel Emelyanov <xemul@virtuozzo.com>
parent 7dce5025
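The diff below extracts the protobuf-copy logic for the FP, Altivec and VSX register sets into standalone copy_fp_regs(), copy_altivec_regs() and copy_vsx_regs() helpers, so that a later transactional-memory patch can feed the TM-checkpointed register buffers through the same code. As a rough sketch of that intended reuse (the UserPpc64TmRegsEntry message and the tm_* buffers are hypothetical placeholders, not part of this commit):

/*
 * Illustrative sketch only: how a transactional-memory dump path
 * could reuse the helpers introduced here. UserPpc64TmRegsEntry and
 * the tm_* buffers are hypothetical placeholders; error unwinding
 * (freeing partially built entries) is omitted for brevity.
 */
static int copy_tm_state(UserPpc64TmRegsEntry *tme, uint64_t *tm_fpregs,
			 unsigned char *tm_vrregs, uint64_t *tm_vsregs)
{
	tme->fpstate = copy_fp_regs(tm_fpregs);		/* same path as live FPRs */
	if (!tme->fpstate)
		return -1;

	tme->vrstate = copy_altivec_regs(tm_vrregs);	/* 32 VRs + VSCR + VRSAVE */
	if (!tme->vrstate)
		return -1;

	tme->vsxstate = copy_vsx_regs(tm_vsregs);	/* doubleword 1 of VSR0-31 */
	if (!tme->vsxstate)
		return -1;

	return 0;
}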
@@ -84,6 +84,30 @@ int syscall_seized(struct parasite_ctl *ctl, int nr, unsigned long *ret,
 	return err;
 }
 
+static UserPpc64FpstateEntry *copy_fp_regs(uint64_t *fpregs)
+{
+	UserPpc64FpstateEntry *fpe;
+	int i;
+
+	fpe = xmalloc(sizeof(UserPpc64FpstateEntry));
+	if (!fpe)
+		return NULL;
+	user_ppc64_fpstate_entry__init(fpe);
+
+	fpe->n_fpregs = NFPREG;
+	fpe->fpregs = xmalloc(fpe->n_fpregs * sizeof(fpe->fpregs[0]));
+	if (!fpe->fpregs) {
+		xfree(fpe);
+		return NULL;
+	}
+
+	/* FPSCR is the last (33rd) register in the set */
+	for (i = 0; i < NFPREG; i++)
+		fpe->fpregs[i] = fpregs[i];
+
+	return fpe;
+}
+
 /* This is the layout of the POWER7 VSX registers and the way they
  * overlap with the existing FPR and VMX registers.
  *
@@ -123,35 +147,16 @@ int syscall_seized(struct parasite_ctl *ctl, int nr, unsigned long *ret,
  */
 #define NVSXREG	32
 
-static int get_fpu_regs(pid_t pid, CoreEntry *core)
+static UserPpc64FpstateEntry *get_fpu_regs(pid_t pid)
 {
 	uint64_t fpregs[NFPREG];
-	UserPpc64FpstateEntry *fpe;
-	int i;
 
 	if (ptrace(PTRACE_GETFPREGS, pid, 0, (void *)&fpregs) < 0) {
 		pr_perror("Couldn't get floating-point registers");
-		return -1;
+		return NULL;
 	}
 
-	fpe = xmalloc(sizeof(UserPpc64FpstateEntry));
-	if (!fpe)
-		return -1;
-	user_ppc64_fpstate_entry__init(fpe);
-
-	fpe->n_fpregs = NFPREG;
-	fpe->fpregs = xmalloc(fpe->n_fpregs * sizeof(fpe->fpregs[0]));
-	if (!fpe->fpregs) {
-		xfree(fpe);
-		return -1;
-	}
-
-	/* FPSRC is the last (33th) register in the set */
-	for (i = 0; i < NFPREG; i++)
-		fpe->fpregs[i] = fpregs[i];
-
-	core->ti_ppc64->fpstate = fpe;
-	return 0;
+	return copy_fp_regs(fpregs);
 }
 
 static void put_fpu_regs(mcontext_t *mc, UserPpc64FpstateEntry *fpe)
@@ -163,39 +168,16 @@ static void put_fpu_regs(mcontext_t *mc, UserPpc64FpstateEntry *fpe)
 		mcfp[i] = fpe->fpregs[i];
 }
 
-static int get_altivec_regs(pid_t pid, CoreEntry *core)
+static UserPpc64VrstateEntry *copy_altivec_regs(unsigned char *vrregs)
 {
-	/* The kernel returns :
-	 *   32 Vector registers (128bit)
-	 *   VSCR (32bit) stored in a 128bit entry (odd)
-	 *   VRSAVE (32bit) store at the end.
-	 *
-	 * Kernel setup_sigcontext's comment mentions:
-	 * "Userland shall check AT_HWCAP to know whether it can rely on the
-	 * v_regs pointer or not"
-	 */
-	unsigned char vrregs[(NVRREG-1) * 16 + 4];
 	UserPpc64VrstateEntry *vse;
 	uint64_t *p64;
 	uint32_t *p32;
 	int i;
 
-	if (ptrace(PTRACE_GETVRREGS, pid, 0, (void*)&vrregs) < 0) {
-		/* PTRACE_GETVRREGS returns EIO if Altivec is not supported.
-		 * This should not happen if msr_vec is set. */
-		if (errno != EIO) {
-			pr_perror("Couldn't get Altivec registers");
-			return -1;
-		}
-		pr_debug("Altivec not supported\n");
-		return 0;
-	}
-
-	pr_debug("Dumping Altivec registers\n");
-
 	vse = xmalloc(sizeof(*vse));
 	if (!vse)
-		return -1;
+		return NULL;
 	user_ppc64_vrstate_entry__init(vse);
 
 	/* protocol buffer store only 64bit entries and we need 128bit */
@@ -203,7 +185,7 @@ static int get_altivec_regs(pid_t pid, CoreEntry *core)
 	vse->vrregs = xmalloc(vse->n_vrregs * sizeof(vse->vrregs[0]));
 	if (!vse->vrregs) {
 		xfree(vse);
-		return -1;
+		return NULL;
 	}
 
 	/* Vectors are 2*64bits entries */
@@ -216,15 +198,36 @@ static int get_altivec_regs(pid_t pid, CoreEntry *core)
 	p32 = (uint32_t*) &vrregs[(NVRREG-1) * 2 * sizeof(uint64_t)];
 	vse->vrsave = *p32;
 
-	core->ti_ppc64->vrstate = vse;
+	return vse;
+}
 
-	/*
-	 * Force the MSR_VEC bit of the restored MSR otherwise the kernel
-	 * will not restore them from the signal frame.
+static UserPpc64VrstateEntry *get_altivec_regs(pid_t pid)
+{
+	/* The kernel returns :
+	 *   32 Vector registers (128bit)
+	 *   VSCR (32bit) stored in a 128bit entry (odd)
+	 *   VRSAVE (32bit) store at the end.
+	 *
+	 * Kernel setup_sigcontext's comment mentions:
+	 * "Userland shall check AT_HWCAP to know whether it can rely on the
+	 * v_regs pointer or not"
 	 */
-	core->ti_ppc64->gpregs->msr |= MSR_VEC;
-	return 0;
+	unsigned char vrregs[(NVRREG-1) * 16 + 4];
+
+	if (ptrace(PTRACE_GETVRREGS, pid, 0, (void*)&vrregs) < 0) {
+		/* PTRACE_GETVRREGS returns EIO if Altivec is not supported.
+		 * This should not happen if msr_vec is set. */
+		if (errno != EIO) {
+			pr_perror("Couldn't get Altivec registers");
+			return (UserPpc64VrstateEntry*)-1L;
+		}
+		pr_debug("Altivec not supported\n");
+		return NULL;
+	}
+
+	pr_debug("Dumping Altivec registers\n");
+
+	return copy_altivec_regs(vrregs);
 }
 
 static int put_altivec_regs(mcontext_t *mc, UserPpc64VrstateEntry *vse)
@@ -254,6 +257,30 @@ static int put_altivec_regs(mcontext_t *mc, UserPpc64VrstateEntry *vse)
 	return 0;
 }
 
+static UserPpc64VsxstateEntry* copy_vsx_regs(uint64_t *vsregs)
+{
+	UserPpc64VsxstateEntry *vse;
+	int i;
+
+	vse = xmalloc(sizeof(*vse));
+	if (!vse)
+		return NULL;
+	user_ppc64_vsxstate_entry__init(vse);
+
+	vse->n_vsxregs = NVSXREG;
+	vse->vsxregs = xmalloc(vse->n_vsxregs*sizeof(vse->vsxregs[0]));
+	if (!vse->vsxregs) {
+		xfree(vse);
+		return NULL;
+	}
+
+	for (i = 0; i < vse->n_vsxregs; i++)
+		vse->vsxregs[i] = vsregs[i];
+
+	return vse;
+}
+
 /*
  * Since the FPR[0-31] is stored in the first double word of VSR[0-31] and
  * FPR are saved through the FP state, there is no need to save the upper part
@@ -263,11 +290,9 @@ static int put_altivec_regs(mcontext_t *mc, UserPpc64VrstateEntry *vse)
  * As a consequence, only the doubleword 1 of the 32 first VSX registers have
  * to be saved (the ones are returned by PTRACE_GETVSRREGS).
  */
-static int get_vsx_regs(pid_t pid, CoreEntry *core)
+static UserPpc64VsxstateEntry *get_vsx_regs(pid_t pid)
 {
-	UserPpc64VsxstateEntry *vse;
 	uint64_t vsregs[NVSXREG];
-	int i;
 
 	if (ptrace(PTRACE_GETVSRREGS, pid, 0, (void*)&vsregs) < 0) {
 		/*
@@ -279,34 +304,11 @@ static int get_vsx_regs(pid_t pid, CoreEntry *core)
 			return 0;
 		}
 		pr_perror("Couldn't get VSX registers");
-		return -1;
+		return (UserPpc64VsxstateEntry *)-1L;
 	}
 
 	pr_debug("Dumping VSX registers\n");
-
-	vse = xmalloc(sizeof(*vse));
-	if (!vse)
-		return -1;
-	user_ppc64_vsxstate_entry__init(vse);
-
-	vse->n_vsxregs = NVSXREG;
-	vse->vsxregs = xmalloc(vse->n_vsxregs * sizeof(vse->vsxregs[0]));
-	if (!vse->vsxregs) {
-		xfree(vse);
-		return -1;
-	}
-
-	for (i = 0; i < vse->n_vsxregs; i++)
-		vse->vsxregs[i] = vsregs[i];
-
-	core->ti_ppc64->vsxstate = vse;
-
-	/*
-	 * Force the MSR_VSX bit of the restored MSR otherwise the kernel
-	 * will not restore them from the signal frame.
-	 */
-	core->ti_ppc64->gpregs->msr |= MSR_VSX;
-	return 0;
+	return copy_vsx_regs(vsregs);
 }
 
 static int put_vsx_regs(mcontext_t *mc, UserPpc64VsxstateEntry *vse)
@@ -331,10 +333,67 @@ static int put_vsx_regs(mcontext_t *mc, UserPpc64VsxstateEntry *vse)
 	return 0;
 }
 
-int get_task_regs(pid_t pid, user_regs_struct_t regs, CoreEntry *core)
+static void copy_gp_regs(UserPpc64RegsEntry *dst, user_regs_struct_t *src)
 {
 	int i;
 
+#define assign_reg(e) do {			\
+	dst->e = (__typeof__(dst->e))src->e;	\
+} while (0)
+
+	for (i=0; i<32; i++)
+		assign_reg(gpr[i]);
+
+	assign_reg(nip);
+	assign_reg(msr);
+	assign_reg(orig_gpr3);
+	assign_reg(ctr);
+	assign_reg(link);
+	assign_reg(xer);
+	assign_reg(ccr);
+	assign_reg(trap);
+#undef assign_reg
+}
+
+static void restore_gp_regs(mcontext_t *dst, UserPpc64RegsEntry *src)
+{
+	int i;
+
+	/* r0 to r31 */
+	for (i=0; i<32; i++)
+		dst->gp_regs[i] = src->gpr[i];
+
+	dst->gp_regs[PT_NIP] = src->nip;
+	dst->gp_regs[PT_MSR] = src->msr;
+	dst->gp_regs[PT_ORIG_R3] = src->orig_gpr3;
+	dst->gp_regs[PT_CTR] = src->ctr;
+	dst->gp_regs[PT_LNK] = src->link;
+	dst->gp_regs[PT_XER] = src->xer;
+	dst->gp_regs[PT_CCR] = src->ccr;
+	dst->gp_regs[PT_TRAP] = src->trap;
+}
+
+static UserPpc64RegsEntry *allocate_gp_regs(void)
+{
+	UserPpc64RegsEntry *gpregs;
+
+	gpregs = xmalloc(sizeof(*gpregs));
+	if (!gpregs)
+		return NULL;
+	user_ppc64_regs_entry__init(gpregs);
+
+	gpregs->n_gpr = 32;
+	gpregs->gpr = xmalloc(32 * sizeof(uint64_t));
+	if (!gpregs->gpr) {
+		xfree(gpregs);
+		return NULL;
+	}
+
+	return gpregs;
+}
+
+int get_task_regs(pid_t pid, user_regs_struct_t regs, CoreEntry *core)
+{
 	pr_info("Dumping GP/FPU registers for %d\n", pid);
 
 	/*
@@ -364,35 +423,37 @@ int get_task_regs(pid_t pid, user_regs_struct_t regs, CoreEntry *core)
 	/* Resetting trap since we are now coming from user space. */
 	regs.trap = 0;
 
-#define assign_reg(dst, src, e) do {			\
-	dst->e = (__typeof__(dst->e))src.e;		\
-} while (0)
-
-	for (i=0; i<32; i++)
-		assign_reg(core->ti_ppc64->gpregs, regs, gpr[i]);
-
-	assign_reg(core->ti_ppc64->gpregs, regs, nip);
-	assign_reg(core->ti_ppc64->gpregs, regs, msr);
-	assign_reg(core->ti_ppc64->gpregs, regs, orig_gpr3);
-	assign_reg(core->ti_ppc64->gpregs, regs, ctr);
-	assign_reg(core->ti_ppc64->gpregs, regs, link);
-	assign_reg(core->ti_ppc64->gpregs, regs, xer);
-	assign_reg(core->ti_ppc64->gpregs, regs, ccr);
-	assign_reg(core->ti_ppc64->gpregs, regs, trap);
-#undef assign_reg
+	copy_gp_regs(core->ti_ppc64->gpregs, &regs);
 
-	if (get_fpu_regs(pid, core))
+	core->ti_ppc64->fpstate = get_fpu_regs(pid);
+	if (!core->ti_ppc64->fpstate)
 		return -1;
 
-	if (get_altivec_regs(pid, core))
-		return -1;
+	core->ti_ppc64->vrstate = get_altivec_regs(pid);
+	if (core->ti_ppc64->vrstate) {
+		if (core->ti_ppc64->vrstate == (UserPpc64VrstateEntry*)-1L)
+			return -1;
+
+		/*
+		 * Force the MSR_VEC bit of the restored MSR otherwise the
+		 * kernel will not restore them from the signal frame.
+		 */
+		core->ti_ppc64->gpregs->msr |= MSR_VEC;
 
-	/*
-	 * Don't save the VSX registers if Altivec registers are not
-	 * supported
-	 */
-	if (CORE_THREAD_ARCH_INFO(core)->vrstate && get_vsx_regs(pid, core))
-		return -1;
+		/*
+		 * Save the VSX registers if Altivec registers are supported
+		 */
+		core->ti_ppc64->vsxstate = get_vsx_regs(pid);
+		if (core->ti_ppc64->vsxstate) {
+			if (core->ti_ppc64->vsxstate == (UserPpc64VsxstateEntry *)-1L)
+				return -1;
+
+			/*
+			 * Force the MSR_VSX bit of the restored MSR otherwise
+			 * the kernel will not restore them from the signal
+			 * frame.
+			 */
+			core->ti_ppc64->gpregs->msr |= MSR_VSX;
+		}
+	}
 
 	return 0;
 }
@@ -400,30 +461,21 @@ int get_task_regs(pid_t pid, user_regs_struct_t regs, CoreEntry *core)
 int arch_alloc_thread_info(CoreEntry *core)
 {
 	ThreadInfoPpc64 *ti_ppc64;
-	UserPpc64RegsEntry *regs;
 
 	ti_ppc64 = xmalloc(sizeof(*ti_ppc64));
 	if(!ti_ppc64)
-		goto err;
-	thread_info_ppc64__init(ti_ppc64);
-	CORE_THREAD_ARCH_INFO(core) = ti_ppc64;
+		return -1;
 
-	/* user_ppc64_regs_entry */
-	regs = xmalloc(sizeof(*regs));
-	if (!regs)
-		goto err;
-	user_ppc64_regs_entry__init(regs);
+	thread_info_ppc64__init(ti_ppc64);
 
-	regs->gpr = xmalloc(32*sizeof(uint64_t));
-	if (!regs->gpr)
-		goto err;
-	regs->n_gpr = 32;
+	ti_ppc64->gpregs = allocate_gp_regs();
+	if (!ti_ppc64->gpregs) {
+		xfree(ti_ppc64);
+		return -1;
+	}
 
-	ti_ppc64->gpregs = regs;
+	CORE_THREAD_ARCH_INFO(core) = ti_ppc64;
 	return 0;
-
-err:
-	return -1;
 }
 
 void arch_free_thread_info(CoreEntry *core)
@@ -498,20 +550,7 @@ int sigreturn_prep_fpu_frame(struct rt_sigframe *frame,
 int restore_gpregs(struct rt_sigframe *f, UserPpc64RegsEntry *r)
 {
-	int i;
-
-	/* r0 to r31 */
-	for (i=0; i<32; i++)
-		f->uc.uc_mcontext.gp_regs[i] = r->gpr[i];
-
-	f->uc.uc_mcontext.gp_regs[PT_NIP] = r->nip;
-	f->uc.uc_mcontext.gp_regs[PT_MSR] = r->msr;
-	f->uc.uc_mcontext.gp_regs[PT_ORIG_R3] = r->orig_gpr3;
-	f->uc.uc_mcontext.gp_regs[PT_CTR] = r->ctr;
-	f->uc.uc_mcontext.gp_regs[PT_LNK] = r->link;
-	f->uc.uc_mcontext.gp_regs[PT_XER] = r->xer;
-	f->uc.uc_mcontext.gp_regs[PT_CCR] = r->ccr;
-	f->uc.uc_mcontext.gp_regs[PT_TRAP] = r->trap;
+	restore_gp_regs(&f->uc.uc_mcontext, r);
 
 	return 0;
 }
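Note the return convention the reworked getters adopt: a valid pointer means the registers were dumped, NULL means the facility is absent (for get_fpu_regs(), which always has FP state to dump, NULL instead means failure), and a (type *)-1L sentinel flags a hard ptrace error. A minimal caller-side sketch of this tri-state check, assuming the helper macro below (REGS_ERR() is illustrative, not CRIU API):

/*
 * Caller-side sketch of the tri-state convention (not CRIU code):
 *   valid pointer -> state fetched, attach it to the core entry
 *   NULL          -> facility not present; skip it silently
 *   (type *)-1L   -> ptrace failed; abort the dump
 */
#define REGS_ERR(p)	((void *)(p) == (void *)-1L)

static int dump_vector_state(pid_t pid, CoreEntry *core)
{
	core->ti_ppc64->vrstate = get_altivec_regs(pid);
	if (REGS_ERR(core->ti_ppc64->vrstate))
		return -1;		/* hard ptrace failure */
	if (!core->ti_ppc64->vrstate)
		return 0;		/* no Altivec: nothing to dump */

	/* Altivec present: make the kernel restore it from the frame. */
	core->ti_ppc64->gpregs->msr |= MSR_VEC;
	return 0;
}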