Commit 6a1c0a07 authored by Laurent Dufour, committed by Pavel Emelyanov

arch: Introduce user_fpregs_struct_t on ppc64

Signed-off-by: Pavel Emelyanov <xemul@virtuozzo.com>
[ldufour@linux.vnet.ibm.com: mostly rewrite to fix functional issue]
Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Signed-off-by: Pavel Emelyanov <xemul@virtuozzo.com>
parent 7925c1e1
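
The change reorganizes ppc64 register dumping into two phases: get_task_regs() first fetches everything through ptrace into a plain user_fpregs_struct_t, recording in a flags field which register sets (FP, Altivec, VSX, TM) were actually available, and only then copies the flagged sets into the protobuf CoreEntry. Below is a minimal, self-contained sketch of that flag-gated fetch-then-copy pattern; the toy_* names, array sizes, and flag values are illustrative stand-ins, not CRIU's real API.

/* Minimal sketch of the fetch-then-copy pattern introduced by this commit.
 * All types and helpers are illustrative stand-ins, not CRIU's real API. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FL_FP      0x1          /* floating-point registers were fetched */
#define FL_ALTIVEC 0x2          /* Altivec (VMX) registers were fetched  */

typedef struct {
	uint64_t fpregs[32];
	uint64_t vrregs[32];
	int flags;              /* which register sets above are valid */
} toy_fpregs_t;

/* Phase 1: fetch what the kernel exposes and flag what was obtained
 * (in CRIU this would be the ptrace(PTRACE_GET*REGS) calls). */
static int toy_fetch(toy_fpregs_t *fp, int altivec_supported)
{
	fp->flags = 0;
	memset(fp->fpregs, 0x11, sizeof(fp->fpregs));
	fp->flags |= FL_FP;
	if (altivec_supported) {   /* e.g. the Altivec fetch did not fail with EIO */
		memset(fp->vrregs, 0x22, sizeof(fp->vrregs));
		fp->flags |= FL_ALTIVEC;
	}
	return 0;
}

/* Phase 2: copy only the register sets that were flagged as valid. */
static void toy_copy(const toy_fpregs_t *fp)
{
	if (fp->flags & FL_FP)
		printf("copy %zu FP regs into the image\n",
		       sizeof(fp->fpregs) / sizeof(fp->fpregs[0]));
	if (fp->flags & FL_ALTIVEC)
		printf("copy %zu Altivec regs into the image\n",
		       sizeof(fp->vrregs) / sizeof(fp->vrregs[0]));
	else
		printf("Altivec not supported, nothing to copy\n");
}

int main(void)
{
	toy_fpregs_t fp;

	if (toy_fetch(&fp, 1) == 0)
		toy_copy(&fp);
	return 0;
}
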
......@@ -160,18 +160,16 @@ static UserPpc64FpstateEntry *copy_fp_regs(uint64_t *fpregs)
* word registers are saved through FPR[0..32] and the remaining registers
* are saved when saving the Altivec registers VR[0..32].
*/
#define NVSXREG 32
static UserPpc64FpstateEntry *get_fpu_regs(pid_t pid)
static int get_fpu_regs(pid_t pid, user_fpregs_struct_t *fp)
{
uint64_t fpregs[NFPREG];
if (ptrace(PTRACE_GETFPREGS, pid, 0, (void *)&fpregs) < 0) {
if (ptrace(PTRACE_GETFPREGS, pid, 0, (void *)&fp->fpregs) < 0) {
pr_perror("Couldn't get floating-point registers");
return NULL;;
return -1;
}
fp->flags |= USER_FPREGS_FL_FP;
return copy_fp_regs(fpregs);
return 0;
}
static void put_fpu_regs(mcontext_t *mc, UserPpc64FpstateEntry *fpe)
......@@ -216,33 +214,22 @@ static UserPpc64VrstateEntry *copy_altivec_regs(__vector128 *vrregs)
return vse;
}
static UserPpc64VrstateEntry *get_altivec_regs(pid_t pid)
static int get_altivec_regs(pid_t pid, user_fpregs_struct_t *fp)
{
/* The kernel returns :
* 32 Vector registers (128bit)
* VSCR (32bit) stored in a 128bit entry (odd)
* VRSAVE (32bit) store at the end.
*
* Kernel setup_sigcontext's comment mentions:
* "Userland shall check AT_HWCAP to know whether it can rely on the
* v_regs pointer or not"
*/
__vector128 vrregs[NVRREG];
if (ptrace(PTRACE_GETVRREGS, pid, 0, (void*)&vrregs) < 0) {
if (ptrace(PTRACE_GETVRREGS, pid, 0, (void*)&fp->vrregs) < 0) {
/* PTRACE_GETVRREGS returns EIO if Altivec is not supported.
* This should not happen if msr_vec is set. */
if (errno != EIO) {
pr_perror("Couldn't get Altivec registers");
return (UserPpc64VrstateEntry*)-1L;
return -1;
}
pr_debug("Altivec not supported\n");
return NULL;
}
pr_debug("Dumping Altivec registers\n");
return copy_altivec_regs(vrregs);
else {
pr_debug("Dumping Altivec registers\n");
fp->flags |= USER_FPREGS_FL_ALTIVEC;
}
return 0;
}
static int put_altivec_regs(mcontext_t *mc, UserPpc64VrstateEntry *vse)
......@@ -305,25 +292,24 @@ static UserPpc64VsxstateEntry* copy_vsx_regs(uint64_t *vsregs)
* As a consequence, only the doubleword 1 of the 32 first VSX registers have
* to be saved (the ones are returned by PTRACE_GETVSRREGS).
*/
static UserPpc64VsxstateEntry *get_vsx_regs(pid_t pid)
static int get_vsx_regs(pid_t pid, user_fpregs_struct_t *fp)
{
uint64_t vsregs[NVSXREG];
if (ptrace(PTRACE_GETVSRREGS, pid, 0, (void*)&vsregs) < 0) {
if (ptrace(PTRACE_GETVSRREGS, pid, 0, (void*)fp->vsxregs) < 0) {
/*
* EIO is returned in the case PTRACE_GETVRREGS is not
* supported.
*/
if (errno == EIO) {
pr_debug("VSX register's dump not supported.\n");
return 0;
if (errno != EIO) {
pr_perror("Couldn't get VSX registers");
return -1;
}
pr_perror("Couldn't get VSX registers");
return (UserPpc64VsxstateEntry *)-1L;
pr_debug("VSX register's dump not supported.\n");
}
pr_debug("Dumping VSX registers\n");
return copy_vsx_regs(vsregs);
else {
pr_debug("Dumping VSX registers\n");
fp->flags |= USER_FPREGS_FL_VSX;
}
return 0;
}
static int put_vsx_regs(mcontext_t *mc, UserPpc64VsxstateEntry *vse)
......@@ -434,29 +420,12 @@ static void xfree_tm_state(UserPpc64TmRegsEntry *tme)
}
}
static int get_tm_regs(pid_t pid, CoreEntry *core)
static int get_tm_regs(pid_t pid, user_fpregs_struct_t *fpregs)
{
struct {
uint64_t tfhar, texasr, tfiar;
} tm_spr_regs;
user_regs_struct_t regs;
uint64_t fpregs[NFPREG], vsxregs[32];
__vector128 vmxregs[NVRREG];
struct iovec iov;
UserPpc64TmRegsEntry *tme;
UserPpc64RegsEntry *gpregs = core->ti_ppc64->gpregs;
pr_debug("Dumping TM registers\n");
tme = xmalloc(sizeof(*tme));
if (!tme)
return -1;
user_ppc64_tm_regs_entry__init(tme);
tme->gpregs = allocate_gp_regs();
if (!tme->gpregs)
goto out_free;
#define TM_REQUIRED 0
#define TM_OPTIONAL 1
#define PTRACE_GET_TM(s,n,c,u) do { \
......@@ -475,47 +444,29 @@ static int get_tm_regs(pid_t pid, CoreEntry *core)
} while(0)
/* Get special registers */
PTRACE_GET_TM(tm_spr_regs, "SPR", NT_PPC_TM_SPR, TM_REQUIRED);
gpregs->has_tfhar = true;
gpregs->tfhar = tm_spr_regs.tfhar;
gpregs->has_texasr = true;
gpregs->texasr = tm_spr_regs.texasr;
gpregs->has_tfiar = true;
gpregs->tfiar = tm_spr_regs.tfiar;
PTRACE_GET_TM(fpregs->tm.tm_spr_regs, "SPR", NT_PPC_TM_SPR, TM_REQUIRED);
/* Get checkpointed regular registers */
PTRACE_GET_TM(regs, "GPR", NT_PPC_TM_CGPR, TM_REQUIRED);
copy_gp_regs(gpregs, &regs);
PTRACE_GET_TM(fpregs->tm.regs, "GPR", NT_PPC_TM_CGPR, TM_REQUIRED);
/* Get checkpointed FP registers */
PTRACE_GET_TM(fpregs, "FPR", NT_PPC_TM_CFPR, TM_OPTIONAL);
if (iov.iov_base) {
core->ti_ppc64->fpstate = copy_fp_regs(fpregs);
if (!core->ti_ppc64->fpstate)
goto out_free;
}
PTRACE_GET_TM(fpregs->tm.fpregs, "FPR", NT_PPC_TM_CFPR, TM_OPTIONAL);
if (iov.iov_base)
fpregs->tm.flags |= USER_FPREGS_FL_FP;
/* Get checkpointed VMX (Altivec) registers */
PTRACE_GET_TM(vmxregs, "VMX", NT_PPC_TM_CVMX, TM_OPTIONAL);
if (iov.iov_base) {
core->ti_ppc64->vrstate = copy_altivec_regs(vmxregs);
if (!core->ti_ppc64->vrstate)
goto out_free;
}
PTRACE_GET_TM(fpregs->tm.vrregs, "VMX", NT_PPC_TM_CVMX, TM_OPTIONAL);
if (iov.iov_base)
fpregs->tm.flags |= USER_FPREGS_FL_ALTIVEC;
/* Get checkpointed VSX registers */
PTRACE_GET_TM(vsxregs, "VSX", NT_PPC_TM_CVSX, TM_OPTIONAL);
if (iov.iov_base) {
core->ti_ppc64->vsxstate = copy_vsx_regs(vsxregs);
if (!core->ti_ppc64->vsxstate)
goto out_free;
}
PTRACE_GET_TM(fpregs->tm.vsxregs, "VSX", NT_PPC_TM_CVSX, TM_OPTIONAL);
if (iov.iov_base)
fpregs->tm.flags |= USER_FPREGS_FL_VSX;
core->ti_ppc64->tmstate = tme;
return 0;
out_free:
xfree_tm_state(tme);
return -1; /* still failing the checkpoint */
}
......@@ -553,13 +504,9 @@ static int put_tm_regs(struct rt_sigframe *f, UserPpc64TmRegsEntry *tme)
}
/****************************************************************************/
int get_task_regs(pid_t pid, user_regs_struct_t regs, CoreEntry *core)
static int __get_task_regs(pid_t pid, user_regs_struct_t *regs,
user_fpregs_struct_t *fpregs)
{
UserPpc64RegsEntry *gpregs;
UserPpc64FpstateEntry **fpstate;
UserPpc64VrstateEntry **vrstate;
UserPpc64VsxstateEntry **vsxstate;
pr_info("Dumping GP/FPU registers for %d\n", pid);
/*
......@@ -570,85 +517,203 @@ int get_task_regs(pid_t pid, user_regs_struct_t regs, CoreEntry *core)
#define TRAP(r) ((r).trap & ~0xF)
#endif
if (TRAP(regs) == 0x0C00 && regs.ccr & 0x10000000) {
if (TRAP(*regs) == 0x0C00 && regs->ccr & 0x10000000) {
/* Restart the system call */
switch (regs.gpr[3]) {
switch (regs->gpr[3]) {
case ERESTARTNOHAND:
case ERESTARTSYS:
case ERESTARTNOINTR:
regs.gpr[3] = regs.orig_gpr3;
regs.nip -= 4;
regs->gpr[3] = regs->orig_gpr3;
regs->nip -= 4;
break;
case ERESTART_RESTARTBLOCK:
regs.gpr[0] = __NR_restart_syscall;
regs.nip -= 4;
regs->gpr[0] = __NR_restart_syscall;
regs->nip -= 4;
break;
}
}
/* Resetting trap since we are now coming from user space. */
regs.trap = 0;
regs->trap = 0;
fpregs->flags = 0;
/*
* Check for Transactional Memory operation in progress.
* Until we have support of TM register's state through the ptrace API,
* we can't checkpoint process with TM operation in progress (almost
* impossible) or suspended (easy to get).
*/
if (MSR_TM_ACTIVE(regs.msr)) {
if (MSR_TM_ACTIVE(regs->msr)) {
pr_debug("Task %d has %s TM operation at 0x%lx\n",
pid,
(regs.msr & MSR_TMS) ? "a suspended" : "an active",
regs.nip);
if (get_tm_regs(pid, core))
(regs->msr & MSR_TMS) ? "a suspended" : "an active",
regs->nip);
if (get_tm_regs(pid, fpregs))
return -1;
fpregs->flags = USER_FPREGS_FL_TM;
}
if (get_fpu_regs(pid, fpregs))
return -1;
if (get_altivec_regs(pid, fpregs))
return -1;
if (fpregs->flags & USER_FPREGS_FL_ALTIVEC) {
/*
* Save the VSX registers if Altivec registers are supported
*/
if (get_vsx_regs(pid, fpregs))
return -1;
}
return 0;
}
static int copy_tm_regs(user_regs_struct_t *regs, user_fpregs_struct_t *fpregs,
CoreEntry *core)
{
UserPpc64TmRegsEntry *tme;
UserPpc64RegsEntry *gpregs = core->ti_ppc64->gpregs;
pr_debug("Copying TM registers\n");
tme = xmalloc(sizeof(*tme));
if (!tme)
return -1;
user_ppc64_tm_regs_entry__init(tme);
tme->gpregs = allocate_gp_regs();
if (!tme->gpregs)
goto out_free;
gpregs->has_tfhar = true;
gpregs->tfhar = fpregs->tm.tm_spr_regs.tfhar;
gpregs->has_texasr = true;
gpregs->texasr = fpregs->tm.tm_spr_regs.texasr;
gpregs->has_tfiar = true;
gpregs->tfiar = fpregs->tm.tm_spr_regs.tfiar;
/* This is the checkpointed state, we must save it in place of the
* current state because the signal handler is made in this way.
* We invert the 2 states instead of when building the signal frame,
* because we can't modify the gpregs manipulated by the common layer.
*/
copy_gp_regs(gpregs, &fpregs->tm.regs);
if (fpregs->tm.flags & USER_FPREGS_FL_FP) {
core->ti_ppc64->fpstate = copy_fp_regs(fpregs->tm.fpregs);
if (!core->ti_ppc64->fpstate)
goto out_free;
}
if (fpregs->tm.flags & USER_FPREGS_FL_ALTIVEC) {
core->ti_ppc64->vrstate = copy_altivec_regs(fpregs->tm.vrregs);
if (!core->ti_ppc64->vrstate)
goto out_free;
/*
* Force the MSR_VEC bit of the restored MSR otherwise the
* kernel will not restore them from the signal frame.
*/
gpregs->msr |= MSR_VEC;
if (fpregs->tm.flags & USER_FPREGS_FL_VSX) {
core->ti_ppc64->vsxstate = copy_vsx_regs(fpregs->tm.vsxregs);
if (!core->ti_ppc64->vsxstate)
goto out_free;
/*
* Force the MSR_VSX bit of the restored MSR otherwise
* the kernel will not restore them from the signal
* frame.
*/
gpregs->msr |= MSR_VSX;
}
}
core->ti_ppc64->tmstate = tme;
return 0;
out_free:
xfree_tm_state(tme);
return -1;
}
static int __copy_task_regs(user_regs_struct_t *regs,
user_fpregs_struct_t *fpregs,
CoreEntry *core)
{
UserPpc64RegsEntry *gpregs;
UserPpc64FpstateEntry **fpstate;
UserPpc64VrstateEntry **vrstate;
UserPpc64VsxstateEntry **vsxstate;
/* Copy retrieved registers in the proto data
* If TM is in the loop we switch the saved register set because
* the signal frame is built with checkpointed registers on top to not
* confused TM unaware process, while ptrace is retrieving the
* checkpointed set through the TM specific ELF notes.
*/
if (fpregs->flags & USER_FPREGS_FL_TM) {
if (copy_tm_regs(regs, fpregs, core))
return -1;
gpregs = core->ti_ppc64->tmstate->gpregs;
fpstate = &(core->ti_ppc64->tmstate->fpstate);
vrstate = &(core->ti_ppc64->tmstate->vrstate);
vsxstate = &(core->ti_ppc64->tmstate->vsxstate);
} else {
}
else {
gpregs = core->ti_ppc64->gpregs;
fpstate = &(core->ti_ppc64->fpstate);
vrstate = &(core->ti_ppc64->vrstate);
vsxstate = &(core->ti_ppc64->vsxstate);
}
copy_gp_regs(gpregs, &regs);
*fpstate = get_fpu_regs(pid);
if (!*fpstate)
return -1;
}
*vrstate = get_altivec_regs(pid);
if (*vrstate) {
if (*vrstate == (UserPpc64VrstateEntry*)-1L)
copy_gp_regs(gpregs, regs);
if (fpregs->flags & USER_FPREGS_FL_FP) {
*fpstate = copy_fp_regs(fpregs->fpregs);
if (!*fpstate)
return -1;
/*
* Force the MSR_VEC bit of the restored MSR otherwise the
* kernel will not restore them from the signal frame.
*/
gpregs->msr |= MSR_VEC;
/*
* Save the VSX registers if Altivec registers are supported
*/
*vsxstate = get_vsx_regs(pid);
if (*vsxstate) {
if (*vsxstate == (UserPpc64VsxstateEntry *)-1L)
}
if (fpregs->flags & USER_FPREGS_FL_ALTIVEC) {
*vrstate = copy_altivec_regs(fpregs->vrregs);
if (!*vrstate)
return -1;
/*
* Force the MSR_VEC bit of the restored MSR otherwise the
* kernel will not restore them from the signal frame.
*/
gpregs->msr |= MSR_VEC;
if (fpregs->flags & USER_FPREGS_FL_VSX) {
*vsxstate = copy_vsx_regs(fpregs->vsxregs);
if (!*vsxstate)
return -1;
/*
* Force the MSR_VSX bit of the restored MSR otherwise
* the kernel will not restore them from the signal
* frame.
*/
gpregs->msr |= MSR_VSX;
/*
* Force the MSR_VSX bit of the restored MSR otherwise
* the kernel will not restore them from the signal
* frame.
*/
gpregs->msr |= MSR_VSX;
}
}
return 0;
}
/****************************************************************************/
int get_task_regs(pid_t pid, user_regs_struct_t regs, CoreEntry *core)
{
user_fpregs_struct_t fpregs;
int ret;
ret = __get_task_regs(pid, &regs, &fpregs);
if (ret)
return ret;
return __copy_task_regs(&regs, &fpregs, core);
}
int arch_alloc_thread_info(CoreEntry *core)
{
ThreadInfoPpc64 *ti_ppc64;
......
......@@ -61,6 +61,32 @@ typedef struct {
unsigned long result; /* Result of a system call */
} user_regs_struct_t;
#define NVSXREG 32
#define USER_FPREGS_FL_FP 0x00001
#define USER_FPREGS_FL_ALTIVEC 0x00002
#define USER_FPREGS_FL_VSX 0x00004
#define USER_FPREGS_FL_TM 0x00010
typedef struct {
uint64_t fpregs[NFPREG];
__vector128 vrregs[NVRREG];
uint64_t vsxregs[NVSXREG];
int flags;
struct tm_regs {
int flags;
struct {
uint64_t tfhar, texasr, tfiar;
} tm_spr_regs;
user_regs_struct_t regs;
uint64_t fpregs[NFPREG];
__vector128 vrregs[NVRREG];
uint64_t vsxregs[NVSXREG];
} tm;
} user_fpregs_struct_t;
typedef UserPpc64RegsEntry UserRegsEntry;
#define CORE_ENTRY__MARCH CORE_ENTRY__MARCH__PPC64
......
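
A note on the flags field of the new user_fpregs_struct_t above: whenever Altivec or VSX state is flagged as saved, the copy path also forces the corresponding MSR bits in the stored general registers so that the kernel's sigreturn will actually restore those sets from the signal frame (see the MSR_VEC/MSR_VSX comments in the diff). Here is a hedged sketch of that flag-to-MSR mapping; the TOY_MSR_* values and the msr_bits_for() helper are illustrative placeholders, not the kernel's definitions.

/* Sketch: map validity flags onto MSR bits that must be forced in the
 * saved MSR.  Flag and MSR values are illustrative placeholders. */
#include <stdint.h>
#include <stdio.h>

#define FL_ALTIVEC   0x2
#define FL_VSX       0x4

#define TOY_MSR_VEC  (1UL << 25)   /* placeholder for the real MSR_VEC bit */
#define TOY_MSR_VSX  (1UL << 23)   /* placeholder for the real MSR_VSX bit */

static uint64_t msr_bits_for(int flags)
{
	uint64_t msr = 0;

	if (flags & FL_ALTIVEC)
		msr |= TOY_MSR_VEC;
	/* As in the diff, VSX state is only saved when Altivec is saved too. */
	if ((flags & FL_ALTIVEC) && (flags & FL_VSX))
		msr |= TOY_MSR_VSX;
	return msr;
}

int main(void)
{
	printf("MSR bits to force: 0x%llx\n",
	       (unsigned long long)msr_bits_for(FL_ALTIVEC | FL_VSX));
	return 0;
}
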