Commit 2bcfa2c1 authored by Dmitry Safonov, committed by Andrei Vagin

x86: dump TLS entries from GDT

Dump TLS with the help of SYS_get_thread_area.
This is primarily for 32-bit applications, but it may also be used
by mixed 64/32-bit code. I do not enable dumping for 64-bit tasks
until we meet such code; instead, the three TLS user_desc entries
are included in 64-bit images marked as not present.
That is arguable, and I may later include user_descs only for
compat tasks.

Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Signed-off-by: Dmitry Safonov <dsafonov@virtuozzo.com>
Signed-off-by: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
parent b1cc9984
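For illustration only (not part of this commit): a minimal userspace sketch of the approach the message describes, reading the three GDT TLS slots with the get_thread_area syscall on an i386 build. The slot numbers 12..14 correspond to the GDT_ENTRY_TLS_MIN..GDT_ENTRY_TLS_MAX values introduced further down in the diff.

#include <asm/ldt.h>		/* struct user_desc */
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
#ifdef __i386__
	int i;

	/* GDT_ENTRY_TLS_MIN..GDT_ENTRY_TLS_MAX on 64-bit kernels */
	for (i = 12; i <= 14; i++) {
		struct user_desc d;

		memset(&d, 0, sizeof(d));
		d.entry_number = i;
		/* get_thread_area() fills in the descriptor for this GDT slot */
		if (syscall(SYS_get_thread_area, &d) == 0)
			printf("entry %d: base %#x limit %#x present %d\n",
			       i, d.base_addr, d.limit, !d.seg_not_present);
	}
#endif
	return 0;
}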
@@ -389,6 +389,18 @@ int ptrace_set_regs(pid_t pid, user_regs_struct_t *regs)
return ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov);
}
static void alloc_tls(ThreadInfoX86 *ti, void **mempool)
{
int i;
ti->tls = xptr_pull_s(mempool, GDT_ENTRY_TLS_NUM*sizeof(UserDescT*));
ti->n_tls = GDT_ENTRY_TLS_NUM;
for (i = 0; i < GDT_ENTRY_TLS_NUM; i++) {
ti->tls[i] = xptr_pull(mempool, UserDescT);
user_desc_t__init(ti->tls[i]);
}
}
int arch_alloc_thread_info(CoreEntry *core)
{
size_t sz;
@@ -399,7 +411,9 @@ int arch_alloc_thread_info(CoreEntry *core)
with_fpu = cpu_has_feature(X86_FEATURE_FPU);
sz = sizeof(ThreadInfoX86) + sizeof(UserX86RegsEntry);
sz = sizeof(ThreadInfoX86) + sizeof(UserX86RegsEntry) +
GDT_ENTRY_TLS_NUM*sizeof(UserDescT) +
GDT_ENTRY_TLS_NUM*sizeof(UserDescT*);
if (with_fpu) {
sz += sizeof(UserX86FpregsEntry);
with_xsave = cpu_has_feature(X86_FEATURE_OSXSAVE);
@@ -415,6 +429,7 @@ int arch_alloc_thread_info(CoreEntry *core)
thread_info_x86__init(ti);
ti->gpregs = xptr_pull(&m, UserX86RegsEntry);
user_x86_regs_entry__init(ti->gpregs);
alloc_tls(ti, &m);
if (with_fpu) {
UserX86FpregsEntry *fpregs;
@@ -9,6 +9,29 @@ extern void arch_free_thread_info(CoreEntry *core);
extern int ptrace_get_regs(pid_t pid, user_regs_struct_t *regs);
extern int ptrace_set_regs(pid_t pid, user_regs_struct_t *regs);
#define core_put_tls(core, tls)
static inline void core_put_tls(CoreEntry *core, tls_t tls)
{
ThreadInfoX86 *ti = core->thread_info;
int i;
for (i = 0; i < GDT_ENTRY_TLS_NUM; i++)
{
user_desc_t *from = &tls.desc[i];
UserDescT *to = ti->tls[i];
#define COPY_TLS(field) to->field = from->field
COPY_TLS(entry_number);
COPY_TLS(base_addr);
COPY_TLS(limit);
COPY_TLS(seg_32bit);
to->contents_h = from->contents & 0x2;
to->contents_l = from->contents & 0x1;
COPY_TLS(read_exec_only);
COPY_TLS(limit_in_pages);
COPY_TLS(seg_not_present);
COPY_TLS(useable);
#undef COPY_TLS
}
}
#endif
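The two contents_* assignments in core_put_tls() above split the kernel's 2-bit contents field into separate booleans for the image. A hypothetical restore-side helper (an assumption for illustration, not introduced by this commit) would reassemble the value like this:

/* Hypothetical helper, not part of this commit: rebuild the 2-bit
 * "contents" value from the contents_h/contents_l booleans stored
 * by core_put_tls() above. */
static inline unsigned int user_desc_contents(const UserDescT *d)
{
	return ((d->contents_h ? 1u : 0u) << 1) | (d->contents_l ? 1u : 0u);
}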
@@ -136,7 +136,19 @@ typedef struct xsave_struct user_fpregs_struct_t;
static inline unsigned long task_size(void) { return TASK_SIZE; }
typedef uint64_t auxv_t;
typedef uint32_t tls_t;
/*
* Linux preserves three TLS segments in the GDT.
* Their offsets in the GDT differ between 32-bit and 64-bit kernels.
* On 64-bit x86 the offsets are the same for native and compat tasks.
*/
#define GDT_ENTRY_TLS_MIN 12
#define GDT_ENTRY_TLS_MAX 14
#define GDT_ENTRY_TLS_NUM 3
typedef struct {
user_desc_t desc[GDT_ENTRY_TLS_NUM];
} tls_t;
#define REG_RES(regs) get_user_reg(&regs, ax)
#define REG_IP(regs) get_user_reg(&regs, ip)
@@ -5,6 +5,55 @@
# define __parasite_entry __attribute__((regparm(3)))
#endif
static inline void arch_get_tls(tls_t *ptls) { (void)ptls; }
#ifdef CONFIG_X86_32
static void arch_get_user_desc(user_desc_t *desc)
{
if (sys_get_thread_area(desc))
pr_err("Failed to dump TLS descriptor #%d\n",
desc->entry_number);
}
#else /* !X86_32 */
static void arch_get_user_desc(user_desc_t *desc)
{
/*
* For 64-bit applications, TLS (fs_base for glibc) lives in an MSR,
* which is dumped with the help of arch_prctl().
*
* But setting fs_base will also update the GDT entry if the base
* pointer fits into 4 bytes; otherwise only the MSR is set. This
* allows mixed 64/32-bit code to use both: 2 MSRs as TLS bases
* _and_ 3 GDT entries, i.e. 5 TLS pointers in sum, 3 of which are
* four bytes wide and the other two larger than four bytes:
*	struct thread_struct {
*		struct desc_struct	tls_array[3];
*		...
* #ifdef CONFIG_X86_64
*		unsigned long		fsbase;
*		unsigned long		gsbase;
* #endif
*		...
*	};
*
* For such mixed code we might also want to call the 32-bit
* get_thread_area syscall. But as three additional calls into the
* kernel would slow down dumping, it is omitted here.
*/
desc->seg_not_present = 1;
}
#endif /* !X86_32 */
static void arch_get_tls(tls_t *ptls)
{
int i;
for (i = 0; i < GDT_ENTRY_TLS_NUM; i++)
{
user_desc_t *d = &ptls->desc[i];
memset(d, 0, sizeof(user_desc_t));
d->entry_number = GDT_ENTRY_TLS_MIN + i;
arch_get_user_desc(d);
}
}
#endif
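For the native 64-bit case described in the comment above, the TLS bases live in MSRs and are read with arch_prctl() rather than from the GDT. A self-contained sketch of such a read (illustrative only, assuming an x86-64 build; not part of this commit):

#include <asm/prctl.h>		/* ARCH_GET_FS, ARCH_GET_GS */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	unsigned long fsbase = 0, gsbase = 0;

	/* On x86-64 the TLS base lives in the FS.base MSR and is read
	 * through arch_prctl(2), not through the three GDT TLS slots. */
	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) == 0 &&
	    syscall(SYS_arch_prctl, ARCH_GET_GS, &gsbase) == 0)
		printf("fs_base %#lx gs_base %#lx\n", fsbase, gsbase);

	return 0;
}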
@@ -118,7 +118,10 @@ static inline void _setup_sas(struct rt_sigframe* sigframe, ThreadSasEntry *sas)
int restore_gpregs(struct rt_sigframe *f, UserX86RegsEntry *r);
int restore_nonsigframe_gpregs(UserX86RegsEntry *r);
static inline void restore_tls(tls_t *ptls) { (void)ptls; }
static inline void restore_tls(tls_t *ptls)
{
(void)ptls;
}
int ptrace_set_breakpoint(pid_t pid, void *addr);
int ptrace_flush_breakpoints(pid_t pid);
@@ -65,8 +65,23 @@ message user_x86_fpregs_entry {
optional user_x86_xsave_entry xsave = 13;
}
message user_desc_t {
required uint32 entry_number = 1;
/* this is for GDT, not for MSRs - 32-bit base */
required uint32 base_addr = 2;
required uint32 limit = 3;
required bool seg_32bit = 4;
required bool contents_h = 5;
required bool contents_l = 6;
required bool read_exec_only = 7 [default = true];
required bool limit_in_pages = 8;
required bool seg_not_present = 9 [default = true];
required bool useable = 10;
}
message thread_info_x86 {
required uint64 clear_tid_addr = 1[(criu).hex = true];
required user_x86_regs_entry gpregs = 2[(criu).hex = true];
required user_x86_fpregs_entry fpregs = 3;
repeated user_desc_t tls = 4;
}