Commit 19a76494 authored by Pavel Emelyanov

kerndat: Collect all global variables on one struct

Not to spoil the global namespace and unify the kerndat
data names.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
parent 5f7933e3
......@@ -566,7 +566,7 @@ static int check_mem_dirty_track(void)
if (kerndat_get_dirty_track() < 0)
return -1;
if (!kerndat_has_dirty_track)
if (!kdat.has_dirty_track)
pr_warn("Dirty tracking is OFF. Memory snapshot will not work.\n");
return 0;
}
......
......@@ -2249,7 +2249,7 @@ static int prepare_creds(int pid, struct task_restore_args *args)
creds_entry__free_unpacked(ce, NULL);
args->cap_last_cap = kern_last_cap;
args->cap_last_cap = kdat.last_cap;
/* XXX -- validate creds here? */
......
......@@ -14,14 +14,17 @@ extern int kerndat_init(void);
extern int kerndat_init_rst(void);
extern int kerndat_get_dirty_track(void);
extern dev_t kerndat_shmem_dev;
extern bool kerndat_has_dirty_track;
extern int tcp_max_wshare;
extern int tcp_max_rshare;
struct kerndat_s {
dev_t shmem_dev;
int tcp_max_wshare;
int tcp_max_rshare;
int last_cap;
u64 zero_page_pfn;
bool has_dirty_track;
bool has_memfd;
};
extern int kern_last_cap;
extern u64 zero_page_pfn;
extern struct kerndat_s kdat;
enum {
KERNDAT_FS_STAT_DEVPTS,
......@@ -32,6 +35,4 @@ enum {
extern struct stat *kerndat_get_fs_stat(unsigned int which);
extern bool memfd_is_supported;
#endif /* __CR_KERNDAT_H__ */
......@@ -18,7 +18,10 @@
#include "cr_options.h"
#include "util.h"
dev_t kerndat_shmem_dev;
struct kerndat_s kdat = {
.tcp_max_wshare = 2U << 20,
.tcp_max_rshare = 3U << 20,
};
/*
* Anonymous shared mappings are backed by hidden tmpfs
......@@ -49,8 +52,8 @@ static int kerndat_get_shmemdev(void)
munmap(map, PAGE_SIZE);
kerndat_shmem_dev = buf.st_dev;
pr_info("Found anon-shmem device at %"PRIx64"\n", kerndat_shmem_dev);
kdat.shmem_dev = buf.st_dev;
pr_info("Found anon-shmem device at %"PRIx64"\n", kdat.shmem_dev);
return 0;
}
......@@ -105,8 +108,6 @@ struct stat *kerndat_get_fs_stat(unsigned int which)
* this functionality under CONFIG_MEM_SOFT_DIRTY option.
*/
bool kerndat_has_dirty_track = false;
int kerndat_get_dirty_track(void)
{
char *map;
......@@ -146,7 +147,7 @@ int kerndat_get_dirty_track(void)
if (pmap & PME_SOFT_DIRTY) {
pr_info("Dirty track supported on kernel\n");
kerndat_has_dirty_track = true;
kdat.has_dirty_track = true;
} else {
pr_info("Dirty tracking support is OFF\n");
if (opts.track_mem) {
......@@ -167,8 +168,6 @@ int kerndat_get_dirty_track(void)
* Meanwhile set it up to 2M and 3M, which is safe enough to
* proceed without errors.
*/
int tcp_max_wshare = 2U << 20;
int tcp_max_rshare = 3U << 20;
static int tcp_read_sysctl_limits(void)
{
......@@ -191,19 +190,17 @@ static int tcp_read_sysctl_limits(void)
goto out;
}
tcp_max_wshare = min(tcp_max_wshare, (int)vect[0][2]);
tcp_max_rshare = min(tcp_max_rshare, (int)vect[1][2]);
kdat.tcp_max_wshare = min(kdat.tcp_max_wshare, (int)vect[0][2]);
kdat.tcp_max_rshare = min(kdat.tcp_max_rshare, (int)vect[1][2]);
if (tcp_max_wshare < 128 || tcp_max_rshare < 128)
if (kdat.tcp_max_wshare < 128 || kdat.tcp_max_rshare < 128)
pr_warn("The memory limits for TCP queues are suspiciously small\n");
out:
pr_debug("TCP queue memory limits are %d:%d\n", tcp_max_wshare, tcp_max_rshare);
pr_debug("TCP queue memory limits are %d:%d\n", kdat.tcp_max_wshare, kdat.tcp_max_rshare);
return 0;
}
/* The page frame number (PFN) is constant for the zero page */
u64 zero_page_pfn;
static int init_zero_page_pfn()
{
void *addr;
......@@ -220,28 +217,25 @@ static int init_zero_page_pfn()
return -1;
}
ret = vaddr_to_pfn((unsigned long)addr, &zero_page_pfn);
ret = vaddr_to_pfn((unsigned long)addr, &kdat.zero_page_pfn);
munmap(addr, PAGE_SIZE);
if (zero_page_pfn == 0)
if (kdat.zero_page_pfn == 0)
ret = -1;
return ret;
}
int kern_last_cap;
int get_last_cap(void)
{
struct sysctl_req req[] = {
{ "kernel/cap_last_cap", &kern_last_cap, CTL_U32 },
{ "kernel/cap_last_cap", &kdat.last_cap, CTL_U32 },
{ },
};
return sysctl_op(req, CTL_READ);
}
bool memfd_is_supported;
static bool kerndat_has_memfd_create(void)
{
int ret;
......@@ -249,9 +243,9 @@ static bool kerndat_has_memfd_create(void)
ret = sys_memfd_create(NULL, 0);
if (ret == -ENOSYS)
memfd_is_supported = false;
kdat.has_memfd = false;
else if (ret == -EFAULT)
memfd_is_supported = true;
kdat.has_memfd = true;
else {
pr_err("Unexpected error %d from memfd_create(NULL, 0)\n", ret);
return -1;
......
......@@ -29,7 +29,7 @@ static int task_reset_dirty_track(int pid)
if (!opts.track_mem)
return 0;
BUG_ON(!kerndat_has_dirty_track);
BUG_ON(!kdat.has_dirty_track);
return do_task_reset_dirty_track(pid);
}
......@@ -92,7 +92,7 @@ static inline bool should_dump_page(VmaEntry *vmae, u64 pme)
return false;
if (pme & PME_SWAP)
return true;
if ((pme & PME_PRESENT) && ((pme & PME_PFRAME_MASK) != zero_page_pfn))
if ((pme & PME_PRESENT) && ((pme & PME_PFRAME_MASK) != kdat.zero_page_pfn))
return true;
return false;
......@@ -251,7 +251,7 @@ static int __parasite_dump_pages_seized(struct parasite_ctl *ctl,
pr_info("Dumping pages (type: %d pid: %d)\n", CR_FD_PAGES, ctl->pid.real);
pr_info("----------------------------------------\n");
BUG_ON(zero_page_pfn == 0);
BUG_ON(kdat.zero_page_pfn == 0);
timing_start(TIME_MEMDUMP);
......
......@@ -737,7 +737,7 @@ int parasite_dump_creds(struct parasite_ctl *ctl, CredsEntry *ce)
BUILD_BUG_ON(sizeof(*pc) > PAGE_SIZE);
pc = parasite_args(ctl, struct parasite_dump_creds);
pc->cap_last_cap = kern_last_cap;
pc->cap_last_cap = kdat.last_cap;
if (parasite_execute_daemon(PARASITE_CMD_DUMP_CREDS, ctl) < 0)
return -1;
......
......@@ -174,7 +174,7 @@ static int parse_vmflags(char *buf, struct vma_area *vma_area)
static inline int is_anon_shmem_map(dev_t dev)
{
return kerndat_shmem_dev == dev;
return kdat.shmem_dev == dev;
}
struct vma_file_info {
......
......@@ -191,7 +191,7 @@ int get_shmem_fd(int pid, VmaEntry *vi)
return dup(si->fd);
flags = MAP_SHARED;
if (memfd_is_supported) {
if (kdat.has_memfd) {
f = sys_memfd_create("", 0);
if (f < 0) {
pr_perror("Unable to create memfd");
......
......@@ -466,7 +466,7 @@ static int __send_tcp_queue(int sk, int queue, u32 len, struct cr_img *img)
if (read_img_buf(img, buf, len) < 0)
goto err;
max = (queue == TCP_SEND_QUEUE) ? tcp_max_wshare : tcp_max_rshare;
max = (queue == TCP_SEND_QUEUE) ? kdat.tcp_max_wshare : kdat.tcp_max_rshare;
off = 0;
while (len) {
int chunk = (len > max ? max : len);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment