Commit c00dd345 authored by Mike Rapoport, committed by Andrei Vagin

criu: lazy-pages: move find_vmas and related code around

Move the find_vmas and collect_uffd_pages functions to a point before
they are actually used. This allows dropping the forward declaration
of find_vmas and makes the subsequent refactoring cleaner.
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Pavel Emelyanov <xemul@virtuozzo.com>
parent 6d405370
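
For context, the pattern behind the change is simply "define before first use". The sketch below is not the CRIU code itself: the structure is reduced to a stub, the function bodies are placeholders, and the assumption that ud_open() is the first caller of find_vmas() is taken from the surrounding diff. Once the definition precedes the caller, the forward declaration can be dropped.

#include <stdio.h>

struct lazy_pages_info {	/* stand-in stub, not the real structure */
	int pid;
};

/*
 * Previously a forward declaration was needed here:
 *	static int find_vmas(struct lazy_pages_info *lpi);
 * because the definition lived further down in the file than its caller.
 */

static int find_vmas(struct lazy_pages_info *lpi)
{
	printf("scanning VMAs of pid %d\n", lpi->pid);	/* placeholder body */
	return 0;
}

static int ud_open(struct lazy_pages_info *lpi)
{
	/* first use of find_vmas(); no forward declaration is required
	 * because the definition now precedes this point */
	return find_vmas(lpi);
}

int main(void)
{
	struct lazy_pages_info lpi = { .pid = 1234 };

	return ud_open(&lpi);
}
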
@@ -287,7 +287,160 @@ out:
return -1;
}
-static int find_vmas(struct lazy_pages_info *lpi);
+#define UFFD_FLAG_SENT 0x1
struct uffd_pages_struct {
struct list_head list;
unsigned long addr;
int flags;
};
static int collect_uffd_pages(struct page_read *pr, struct lazy_pages_info *lpi)
{
unsigned long base;
int i;
struct iovec iov;
unsigned long nr_pages;
unsigned long ps;
int rc;
struct uffd_pages_struct *uffd_pages;
struct vma_area *vma;
struct vm_area_list *vmas;
struct pstree_item *item = pstree_item_by_virt(lpi->pid);
BUG_ON(!item);
vmas = &rsti(item)->vmas;
rc = pr->get_pagemap(pr, &iov);
if (rc <= 0)
return 0;
ps = page_size();
nr_pages = iov.iov_len / ps;
base = (unsigned long) iov.iov_base;
pr_debug("iov.iov_base 0x%lx (%ld pages)\n", base, nr_pages);
for (i = 0; i < nr_pages; i++) {
bool uffd_page = false;
base = (unsigned long) iov.iov_base + (i * ps);
/*
* Only pages which are MAP_ANONYMOUS and MAP_PRIVATE
* are relevant for userfaultfd handling.
* Loop over all VMAs to see if the flags match.
*/
list_for_each_entry(vma, &vmas->h, list) {
/*
* This loop assumes that base can actually be found
* in the VMA list.
*/
if (base >= vma->e->start && base < vma->e->end) {
if (vma_entry_can_be_lazy(vma->e)) {
if (!pagemap_in_parent(pr->pe))
uffd_page = true;
break;
}
}
}
/* This is not a page we are looking for. Move along */
if (!uffd_page)
continue;
pr_debug("Adding 0x%lx to our list\n", base);
uffd_pages = xzalloc(sizeof(struct uffd_pages_struct));
if (!uffd_pages)
return -1;
uffd_pages->addr = base;
list_add(&uffd_pages->list, &lpi->pages);
}
return 1;
}
/*
* Setting up criu infrastructure and scan for VMAs.
*/
static int find_vmas(struct lazy_pages_info *lpi)
{
struct cr_img *img;
int ret;
struct vm_area_list vmas;
int vn = 0;
struct rst_info *ri;
struct uffd_pages_struct *uffd_pages;
struct pstree_item *item = pstree_item_by_virt(lpi->pid);
BUG_ON(!item);
vm_area_list_init(&vmas);
ri = rsti(item);
if (!ri)
return -1;
img = open_image(CR_FD_MM, O_RSTR, lpi->pid);
if (!img)
return -1;
ret = pb_read_one_eof(img, &ri->mm, PB_MM);
close_image(img);
if (ret == -1)
return -1;
pr_debug("Found %zd VMAs in image\n", ri->mm->n_vmas);
while (vn < ri->mm->n_vmas) {
struct vma_area *vma;
ret = -1;
vma = alloc_vma_area();
if (!vma)
goto out;
ret = 0;
ri->vmas.nr++;
vma->e = ri->mm->vmas[vn++];
list_add_tail(&vma->list, &ri->vmas.h);
if (vma_area_is_private(vma, kdat.task_size)) {
vmas.priv_size += vma_area_len(vma);
if (vma->e->flags & MAP_GROWSDOWN)
vmas.priv_size += PAGE_SIZE;
}
pr_info("vma 0x%"PRIx64" 0x%"PRIx64"\n", vma->e->start, vma->e->end);
}
ret = open_page_read(lpi->pid, &lpi->pr, PR_TASK);
if (ret <= 0) {
ret = -1;
goto out;
}
/*
* This puts all pages which should be handled by userfaultfd
* in the list uffd_list. This list is later used to detect if
* a page has already been transferred or if it needs to be
* pushed into the process using userfaultfd.
*/
do {
ret = collect_uffd_pages(&lpi->pr, lpi);
if (ret == -1) {
goto out;
}
} while (ret);
/* Count detected pages */
list_for_each_entry(uffd_pages, &lpi->pages, list)
ret++;
pr_debug("Found %d pages to be handled by UFFD\n", ret);
out:
return ret;
}
static int ud_open(int client, struct lazy_pages_info **_lpi)
{
@@ -366,14 +519,6 @@ static int get_page(struct lazy_pages_info *lpi, unsigned long addr, void *dest)
return 1;
}
#define UFFD_FLAG_SENT 0x1
struct uffd_pages_struct {
struct list_head list;
unsigned long addr;
int flags;
};
static int uffd_copy_page(struct lazy_pages_info *lpi, __u64 address,
void *dest)
{
@@ -448,70 +593,6 @@ static int uffd_handle_page(struct lazy_pages_info *lpi, __u64 address,
return rc;
}
static int collect_uffd_pages(struct page_read *pr, struct lazy_pages_info *lpi)
{
unsigned long base;
int i;
struct iovec iov;
unsigned long nr_pages;
unsigned long ps;
int rc;
struct uffd_pages_struct *uffd_pages;
struct vma_area *vma;
struct vm_area_list *vmas;
struct pstree_item *item = pstree_item_by_virt(lpi->pid);
BUG_ON(!item);
vmas = &rsti(item)->vmas;
rc = pr->get_pagemap(pr, &iov);
if (rc <= 0)
return 0;
ps = page_size();
nr_pages = iov.iov_len / ps;
base = (unsigned long) iov.iov_base;
pr_debug("iov.iov_base 0x%lx (%ld pages)\n", base, nr_pages);
for (i = 0; i < nr_pages; i++) {
bool uffd_page = false;
base = (unsigned long) iov.iov_base + (i * ps);
/*
* Only pages which are MAP_ANONYMOUS and MAP_PRIVATE
* are relevant for userfaultfd handling.
* Loop over all VMAs to see if the flags match.
*/
list_for_each_entry(vma, &vmas->h, list) {
/*
* This loop assumes that base can actually be found
* in the VMA list.
*/
if (base >= vma->e->start && base < vma->e->end) {
if (vma_entry_can_be_lazy(vma->e)) {
if (!pagemap_in_parent(pr->pe))
uffd_page = true;
break;
}
}
}
/* This is not a page we are looking for. Move along */
if (!uffd_page)
continue;
pr_debug("Adding 0x%lx to our list\n", base);
uffd_pages = xzalloc(sizeof(struct uffd_pages_struct));
if (!uffd_pages)
return -1;
uffd_pages->addr = base;
list_add(&uffd_pages->list, &lpi->pages);
}
return 1;
}
static int handle_remaining_pages(struct lazy_pages_info *lpi, void *dest)
{
struct uffd_pages_struct *uffd_pages;
@@ -560,89 +641,6 @@ static int handle_regular_pages(struct lazy_pages_info *lpi, void *dest,
return 0;
}
/*
* Setting up criu infrastructure and scan for VMAs.
*/
static int find_vmas(struct lazy_pages_info *lpi)
{
struct cr_img *img;
int ret;
struct vm_area_list vmas;
int vn = 0;
struct rst_info *ri;
struct uffd_pages_struct *uffd_pages;
struct pstree_item *item = pstree_item_by_virt(lpi->pid);
BUG_ON(!item);
vm_area_list_init(&vmas);
ri = rsti(item);
if (!ri)
return -1;
img = open_image(CR_FD_MM, O_RSTR, lpi->pid);
if (!img)
return -1;
ret = pb_read_one_eof(img, &ri->mm, PB_MM);
close_image(img);
if (ret == -1)
return -1;
pr_debug("Found %zd VMAs in image\n", ri->mm->n_vmas);
while (vn < ri->mm->n_vmas) {
struct vma_area *vma;
ret = -1;
vma = alloc_vma_area();
if (!vma)
goto out;
ret = 0;
ri->vmas.nr++;
vma->e = ri->mm->vmas[vn++];
list_add_tail(&vma->list, &ri->vmas.h);
if (vma_area_is_private(vma, kdat.task_size)) {
vmas.priv_size += vma_area_len(vma);
if (vma->e->flags & MAP_GROWSDOWN)
vmas.priv_size += PAGE_SIZE;
}
pr_info("vma 0x%"PRIx64" 0x%"PRIx64"\n", vma->e->start, vma->e->end);
}
ret = open_page_read(lpi->pid, &lpi->pr, PR_TASK);
if (ret <= 0) {
ret = -1;
goto out;
}
/*
* This puts all pages which should be handled by userfaultfd
* in the list uffd_list. This list is later used to detect if
* a page has already been transferred or if it needs to be
* pushed into the process using userfaultfd.
*/
do {
ret = collect_uffd_pages(&lpi->pr, lpi);
if (ret == -1) {
goto out;
}
} while (ret);
/* Count detected pages */
list_for_each_entry(uffd_pages, &lpi->pages, list)
ret++;
pr_debug("Found %d pages to be handled by UFFD\n", ret);
out:
return ret;
}
static int handle_user_fault(struct lazy_pages_info *lpi, void *dest)
{
struct uffd_msg msg;
...
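
One detail worth noting in the moved code is the return convention that ties the two functions together: collect_uffd_pages() returns -1 on error, 0 once pr->get_pagemap() has no more entries, and 1 after it has processed one pagemap chunk, which is why find_vmas() drains it with a do/while loop. A minimal, self-contained sketch of that drain-until-empty pattern follows; fetch_next_chunk() is a hypothetical stand-in for pr->get_pagemap(), not a CRIU function.

#include <stdio.h>

/* Hypothetical stand-in for pr->get_pagemap(): pretend three pagemap
 * entries exist, then report exhaustion. */
static int fetch_next_chunk(void)
{
	static int remaining = 3;

	return remaining-- > 0;
}

/* Mirrors the collect_uffd_pages() convention: 0 = nothing left,
 * 1 = one chunk processed, -1 = error (not triggered here). */
static int collect_chunk(void)
{
	if (!fetch_next_chunk())
		return 0;
	printf("collected one pagemap chunk\n");
	return 1;
}

int main(void)
{
	int ret;

	/* Same shape as the do/while loop in find_vmas(): keep calling
	 * until the collector reports exhaustion or an error. */
	do {
		ret = collect_chunk();
		if (ret == -1)
			return 1;
	} while (ret);

	return 0;
}
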