Commit 5ca537a2 authored by Pavel Emelyanov

vma: Introduce vma_premapped flag

Not all private VMAs will be premapped, so a separate flag is needed
to mark a VMA as actually sitting in the premap area.
Signed-off-by: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Andrei Vagin <avagin@virtuozzo.com>
parent fd5ae6d9
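
The flag is a bit in the VmaEntry status word: premap_private_vma() sets it
at the moment a VMA is actually placed in the premap area, and every later
stage tests it via the existing vma_area_is()/vma_entry_is() helpers instead
of re-deriving "privateness" from the address range. A minimal sketch of the
semantics implied by the diff (simplified stand-ins; the real VmaEntry is
protobuf-generated and the helpers live in CRIU's vma headers):

    #include <stdbool.h>
    #include <stdint.h>

    #define VMA_PREMMAPED (1 << 30)

    /* Simplified stand-in for the protobuf-generated VmaEntry. */
    typedef struct { uint64_t status; } VmaEntry;

    /* Plausible shape of the status test used throughout this diff:
     * a vma "is" X when all bits of X are set in its status word. */
    static inline bool vma_entry_is(VmaEntry *e, uint64_t s)
    {
            return (e->status & s) == s;
    }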
@@ -89,6 +89,7 @@
 #define VMA_AREA_VVAR     (1 << 12)
 #define VMA_AREA_AIORING  (1 << 13)
+#define VMA_PREMMAPED     (1 << 30)
 #define VMA_UNSUPP        (1 << 31)
 #define CR_CAP_SIZE       2
@@ -522,7 +522,7 @@ int prepare_mm_pid(struct pstree_item *i)
 }

 /* Map a private vma, if it is not mapped by a parent yet */
-static int map_private_vma(struct pstree_item *t,
+static int premap_private_vma(struct pstree_item *t,
                struct vma_area *vma, void **tgt_addr,
                struct vma_area **pvma, struct list_head *pvma_list)
 {
@@ -550,7 +550,7 @@ static int map_private_vma(struct pstree_item *t,
                if (p->e->start > vma->e->start)
                        break;

-               if (!vma_area_is_private(p, kdat.task_size))
+               if (!vma_area_is(p, VMA_PREMMAPED))
                        continue;

                if (p->e->end != vma->e->end ||
@@ -627,6 +627,7 @@ static int map_private_vma(struct pstree_item *t,
                *pvma = list_entry(p->list.next, struct vma_area, list);
        }

+       vma->e->status |= VMA_PREMMAPED;
        vma->premmaped_addr = (unsigned long) addr;
        pr_debug("\tpremap %#016"PRIx64"-%#016"PRIx64" -> %016lx\n",
                 vma->e->start, vma->e->end, (unsigned long)addr);
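
This hunk is the heart of the change: the flag is raised only once the VMA
has really landed in the premap area, alongside the temporary address it
received. A condensed, hypothetical view of that step (the size and
premap-cursor handling below are illustrative only; the real premap path
also handles parent inheritance and file-backed mappings):

    /* Hypothetical condensed premap step: place the private area at a
     * temporary address, then record both facts on the vma. */
    void *addr = mmap(*tgt_addr, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (addr == MAP_FAILED)
            return -1;

    vma->e->status |= VMA_PREMMAPED;           /* lives in the premap area */
    vma->premmaped_addr = (unsigned long)addr; /* ...at this address */
    *tgt_addr = (char *)*tgt_addr + size;      /* advance the premap cursor */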
@@ -654,7 +655,7 @@ static int premap_priv_vmas(struct pstree_item *t, struct vm_area_list *vmas,
        /*
         * Keep parent vmas at hands to check whether we can "inherit" them.
-        * See comments in map_private_vma.
+        * See comments in premap_private_vma.
         */

        if (t->parent)
                parent_vmas = &rsti(t->parent)->vmas.h;
@@ -674,7 +675,7 @@ static int premap_priv_vmas(struct pstree_item *t, struct vm_area_list *vmas,
                if (!vma_area_is_private(vma, kdat.task_size))
                        continue;

-               ret = map_private_vma(t, vma, &at, &pvma, parent_vmas);
+               ret = premap_private_vma(t, vma, &at, &pvma, parent_vmas);
                if (ret < 0)
                        break;
        }
@@ -901,7 +902,7 @@ int unmap_guard_pages(struct pstree_item *t)
        struct list_head *vmas = &rsti(t)->vmas.h;

        list_for_each_entry(vma, vmas, list) {
-               if (!vma_area_is_private(vma, kdat.task_size))
+               if (!vma_area_is(vma, VMA_PREMMAPED))
                        continue;

                if (vma->e->flags & MAP_GROWSDOWN) {
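
unmap_guard_pages() now keys off the same bit, since only VMAs that truly
sit in the premap area have a temporary guard page to drop. An illustrative
guess at what the MAP_GROWSDOWN branch does (the exact placement of the
temporary page is an assumption here):

    if (vma->e->flags & MAP_GROWSDOWN) {
            /* Assumed layout: the temporary guard page sits one page
             * below the premapped start; the kernel re-creates the
             * real guard page when the final stack mapping grows. */
            void *guard = (void *)(vma->premmaped_addr - PAGE_SIZE);
            if (munmap(guard, PAGE_SIZE))
                    return -1;
    }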
@@ -961,7 +962,7 @@ int prepare_vmas(struct pstree_item *t, struct task_restore_args *ta)
                 */
                *vme = *vma->e;

-               if (vma_area_is_private(vma, kdat.task_size))
+               if (vma_area_is(vma, VMA_PREMMAPED))
                        vma_premmaped_start(vme) = vma->premmaped_addr;
        }
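
prepare_vmas() copies each VmaEntry into the restorer arguments and, for
premapped VMAs, also hands over the temporary address through
vma_premmaped_start(). A plausible reading of that accessor, assuming it
aliases a field that is meaningless for private mappings rather than
extending the image format (the exact field is a guess):

    /* Assumption: reuse an otherwise-unused VmaEntry field (shmid is
     * a guess) to carry the premap address into the restorer blob. */
    #define vma_premmaped_start(vme) ((vme)->shmid)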
@@ -624,7 +624,7 @@ static unsigned long restore_mapping(VmaEntry *vma_entry)
  * of tail. To set tail, we write to /dev/null and use the fact this
  * operation is synchronious for the device. Also, we unmap temporary
  * anonymous area, used to store content of ring buffer during restore
- * and mapped in map_private_vma().
+ * and mapped in premap_private_vma().
  */
 static int restore_aio_ring(struct rst_aio_ring *raio)
 {
@@ -1141,7 +1141,7 @@ long __export_restore_task(struct task_restore_args *args)
        for (i = 0; i < args->vmas_n; i++) {
                vma_entry = args->vmas + i;

-               if (!vma_entry_is_private(vma_entry, args->task_size))
+               if (!vma_entry_is(vma_entry, VMA_PREMMAPED))
                        continue;

                if (vma_entry->end >= args->task_size)
@@ -1159,7 +1159,7 @@ long __export_restore_task(struct task_restore_args *args)
        for (i = args->vmas_n - 1; i >= 0; i--) {
                vma_entry = args->vmas + i;

-               if (!vma_entry_is_private(vma_entry, args->task_size))
+               if (!vma_entry_is(vma_entry, VMA_PREMMAPED))
                        continue;

                if (vma_entry->start > args->task_size)
@@ -1182,7 +1182,7 @@ long __export_restore_task(struct task_restore_args *args)
                if (!vma_entry_is(vma_entry, VMA_AREA_REGULAR))
                        continue;

-               if (vma_entry_is_private(vma_entry, args->task_size))
+               if (vma_entry_is(vma_entry, VMA_PREMMAPED))
                        continue;

                va = restore_mapping(vma_entry);
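
In the restorer the flag replaces the privateness test in all three loops:
an ascending and a descending pass that move premapped copies onto their
real addresses (two directions, plausibly so that a move never lands on a
premapped neighbour that has not been relocated yet), plus the
restore_mapping() pass that now skips them. A sketch of what each move
presumably reduces to (the helper name and flag set are assumptions; the
sys_* wrapper stands in for the PIE syscall stubs):

    /* Assumed essence of relocating one premapped vma: mremap() the
     * temporary copy onto its final address in one atomic step. */
    static int vma_move_into_place(VmaEntry *e)
    {
            unsigned long len = e->end - e->start;
            unsigned long p = sys_mremap(vma_premmaped_start(e), len, len,
                                         MREMAP_FIXED | MREMAP_MAYMOVE,
                                         e->start);
            return p == e->start ? 0 : -1;
    }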