Commit eb1ae0a0 authored by Pavel Emelyanov

vma: Turn embedded VmaEntry on vma_area into pointer

On restore we will read all VmaEntries in one big MmEntry object,
so to avoid copying them all into vma_areas, make them pointable.
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
parent 6da13c68
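In short, struct vma_area no longer embeds the protobuf VmaEntry; it keeps a pointer to one. For now alloc_vma_area() places the entry right behind the vma_area in a single allocation, so existing callers keep working, while the restore path can later point ->e straight at entries inside one big MmEntry instead of copying them. A condensed view of the change, taken from the vma.h and alloc_vma_area() hunks below:

struct vma_area {
	struct list_head list;
	VmaEntry *e;		/* was: VmaEntry vma; */
	...
};

struct vma_area *alloc_vma_area(void)
{
	struct vma_area *p;

	/* One allocation: the VmaEntry lives right after the vma_area. */
	p = xzalloc(sizeof(*p) + sizeof(VmaEntry));
	if (p) {
		p->e = (VmaEntry *)(p + 1);
		vma_entry__init(p->e);
		p->vm_file_fd = -1;
		p->e->fd = -1;
	}
	return p;
}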
@@ -100,10 +100,10 @@ int parasite_fixup_vdso(struct parasite_ctl *ctl, pid_t pid,
  if (!vma_area_is(vma, VMA_AREA_REGULAR))
  continue;
- if ((vma->vma.prot & VDSO_PROT) != VDSO_PROT)
+ if ((vma->e->prot & VDSO_PROT) != VDSO_PROT)
  continue;
- if (vma->vma.start > TASK_SIZE)
+ if (vma->e->start > TASK_SIZE)
  continue;
  /*
@@ -111,7 +111,7 @@ int parasite_fixup_vdso(struct parasite_ctl *ctl, pid_t pid,
  * otherwise if task never called for vdso functions
  * page frame number won't be reported.
  */
- args->start = vma->vma.start;
+ args->start = vma->e->start;
  args->len = vma_area_len(vma);
  if (parasite_execute_daemon(PARASITE_CMD_CHECK_VDSO_MARK, ctl)) {
@@ -131,10 +131,10 @@ int parasite_fixup_vdso(struct parasite_ctl *ctl, pid_t pid,
  continue;
  }
- off = (vma->vma.start / PAGE_SIZE) * sizeof(u64);
+ off = (vma->e->start / PAGE_SIZE) * sizeof(u64);
  if (lseek(fd, off, SEEK_SET) != off) {
  pr_perror("Failed to seek address %lx\n",
- (long unsigned int)vma->vma.start);
+ (long unsigned int)vma->e->start);
  ret = -1;
  goto err;
  }
@@ -155,14 +155,14 @@ int parasite_fixup_vdso(struct parasite_ctl *ctl, pid_t pid,
  if (pfn == vdso_pfn) {
  if (!vma_area_is(vma, VMA_AREA_VDSO)) {
  pr_debug("vdso: Restore status by pfn at %lx\n",
- (long)vma->vma.start);
- vma->vma.status |= VMA_AREA_VDSO;
+ (long)vma->e->start);
+ vma->e->status |= VMA_AREA_VDSO;
  }
  } else {
  if (vma_area_is(vma, VMA_AREA_VDSO)) {
  pr_debug("vdso: Drop mishinted status at %lx\n",
- (long)vma->vma.start);
- vma->vma.status &= ~VMA_AREA_VDSO;
+ (long)vma->e->start);
+ vma->e->status &= ~VMA_AREA_VDSO;
  }
  }
  }
@@ -173,23 +173,23 @@ int parasite_fixup_vdso(struct parasite_ctl *ctl, pid_t pid,
  */
  if (marked) {
  pr_debug("vdso: Found marked at %lx (proxy at %lx)\n",
- (long)marked->vma.start, (long)proxy_addr);
+ (long)marked->e->start, (long)proxy_addr);
  /*
  * Don't forget to restore the proxy vdso status, since
  * it's being not recognized by the kernel as vdso.
  */
  list_for_each_entry(vma, &vma_area_list->h, list) {
- if (vma->vma.start == proxy_addr) {
- vma->vma.status |= VMA_AREA_REGULAR | VMA_AREA_VDSO;
+ if (vma->e->start == proxy_addr) {
+ vma->e->status |= VMA_AREA_REGULAR | VMA_AREA_VDSO;
  pr_debug("vdso: Restore proxy status at %lx\n",
- (long)vma->vma.start);
+ (long)vma->e->start);
  break;
  }
  }
  pr_debug("vdso: Droppping marked vdso at %lx\n",
- (long)vma->vma.start);
+ (long)vma->e->start);
  list_del(&marked->list);
  xfree(marked);
  }
...
@@ -84,15 +84,15 @@ bool privately_dump_vma(struct vma_area *vma)
  /*
  * The special areas are not dumped.
  */
- if (!(vma->vma.status & VMA_AREA_REGULAR))
+ if (!(vma->e->status & VMA_AREA_REGULAR))
  return false;
  /* No dumps for file-shared mappings */
- if (vma->vma.status & VMA_FILE_SHARED)
+ if (vma->e->status & VMA_FILE_SHARED)
  return false;
  /* No dumps for SYSV IPC mappings */
- if (vma->vma.status & VMA_AREA_SYSVIPC)
+ if (vma->e->status & VMA_AREA_SYSVIPC)
  return false;
  if (vma_area_is(vma, VMA_ANON_SHARED))
@@ -104,7 +104,7 @@ bool privately_dump_vma(struct vma_area *vma)
  return false;
  }
- if (vma->vma.end > TASK_SIZE)
+ if (vma->e->end > TASK_SIZE)
  return false;
  return true;
@@ -114,7 +114,7 @@ static void close_vma_file(struct vma_area *vma)
  {
  if (vma->vm_file_fd < 0)
  return;
- if (vma->vma.status & VMA_AREA_SOCKET)
+ if (vma->e->status & VMA_AREA_SOCKET)
  return;
  if (vma->file_borrowed)
  return;
@@ -347,7 +347,7 @@ static int dump_filemap(pid_t pid, struct vma_area *vma_area,
  const struct cr_fdset *fdset)
  {
  struct fd_parms p = FD_PARMS_INIT;
- VmaEntry *vma = &vma_area->vma;
+ VmaEntry *vma = vma_area->e;
  BUG_ON(!vma_area->st);
  p.stat = *vma_area->st;
@@ -417,7 +417,7 @@ static int dump_task_mm(pid_t pid, const struct proc_pid_stat *stat,
  fd = fdset_fd(fdset, CR_FD_VMAS);
  list_for_each_entry(vma_area, &vma_area_list->h, list) {
- VmaEntry *vma = &vma_area->vma;
+ VmaEntry *vma = vma_area->e;
  pr_info_vma(vma_area);
...
@@ -217,40 +217,40 @@ static int map_private_vma(pid_t pid, struct vma_area *vma, void *tgt_addr,
  struct vma_area *p = *pvma;
  if (vma_area_is(vma, VMA_FILE_PRIVATE)) {
- ret = get_filemap_fd(pid, &vma->vma);
+ ret = get_filemap_fd(pid, vma->e);
  if (ret < 0) {
  pr_err("Can't fixup VMA's fd\n");
  return -1;
  }
- vma->vma.fd = ret;
+ vma->e->fd = ret;
  }
- nr_pages = vma_entry_len(&vma->vma) / PAGE_SIZE;
+ nr_pages = vma_entry_len(vma->e) / PAGE_SIZE;
  vma->page_bitmap = xzalloc(BITS_TO_LONGS(nr_pages) * sizeof(long));
  if (vma->page_bitmap == NULL)
  return -1;
  list_for_each_entry_continue(p, pvma_list, list) {
- if (p->vma.start > vma->vma.start)
+ if (p->e->start > vma->e->start)
  break;
- if (!vma_priv(&p->vma))
+ if (!vma_priv(p->e))
  continue;
- if (p->vma.end != vma->vma.end ||
- p->vma.start != vma->vma.start)
+ if (p->e->end != vma->e->end ||
+ p->e->start != vma->e->start)
  continue;
  /* Check flags, which must be identical for both vma-s */
- if ((vma->vma.flags ^ p->vma.flags) & (MAP_GROWSDOWN | MAP_ANONYMOUS))
+ if ((vma->e->flags ^ p->e->flags) & (MAP_GROWSDOWN | MAP_ANONYMOUS))
  break;
- if (!(vma->vma.flags & MAP_ANONYMOUS) &&
- vma->vma.shmid != p->vma.shmid)
+ if (!(vma->e->flags & MAP_ANONYMOUS) &&
+ vma->e->shmid != p->e->shmid)
  break;
  pr_info("COW 0x%016"PRIx64"-0x%016"PRIx64" 0x%016"PRIx64" vma\n",
- vma->vma.start, vma->vma.end, vma->vma.pgoff);
+ vma->e->start, vma->e->end, vma->e->pgoff);
  paddr = decode_pointer(vma->premmaped_addr);
  }
@@ -260,25 +260,25 @@ static int map_private_vma(pid_t pid, struct vma_area *vma, void *tgt_addr,
  * A grow-down VMA has a guard page, which protect a VMA below it.
  * So one more page is mapped here to restore content of the first page
  */
- if (vma->vma.flags & MAP_GROWSDOWN) {
- vma->vma.start -= PAGE_SIZE;
+ if (vma->e->flags & MAP_GROWSDOWN) {
+ vma->e->start -= PAGE_SIZE;
  if (paddr)
  paddr -= PAGE_SIZE;
  }
- size = vma_entry_len(&vma->vma);
+ size = vma_entry_len(vma->e);
  if (paddr == NULL) {
  /*
  * The respective memory area was NOT found in the parent.
  * Map a new one.
  */
  pr_info("Map 0x%016"PRIx64"-0x%016"PRIx64" 0x%016"PRIx64" vma\n",
- vma->vma.start, vma->vma.end, vma->vma.pgoff);
+ vma->e->start, vma->e->end, vma->e->pgoff);
  addr = mmap(tgt_addr, size,
- vma->vma.prot | PROT_WRITE,
- vma->vma.flags | MAP_FIXED,
- vma->vma.fd, vma->vma.pgoff);
+ vma->e->prot | PROT_WRITE,
+ vma->e->flags | MAP_FIXED,
+ vma->e->fd, vma->e->pgoff);
  if (addr == MAP_FAILED) {
  pr_perror("Unable to map ANON_VMA");
@@ -302,15 +302,15 @@ static int map_private_vma(pid_t pid, struct vma_area *vma, void *tgt_addr,
  vma->premmaped_addr = (unsigned long) addr;
  pr_debug("\tpremap 0x%016"PRIx64"-0x%016"PRIx64" -> %016lx\n",
- vma->vma.start, vma->vma.end, (unsigned long)addr);
+ vma->e->start, vma->e->end, (unsigned long)addr);
- if (vma->vma.flags & MAP_GROWSDOWN) { /* Skip gurad page */
- vma->vma.start += PAGE_SIZE;
+ if (vma->e->flags & MAP_GROWSDOWN) { /* Skip gurad page */
+ vma->e->start += PAGE_SIZE;
  vma->premmaped_addr += PAGE_SIZE;
  }
  if (vma_area_is(vma, VMA_FILE_PRIVATE))
- close(vma->vma.fd);
+ close(vma->e->fd);
  return size;
  }
@@ -354,7 +354,7 @@ static int restore_priv_vma_content(pid_t pid)
  * The lookup is over *all* possible VMAs
  * read from image file.
  */
- while (va >= vma->vma.end) {
+ while (va >= vma->e->end) {
  if (vma->list.next == vmas)
  goto err_addr;
  vma = list_entry(vma->list.next, struct vma_area, list);
@@ -366,14 +366,14 @@ static int restore_priv_vma_content(pid_t pid)
  * there is no guarantee that the data from pagemap is
  * valid.
  */
- if (va < vma->vma.start)
+ if (va < vma->e->start)
  goto err_addr;
- else if (unlikely(!vma_priv(&vma->vma))) {
+ else if (unlikely(!vma_priv(vma->e))) {
  pr_err("Trying to restore page for non-private VMA\n");
  goto err_addr;
  }
- off = (va - vma->vma.start) / PAGE_SIZE;
+ off = (va - vma->e->start) / PAGE_SIZE;
  p = decode_pointer((off) * PAGE_SIZE +
  vma->premmaped_addr);
@@ -419,7 +419,7 @@ err_read:
  if (vma->ppage_bitmap == NULL)
  continue;
- size = vma_entry_len(&vma->vma) / PAGE_SIZE;
+ size = vma_entry_len(vma->e) / PAGE_SIZE;
  while (1) {
  /* Find all pages, which are not shared with this child */
  i = find_next_bit(vma->ppage_bitmap, size, i);
@@ -449,7 +449,7 @@ err_read:
  err_addr:
  pr_err("Page entry address %lx outside of VMA %lx-%lx\n",
- va, (long)vma->vma.start, (long)vma->vma.end);
+ va, (long)vma->e->start, (long)vma->e->end);
  return -1;
  }
@@ -493,14 +493,14 @@ static int prepare_mappings(int pid)
  pvma = list_entry(parent_vmas, struct vma_area, list);
  list_for_each_entry(vma, &vmas->h, list) {
- if (pstart > vma->vma.start) {
+ if (pstart > vma->e->start) {
  ret = -1;
  pr_err("VMA-s are not sorted in the image file\n");
  break;
  }
- pstart = vma->vma.start;
- if (!vma_priv(&vma->vma))
+ pstart = vma->e->start;
+ if (!vma_priv(vma->e))
  continue;
  ret = map_private_vma(pid, vma, addr, &pvma, parent_vmas);
@@ -535,10 +535,10 @@ static int unmap_guard_pages()
  struct list_head *vmas = &current->rst->vmas.h;
  list_for_each_entry(vma, vmas, list) {
- if (!vma_priv(&vma->vma))
+ if (!vma_priv(vma->e))
  continue;
- if (vma->vma.flags & MAP_GROWSDOWN) {
+ if (vma->e->flags & MAP_GROWSDOWN) {
  void *addr = decode_pointer(vma->premmaped_addr);
  if (munmap(addr - PAGE_SIZE, PAGE_SIZE)) {
@@ -562,17 +562,17 @@ static int open_vmas(int pid)
  continue;
  pr_info("Opening 0x%016"PRIx64"-0x%016"PRIx64" 0x%016"PRIx64" (%x) vma\n",
- vma->vma.start, vma->vma.end,
- vma->vma.pgoff, vma->vma.status);
+ vma->e->start, vma->e->end,
+ vma->e->pgoff, vma->e->status);
  if (vma_area_is(vma, VMA_AREA_SYSVIPC))
- ret = vma->vma.shmid;
+ ret = vma->e->shmid;
  else if (vma_area_is(vma, VMA_ANON_SHARED))
- ret = get_shmem_fd(pid, &vma->vma);
+ ret = get_shmem_fd(pid, vma->e);
  else if (vma_area_is(vma, VMA_FILE_SHARED))
- ret = get_filemap_fd(pid, &vma->vma);
+ ret = get_filemap_fd(pid, vma->e);
  else if (vma_area_is(vma, VMA_AREA_SOCKET))
- ret = get_socket_fd(pid, &vma->vma);
+ ret = get_socket_fd(pid, vma->e);
  else
  continue;
@@ -582,7 +582,7 @@ static int open_vmas(int pid)
  }
  pr_info("\t`- setting %d as mapping fd\n", ret);
- vma->vma.fd = ret;
+ vma->e->fd = ret;
  }
  return ret < 0 ? -1 : 0;
@@ -1619,36 +1619,38 @@ static long restorer_get_vma_hint(pid_t pid, struct list_head *tgt_vma_list,
  struct vma_area *t_vma, *s_vma;
  long prev_vma_end = 0;
  struct vma_area end_vma;
+ VmaEntry end_e;
- end_vma.vma.start = end_vma.vma.end = TASK_SIZE;
+ end_vma.e = &end_e;
+ end_e.start = end_e.end = TASK_SIZE;
  prev_vma_end = PAGE_SIZE * 0x10; /* CONFIG_LSM_MMAP_MIN_ADDR=65536 */
  s_vma = list_first_entry(self_vma_list, struct vma_area, list);
  t_vma = list_first_entry(tgt_vma_list, struct vma_area, list);
  while (1) {
- if (prev_vma_end + vma_len > s_vma->vma.start) {
+ if (prev_vma_end + vma_len > s_vma->e->start) {
  if (s_vma->list.next == self_vma_list) {
  s_vma = &end_vma;
  continue;
  }
  if (s_vma == &end_vma)
  break;
- if (prev_vma_end < s_vma->vma.end)
- prev_vma_end = s_vma->vma.end;
+ if (prev_vma_end < s_vma->e->end)
+ prev_vma_end = s_vma->e->end;
  s_vma = list_entry(s_vma->list.next, struct vma_area, list);
  continue;
  }
- if (prev_vma_end + vma_len > t_vma->vma.start) {
+ if (prev_vma_end + vma_len > t_vma->e->start) {
  if (t_vma->list.next == tgt_vma_list) {
  t_vma = &end_vma;
  continue;
  }
  if (t_vma == &end_vma)
  break;
- if (prev_vma_end < t_vma->vma.end)
- prev_vma_end = t_vma->vma.end;
+ if (prev_vma_end < t_vma->e->end)
+ prev_vma_end = t_vma->e->end;
  t_vma = list_entry(t_vma->list.next, struct vma_area, list);
  continue;
  }
@@ -2196,9 +2198,9 @@ static int sigreturn_restore(pid_t pid, CoreEntry *core)
  if (!vme)
  goto err_nv;
- *vme = vma->vma;
- if (vma_priv(&vma->vma))
+ *vme = *vma->e;
+ if (vma_priv(vma->e))
  vma_premmaped_start(vme) = vma->premmaped_addr;
  }
...
@@ -23,7 +23,7 @@ static inline void vm_area_list_init(struct vm_area_list *vml)
  struct vma_area {
  struct list_head list;
- VmaEntry vma;
+ VmaEntry *e;
  union {
  int vm_file_fd;
@@ -44,8 +44,8 @@ extern int collect_mappings(pid_t pid, struct vm_area_list *vma_area_list);
  extern void free_mappings(struct vm_area_list *vma_area_list);
  extern bool privately_dump_vma(struct vma_area *vma);
- #define vma_area_is(vma_area, s) vma_entry_is(&((vma_area)->vma), s)
- #define vma_area_len(vma_area) vma_entry_len(&((vma_area)->vma))
+ #define vma_area_is(vma_area, s) vma_entry_is((vma_area)->e, s)
+ #define vma_area_len(vma_area) vma_entry_len((vma_area)->e)
  #define vma_entry_is(vma, s) (((vma)->status & (s)) == (s))
  #define vma_entry_len(vma) ((vma)->end - (vma)->start)
@@ -59,8 +59,8 @@ extern bool privately_dump_vma(struct vma_area *vma);
  static inline int in_vma_area(struct vma_area *vma, unsigned long addr)
  {
- return addr >= (unsigned long)vma->vma.start &&
- addr < (unsigned long)vma->vma.end;
+ return addr >= (unsigned long)vma->e->start &&
+ addr < (unsigned long)vma->e->end;
  }
  #endif /* __CR_VMA_H__ */
@@ -111,7 +111,7 @@ static int generate_iovs(struct vma_area *vma, int pagemap, struct page_pipe *pp
  u64 from, len;
  nr_to_scan = vma_area_len(vma) / PAGE_SIZE;
- from = vma->vma.start / PAGE_SIZE * sizeof(*map);
+ from = vma->e->start / PAGE_SIZE * sizeof(*map);
  len = nr_to_scan * sizeof(*map);
  if (pread(pagemap, map, len, from) != len) {
  pr_perror("Can't read pagemap file");
@@ -122,10 +122,10 @@ static int generate_iovs(struct vma_area *vma, int pagemap, struct page_pipe *pp
  unsigned long vaddr;
  int ret;
- if (!should_dump_page(&vma->vma, map[pfn]))
+ if (!should_dump_page(vma->e, map[pfn]))
  continue;
- vaddr = vma->vma.start + pfn * PAGE_SIZE;
+ vaddr = vma->e->start + pfn * PAGE_SIZE;
  /*
  * If we're doing incremental dump (parent images
@@ -169,12 +169,12 @@ static struct parasite_dump_pages_args *prep_dump_pages_args(struct parasite_ctl
  list_for_each_entry(vma, &vma_area_list->h, list) {
  if (!privately_dump_vma(vma))
  continue;
- if (vma->vma.prot & PROT_READ)
+ if (vma->e->prot & PROT_READ)
  continue;
- p_vma->start = vma->vma.start;
+ p_vma->start = vma->e->start;
  p_vma->len = vma_area_len(vma);
- p_vma->prot = vma->vma.prot;
+ p_vma->prot = vma->e->prot;
  args->nr_vmas++;
  p_vma++;
@@ -371,23 +371,23 @@ int prepare_mm_pid(struct pstree_item *i)
  }
  ri->vmas.nr++;
- vma->vma = *vi;
+ *vma->e = *vi;
  list_add_tail(&vma->list, &ri->vmas.h);
  vma_entry__free_unpacked(vi, NULL);
- if (vma_priv(&vma->vma)) {
+ if (vma_priv(vma->e)) {
  ri->vmas.priv_size += vma_area_len(vma);
- if (vma->vma.flags & MAP_GROWSDOWN)
+ if (vma->e->flags & MAP_GROWSDOWN)
  ri->vmas.priv_size += PAGE_SIZE;
  }
- pr_info("vma 0x%"PRIx64" 0x%"PRIx64"\n", vma->vma.start, vma->vma.end);
+ pr_info("vma 0x%"PRIx64" 0x%"PRIx64"\n", vma->e->start, vma->e->end);
  if (!vma_area_is(vma, VMA_ANON_SHARED) ||
  vma_area_is(vma, VMA_AREA_SYSVIPC))
  continue;
- ret = collect_shmem(pid, &vma->vma);
+ ret = collect_shmem(pid, vma->e);
  if (ret)
  break;
  }
...
@@ -46,9 +46,9 @@ static int can_run_syscall(unsigned long ip, unsigned long start, unsigned long
  static int syscall_fits_vma_area(struct vma_area *vma_area)
  {
- return can_run_syscall((unsigned long)vma_area->vma.start,
- (unsigned long)vma_area->vma.start,
- (unsigned long)vma_area->vma.end);
+ return can_run_syscall((unsigned long)vma_area->e->start,
+ (unsigned long)vma_area->e->start,
+ (unsigned long)vma_area->e->end);
  }
  static struct vma_area *get_vma_by_ip(struct list_head *vma_area_list, unsigned long ip)
@@ -56,9 +56,9 @@ static struct vma_area *get_vma_by_ip(struct list_head *vma_area_list, unsigned
  struct vma_area *vma_area;
  list_for_each_entry(vma_area, vma_area_list, list) {
- if (vma_area->vma.start >= TASK_SIZE)
+ if (vma_area->e->start >= TASK_SIZE)
  continue;
- if (!(vma_area->vma.prot & PROT_EXEC))
+ if (!(vma_area->e->prot & PROT_EXEC))
  continue;
  if (syscall_fits_vma_area(vma_area))
  return vma_area;
@@ -1048,7 +1048,7 @@ struct parasite_ctl *parasite_prep_ctl(pid_t pid, struct vm_area_list *vma_area_
  goto err;
  }
- ctl->syscall_ip = vma_area->vma.start;
+ ctl->syscall_ip = vma_area->e->start;
  return ctl;
...
@@ -105,29 +105,29 @@ static int parse_vmflags(char *buf, struct vma_area *vma_area)
  do {
  /* mmap() block */
  if (_vmflag_match(tok, "gd"))
- vma_area->vma.flags |= MAP_GROWSDOWN;
+ vma_area->e->flags |= MAP_GROWSDOWN;
  else if (_vmflag_match(tok, "lo"))
- vma_area->vma.flags |= MAP_LOCKED;
+ vma_area->e->flags |= MAP_LOCKED;
  else if (_vmflag_match(tok, "nr"))
- vma_area->vma.flags |= MAP_NORESERVE;
+ vma_area->e->flags |= MAP_NORESERVE;
  else if (_vmflag_match(tok, "ht"))
- vma_area->vma.flags |= MAP_HUGETLB;
+ vma_area->e->flags |= MAP_HUGETLB;
  /* madvise() block */
  if (_vmflag_match(tok, "sr"))
- vma_area->vma.madv |= (1ul << MADV_SEQUENTIAL);
+ vma_area->e->madv |= (1ul << MADV_SEQUENTIAL);
  else if (_vmflag_match(tok, "rr"))
- vma_area->vma.madv |= (1ul << MADV_RANDOM);
+ vma_area->e->madv |= (1ul << MADV_RANDOM);
  else if (_vmflag_match(tok, "dc"))
- vma_area->vma.madv |= (1ul << MADV_DONTFORK);
+ vma_area->e->madv |= (1ul << MADV_DONTFORK);
  else if (_vmflag_match(tok, "dd"))
- vma_area->vma.madv |= (1ul << MADV_DONTDUMP);
+ vma_area->e->madv |= (1ul << MADV_DONTDUMP);
  else if (_vmflag_match(tok, "mg"))
- vma_area->vma.madv |= (1ul << MADV_MERGEABLE);
+ vma_area->e->madv |= (1ul << MADV_MERGEABLE);
  else if (_vmflag_match(tok, "hg"))
- vma_area->vma.madv |= (1ul << MADV_HUGEPAGE);
+ vma_area->e->madv |= (1ul << MADV_HUGEPAGE);
  else if (_vmflag_match(tok, "nh"))
- vma_area->vma.madv |= (1ul << MADV_NOHUGEPAGE);
+ vma_area->e->madv |= (1ul << MADV_NOHUGEPAGE);
  /*
  * Anything else is just ignored.
@@ -136,8 +136,8 @@ static int parse_vmflags(char *buf, struct vma_area *vma_area)
  #undef _vmflag_match
- if (vma_area->vma.madv)
- vma_area->vma.has_madv = true;
+ if (vma_area->e->madv)
+ vma_area->e->has_madv = true;
  return 0;
  }
@@ -173,17 +173,17 @@ static int vma_get_mapfile(struct vma_area *vma, DIR *mfd,
  struct vma_area *prev = prev_vfi->vma;
  pr_debug("vma %lx borrows vfi from previous %lx\n",
- vma->vma.start, prev->vma.start);
+ vma->e->start, prev->e->start);
  vma->vm_file_fd = prev->vm_file_fd;
- if (prev->vma.status & VMA_AREA_SOCKET)
- vma->vma.status |= VMA_AREA_SOCKET | VMA_AREA_REGULAR;
+ if (prev->e->status & VMA_AREA_SOCKET)
+ vma->e->status |= VMA_AREA_SOCKET | VMA_AREA_REGULAR;
  vma->file_borrowed = true;
  return 0;
  }
  /* Figure out if it's file mapping */
- snprintf(path, sizeof(path), "%lx-%lx", vma->vma.start, vma->vma.end);
+ snprintf(path, sizeof(path), "%lx-%lx", vma->e->start, vma->e->end);
  /*
  * Note that we "open" it in dumper process space
@@ -202,8 +202,8 @@ static int vma_get_mapfile(struct vma_area *vma, DIR *mfd,
  return -1;
  pr_info("Found socket %"PRIu64" mapping @%lx\n",
- buf.st_ino, vma->vma.start);
- vma->vma.status |= VMA_AREA_SOCKET | VMA_AREA_REGULAR;
+ buf.st_ino, vma->e->start);
+ vma->e->status |= VMA_AREA_SOCKET | VMA_AREA_REGULAR;
  vma->vm_socket_id = buf.st_ino;
  } else if (errno != ENOENT)
  return -1;
@@ -251,7 +251,7 @@ int parse_smaps(pid_t pid, struct vm_area_list *vma_area_list, bool use_map_file
  if (!strncmp(buf, "Nonlinear", 9)) {
  BUG_ON(!vma_area);
  pr_err("Nonlinear mapping found %016"PRIx64"-%016"PRIx64"\n",
- vma_area->vma.start, vma_area->vma.end);
+ vma_area->e->start, vma_area->e->end);
  /*
  * VMA is already on list and will be
  * freed later as list get destroyed.
@@ -269,9 +269,9 @@ int parse_smaps(pid_t pid, struct vm_area_list *vma_area_list, bool use_map_file
  if (vma_area) {
  /* If we've split the stack vma, only the lowest one has the guard page. */
- if ((vma_area->vma.flags & MAP_GROWSDOWN) && !prev_growsdown)
- vma_area->vma.start -= PAGE_SIZE; /* Guard page */
- prev_growsdown = (bool)(vma_area->vma.flags & MAP_GROWSDOWN);
+ if ((vma_area->e->flags & MAP_GROWSDOWN) && !prev_growsdown)
+ vma_area->e->start -= PAGE_SIZE; /* Guard page */
+ prev_growsdown = (bool)(vma_area->e->flags & MAP_GROWSDOWN);
  list_add_tail(&vma_area->list, &vma_area_list->h);
  vma_area_list->nr++;
@@ -303,42 +303,42 @@ int parse_smaps(pid_t pid, struct vm_area_list *vma_area_list, bool use_map_file
  goto err;
  }
- vma_area->vma.start = start;
- vma_area->vma.end = end;
- vma_area->vma.pgoff = pgoff;
- vma_area->vma.prot = PROT_NONE;
+ vma_area->e->start = start;
+ vma_area->e->end = end;
+ vma_area->e->pgoff = pgoff;
+ vma_area->e->prot = PROT_NONE;
  if (vma_get_mapfile(vma_area, map_files_dir, &vfi, &prev_vfi))
  goto err_bogus_mapfile;
  if (r == 'r')
- vma_area->vma.prot |= PROT_READ;
+ vma_area->e->prot |= PROT_READ;
  if (w == 'w')
- vma_area->vma.prot |= PROT_WRITE;
+ vma_area->e->prot |= PROT_WRITE;
  if (x == 'x')
- vma_area->vma.prot |= PROT_EXEC;
+ vma_area->e->prot |= PROT_EXEC;
  if (s == 's')
- vma_area->vma.flags = MAP_SHARED;
+ vma_area->e->flags = MAP_SHARED;
  else if (s == 'p')
- vma_area->vma.flags = MAP_PRIVATE;
+ vma_area->e->flags = MAP_PRIVATE;
  else {
  pr_err("Unexpected VMA met (%c)\n", s);
  goto err;
  }
- if (vma_area->vma.status != 0) {
+ if (vma_area->e->status != 0) {
  continue;
  } else if (strstr(buf, "[vsyscall]") || strstr(buf, "[vectors]")) {
- vma_area->vma.status |= VMA_AREA_VSYSCALL;
+ vma_area->e->status |= VMA_AREA_VSYSCALL;
  } else if (strstr(buf, "[vdso]")) {
- vma_area->vma.status |= VMA_AREA_REGULAR;
- if ((vma_area->vma.prot & VDSO_PROT) == VDSO_PROT)
- vma_area->vma.status |= VMA_AREA_VDSO;
+ vma_area->e->status |= VMA_AREA_REGULAR;
+ if ((vma_area->e->prot & VDSO_PROT) == VDSO_PROT)
+ vma_area->e->status |= VMA_AREA_VDSO;
  } else if (strstr(buf, "[heap]")) {
- vma_area->vma.status |= VMA_AREA_REGULAR | VMA_AREA_HEAP;
+ vma_area->e->status |= VMA_AREA_REGULAR | VMA_AREA_HEAP;
  } else {
- vma_area->vma.status = VMA_AREA_REGULAR;
+ vma_area->e->status = VMA_AREA_REGULAR;
  }
  /*
@@ -353,9 +353,9 @@ int parse_smaps(pid_t pid, struct vm_area_list *vma_area_list, bool use_map_file
  * Status is copied as-is as it should be zero here,
  * and have full match with the previous.
  */
- vma_area->vma.flags |= (prev->vma.flags & MAP_ANONYMOUS);
- vma_area->vma.status = prev->vma.status;
- vma_area->vma.shmid = prev->vma.shmid;
+ vma_area->e->flags |= (prev->e->flags & MAP_ANONYMOUS);
+ vma_area->e->status = prev->e->status;
+ vma_area->e->shmid = prev->e->shmid;
  vma_area->st = prev->st;
  } else if (vma_area->vm_file_fd >= 0) {
  struct stat *st_buf;
@@ -380,33 +380,33 @@ int parse_smaps(pid_t pid, struct vm_area_list *vma_area_list, bool use_map_file
  * otherwise it's some file mapping.
  */
  if (is_anon_shmem_map(st_buf->st_dev)) {
- if (!(vma_area->vma.flags & MAP_SHARED))
+ if (!(vma_area->e->flags & MAP_SHARED))
  goto err_bogus_mapping;
- vma_area->vma.flags |= MAP_ANONYMOUS;
- vma_area->vma.status |= VMA_ANON_SHARED;
- vma_area->vma.shmid = st_buf->st_ino;
+ vma_area->e->flags |= MAP_ANONYMOUS;
+ vma_area->e->status |= VMA_ANON_SHARED;
+ vma_area->e->shmid = st_buf->st_ino;
  if (!strcmp(file_path, "/SYSV")) {
  pr_info("path: %s\n", file_path);
- vma_area->vma.status |= VMA_AREA_SYSVIPC;
+ vma_area->e->status |= VMA_AREA_SYSVIPC;
  }
  } else {
- if (vma_area->vma.flags & MAP_PRIVATE)
- vma_area->vma.status |= VMA_FILE_PRIVATE;
+ if (vma_area->e->flags & MAP_PRIVATE)
+ vma_area->e->status |= VMA_FILE_PRIVATE;
  else
- vma_area->vma.status |= VMA_FILE_SHARED;
+ vma_area->e->status |= VMA_FILE_SHARED;
  }
  } else {
  /*
  * No file but mapping -- anonymous one.
  */
- if (vma_area->vma.flags & MAP_SHARED) {
- vma_area->vma.status |= VMA_ANON_SHARED;
- vma_area->vma.shmid = vfi.ino;
+ if (vma_area->e->flags & MAP_SHARED) {
+ vma_area->e->status |= VMA_ANON_SHARED;
+ vma_area->e->shmid = vfi.ino;
  } else {
- vma_area->vma.status |= VMA_ANON_PRIVATE;
+ vma_area->e->status |= VMA_ANON_PRIVATE;
  }
- vma_area->vma.flags |= MAP_ANONYMOUS;
+ vma_area->e->flags |= MAP_ANONYMOUS;
  }
  }
@@ -425,8 +425,8 @@ err:
  err_bogus_mapping:
  pr_err("Bogus mapping 0x%"PRIx64"-0x%"PRIx64" (flags: %#x vm_file_fd: %d)\n",
- vma_area->vma.start, vma_area->vma.end,
- vma_area->vma.flags, vma_area->vm_file_fd);
+ vma_area->e->start, vma_area->e->end,
+ vma_area->e->flags, vma_area->vm_file_fd);
  goto err;
  err_bogus_mapfile:
...
@@ -221,8 +221,8 @@ int dump_socket_map(struct vma_area *vma)
  return -1;
  }
- pr_info("Dumping socket map %x -> %"PRIx64"\n", sd->file_id, vma->vma.start);
- vma->vma.shmid = sd->file_id;
+ pr_info("Dumping socket map %x -> %"PRIx64"\n", sd->file_id, vma->e->start);
+ vma->e->shmid = sd->file_id;
  return 0;
  }
...
@@ -51,7 +51,7 @@ static void vma_opt_str(const struct vma_area *v, char *opt)
  int p = 0;
  #define opt2s(_o, _s) do { \
- if (v->vma.status & _o) \
+ if (v->e->status & _o) \
  p += sprintf(opt + p, _s " "); \
  } while (0)
@@ -83,12 +83,12 @@ void pr_vma(unsigned int loglevel, const struct vma_area *vma_area)
  vma_opt_str(vma_area, opt);
  print_on_level(loglevel, "%#"PRIx64"-%#"PRIx64" (%"PRIi64"K) prot %#x flags %#x off %#"PRIx64" "
  "%s shmid: %#"PRIx64"\n",
- vma_area->vma.start, vma_area->vma.end,
+ vma_area->e->start, vma_area->e->end,
  KBYTES(vma_area_len(vma_area)),
- vma_area->vma.prot,
- vma_area->vma.flags,
- vma_area->vma.pgoff,
- opt, vma_area->vma.shmid);
+ vma_area->e->prot,
+ vma_area->e->flags,
+ vma_area->e->pgoff,
+ opt, vma_area->e->shmid);
  }
  int close_safe(int *fd)
@@ -651,11 +651,12 @@ struct vma_area *alloc_vma_area(void)
  {
  struct vma_area *p;
- p = xzalloc(sizeof(*p));
+ p = xzalloc(sizeof(*p) + sizeof(VmaEntry));
  if (p) {
- vma_entry__init(&p->vma);
+ p->e = (VmaEntry *)(p + 1);
+ vma_entry__init(p->e);
  p->vm_file_fd = -1;
- p->vma.fd = -1;
+ p->e->fd = -1;
  }
  return p;
...