Diffstat (limited to 'mm/vma.h')
-rw-r--r--  mm/vma.h |  32 +++++++++++++++++++-------------
1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/mm/vma.h b/mm/vma.h
index 819f994cf727..d58068c0ff2e 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -42,8 +42,7 @@ struct vma_munmap_struct {
int vma_count; /* Number of vmas that will be removed */
bool unlock; /* Unlock after the munmap */
bool clear_ptes; /* If there are outstanding PTE to be cleared */
- bool closed_vm_ops; /* call_mmap() was encountered, so vmas may be closed */
- /* 1 byte hole */
+ /* 2 byte hole */
unsigned long nr_pages; /* Number of pages being removed */
unsigned long locked_vm; /* Number of locked pages */
unsigned long nr_accounted; /* Number of VM_ACCOUNT pages */
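
Dropping the one-byte closed_vm_ops bool does not shrink the struct: the freed byte simply joins the existing alignment padding, turning the old 1 byte hole before nr_pages into the 2 byte hole noted above. A minimal userspace sketch of the layout, assuming a 64-bit (LP64) build and using reduced stand-in structs whose field names mirror the hunk:

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

/* Reduced stand-ins: only the fields relevant to the padding change. */
struct before {
	int vma_count;       /* 4 bytes */
	bool unlock;         /* 1 byte  */
	bool clear_ptes;     /* 1 byte  */
	bool closed_vm_ops;  /* 1 byte  */
	/* 1 byte hole: nr_pages needs 8-byte alignment */
	unsigned long nr_pages;
};

struct after {
	int vma_count;
	bool unlock;
	bool clear_ptes;
	/* 2 byte hole */
	unsigned long nr_pages;
};

int main(void)
{
	/* Both lines print "8 16": same member offset, same total size. */
	printf("%zu %zu\n", offsetof(struct before, nr_pages), sizeof(struct before));
	printf("%zu %zu\n", offsetof(struct after, nr_pages), sizeof(struct after));
	return 0;
}

The change is thus a bookkeeping fix to the hole comment, not a size reduction.
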
@@ -59,6 +58,17 @@ enum vma_merge_state {
VMA_MERGE_SUCCESS,
};
+enum vma_merge_flags {
+	VMG_FLAG_DEFAULT = 0,
+	/*
+	 * If we can expand, simply do so. We know there is nothing to merge to
+	 * the right. Does not reset state upon failure to merge. The VMA
+	 * iterator is assumed to be positioned at the previous VMA, rather than
+	 * at the gap.
+	 */
+ VMG_FLAG_JUST_EXPAND = 1 << 0,
+};
+
/* Represents a VMA merge operation. */
struct vma_merge_struct {
struct mm_struct *mm;
@@ -75,6 +85,7 @@ struct vma_merge_struct {
struct mempolicy *policy;
struct vm_userfaultfd_ctx uffd_ctx;
struct anon_vma_name *anon_name;
+ enum vma_merge_flags merge_flags;
enum vma_merge_state state;
};
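
The new VMG_FLAG_JUST_EXPAND flag lets a caller that already knows nothing can merge on the right skip those checks and simply expand the previous VMA. A sketch of how a caller might opt in; vma_merge_new_range() is assumed here as the entry point that honours merge_flags, and the locals (mm, vmi, prev, addr, len, vm_flags, pgoff) are hypothetical:

/*
 * Illustrative only: the call site below is hypothetical; the field
 * names come from the struct above.
 */
struct vma_merge_struct vmg = {
	.mm = mm,
	.vmi = vmi,
	.prev = prev,		/* iterator sits at prev, not at the gap */
	.start = addr,
	.end = addr + len,
	.flags = vm_flags,
	.pgoff = pgoff,
	.state = VMA_MERGE_START,
	.merge_flags = VMG_FLAG_JUST_EXPAND,	/* never try a right merge */
};
struct vm_area_struct *vma = vma_merge_new_range(&vmg);

if (!vma) {
	/*
	 * Per the enum comment, failure does not reset vmg state;
	 * the caller falls back to allocating a fresh VMA.
	 */
}
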
@@ -99,6 +110,7 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
.flags = flags_, \
.pgoff = pgoff_, \
.state = VMA_MERGE_START, \
+ .merge_flags = VMG_FLAG_DEFAULT, \
}
#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_) \
@@ -118,6 +130,7 @@ static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
.uffd_ctx = vma_->vm_userfaultfd_ctx, \
.anon_name = anon_vma_name(vma_), \
.state = VMA_MERGE_START, \
+ .merge_flags = VMG_FLAG_DEFAULT, \
}
#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
@@ -184,7 +197,6 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
vms->unmap_start = FIRST_USER_ADDRESS;
vms->unmap_end = USER_PGTABLES_CEILING;
vms->clear_ptes = false;
- vms->closed_vm_ops = false;
}
#endif
@@ -241,15 +253,9 @@ static inline void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
* failure method of leaving a gap where the MAP_FIXED mapping failed.
*/
mas_set_range(mas, vms->start, vms->end - 1);
- if (unlikely(mas_store_gfp(mas, NULL, GFP_KERNEL))) {
- pr_warn_once("%s: (%d) Unable to abort munmap() operation\n",
- current->comm, current->pid);
- /* Leaving vmas detached and in-tree may hamper recovery */
- reattach_vmas(mas_detach);
- } else {
- /* Clean up the insertion of the unfortunate gap */
- vms_complete_munmap_vmas(vms, mas_detach);
- }
+ mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
+ /* Clean up the insertion of the unfortunate gap */
+ vms_complete_munmap_vmas(vms, mas_detach);
}
int
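
In the abort path above, switching the store to GFP_KERNEL|__GFP_NOFAIL removes the only failure mode: a nofail allocation loops inside the allocator rather than returning NULL, so mas_store_gfp() cannot fail, and the old fallback of reattaching the VMAs (needed because leaving them detached yet still in the tree could hamper recovery) goes away. A reduced sketch of the pattern, with mm, start and end standing in for the vms fields:

/* Sketch only: wipe [start, end - 1] in the VMA maple tree. */
MA_STATE(mas, &mm->mm_mt, start, end - 1);

/*
 * __GFP_NOFAIL guarantees forward progress at the cost of a possible
 * stall under extreme memory pressure, the lesser evil in an abort
 * path that has no way to unwind.
 */
mas_store_gfp(&mas, NULL, GFP_KERNEL | __GFP_NOFAIL);

Storing NULL across the range deliberately leaves the gap described in the comment above, matching the documented MAP_FIXED failure semantics.
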
@@ -261,7 +267,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
-void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed);
+void remove_vma(struct vm_area_struct *vma, bool unreachable);
void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct vm_area_struct *next);