Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- | mm/mmap.c | 161
1 file changed, 74 insertions, 87 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 7e8c3e8ae75f..6756b8bb0033 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -90,12 +90,6 @@ static void unmap_region(struct mm_struct *mm,
  * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
  *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
- *
- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
- * MAP_PRIVATE:
- *								r: (no) no
- *								w: (no) no
- *								x: (yes) yes
  */
 pgprot_t protection_map[16] __ro_after_init = {
 	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
@@ -201,6 +195,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	bool downgraded = false;
 	LIST_HEAD(uf);
 
+	brk = untagged_addr(brk);
+
 	if (down_write_killable(&mm->mmap_sem))
 		return -EINTR;
 
@@ -289,9 +285,9 @@ out:
 	return retval;
 }
 
-static long vma_compute_subtree_gap(struct vm_area_struct *vma)
+static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
 {
-	unsigned long max, prev_end, subtree_gap;
+	unsigned long gap, prev_end;
 
 	/*
 	 * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
@@ -299,14 +295,21 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 	 * an unmapped area; whereas when expanding we only require one.
 	 * That's a little inconsistent, but keeps the code here simpler.
 	 */
-	max = vm_start_gap(vma);
+	gap = vm_start_gap(vma);
 	if (vma->vm_prev) {
 		prev_end = vm_end_gap(vma->vm_prev);
-		if (max > prev_end)
-			max -= prev_end;
+		if (gap > prev_end)
+			gap -= prev_end;
 		else
-			max = 0;
+			gap = 0;
 	}
+	return gap;
+}
+
+#ifdef CONFIG_DEBUG_VM_RB
+static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
+{
+	unsigned long max = vma_compute_gap(vma), subtree_gap;
 	if (vma->vm_rb.rb_left) {
 		subtree_gap = rb_entry(vma->vm_rb.rb_left,
 				struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -322,7 +325,6 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 	return max;
 }
 
-#ifdef CONFIG_DEBUG_VM_RB
 static int browse_rb(struct mm_struct *mm)
 {
 	struct rb_root *root = &mm->mm_rb;
@@ -428,8 +430,9 @@ static void validate_mm(struct mm_struct *mm)
 #define validate_mm(mm) do { } while (0)
 #endif
 
-RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
-		     unsigned long, rb_subtree_gap, vma_compute_subtree_gap)
+RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
+			 struct vm_area_struct, vm_rb,
+			 unsigned long, rb_subtree_gap, vma_compute_gap)
 
 /*
  * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
@@ -439,8 +442,8 @@ RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
 static void vma_gap_update(struct vm_area_struct *vma)
 {
 	/*
-	 * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
-	 * function that does exactly what we want.
+	 * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created
+	 * a callback function that does exactly what we want.
 	 */
 	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}
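
The conversion above replaces RB_DECLARE_CALLBACKS() plus a hand-rolled vma_compute_subtree_gap() with RB_DECLARE_CALLBACKS_MAX() plus a scalar vma_compute_gap(): rb_subtree_gap is nothing more than a subtree maximum, so the macro can generate the subtree bookkeeping itself. For readers unfamiliar with the macro, the following is a minimal sketch of the propagate callback it generates (the names are made up for illustration; this is not kernel code):

struct demo_node {
	struct demo_node *left, *right, *parent;
	unsigned long gap;		/* this node's own value */
	unsigned long subtree_max;	/* cached max over this subtree */
};

static unsigned long demo_compute_max(struct demo_node *n)
{
	unsigned long max = n->gap;

	if (n->left && n->left->subtree_max > max)
		max = n->left->subtree_max;
	if (n->right && n->right->subtree_max > max)
		max = n->right->subtree_max;
	return max;
}

/* Roughly what vma_gap_callbacks_propagate(&vma->vm_rb, NULL) does. */
static void demo_propagate(struct demo_node *n, struct demo_node *stop)
{
	while (n != stop) {
		unsigned long max = demo_compute_max(n);

		if (n->subtree_max == max)
			break;	/* ancestors are already consistent */
		n->subtree_max = max;
		n = n->parent;
	}
}

The early break is what keeps vma_gap_update() cheap: after a gap changes, stale cached maxima are rewritten upward only until the first ancestor whose cached value is already correct.
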
@@ -632,7 +635,7 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct vm_area_struct *prev, struct rb_node **rb_link,
 	struct rb_node *rb_parent)
 {
-	__vma_link_list(mm, vma, prev, rb_parent);
+	__vma_link_list(mm, vma, prev);
 	__vma_link_rb(mm, vma, rb_link, rb_parent);
 }
 
@@ -675,37 +678,14 @@ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 
 static __always_inline void __vma_unlink_common(struct mm_struct *mm,
 						struct vm_area_struct *vma,
-						struct vm_area_struct *prev,
-						bool has_prev,
 						struct vm_area_struct *ignore)
 {
-	struct vm_area_struct *next;
-
 	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
-	next = vma->vm_next;
-	if (has_prev)
-		prev->vm_next = next;
-	else {
-		prev = vma->vm_prev;
-		if (prev)
-			prev->vm_next = next;
-		else
-			mm->mmap = next;
-	}
-	if (next)
-		next->vm_prev = prev;
-
+	__vma_unlink_list(mm, vma);
 	/* Kill the cache */
 	vmacache_invalidate(mm);
 }
 
-static inline void __vma_unlink_prev(struct mm_struct *mm,
-				     struct vm_area_struct *vma,
-				     struct vm_area_struct *prev)
-{
-	__vma_unlink_common(mm, vma, prev, true, vma);
-}
-
 /*
  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
  * is already present in an i_mmap tree without adjusting the tree.
@@ -760,8 +740,6 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
 				remove_next = 1 + (end > next->vm_end);
 				VM_WARN_ON(remove_next == 2 &&
 					   end != next->vm_next->vm_end);
-				VM_WARN_ON(remove_next == 1 &&
-					   end != next->vm_end);
 				/* trim end to next, for case 6 first pass */
 				end = next->vm_end;
 			}
@@ -880,7 +858,7 @@ again:
 		 * us to remove next before dropping the locks.
 		 */
 		if (remove_next != 3)
-			__vma_unlink_prev(mm, next, vma);
+			__vma_unlink_common(mm, next, next);
 		else
 			/*
 			 * vma is not before next if they've been
@@ -891,7 +869,7 @@ again:
 			 * "next" (which is stored in post-swap()
 			 * "vma").
 			 */
-			__vma_unlink_common(mm, next, NULL, false, vma);
+			__vma_unlink_common(mm, next, vma);
 		if (file)
 			__remove_shared_vm_struct(next, file, mapping);
 	} else if (insert) {
@@ -1107,15 +1085,18 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
  * the area passed down from mprotect_fixup, never extending beyond one
  * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
  *
- *     AAAA             AAAA                AAAA             AAAA
- *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
- *    cannot merge    might become    might become    might become
- *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
- *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
- *    mremap move:                                    PPPPXXXXXXXX 8
- *        AAAA
- *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
- *    might become    case 1 below    case 2 below    case 3 below
+ *     AAAA             AAAA                   AAAA
+ *    PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPNNNNNN
+ *    cannot merge    might become       might become
+ *                    PPNNNNNNNNNN       PPPPPPPPPPNN
+ *    mmap, brk or    case 4 below       case 5 below
+ *    mremap move:
+ *                        AAAA               AAAA
+ *                    PPPP    NNNN       PPPPNNNNXXXX
+ *                    might become       might become
+ *                    PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
+ *                    PPPPPPPPNNNN 2 or  PPPPPPPPXXXX 7 or
+ *                    PPPPNNNNNNNN 3     PPPPXXXXXXXX 8
 *
 * It is important for case 8 that the vma NNNN overlapping the
 * region AAAA is never going to be extended over XXXX. Instead XXXX must
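
The open-coded list manipulation deleted from __vma_unlink_common() above does not go away; it moves behind the new __vma_unlink_list() helper, which lives next to __vma_link_list() in mm/util.c, just as __vma_link_list() loses its now-unused rb_parent argument. Pieced together from the deleted lines, the helper has to do roughly the following (a sketch reconstructed from the removed code, not a quote of the helper's actual body):

static void __vma_unlink_list(struct mm_struct *mm,
			      struct vm_area_struct *vma)
{
	struct vm_area_struct *prev = vma->vm_prev;
	struct vm_area_struct *next = vma->vm_next;

	if (prev)
		prev->vm_next = next;	/* bypass vma in the forward list */
	else
		mm->mmap = next;	/* vma was the first mapping */
	if (next)
		next->vm_prev = prev;	/* and in the backward list */
}
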
@@ -1289,26 +1270,22 @@ static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_
  */
 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
 {
-	struct anon_vma *anon_vma;
-	struct vm_area_struct *near;
-
-	near = vma->vm_next;
-	if (!near)
-		goto try_prev;
-
-	anon_vma = reusable_anon_vma(near, vma, near);
-	if (anon_vma)
-		return anon_vma;
-try_prev:
-	near = vma->vm_prev;
-	if (!near)
-		goto none;
-
-	anon_vma = reusable_anon_vma(near, near, vma);
-	if (anon_vma)
-		return anon_vma;
-none:
+	struct anon_vma *anon_vma = NULL;
+
+	/* Try next first. */
+	if (vma->vm_next) {
+		anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next);
+		if (anon_vma)
+			return anon_vma;
+	}
+
+	/* Try prev next. */
+	if (vma->vm_prev)
+		anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma);
+
 	/*
+	 * We might reach here with anon_vma == NULL if we can't find
+	 * any reusable anon_vma.
 	 * There's no absolute need to look only at touching neighbours:
 	 * we could search further afield for "compatible" anon_vmas.
 	 * But it would probably just be a waste of time searching,
@@ -1316,7 +1293,7 @@ none:
 	 * We're trying to allow mprotect remerging later on,
 	 * not trying to minimize memory used for anon_vmas.
 	 */
-	return NULL;
+	return anon_vma;
 }
 
 /*
@@ -1358,6 +1335,9 @@ static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
 	if (S_ISBLK(inode->i_mode))
 		return MAX_LFS_FILESIZE;
 
+	if (S_ISSOCK(inode->i_mode))
+		return MAX_LFS_FILESIZE;
+
 	/* Special "we do even unsigned file positions" case */
 	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
 		return 0;
@@ -1430,7 +1410,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	 * that it represents a valid section of the address space.
 	 */
 	addr = get_unmapped_area(file, addr, len, pgoff, flags);
-	if (offset_in_page(addr))
+	if (IS_ERR_VALUE(addr))
 		return addr;
 
 	if (flags & MAP_FIXED_NOREPLACE) {
@@ -1483,8 +1463,12 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 		case MAP_SHARED_VALIDATE:
 			if (flags & ~flags_mask)
 				return -EOPNOTSUPP;
-			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
-				return -EACCES;
+			if (prot & PROT_WRITE) {
+				if (!(file->f_mode & FMODE_WRITE))
+					return -EACCES;
+				if (IS_SWAPFILE(file->f_mapping->host))
+					return -ETXTBSY;
+			}
 
 			/*
 			 * Make sure we don't allow writing to an append-only
@@ -1573,6 +1557,8 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
 	struct file *file = NULL;
 	unsigned long retval;
 
+	addr = untagged_addr(addr);
+
 	if (!(flags & MAP_ANONYMOUS)) {
 		audit_mmap_fd(fd, flags);
 		file = fget(fd);
@@ -2270,12 +2256,9 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
 	if (vma) {
 		*pprev = vma->vm_prev;
 	} else {
-		struct rb_node *rb_node = mm->mm_rb.rb_node;
-		*pprev = NULL;
-		while (rb_node) {
-			*pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
-			rb_node = rb_node->rb_right;
-		}
+		struct rb_node *rb_node = rb_last(&mm->mm_rb);
+
+		*pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL;
 	}
 	return vma;
 }
@@ -2874,6 +2857,7 @@ EXPORT_SYMBOL(vm_munmap);
 
 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
 {
+	addr = untagged_addr(addr);
 	profile_munmap(addr);
 	return __vm_munmap(addr, len, true);
}
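
brk(), ksys_mmap_pgoff() and munmap() above each gain an untagged_addr() call. This is part of the tagged-address ABI: on hardware such as arm64 with Top Byte Ignore, user space may store metadata in the upper bits of a pointer, and those bits must be stripped before the address is compared against VMA ranges. A minimal model of the operation, assuming an arm64-style layout in which user addresses have bit 55 clear (illustrative only; the kernel's actual macro differs in typing details):

#include <stdint.h>

/* Sign-extend from bit 55: clears a tag held in bits 63:56 of a user
 * pointer, while kernel addresses (bit 55 set) stay all-ones on top. */
static inline uint64_t untag_addr(uint64_t addr)
{
	return (uint64_t)((int64_t)(addr << 8) >> 8);
}

Doing the stripping once at the syscall boundary means everything below it (VMA lookup, alignment checks, range arithmetic) keeps operating on canonical addresses.
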
@@ -2990,15 +2974,16 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
 	struct rb_node **rb_link, *rb_parent;
 	pgoff_t pgoff = addr >> PAGE_SHIFT;
 	int error;
+	unsigned long mapped_addr;
 
 	/* Until we need other flags, refuse anything except VM_EXEC. */
 	if ((flags & (~VM_EXEC)) != 0)
 		return -EINVAL;
 	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
-	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
-	if (offset_in_page(error))
-		return error;
+	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+	if (IS_ERR_VALUE(mapped_addr))
+		return mapped_addr;
 
 	error = mlock_future_check(mm, mm->def_flags, len);
 	if (error)
@@ -3347,6 +3332,8 @@ static const struct vm_operations_struct special_mapping_vmops = {
 	.fault = special_mapping_fault,
 	.mremap = special_mapping_mremap,
 	.name = special_mapping_name,
+	/* vDSO code relies on VVAR not being accessible remotely */
+	.access = NULL,
 };
 
 static const struct vm_operations_struct legacy_special_mapping_vmops = {
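
A final note on the two offset_in_page() to IS_ERR_VALUE() conversions (in do_mmap() and do_brk_flags() above): get_unmapped_area() returns either a page-aligned address or a negative errno, and the old test only caught failures because errno values happen never to be page aligned. IS_ERR_VALUE() states the intent directly, using the usual kernel convention that the last page of the address space encodes errnos, along these lines:

#define MAX_ERRNO	4095

#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)

The do_brk_flags() hunk also repairs a type problem visible in the removed lines: the returned address used to be stored in the int variable error, which would truncate it on 64-bit; the new unsigned long mapped_addr preserves the full value.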