Commit e9714acf authored by Konstantin Khlebnikov's avatar Konstantin Khlebnikov Committed by Linus Torvalds
Browse files

mm: kill vma flag VM_EXECUTABLE and mm->num_exe_file_vmas

Currently the kernel sets mm->exe_file during sys_execve() and then tracks
the number of vmas with the VM_EXECUTABLE flag in mm->num_exe_file_vmas; as
soon as this counter drops to zero, the kernel resets mm->exe_file to NULL.
In addition, it resets mm->exe_file at the last mmput(), when mm->mm_users
drops to zero.

A VMA with the VM_EXECUTABLE flag appears after mapping a file with the
MAP_EXECUTABLE flag. Such vmas can appear only at sys_execve() or after vma
splitting, because sys_mmap ignores this flag. Usually the binfmt module
sets mm->exe_file and mmaps executable vmas with this file; they hold
mm->exe_file while the task is running.

Comment from commit v2.6.25-6245-g925d1c40 ("procfs task exe symlink"),
where all this stuff was introduced:

> The kernel implements readlink of /proc/pid/exe by getting the file from
> the first executable VMA.  Then the path to the file is reconstructed and
> reported as the result.
> Because of the VMA walk the code is slightly different on nommu systems.
> This patch avoids separate /proc/pid/exe code on nommu systems.  Instead of
> walking the VMAs to find the first executable file-backed VMA we store a
> reference to the exec'd file in the mm_struct.
> That reference would prevent the filesystem holding the executable file
> from being unmounted even after unmapping the VMAs.  So we track the number
> of VM_EXECUTABLE VMAs and drop the new reference when the last one is
> unmapped.  This avoids pinning the mounted filesystem.

exe_file's vma accounting is hooked into every file mmap/unmap and vma
split/merge just to fix a hypothetical case of an mm pinning a filesystem
against unmounting — an mm which has already unmapped all its executable
files, but is still alive.

Seems like currently nobody depends on this behaviour.  We can try to
remove this logic and keep mm->exe_file until final mmput().

mm->exe_file is still protected with mm->mmap_sem, because we want to
change it via the new sys_prctl(PR_SET_MM_EXE_FILE). Also, via this syscall
a task can change its mm->exe_file and unpin the mountpoint explicitly.
Signed-off-by: default avatarKonstantin Khlebnikov <>
Cc: Alexander Viro <>
Cc: Carsten Otte <>
Cc: Chris Metcalf <>
Cc: Cyrill Gorcunov <>
Cc: Eric Paris <>
Cc: H. Peter Anvin <>
Cc: Hugh Dickins <>
Cc: Ingo Molnar <>
Cc: James Morris <>
Cc: Jason Baron <>
Cc: Kentaro Takeda <>
Cc: Matt Helsley <>
Cc: Nick Piggin <>
Cc: Oleg Nesterov <>
Cc: Peter Zijlstra <>
Cc: Robert Richter <>
Cc: Suresh Siddha <>
Cc: Tetsuo Handa <>
Cc: Venkatesh Pallipadi <>
Acked-by: default avatarLinus Torvalds <>
Signed-off-by: default avatarAndrew Morton <>
Signed-off-by: default avatarLinus Torvalds <>
parent 2dd8ad81
......@@ -87,7 +87,6 @@ extern unsigned int kobjsize(const void *objp);
#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
#define VM_EXECUTABLE 0x00001000
#define VM_LOCKED 0x00002000
#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
......@@ -1396,9 +1395,6 @@ extern void exit_mmap(struct mm_struct *);
extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
extern void added_exe_file_vma(struct mm_struct *mm);
extern void removed_exe_file_vma(struct mm_struct *mm);
extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
......@@ -394,7 +394,6 @@ struct mm_struct {
/* store ref to file /proc/<pid>/exe symlink points to */
struct file *exe_file;
unsigned long num_exe_file_vmas;
struct mmu_notifier_mm *mmu_notifier_mm;
......@@ -86,7 +86,6 @@ calc_vm_flag_bits(unsigned long flags)
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
_calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
_calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED );
#endif /* __KERNEL__ */
......@@ -622,26 +622,6 @@ void mmput(struct mm_struct *mm)
* We added or removed a vma mapping the executable. The vmas are only mapped
* during exec and are not mapped with the mmap system call.
* Callers must hold down_write() on the mm's mmap_sem for these
void added_exe_file_vma(struct mm_struct *mm)
void removed_exe_file_vma(struct mm_struct *mm)
if ((mm->num_exe_file_vmas == 0) && mm->exe_file) {
mm->exe_file = NULL;
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
if (new_exe_file)
......@@ -649,7 +629,6 @@ void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
if (mm->exe_file)
mm->exe_file = new_exe_file;
mm->num_exe_file_vmas = 0;
struct file *get_mm_exe_file(struct mm_struct *mm)
......@@ -231,11 +231,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
if (vma->vm_ops && vma->vm_ops->close)
if (vma->vm_file) {
if (vma->vm_file)
if (vma->vm_flags & VM_EXECUTABLE)
kmem_cache_free(vm_area_cachep, vma);
return next;
......@@ -636,8 +633,6 @@ again: remove_next = 1 + (end > next->vm_end);
if (file) {
uprobe_munmap(next, next->vm_start, next->vm_end);
if (next->vm_flags & VM_EXECUTABLE)
if (next->anon_vma)
anon_vma_merge(vma, next);
......@@ -1304,8 +1299,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
error = file->f_op->mmap(file, vma);
if (error)
goto unmap_and_free_vma;
if (vm_flags & VM_EXECUTABLE)
/* Can addr have changed??
......@@ -1987,11 +1980,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
if (anon_vma_clone(new, vma))
goto out_free_mpol;
if (new->vm_file) {
if (new->vm_file)
if (vma->vm_flags & VM_EXECUTABLE)
if (new->vm_ops && new->vm_ops->open)
......@@ -2009,11 +1999,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
/* Clean everything up if vma_adjust failed. */
if (new->vm_ops && new->vm_ops->close)
if (new->vm_file) {
if (vma->vm_flags & VM_EXECUTABLE)
if (new->vm_file)
......@@ -2408,12 +2395,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
new_vma->vm_start = addr;
new_vma->vm_end = addr + len;
new_vma->vm_pgoff = pgoff;
if (new_vma->vm_file) {
if (new_vma->vm_file)
if (vma->vm_flags & VM_EXECUTABLE)
if (new_vma->vm_ops && new_vma->vm_ops->open)
vma_link(mm, new_vma, prev, rb_link, rb_parent);
......@@ -789,11 +789,8 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
kenter("%p", vma);
if (vma->vm_ops && vma->vm_ops->close)
if (vma->vm_file) {
if (vma->vm_file)
if (vma->vm_flags & VM_EXECUTABLE)
kmem_cache_free(vm_area_cachep, vma);
......@@ -1284,10 +1281,6 @@ unsigned long do_mmap_pgoff(struct file *file,
if (file) {
region->vm_file = get_file(file);
vma->vm_file = get_file(file);
if (vm_flags & VM_EXECUTABLE) {
vma->vm_mm = current->mm;
......@@ -1440,8 +1433,6 @@ unsigned long do_mmap_pgoff(struct file *file,
kmem_cache_free(vm_region_jar, region);
if (vma->vm_file)
if (vma->vm_flags & VM_EXECUTABLE)
kmem_cache_free(vm_area_cachep, vma);
kleave(" = %d", ret);
return ret;
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment