diff --git a/include/linux/mm.h b/include/linux/mm.h
index 47a93928b90fff85339fd09d47761549db576b58..cccbbba12b9d474d18c6cea81923c98f2fbbb33f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2109,7 +2109,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
 #define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
				 * and return without waiting upon it */
-#define FOLL_MLOCK	0x40	/* mark page as mlocked */
+#define FOLL_POPULATE	0x40	/* fault in page */
 #define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
 #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
diff --git a/mm/gup.c b/mm/gup.c
index a6e24e246f8688af7664966e66d99dbd038b8066..1b114ba9aebf4c512342f5591b71b6c5a9f27a33 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -92,7 +92,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		 */
 		mark_page_accessed(page);
 	}
-	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+	if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * The preliminary mapping check is mainly to avoid the
 		 * pointless overhead of lock_page on the ZERO_PAGE
@@ -265,8 +265,8 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	unsigned int fault_flags = 0;
 	int ret;
 
-	/* For mlock, just skip the stack guard page. */
-	if ((*flags & FOLL_MLOCK) &&
+	/* For mm_populate(), just skip the stack guard page. */
+	if ((*flags & FOLL_POPULATE) &&
 			(stack_guard_page_start(vma, address) ||
 			 stack_guard_page_end(vma, address + PAGE_SIZE)))
 		return -ENOENT;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6817b0350c71c43b0f89a4972ce19c51a2408a2b..10a4b6cea0d10a27f3792b8a61f1a415b911f9a0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1231,7 +1231,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  pmd, _pmd,  1))
 			update_mmu_cache_pmd(vma, addr, pmd);
 	}
-	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+	if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
 		if (page->mapping && trylock_page(page)) {
 			lru_add_drain();
 			if (page->mapping)
diff --git a/mm/mlock.c b/mm/mlock.c
index 8a54cd214925872a66d4d1cc36b69ec6c6047324..f756e28b33fc2ca253fec3e3b66c2c90f7f72797 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -237,7 +237,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	VM_BUG_ON_VMA(end > vma->vm_end, vma);
 	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
-	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
+	gup_flags = FOLL_TOUCH | FOLL_POPULATE;
 	/*
 	 * We want to touch writable mappings with a write fault in order
 	 * to break COW, except for shared mappings because these don't COW