Commit 1027e443 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: make GUP handle pfn mapping unless FOLL_GET is requested

With DAX, pfn mappings are becoming more common.  The patch adjusts the GUP
code to cover pfn mappings for the cases where we don't need a struct page to
proceed.

To make this possible, let's change the follow_page() code to return the
-EEXIST error code if a proper page table entry exists but there is no
corresponding struct page.  __get_user_pages() ignores the error code and
moves on to the next page frame.
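
In rough terms, the resulting contract in the __get_user_pages() loop looks
like the sketch below (a condensed illustration of the hunks further down,
not a drop-in replacement for them):

	page = follow_page_mask(vma, start, foll_flags, &page_mask);
	if (!page) {
		/* no usable PTE at all: fault the page in and retry */
	} else if (PTR_ERR(page) == -EEXIST) {
		/* PTE exists, but no struct page (pfn mapping): skip it */
		goto next_page;
	} else if (IS_ERR(page)) {
		return i ? i : PTR_ERR(page);	/* genuine error */
	}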

The immediate effect of the change is that MAP_POPULATE and mlock() now work
on DAX mappings.
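
For illustration, a minimal userspace sketch of that effect (the file path is
hypothetical and assumes a filesystem mounted with DAX enabled; before this
patch, neither the MAP_POPULATE prefaulting nor the mlock() population below
worked on such pfn-based mappings):

	#include <sys/mman.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 2 * 1024 * 1024;
		int fd = open("/mnt/dax/file", O_RDWR);	/* hypothetical DAX file */
		if (fd < 0) { perror("open"); return 1; }

		/* MAP_POPULATE prefaults the whole range through GUP */
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_SHARED | MAP_POPULATE, fd, 0);
		if (p == MAP_FAILED) { perror("mmap"); return 1; }

		/* mlock() also populates the range through GUP (no FOLL_GET needed) */
		if (mlock(p, len)) { perror("mlock"); return 1; }

		p[0] = 1;	/* touch the mapping */
		munlock(p, len);
		munmap(p, len);
		close(fd);
		return 0;
	}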

[fix arm64 build]
Signed-off-by: Kirill A. Shutemov <>
Reviewed-by: Toshi Kani <>
Acked-by: Matthew Wilcox <>
Signed-off-by: Andrew Morton <>
Signed-off-by: Linus Torvalds <>
parent d899844e
mm/gup.c
@@ -12,7 +12,9 @@
 #include <linux/sched.h>
 #include <linux/rwsem.h>
 #include <linux/hugetlb.h>
+
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 #include "internal.h"
@@ -32,6 +34,30 @@ static struct page *no_page_table(struct vm_area_struct *vma,
 	return NULL;
 }
 
+static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
+		pte_t *pte, unsigned int flags)
+{
+	/* No page to get reference */
+	if (flags & FOLL_GET)
+		return -EFAULT;
+
+	if (flags & FOLL_TOUCH) {
+		pte_t entry = *pte;
+
+		if (flags & FOLL_WRITE)
+			entry = pte_mkdirty(entry);
+		entry = pte_mkyoung(entry);
+
+		if (!pte_same(*pte, entry)) {
+			set_pte_at(vma->vm_mm, address, pte, entry);
+			update_mmu_cache(vma, address, pte);
+		}
+	}
+
+	/* Proper page table entry exists, but no corresponding struct page */
+	return -EEXIST;
+}
+
 static struct page *follow_page_pte(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd, unsigned int flags)
 {
@@ -73,10 +99,21 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 
 	page = vm_normal_page(vma, address, pte);
 	if (unlikely(!page)) {
-		if ((flags & FOLL_DUMP) ||
-		    !is_zero_pfn(pte_pfn(pte)))
-			goto bad_page;
-		page = pte_page(pte);
+		if (flags & FOLL_DUMP) {
+			/* Avoid special (like zero) pages in core dumps */
+			page = ERR_PTR(-EFAULT);
+			goto out;
+		}
+
+		if (is_zero_pfn(pte_pfn(pte))) {
+			page = pte_page(pte);
+		} else {
+			int ret;
+
+			ret = follow_pfn_pte(vma, address, ptep, flags);
+			page = ERR_PTR(ret);
+			goto out;
+		}
 	}
 
 	if (flags & FOLL_GET)
@@ -114,12 +151,9 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		unlock_page(page);
 	}
+out:
 	pte_unmap_unlock(ptep, ptl);
 	return page;
-bad_page:
-	pte_unmap_unlock(ptep, ptl);
-	return ERR_PTR(-EFAULT);
-
 no_page:
 	pte_unmap_unlock(ptep, ptl);
 	if (!pte_none(pte))
 		return NULL;
@@ -489,9 +523,15 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				goto next_page;
 			}
 			BUG();
-		}
-		if (IS_ERR(page))
+		} else if (PTR_ERR(page) == -EEXIST) {
+			/*
+			 * Proper page table entry exists, but no corresponding
+			 * struct page.
+			 */
+			goto next_page;
+		} else if (IS_ERR(page)) {
 			return i ? i : PTR_ERR(page);
+		}
 
 		if (pages) {
 			pages[i] = page;
 			flush_anon_page(vma, page, start);