// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

typedef int (*set_dirty_func_t)(struct page *page);

static void __put_user_pages_dirty(struct page **pages,
				   unsigned long npages,
				   set_dirty_func_t sdf)
{
	unsigned long index;

	for (index = 0; index < npages; index++) {
		struct page *page = compound_head(pages[index]);

		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key cases:
		 *
		 * 1) This code sees the page as already dirty, so it skips
		 * the call to sdf(). That could happen because
		 * clear_page_dirty_for_io() called page_mkclean(),
		 * followed by set_page_dirty(). However, now the page is
		 * going to get written back, which meets the original
		 * intention of setting it dirty, so all is well:
		 * clear_page_dirty_for_io() goes on to call
		 * TestClearPageDirty(), and write the page back.
		 *
		 * 2) This code sees the page as clean, so it calls sdf().
		 * The page stays dirty, despite being written back, so it
		 * gets written back again in the next writeback cycle.
		 * This is harmless.
		 */
		if (!PageDirty(page))
			sdf(page);

		put_user_page(page);
	}
}

/**
 * put_user_pages_dirty() - release and dirty an array of gup-pinned pages
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if it was previously listed as clean. Then, release
 * the page using put_user_page().
 *
 * Please see the put_user_page() documentation for details.
 *
 * set_page_dirty(), which does not lock the page, is used here.
 * Therefore, it is the caller's responsibility to ensure that this is
 * safe. If not, then put_user_pages_dirty_lock() should be called instead.
 *
 */
void put_user_pages_dirty(struct page **pages, unsigned long npages)
{
	__put_user_pages_dirty(pages, npages, set_page_dirty);
}
EXPORT_SYMBOL(put_user_pages_dirty);

/**
 * put_user_pages_dirty_lock() - release and dirty an array of gup-pinned pages
 * @pages:  array of pages to be marked dirty and released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if it was previously listed as clean. Then, release
 * the page using put_user_page().
 *
 * Please see the put_user_page() documentation for details.
 *
 * This is just like put_user_pages_dirty(), except that it invokes
 * set_page_dirty_lock(), instead of set_page_dirty().
 *
 */
void put_user_pages_dirty_lock(struct page **pages, unsigned long npages)
{
	__put_user_pages_dirty(pages, npages, set_page_dirty_lock);
}
EXPORT_SYMBOL(put_user_pages_dirty_lock);
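/*
 * Example (an illustrative sketch, not taken from an in-tree caller): a
 * driver that pinned user pages for a device write would typically convert
 * its release path from
 *
 *      for (index = 0; index < npages; index++) {
 *              set_page_dirty_lock(pages[index]);
 *              put_page(pages[index]);
 *      }
 *
 *  to:
 *
 *      put_user_pages_dirty_lock(pages, npages);
 *
 * (or put_user_pages_dirty() when the caller knows the unlocked
 * set_page_dirty() is safe), so that gup-pinned pages are released via
 * put_user_page() rather than a bare put_page().
 */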

/**
 * put_user_pages() - release an array of gup-pinned pages.
 * @pages:  array of pages to be released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using put_user_page().
 *
 * Please see the put_user_page() documentation for details.
 */
void put_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long index;

	/*
	 * TODO: this can be optimized for huge pages: if a series of pages is
	 * physically contiguous and part of the same compound page, then a
	 * single operation to the head page should suffice.
	 */
	for (index = 0; index < npages; index++)
		put_user_page(pages[index]);
}
EXPORT_SYMBOL(put_user_pages);
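
/*
 * Example (an illustrative sketch, not an in-tree caller): for a read-only
 * pin, the matching release loop
 *
 *      for (index = 0; index < npages; index++)
 *              put_page(pages[index]);
 *
 *  becomes:
 *
 *      put_user_pages(pages, npages);
 */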

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET) {
		if (unlikely(!try_get_page(page))) {
			page = ERR_PTR(-ENOMEM);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();  /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(pmdval));
		if (is_pmd_migration_entry(pmdval))
			pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_sem is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			if (unlikely(!try_get_page(page))) {
				spin_unlock(ptl);
				return ERR_PTR(-ENOMEM);
			}
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}
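
/*
 * Example (an illustrative sketch, not an in-tree caller): looking up the
 * page currently mapped at @address without faulting anything in is done
 * under mmap_sem, e.g.:
 *
 *      down_read(&mm->mmap_sem);
 *      page = follow_page(vma, address, FOLL_GET | FOLL_DUMP);
 *      up_read(&mm->mmap_sem);
 *      if (IS_ERR_OR_NULL(page))
 *              return;
 *      ...use the page contents...
 *      put_page(page);
 *
 * FOLL_DUMP makes the "no page" case come back as an error pointer, and
 * FOLL_GET takes a reference so the page can still be used once mmap_sem
 * has been dropped.
 */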

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, address);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);

		/*
		 * This should never happen (a device public page in the gate
		 * area).
		 */
		if (is_device_public_page(*page))
			goto unmap;
	}
	if (unlikely(!try_get_page(*page))) {
		ret = -ENOMEM;
		goto unmap;
	}
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags)) {
				ret = -EFAULT;
				goto out;
			}
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, nonblocking);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page) {
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
				ret = 0;
				/* FALLTHRU */
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			ctx.page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			ctx.page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags:flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 *		does not allow retry
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_sem. So it has not the
 * same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret, major = 0;

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);
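
/*
 * Example (an illustrative sketch of the futex-style pattern described
 * above, not copied from an in-tree caller): retry an atomic user access
 * after resolving the fault outside the pagefault_disable() section:
 *
 *      bool unlocked = false;
 *
 *      down_read(&mm->mmap_sem);
 *      while (1) {
 *              pagefault_disable();
 *              ret = __get_user(val, uaddr);
 *              pagefault_enable();
 *              if (!ret)
 *                      break;
 *              if (fixup_user_fault(current, mm, (unsigned long)uaddr,
 *                                   0, &unlocked))
 *                      break;
 *      }
 *      up_read(&mm->mmap_sem);
 *
 * If the fault is resolved (possibly after dropping and retaking mmap_sem,
 * which is reported via @unlocked), the atomic access is simply retried.
 */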

static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	/*
	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
	 * vmas.  As there are no users of this flag in this call we simply
	 * disallow this option for now.
	 */
	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
		return -EINVAL;

	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, NULL, locked,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	/*
	 * FIXME: Current FOLL_LONGTERM behavior is incompatible with
	 * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
	 * vmas.  As there are no users of this flag in this call we simply
	 * disallow this option for now.
	 */
	if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
		return -EINVAL;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
				      &locked, gup_flags | FOLL_TOUCH);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/*
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.