Commit 73b0140b authored by Ira Weiny, committed by Linus Torvalds

mm/gup: change GUP fast to use flags rather than a write 'bool'

To facilitate additional options to get_user_pages_fast(), change the
singular 'write' parameter to 'gup_flags'.
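
After the change, every fast-GUP implementation shares the prototype
below (taken verbatim from the per-architecture hunks in this patch):

    int get_user_pages_fast(unsigned long start, int nr_pages,
                            unsigned int gup_flags, struct page **pages);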

This patch does not change any functionality.  New functionality will
follow in subsequent patches.

Some of the get_user_pages_fast() call sites were unchanged because they
already passed FOLL_WRITE or 0 for the write parameter.
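
As a sketch of the conversion pattern applied throughout (illustrative
only; 'uaddr', 'nr_pages', 'write', and 'pages' stand in for each call
site's own variables):

    /* before: the third argument was a bool-like write flag */
    ret = get_user_pages_fast(uaddr, nr_pages, write, pages);

    /* after: callers pass FOLL_WRITE for writable pins, 0 otherwise */
    ret = get_user_pages_fast(uaddr, nr_pages, write ? FOLL_WRITE : 0, pages);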

NOTE: It was suggested to change the ordering of the get_user_pages_fast()
arguments to ensure that callers were converted.  This breaks the current
GUP call site convention of having the returned pages be the final
parameter.  So the suggestion was rejected.

Link: http://lkml.kernel.org/r/20190328084422.29911-4-ira.weiny@intel.com
Link: http://lkml.kernel.org/r/20190317183438.2057-4-ira.weiny@intel.com

Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marshall <hubcap@omnibond.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b798bec4
@@ -235,7 +235,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * get_user_pages_fast() - pin user pages in memory
  * @start:	starting user address
  * @nr_pages:	number of pages from start to pin
- * @write:	whether pages will be written to
+ * @gup_flags:	flags modifying pin behaviour
  * @pages:	array that receives pointers to the pages pinned.
  *		Should be at least nr_pages long.
  *
@@ -247,8 +247,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno.
  */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr, len, end;
@@ -273,7 +273,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+				   pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 	local_irq_enable();
@@ -289,7 +290,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	pages += nr;
 
 	ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
-				      pages, write ? FOLL_WRITE : 0);
+				      pages, gup_flags);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
...
@@ -600,7 +600,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		/* If writing != 0, then the HPTE must allow writing, if we get here */
 		write_ok = writing;
 		hva = gfn_to_hva_memslot(memslot, gfn);
-		npages = get_user_pages_fast(hva, 1, writing, pages);
+		npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
 		if (npages < 1) {
 			/* Check if it's an I/O mapping */
 			down_read(&current->mm->mmap_sem);
@@ -1193,7 +1193,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
 		goto err;
 	hva = gfn_to_hva_memslot(memslot, gfn);
-	npages = get_user_pages_fast(hva, 1, 1, pages);
+	npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
 	if (npages < 1)
 		goto err;
 	page = pages[0];
...
@@ -783,7 +783,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
 	if (!pages)
 		return -ENOMEM;
 
-	ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
+	ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages);
 	if (ret < 0)
 		goto free_pages;
...
@@ -2376,7 +2376,7 @@ static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
 		ret = -EFAULT;
 		goto out;
 	}
-	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
+	ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page);
 	if (ret < 0)
 		goto out;
 	BUG_ON(ret != 1);
...
@@ -204,7 +204,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * get_user_pages_fast() - pin user pages in memory
  * @start:	starting user address
  * @nr_pages:	number of pages from start to pin
- * @write:	whether pages will be written to
+ * @gup_flags:	flags modifying pin behaviour
  * @pages:	array that receives pointers to the pages pinned.
  *		Should be at least nr_pages long.
  *
@@ -216,8 +216,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno.
  */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr, len, end;
@@ -241,7 +241,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+				   pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 	local_irq_enable();
@@ -261,7 +262,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 
 	ret = get_user_pages_unlocked(start,
 		(end - start) >> PAGE_SHIFT, pages,
-		write ? FOLL_WRITE : 0);
+		gup_flags);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
...
@@ -245,8 +245,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	return nr;
 }
 
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr, len, end;
@@ -303,7 +303,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+				   pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 
@@ -324,7 +325,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 
 	ret = get_user_pages_unlocked(start,
 		(end - start) >> PAGE_SHIFT, pages,
-		write ? FOLL_WRITE : 0);
+		gup_flags);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
...
@@ -140,7 +140,7 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 		pt_element_t *table;
 		struct page *page;
 
-		npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
+		npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
 		/* Check if the user is doing something meaningless. */
 		if (unlikely(npages != 1))
 			return -EFAULT;
...
@@ -1805,7 +1805,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 		return NULL;
 
 	/* Pin the user virtual address. */
-	npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+	npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
 	if (npinned != npages) {
 		pr_err("SEV: Failure locking %lu pages.\n", npages);
 		goto err;
...
@@ -102,7 +102,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
 		goto unlock_vm;
 	}
 
-	pinned = get_user_pages_fast(region->user_addr, npages, 1,
-				     region->pages);
+	pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
+				     region->pages);
 	if (pinned < 0) {
 		ret = pinned;
...
@@ -243,7 +243,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 	if (NULL == vsg->pages)
 		return -ENOMEM;
 	ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
-			vsg->num_pages, vsg->direction == DMA_FROM_DEVICE,
+			vsg->num_pages,
+			vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
 			vsg->pages);
 	if (ret != vsg->num_pages) {
 		if (ret < 0)
...
@@ -105,7 +105,8 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
 {
 	int ret;
 
-	ret = get_user_pages_fast(vaddr, npages, writable, pages);
+	ret = get_user_pages_fast(vaddr, npages, writable ? FOLL_WRITE : 0,
+				  pages);
 	if (ret < 0)
 		return ret;
...
@@ -603,7 +603,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
 	/* pin user pages in memory */
 	rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
 				 m->nr_pages,
-				 m->write,		/* readable/writable */
+				 m->write ? FOLL_WRITE : 0, /* readable/writable */
 				 m->page_list);	/* ptrs to pages */
 	if (rc < 0)
 		goto fail_get_user_pages;
...
@@ -242,7 +242,7 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
 	/*
 	 * Lock physical page backing a given user VA.
 	 */
-	retval = get_user_pages_fast(uva, 1, 1, &context->notify_page);
+	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
 	if (retval != 1) {
 		context->notify_page = NULL;
 		return VMCI_ERROR_GENERIC;
...
@@ -659,7 +659,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
 	int err = VMCI_SUCCESS;
 
 	retval = get_user_pages_fast((uintptr_t) produce_uva,
-				     produce_q->kernel_if->num_pages, 1,
+				     produce_q->kernel_if->num_pages,
+				     FOLL_WRITE,
 				     produce_q->kernel_if->u.h.header_page);
 	if (retval < (int)produce_q->kernel_if->num_pages) {
 		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
@@ -671,7 +672,8 @@ static int qp_host_get_user_memory(u64 produce_uva,
 	}
 
 	retval = get_user_pages_fast((uintptr_t) consume_uva,
-				     consume_q->kernel_if->num_pages, 1,
+				     consume_q->kernel_if->num_pages,
+				     FOLL_WRITE,
 				     consume_q->kernel_if->u.h.header_page);
 	if (retval < (int)consume_q->kernel_if->num_pages) {
 		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
...
@@ -274,7 +274,8 @@ static int pin_user_pages(unsigned long first_page,
 		*iter_last_page_size = last_page_size;
 	}
 
-	ret = get_user_pages_fast(first_page, requested_pages, !is_write,
+	ret = get_user_pages_fast(first_page, requested_pages,
+				  !is_write ? FOLL_WRITE : 0,
 				  pages);
 	if (ret <= 0)
 		return -EFAULT;
...
@@ -868,7 +868,9 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 		pinned = get_user_pages_fast(
 				(unsigned long)xfer->loc_addr & PAGE_MASK,
-				nr_pages, dir == DMA_FROM_DEVICE, page_list);
+				nr_pages,
+				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
+				page_list);
 		if (pinned != nr_pages) {
 			if (pinned < 0) {
...
@@ -437,7 +437,7 @@ static int dax_lock_page(void *va, struct page **p)
 	dax_dbg("uva %p", va);
 
-	ret = get_user_pages_fast((unsigned long)va, 1, 1, p);
+	ret = get_user_pages_fast((unsigned long)va, 1, FOLL_WRITE, p);
 	if (ret == 1) {
 		dax_dbg("locked page %p, for VA %p", *p, va);
 		return 0;
...
@@ -4922,7 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
 	/* Try to fault in all of the necessary pages */
 	/* rw==READ means read from drive, write into memory area */
-	res = get_user_pages_fast(uaddr, nr_pages, rw == READ, pages);
+	res = get_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0,
+				  pages);
 
 	/* Errors and no page mapped should return here */
 	if (res < nr_pages)
...
@@ -486,8 +486,8 @@ static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
 			ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +
 					   off + i * PAGE_SIZE;
 		} else {
-			ret = get_user_pages_fast(page_addr - offset, 1, 1,
-						  &page);
+			ret = get_user_pages_fast(page_addr - offset, 1,
+						  FOLL_WRITE, &page);
 
 			if (ret <= 0) {
 				dev_err(pg_tbl->device,
...
@@ -273,7 +273,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 		goto err;
 	}
 
-	rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
+	rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages);
 	if (rc > 0)
 		shm->num_pages = rc;
 	if (rc != num_pages) {
...