Commit 3b54765c authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge updates from Andrew Morton:

 - a few misc things

 - ocfs2 updates

 - the v9fs maintainers have been missing for a long time. I've taken
   over v9fs patch slinging.

 - most of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (116 commits)
  mm,oom_reaper: check for MMF_OOM_SKIP before complaining
  mm/ksm: fix interaction with THP
  mm/memblock.c: cast constant ULLONG_MAX to phys_addr_t
  headers: untangle kmemleak.h from mm.h
  include/linux/mmdebug.h: make VM_WARN* non-rvals
  mm/page_isolation.c: make start_isolate_page_range() fail if already isolated
  mm: change return type to vm_fault_t
  mm, oom: remove 3% bonus for CAP_SYS_ADMIN processes
  mm, page_alloc: wakeup kcompactd even if kswapd cannot free more memory
  kernel/fork.c: detect early free of a live mm
  mm: make counting of list_lru_one::nr_items lockless
  mm/swap_state.c: make bool enable_vma_readahead and swap_vma_readahead() static
  block_invalidatepage(): only release page if the full page was invalidated
  mm: kernel-doc: add missing parameter descriptions
  mm/swap.c: remove @cold parameter description for release_pages()
  mm/nommu: remove description of alloc_vm_area
  zram: drop max_zpage_size and use zs_huge_class_size()
  zsmalloc: introduce zs_huge_class_size()
  mm: fix races between swapoff and flush dcache
  fs/direct-io.c: minor cleanups in do_blockdev_direct_IO
  ...
parents 3fd14cdc 97b1255c
@@ -1840,30 +1840,29 @@
 	keepinitrd	[HW,ARM]
 
 	kernelcore=	[KNL,X86,IA-64,PPC]
-			Format: nn[KMGTPE] | "mirror"
-			This parameter
-			specifies the amount of memory usable by the kernel
-			for non-movable allocations. The requested amount is
-			spread evenly throughout all nodes in the system. The
-			remaining memory in each node is used for Movable
-			pages. In the event, a node is too small to have both
-			kernelcore and Movable pages, kernelcore pages will
-			take priority and other nodes will have a larger number
-			of Movable pages. The Movable zone is used for the
-			allocation of pages that may be reclaimed or moved
-			by the page migration subsystem. This means that
-			HugeTLB pages may not be allocated from this zone.
-			Note that allocations like PTEs-from-HighMem still
-			use the HighMem zone if it exists, and the Normal
+			Format: nn[KMGTPE] | nn% | "mirror"
+			This parameter specifies the amount of memory usable by
+			the kernel for non-movable allocations. The requested
+			amount is spread evenly throughout all nodes in the
+			system as ZONE_NORMAL. The remaining memory is used for
+			movable memory in its own zone, ZONE_MOVABLE. In the
+			event, a node is too small to have both ZONE_NORMAL and
+			ZONE_MOVABLE, kernelcore memory will take priority and
+			other nodes will have a larger ZONE_MOVABLE.
+
+			ZONE_MOVABLE is used for the allocation of pages that
+			may be reclaimed or moved by the page migration
+			subsystem. Note that allocations like PTEs-from-HighMem
+			still use the HighMem zone if it exists, and the Normal
 			zone if it does not.
-			Instead of specifying the amount of memory (nn[KMGTPE]),
-			you can specify "mirror" option. In case "mirror"
+
+			It is possible to specify the exact amount of memory in
+			the form of "nn[KMGTPE]", a percentage of total system
+			memory in the form of "nn%", or "mirror". If "mirror"
 			option is specified, mirrored (reliable) memory is used
 			for non-movable allocations and remaining memory is used
-			for Movable pages. nn[KMGTPE] and "mirror" are exclusive,
-			so you can NOT specify nn[KMGTPE] and "mirror" at the same
-			time.
+			for Movable pages. "nn[KMGTPE]", "nn%", and "mirror"
+			are exclusive, so you cannot specify multiple forms.
 
 	kgdbdbgp=	[KGDB,HW] kgdb over EHCI usb debug port.
 			Format: <Controller#>[,poll interval]
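[ Illustrative boot command-line uses of the extended kernelcore= syntax
  documented above (made-up values, not recommendations):

	kernelcore=512M		exact amount kept for non-movable allocations
	kernelcore=30%		30% of RAM as ZONE_NORMAL, rest ZONE_MOVABLE
	kernelcore=mirror	use mirrored (reliable) memory for the kernel
]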
@@ -2377,13 +2376,14 @@
 	mousedev.yres=	[MOUSE] Vertical screen resolution, used for devices
 			reporting absolute coordinates, such as tablets
 
-	movablecore=nn[KMG]	[KNL,X86,IA-64,PPC] This parameter
-			is similar to kernelcore except it specifies the
-			amount of memory used for migratable allocations.
-			If both kernelcore and movablecore is specified,
-			then kernelcore will be at *least* the specified
-			value but may be more. If movablecore on its own
-			is specified, the administrator must be careful
+	movablecore=	[KNL,X86,IA-64,PPC]
+			Format: nn[KMGTPE] | nn%
+			This parameter is the complement to kernelcore=, it
+			specifies the amount of memory used for migratable
+			allocations. If both kernelcore and movablecore is
+			specified, then kernelcore will be at *least* the
+			specified value but may be more. If movablecore on its
+			own is specified, the administrator must be careful
 			that the amount of memory usable for all allocations
 			is not too small.
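[ Likewise for the complement: "movablecore=2G" or "movablecore=40%" (made-up
  values) request that amount or share of memory as ZONE_MOVABLE, with
  kernelcore sized to whatever remains. ]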
......
@@ -111,7 +111,7 @@ my $regex_direct_begin_default = 'order=([0-9]*) may_writepage=([0-9]*) gfp_flag
 my $regex_direct_end_default = 'nr_reclaimed=([0-9]*)';
 my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
 my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
-my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
+my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*) gfp_flags=([A-Z_|]*)';
 my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) classzone_idx=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_skipped=([0-9]*) nr_taken=([0-9]*) lru=([a-z_]*)';
 my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) nr_dirty=([0-9]*) nr_writeback=([0-9]*) nr_congested=([0-9]*) nr_immediate=([0-9]*) nr_activate=([0-9]*) nr_ref_keep=([0-9]*) nr_unmap_fail=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
 my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
@@ -201,7 +201,7 @@ $regex_kswapd_sleep = generate_traceevent_regex(
 $regex_wakeup_kswapd = generate_traceevent_regex(
 	"vmscan/mm_vmscan_wakeup_kswapd",
 	$regex_wakeup_kswapd_default,
-	"nid", "zid", "order");
+	"nid", "zid", "order", "gfp_flags");
 $regex_lru_isolate = generate_traceevent_regex(
 	"vmscan/mm_vmscan_lru_isolate",
 	$regex_lru_isolate_default,
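[ For reference, the shape of a mm_vmscan_wakeup_kswapd trace event the
  updated regex is built to match, with made-up field values:

	mm_vmscan_wakeup_kswapd: nid=0 zid=2 order=3 gfp_flags=GFP_HIGHUSER_MOVABLE
]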
......
@@ -833,7 +833,7 @@ void flush_dcache_page(struct page *page)
 	}
 
 	/* don't handle anon pages here */
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (!mapping)
 		return;
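[ This and the page_mapping_file() conversions in the hunks below come from
  "mm: fix races between swapoff and flush dcache". The new helper in
  include/linux/mm.h is essentially:

	/*
	 * For file cache pages, return the address_space, otherwise return NULL
	 */
	static inline struct address_space *page_mapping_file(struct page *page)
	{
		if (unlikely(PageSwapCache(page)))
			return NULL;
		return page_mapping(page);
	}

  Returning NULL for swap-cache pages keeps these cache-flush paths from
  dereferencing a swap address_space that a concurrent swapoff may be
  freeing. ]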
......
@@ -128,12 +128,7 @@ asmlinkage void __div0(void)
 	error("Attempting division by 0!");
 }
 
-unsigned long __stack_chk_guard;
-
-void __stack_chk_guard_setup(void)
-{
-	__stack_chk_guard = 0x000a0dff;
-}
+const unsigned long __stack_chk_guard = 0x000a0dff;
 
 void __stack_chk_fail(void)
 {
@@ -150,8 +145,6 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
 {
 	int ret;
 
-	__stack_chk_guard_setup();
-
 	output_data = (unsigned char *)output_start;
 	free_mem_ptr = free_mem_ptr_p;
 	free_mem_end_ptr = free_mem_ptr_end_p;
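[ Context for the __stack_chk_guard change here and in the identical mips/sh
  hunks below: with -fstack-protector the compiler stashes the guard in each
  protected function's prologue and re-checks it in the epilogue, conceptually
  (a sketch, not actual generated code):

	void example(void)
	{
		unsigned long canary = __stack_chk_guard;	/* prologue */
		char buf[64];
		/* ... body that might overflow buf ... */
		if (canary != __stack_chk_guard)		/* epilogue */
			__stack_chk_fail();
	}

  The guard value only needs to be stable, so the decompressors can use a
  const and drop the runtime setup call. ]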
......
@@ -70,7 +70,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
-		__flush_dcache_page(page_mapping(from), from);
+		__flush_dcache_page(page_mapping_file(from), from);
 
 	raw_spin_lock(&minicache_lock);
......
@@ -76,7 +76,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	unsigned long kfrom, kto;
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
-		__flush_dcache_page(page_mapping(from), from);
+		__flush_dcache_page(page_mapping_file(from), from);
 
 	/* FIXME: not highmem safe */
 	discard_old_kernel_data(page_address(to));
......
@@ -90,7 +90,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
-		__flush_dcache_page(page_mapping(from), from);
+		__flush_dcache_page(page_mapping_file(from), from);
 
 	raw_spin_lock(&minicache_lock);
......
@@ -195,7 +195,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 	if (page == ZERO_PAGE(0))
 		return;
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
 	if (mapping) {
......
@@ -285,7 +285,7 @@ void __sync_icache_dcache(pte_t pteval)
 	page = pfn_to_page(pfn);
 	if (cache_is_vipt_aliasing())
-		mapping = page_mapping(page);
+		mapping = page_mapping_file(page);
 	else
 		mapping = NULL;
@@ -333,7 +333,7 @@ void flush_dcache_page(struct page *page)
 		return;
 	}
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 
 	if (!cache_ops_need_broadcast() &&
 	    mapping && !page_mapcount(page))
@@ -363,7 +363,7 @@ void flush_kernel_dcache_page(struct page *page)
 	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
 		struct address_space *mapping;
 
-		mapping = page_mapping(page);
+		mapping = page_mapping_file(page);
 
 		if (!mapping || mapping_mapped(mapping)) {
 			void *addr;
......
@@ -76,12 +76,7 @@ void error(char *x)
 #include "../../../../lib/decompress_unxz.c"
 #endif
 
-unsigned long __stack_chk_guard;
-
-void __stack_chk_guard_setup(void)
-{
-	__stack_chk_guard = 0x000a0dff;
-}
+const unsigned long __stack_chk_guard = 0x000a0dff;
 
 void __stack_chk_fail(void)
 {
@@ -92,8 +87,6 @@ void decompress_kernel(unsigned long boot_heap_start)
 {
 	unsigned long zimage_start, zimage_size;
 
-	__stack_chk_guard_setup();
-
 	zimage_start = (unsigned long)(&__image_begin);
 	zimage_size = (unsigned long)(&__image_end) -
 		(unsigned long)(&__image_begin);
......
@@ -86,7 +86,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
 
 void __flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping_file(page);
 	unsigned long addr;
 
 	if (mapping && !mapping_mapped(mapping)) {
......
@@ -180,7 +180,7 @@ void flush_dcache_page(struct page *page)
 	if (page == ZERO_PAGE(0))
 		return;
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 
 	/* Flush this page if there are aliases. */
 	if (mapping && !mapping_mapped(mapping)) {
@@ -215,7 +215,7 @@ void update_mmu_cache(struct vm_area_struct *vma,
 	if (page == ZERO_PAGE(0))
 		return;
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
......
@@ -88,7 +88,8 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 		return;
 
 	page = pfn_to_page(pfn);
-	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+	if (page_mapping_file(page) &&
+	    test_bit(PG_dcache_dirty, &page->flags)) {
 		flush_kernel_dcache_page_addr(pfn_va(pfn));
 		clear_bit(PG_dcache_dirty, &page->flags);
 	} else if (parisc_requires_coherency())
@@ -304,7 +305,7 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping_file(page);
 	struct vm_area_struct *mpnt;
 	unsigned long offset;
 	unsigned long addr, old_addr = 0;
......
@@ -117,12 +117,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
 			    unsigned long ceiling);
 
-/*
- * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
- * to override the version in mm/hugetlb.c
- */
-#define vma_mmu_pagesize vma_mmu_pagesize
-
 /*
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
......
@@ -568,10 +568,7 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 	if (!radix_enabled())
 		return 1UL << mmu_psize_to_shift(psize);
 #endif
-	if (!is_vm_hugetlb_page(vma))
-		return PAGE_SIZE;
-
-	return huge_page_size(hstate_vma(vma));
+	return vma_kernel_pagesize(vma);
 }
 
 static inline bool is_power_of_4(unsigned long x)
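[ The powerpc-specific hugetlb handling can go because, after this series,
  generic vma_kernel_pagesize() resolves hugetlb page sizes through a new
  ->pagesize() vm_operations hook; the generic helper is essentially:

	unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
	{
		if (vma->vm_ops && vma->vm_ops->pagesize)
			return vma->vm_ops->pagesize(vma);
		return PAGE_SIZE;
	}
]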
......
@@ -112,7 +112,7 @@ static int mm_iommu_move_page_from_cma(struct page *page)
 	put_page(page); /* Drop the gup reference */
 
 	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
-				NULL, 0, MIGRATE_SYNC, MR_CMA);
+				NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
 	if (ret) {
 		if (!list_empty(&cma_migrate_pages))
 			putback_movable_pages(&cma_migrate_pages);
......
@@ -38,6 +38,7 @@
 #include <linux/suspend.h>
 #include <linux/memblock.h>
 #include <linux/gfp.h>
+#include <linux/kmemleak.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/iommu.h>
......
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/kmemleak.h>
 #include <linux/bitmap.h>
 #include <linux/bootmem.h>
 #include <asm/msi_bitmap.h>
......
@@ -15,7 +15,7 @@
 #include <linux/hardirq.h>
 #include <linux/log2.h>
 #include <linux/kprobes.h>
-#include <linux/slab.h>
+#include <linux/kmemleak.h>
 #include <linux/time.h>
 #include <linux/module.h>
 #include <linux/sched/signal.h>
......
@@ -27,7 +27,6 @@
 #include <linux/err.h>
 #include <linux/spinlock.h>
 #include <linux/kernel_stat.h>
-#include <linux/kmemleak.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/irqflags.h>
......
@@ -104,12 +104,7 @@ static void error(char *x)
 	while(1);	/* Halt */
 }
 
-unsigned long __stack_chk_guard;
-
-void __stack_chk_guard_setup(void)
-{
-	__stack_chk_guard = 0x000a0dff;
-}
+const unsigned long __stack_chk_guard = 0x000a0dff;
 
 void __stack_chk_fail(void)
 {
@@ -130,8 +125,6 @@ void decompress_kernel(void)
 {
 	unsigned long output_addr;
 
-	__stack_chk_guard_setup();
-
 #ifdef CONFIG_SUPERH64
 	output_addr = (CONFIG_MEMORY_START + 0x2000);
 #else
......
@@ -112,7 +112,7 @@ static void sh4_flush_dcache_page(void *arg)
 	struct page *page = arg;
 	unsigned long addr = (unsigned long)page_address(page);
 #ifndef CONFIG_SMP
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping_file(page);
 
 	if (mapping && !mapping_mapped(mapping))
 		clear_bit(PG_dcache_clean, &page->flags);
......
@@ -136,7 +136,7 @@ static void __flush_dcache_page(unsigned long phys)
 static void sh7705_flush_dcache_page(void *arg)
 {
 	struct page *page = arg;
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping_file(page);
 
 	if (mapping && !mapping_mapped(mapping))
 		clear_bit(PG_dcache_clean, &page->flags);
......
@@ -22,7 +22,6 @@
 #include <linux/seq_file.h>
 #include <linux/ftrace.h>
 #include <linux/irq.h>
-#include <linux/kmemleak.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
......
@@ -929,9 +929,9 @@ static inline void __local_flush_dcache_page(struct page *page)
 #ifdef DCACHE_ALIASING_POSSIBLE
 	__flush_dcache_page(page_address(page),
 			    ((tlb_type == spitfire) &&
-			     page_mapping(page) != NULL));
+			     page_mapping_file(page) != NULL));
 #else
-	if (page_mapping(page) != NULL &&
+	if (page_mapping_file(page) != NULL &&
 	    tlb_type == spitfire)
 		__flush_icache_page(__pa(page_address(page)));
 #endif
@@ -958,7 +958,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 
 	if (tlb_type == spitfire) {
 		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
-		if (page_mapping(page) != NULL)
+		if (page_mapping_file(page) != NULL)
 			data0 |= ((u64)1 << 32);
 	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
@@ -994,7 +994,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	pg_addr = page_address(page);
 	if (tlb_type == spitfire) {
 		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
-		if (page_mapping(page) != NULL)
+		if (page_mapping_file(page) != NULL)
 			data0 |= ((u64)1 << 32);
 	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
......
@@ -206,9 +206,9 @@ inline void flush_dcache_page_impl(struct page *page)
 #ifdef DCACHE_ALIASING_POSSIBLE
 	__flush_dcache_page(page_address(page),
 			    ((tlb_type == spitfire) &&
-			     page_mapping(page) != NULL));
+			     page_mapping_file(page) != NULL));
 #else
-	if (page_mapping(page) != NULL &&
+	if (page_mapping_file(page) != NULL &&
 	    tlb_type == spitfire)
 		__flush_icache_page(__pa(page_address(page)));
 #endif
@@ -490,7 +490,7 @@ void flush_dcache_page(struct page *page)
 
 	this_cpu = get_cpu();
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (mapping && !mapping_mapped(mapping)) {
 		int dirty = test_bit(PG_dcache_dirty, &page->flags);
 		if (dirty) {
......
@@ -128,7 +128,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 		goto no_cache_flush;
 
 	/* A real file page? */
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (!mapping)
 		goto no_cache_flush;
......
@@ -83,7 +83,7 @@ void flush_dcache_page(struct page *page)