Commit 3b54765c authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge updates from Andrew Morton:

 - a few misc things

 - ocfs2 updates

 - the v9fs maintainers have been missing for a long time. I've taken
   over v9fs patch slinging.

 - most of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (116 commits)
  mm,oom_reaper: check for MMF_OOM_SKIP before complaining
  mm/ksm: fix interaction with THP
  mm/memblock.c: cast constant ULLONG_MAX to phys_addr_t
  headers: untangle kmemleak.h from mm.h
  include/linux/mmdebug.h: make VM_WARN* non-rvals
  mm/page_isolation.c: make start_isolate_page_range() fail if already isolated
  mm: change return type to vm_fault_t
  mm, oom: remove 3% bonus for CAP_SYS_ADMIN processes
  mm, page_alloc: wakeup kcompactd even if kswapd cannot free more memory
  kernel/fork.c: detect early free of a live mm
  mm: make counting of list_lru_one::nr_items lockless
  mm/swap_state.c: make bool enable_vma_readahead and swap_vma_readahead() static
  block_invalidatepage(): only release page if the full page was invalidated
  mm: kernel-doc: add missing parameter descriptions
  mm/swap.c: remove @cold parameter description for release_pages()
  mm/nommu: remove description of alloc_vm_area
  zram: drop max_zpage_size and use zs_huge_class_size()
  zsmalloc: introduce zs_huge_class_size()
  mm: fix races between swapoff and flush dcache
  fs/direct-io.c: minor cleanups in do_blockdev_direct_IO
  ...
parents 3fd14cdc 97b1255c
......@@ -1840,30 +1840,29 @@
keepinitrd [HW,ARM]
kernelcore= [KNL,X86,IA-64,PPC]
Format: nn[KMGTPE] | "mirror"
This parameter
specifies the amount of memory usable by the kernel
for non-movable allocations. The requested amount is
spread evenly throughout all nodes in the system. The
remaining memory in each node is used for Movable
pages. In the event, a node is too small to have both
kernelcore and Movable pages, kernelcore pages will
take priority and other nodes will have a larger number
of Movable pages. The Movable zone is used for the
allocation of pages that may be reclaimed or moved
by the page migration subsystem. This means that
HugeTLB pages may not be allocated from this zone.
Note that allocations like PTEs-from-HighMem still
use the HighMem zone if it exists, and the Normal
Format: nn[KMGTPE] | nn% | "mirror"
This parameter specifies the amount of memory usable by
the kernel for non-movable allocations. The requested
amount is spread evenly throughout all nodes in the
system as ZONE_NORMAL. The remaining memory is used for
movable memory in its own zone, ZONE_MOVABLE. In the
event a node is too small to have both ZONE_NORMAL and
ZONE_MOVABLE, kernelcore memory will take priority and
other nodes will have a larger ZONE_MOVABLE.
ZONE_MOVABLE is used for the allocation of pages that
may be reclaimed or moved by the page migration
subsystem. Note that allocations like PTEs-from-HighMem
still use the HighMem zone if it exists, and the Normal
zone if it does not.
Instead of specifying the amount of memory (nn[KMGTPE]),
you can specify "mirror" option. In case "mirror"
It is possible to specify the exact amount of memory in
the form of "nn[KMGTPE]", a percentage of total system
memory in the form of "nn%", or "mirror". If "mirror"
option is specified, mirrored (reliable) memory is used
for non-movable allocations and remaining memory is used
for Movable pages. nn[KMGTPE] and "mirror" are exclusive,
so you can NOT specify nn[KMGTPE] and "mirror" at the same
time.
for Movable pages. "nn[KMGTPE]", "nn%", and "mirror"
are exclusive, so you cannot specify multiple forms.
kgdbdbgp= [KGDB,HW] kgdb over EHCI usb debug port.
Format: <Controller#>[,poll interval]
......@@ -2377,13 +2376,14 @@
mousedev.yres= [MOUSE] Vertical screen resolution, used for devices
reporting absolute coordinates, such as tablets
movablecore=nn[KMG] [KNL,X86,IA-64,PPC] This parameter
is similar to kernelcore except it specifies the
amount of memory used for migratable allocations.
If both kernelcore and movablecore is specified,
then kernelcore will be at *least* the specified
value but may be more. If movablecore on its own
is specified, the administrator must be careful
movablecore= [KNL,X86,IA-64,PPC]
Format: nn[KMGTPE] | nn%
This parameter is the complement to kernelcore=; it
specifies the amount of memory used for migratable
allocations. If both kernelcore and movablecore are
specified, then kernelcore will be at *least* the
specified value but may be more. If movablecore on its
own is specified, the administrator must be careful
that the amount of memory usable for all allocations
is not too small.
......
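For illustration only (these particular sizes are invented, not taken from the patch), the updated syntax accepts boot command lines such as:

	kernelcore=512M
	kernelcore=25%
	kernelcore=mirror
	movablecore=2G
	movablecore=70%

The absolute, percentage, and "mirror" forms of kernelcore= remain mutually exclusive, and movablecore= takes only the absolute and percentage forms.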
......@@ -111,7 +111,7 @@ my $regex_direct_begin_default = 'order=([0-9]*) may_writepage=([0-9]*) gfp_flag
my $regex_direct_end_default = 'nr_reclaimed=([0-9]*)';
my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*) gfp_flags=([A-Z_|]*)';
my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) classzone_idx=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_skipped=([0-9]*) nr_taken=([0-9]*) lru=([a-z_]*)';
my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) nr_dirty=([0-9]*) nr_writeback=([0-9]*) nr_congested=([0-9]*) nr_immediate=([0-9]*) nr_activate=([0-9]*) nr_ref_keep=([0-9]*) nr_unmap_fail=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
......@@ -201,7 +201,7 @@ $regex_kswapd_sleep = generate_traceevent_regex(
$regex_wakeup_kswapd = generate_traceevent_regex(
"vmscan/mm_vmscan_wakeup_kswapd",
$regex_wakeup_kswapd_default,
"nid", "zid", "order");
"nid", "zid", "order", "gfp_flags");
$regex_lru_isolate = generate_traceevent_regex(
"vmscan/mm_vmscan_lru_isolate",
$regex_lru_isolate_default,
......
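With the extra capture group, the default pattern now expects mm_vmscan_wakeup_kswapd events that also report the allocation's GFP flags. A field string of the following shape (values invented for illustration) matches the updated regex, whereas the old three-field output no longer does, because the literal gfp_flags= key is now required:

	nid=0 zid=2 order=3 gfp_flags=GFP_KERNEL|__GFP_HIGHMEM

So this version of trace-vmscan-postprocess.pl should be run against kernels whose wakeup_kswapd tracepoint already carries gfp_flags.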
......@@ -833,7 +833,7 @@ void flush_dcache_page(struct page *page)
}
/* don't handle anon pages here */
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!mapping)
return;
......
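The page_mapping() -> page_mapping_file() conversions in this and the following architecture cache-flush hunks come from the "mm: fix races between swapoff and flush dcache" patch in this series: for a swap-cache page, page_mapping() returns a swapper address space that a concurrent swapoff() can free, while these D-cache aliasing paths only care about file-backed mappings in the first place. The new helper, roughly as added to include/linux/mm.h by that patch (paraphrased here, so treat the exact wording as an approximation), simply filters swap-cache pages out:

/*
 * For file cache pages, return the page's address_space; for swap cache
 * pages, return NULL so callers never dereference a swapper address space
 * that swapoff() may be tearing down concurrently.
 */
static inline struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}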
......@@ -128,12 +128,7 @@ asmlinkage void __div0(void)
error("Attempting division by 0!");
}
unsigned long __stack_chk_guard;
void __stack_chk_guard_setup(void)
{
__stack_chk_guard = 0x000a0dff;
}
const unsigned long __stack_chk_guard = 0x000a0dff;
void __stack_chk_fail(void)
{
......@@ -150,8 +145,6 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
{
int ret;
__stack_chk_guard_setup();
output_data = (unsigned char *)output_start;
free_mem_ptr = free_mem_ptr_p;
free_mem_end_ptr = free_mem_ptr_end_p;
......
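This decompressor hunk (and the matching MIPS and SH ones further down) replaces a runtime-initialized stack-protector guard with a build-time constant. The contract is small: when these freestanding objects are built with -fstack-protector, the compiler's prologue copies the global __stack_chk_guard into a canary slot and its epilogue calls __stack_chk_fail() if the slot changed, so the decompressor only has to provide those two symbols. A minimal sketch of what each decompressor now supplies; the declarations match the hunks, but the __stack_chk_fail() body is an assumption here since it is not shown:

extern void error(char *x);	/* the decompressor's existing panic helper */

/* Guard value fixed at build time; the old runtime setup call goes away. */
const unsigned long __stack_chk_guard = 0x000a0dff;

void __stack_chk_fail(void)
{
	error("stack-protector: kernel stack is corrupted\n");
}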
......@@ -70,7 +70,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
void *kto = kmap_atomic(to);
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
__flush_dcache_page(page_mapping_file(from), from);
raw_spin_lock(&minicache_lock);
......
......@@ -76,7 +76,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
unsigned long kfrom, kto;
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
__flush_dcache_page(page_mapping_file(from), from);
/* FIXME: not highmem safe */
discard_old_kernel_data(page_address(to));
......
......@@ -90,7 +90,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
void *kto = kmap_atomic(to);
if (!test_and_set_bit(PG_dcache_clean, &from->flags))
__flush_dcache_page(page_mapping(from), from);
__flush_dcache_page(page_mapping_file(from), from);
raw_spin_lock(&minicache_lock);
......
......@@ -195,7 +195,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
if (page == ZERO_PAGE(0))
return;
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
__flush_dcache_page(mapping, page);
if (mapping) {
......
......@@ -285,7 +285,7 @@ void __sync_icache_dcache(pte_t pteval)
page = pfn_to_page(pfn);
if (cache_is_vipt_aliasing())
mapping = page_mapping(page);
mapping = page_mapping_file(page);
else
mapping = NULL;
......@@ -333,7 +333,7 @@ void flush_dcache_page(struct page *page)
return;
}
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!cache_ops_need_broadcast() &&
mapping && !page_mapcount(page))
......@@ -363,7 +363,7 @@ void flush_kernel_dcache_page(struct page *page)
if (cache_is_vivt() || cache_is_vipt_aliasing()) {
struct address_space *mapping;
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!mapping || mapping_mapped(mapping)) {
void *addr;
......
......@@ -76,12 +76,7 @@ void error(char *x)
#include "../../../../lib/decompress_unxz.c"
#endif
unsigned long __stack_chk_guard;
void __stack_chk_guard_setup(void)
{
__stack_chk_guard = 0x000a0dff;
}
const unsigned long __stack_chk_guard = 0x000a0dff;
void __stack_chk_fail(void)
{
......@@ -92,8 +87,6 @@ void decompress_kernel(unsigned long boot_heap_start)
{
unsigned long zimage_start, zimage_size;
__stack_chk_guard_setup();
zimage_start = (unsigned long)(&__image_begin);
zimage_size = (unsigned long)(&__image_end) -
(unsigned long)(&__image_begin);
......
......@@ -86,7 +86,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
void __flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct address_space *mapping = page_mapping_file(page);
unsigned long addr;
if (mapping && !mapping_mapped(mapping)) {
......
......@@ -180,7 +180,7 @@ void flush_dcache_page(struct page *page)
if (page == ZERO_PAGE(0))
return;
mapping = page_mapping(page);
mapping = page_mapping_file(page);
/* Flush this page if there are aliases. */
if (mapping && !mapping_mapped(mapping)) {
......@@ -215,7 +215,7 @@ void update_mmu_cache(struct vm_area_struct *vma,
if (page == ZERO_PAGE(0))
return;
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
__flush_dcache_page(mapping, page);
......
......@@ -88,7 +88,8 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
return;
page = pfn_to_page(pfn);
if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
if (page_mapping_file(page) &&
test_bit(PG_dcache_dirty, &page->flags)) {
flush_kernel_dcache_page_addr(pfn_va(pfn));
clear_bit(PG_dcache_dirty, &page->flags);
} else if (parisc_requires_coherency())
......@@ -304,7 +305,7 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct address_space *mapping = page_mapping_file(page);
struct vm_area_struct *mpnt;
unsigned long offset;
unsigned long addr, old_addr = 0;
......
......@@ -117,12 +117,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor,
unsigned long ceiling);
/*
* The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
* to override the version in mm/hugetlb.c
*/
#define vma_mmu_pagesize vma_mmu_pagesize
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
......
......@@ -568,10 +568,7 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
if (!radix_enabled())
return 1UL << mmu_psize_to_shift(psize);
#endif
if (!is_vm_hugetlb_page(vma))
return PAGE_SIZE;
return huge_page_size(hstate_vma(vma));
return vma_kernel_pagesize(vma);
}
static inline bool is_power_of_4(unsigned long x)
......
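The deleted fallback duplicated what the generic vma_kernel_pagesize() computes, so the powerpc helper now just calls it. For reference, the logic the removed lines implemented, written out as a sketch from the diff itself (not copied from mm/hugetlb.c, whose implementation may differ in detail):

/* What the removed fallback amounted to: hugetlb VMAs report their
 * hstate's page size, everything else reports the base page size. */
static unsigned long fallback_vma_pagesize(struct vm_area_struct *vma)
{
	if (is_vm_hugetlb_page(vma))
		return huge_page_size(hstate_vma(vma));
	return PAGE_SIZE;
}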
......@@ -112,7 +112,7 @@ static int mm_iommu_move_page_from_cma(struct page *page)
put_page(page); /* Drop the gup reference */
ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
NULL, 0, MIGRATE_SYNC, MR_CMA);
NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
if (ret) {
if (!list_empty(&cma_migrate_pages))
putback_movable_pages(&cma_migrate_pages);
......
......@@ -38,6 +38,7 @@
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
......
......@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <asm/msi_bitmap.h>
......
......@@ -15,7 +15,7 @@
#include <linux/hardirq.h>
#include <linux/log2.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
......
......@@ -27,7 +27,6 @@
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/kmemleak.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
......
......@@ -104,12 +104,7 @@ static void error(char *x)
while(1); /* Halt */
}
unsigned long __stack_chk_guard;
void __stack_chk_guard_setup(void)
{
__stack_chk_guard = 0x000a0dff;
}
const unsigned long __stack_chk_guard = 0x000a0dff;
void __stack_chk_fail(void)
{
......@@ -130,8 +125,6 @@ void decompress_kernel(void)
{
unsigned long output_addr;
__stack_chk_guard_setup();
#ifdef CONFIG_SUPERH64
output_addr = (CONFIG_MEMORY_START + 0x2000);
#else
......
......@@ -112,7 +112,7 @@ static void sh4_flush_dcache_page(void *arg)
struct page *page = arg;
unsigned long addr = (unsigned long)page_address(page);
#ifndef CONFIG_SMP
struct address_space *mapping = page_mapping(page);
struct address_space *mapping = page_mapping_file(page);
if (mapping && !mapping_mapped(mapping))
clear_bit(PG_dcache_clean, &page->flags);
......
......@@ -136,7 +136,7 @@ static void __flush_dcache_page(unsigned long phys)
static void sh7705_flush_dcache_page(void *arg)
{
struct page *page = arg;
struct address_space *mapping = page_mapping(page);
struct address_space *mapping = page_mapping_file(page);
if (mapping && !mapping_mapped(mapping))
clear_bit(PG_dcache_clean, &page->flags);
......
......@@ -22,7 +22,6 @@
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
......
......@@ -929,9 +929,9 @@ static inline void __local_flush_dcache_page(struct page *page)
#ifdef DCACHE_ALIASING_POSSIBLE
__flush_dcache_page(page_address(page),
((tlb_type == spitfire) &&
page_mapping(page) != NULL));
page_mapping_file(page) != NULL));
#else
if (page_mapping(page) != NULL &&
if (page_mapping_file(page) != NULL &&
tlb_type == spitfire)
__flush_icache_page(__pa(page_address(page)));
#endif
......@@ -958,7 +958,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
if (page_mapping(page) != NULL)
if (page_mapping_file(page) != NULL)
data0 |= ((u64)1 << 32);
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
......@@ -994,7 +994,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
pg_addr = page_address(page);
if (tlb_type == spitfire) {
data0 = ((u64)&xcall_flush_dcache_page_spitfire);
if (page_mapping(page) != NULL)
if (page_mapping_file(page) != NULL)
data0 |= ((u64)1 << 32);
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
......
......@@ -206,9 +206,9 @@ inline void flush_dcache_page_impl(struct page *page)
#ifdef DCACHE_ALIASING_POSSIBLE
__flush_dcache_page(page_address(page),
((tlb_type == spitfire) &&
page_mapping(page) != NULL));
page_mapping_file(page) != NULL));
#else
if (page_mapping(page) != NULL &&
if (page_mapping_file(page) != NULL &&
tlb_type == spitfire)
__flush_icache_page(__pa(page_address(page)));
#endif
......@@ -490,7 +490,7 @@ void flush_dcache_page(struct page *page)
this_cpu = get_cpu();
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (mapping && !mapping_mapped(mapping)) {
int dirty = test_bit(PG_dcache_dirty, &page->flags);
if (dirty) {
......
......@@ -128,7 +128,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
goto no_cache_flush;
/* A real file page? */
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!mapping)
goto no_cache_flush;
......
......@@ -83,7 +83,7 @@ void flush_dcache_page(struct page *page)
if (page == ZERO_PAGE(0))
return;
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (mapping && !mapping_mapped(mapping))
clear_bit(PG_dcache_clean, &page->flags);
......
......@@ -503,7 +503,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
if (page == ZERO_PAGE(0))
return;
mapping = page_mapping(page);
mapping = page_mapping_file(page);
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
__flush_dcache_page(mapping, page);
if (mapping)
......
......@@ -6,7 +6,6 @@
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>
#include <asm/proto.h>
#include <asm/dma.h>
......
......@@ -1328,14 +1328,39 @@ int kern_addr_valid(unsigned long addr)
return pfn_valid(pte_pfn(*pte));
}
/*
* Block size is the minimum amount of memory which can be hotplugged or
* hotremoved. It must be power of two and must be equal or larger than
* MIN_MEMORY_BLOCK_SIZE.
*/
#define MAX_BLOCK_SIZE (2UL << 30)
/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
static unsigned long probe_memory_block_size(void)
{
unsigned long bz = MIN_MEMORY_BLOCK_SIZE;
unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
unsigned long bz;
/* if system is UV or has 64GB of RAM or more, use large blocks */
if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
bz = 2UL << 30; /* 2GB */
/* If this is UV system, always set 2G block size */
if (is_uv_system()) {
bz = MAX_BLOCK_SIZE;
goto done;
}
/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
bz = MIN_MEMORY_BLOCK_SIZE;
goto done;
}
/* Find the largest allowed block size that aligns to memory end */
for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
if (IS_ALIGNED(boot_mem_end, bz))
break;
}
done:
pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);
return bz;
......
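The new selection logic is easy to exercise in isolation. A small userspace sketch with stand-in constants; 128MB is assumed for x86-64's MIN_MEMORY_BLOCK_SIZE, a 64-bit unsigned long is assumed, and the function and macro names are invented for the example:

#include <stdbool.h>
#include <stdio.h>

#define MIN_BLOCK_SIZE		(128UL << 20)	/* assumed x86-64 section size */
#define MAX_BLOCK_SIZE		(2UL << 30)
#define LARGE_BLOCK_THRESHOLD	(64UL << 30)

static unsigned long pick_block_size(unsigned long boot_mem_end, bool is_uv)
{
	unsigned long bz;

	if (is_uv)
		return MAX_BLOCK_SIZE;		/* UV systems: always 2GB */
	if (boot_mem_end < LARGE_BLOCK_THRESHOLD)
		return MIN_BLOCK_SIZE;		/* small machines: regular blocks */
	/* Largest power-of-two block that still aligns with the end of RAM. */
	for (bz = MAX_BLOCK_SIZE; bz > MIN_BLOCK_SIZE; bz >>= 1)
		if ((boot_mem_end & (bz - 1)) == 0)
			break;
	return bz;
}

int main(void)
{
	/* RAM ending at 66GB is 2GB-aligned -> 2048MB blocks. */
	printf("%luMB\n", pick_block_size(66UL << 30, false) >> 20);
	/* RAM ending at 64.5GB is only 512MB-aligned -> 512MB blocks. */
	printf("%luMB\n", pick_block_size((64UL << 30) + (512UL << 20), false) >> 20);
	return 0;
}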
......@@ -127,7 +127,7 @@ EXPORT_SYMBOL(copy_user_highpage);
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
struct address_space *mapping = page_mapping_file(page);
/*
* If we have a mapping but the page is not mapped to user-space
......
......@@ -187,13 +187,14 @@ int memory_isolate_notify(unsigned long val, void *v)
}
/*
* The probe routines leave the pages reserved, just as the bootmem code does.
* Make sure they're still that way.
* The probe routines leave the pages uninitialized, just as the bootmem code
* does. Make sure we do not access them, but instead use only information from
* within sections.
*/
static bool pages_correctly_reserved(unsigned long start_pfn)
static bool pages_correctly_probed(unsigned long start_pfn)
{
int i, j;
struct page *page;
unsigned long section_nr = pfn_to_section_nr(start_pfn);
unsigned long section_nr_end = section_nr + sections_per_block;
unsigned long pfn = start_pfn;
/*
......@@ -201,21 +202,24 @@ static bool pages_correctly_reserved(unsigned long start_pfn)
* SPARSEMEM_VMEMMAP. We lookup the page once per section
* and assume memmap is contiguous within each section
*/
for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
for (; section_nr < section_nr_end; section_nr++) {
if (WARN_ON_ONCE(!pfn_valid(pfn)))
return false;
page = pfn_to_page(pfn);
for (j = 0; j < PAGES_PER_SECTION; j++) {
if (PageReserved(page + j))
continue;
printk(KERN_WARNING "section number %ld page number %d "
"not reserved, was it already online?\n",
pfn_to_section_nr(pfn), j);
if (!present_section_nr(section_nr)) {
pr_warn("section %ld pfn[%lx, %lx) not present",
section_nr, pfn, pfn + PAGES_PER_SECTION);
return false;
} else if (!valid_section_nr(section_nr)) {
pr_warn("section %ld pfn[%lx, %lx) no valid memmap",
section_nr, pfn, pfn + PAGES_PER_SECTION);
return false;
} else if (online_section_nr(section_nr)) {
pr_warn("section %ld pfn[%lx, %lx) is already online",
section_nr, pfn, pfn + PAGES_PER_SECTION);
return false;
}
pfn += PAGES_PER_SECTION;
}
return true;
......@@ -237,7 +241,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
switch (action) {
case MEM_ONLINE:
if (!pages_correctly_reserved(start_pfn))
if (!pages_correctly_probed(start_pfn))
return -EBUSY;
ret = online_pages(start_pfn, nr_pages, online_type);
......@@ -708,7 +712,7 @@ static int add_memory_block(int base_section_nr)
* need an interface for the VM to add new memory regions,
* but without onlining it.
*/
int register_new_memory(int nid, struct mem_section *section)
int hotplug_memory_register(int nid, struct mem_section *section)
{
int ret = 0;
struct memory_block *mem;
......@@ -727,7 +731,7 @@ int register_new_memory(int nid, struct mem_section *section)
}
if (mem->section_count == sections_per_block)
ret = register_mem_sect_under_node(mem, nid);
ret = register_mem_sect_under_node(mem, nid, false);
out:
mutex_unlock(&mem_sysfs_mutex);
return ret;
......
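For scale, using the x86-64 figures assumed in the sketch earlier rather than anything stated in this hunk: with 128MB sections and a 2GB memory block, sections_per_block is 16, so onlining one block runs the present/valid/online test sixteen times and consults only section metadata, whereas the old pages_correctly_reserved() walk had to read PageReserved() from every struct page in the block.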
......@@ -399,13 +399,16 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
}
/* register memory section under specified node if it spans that node */
int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
int register_mem_sect_under_node(struct memory_block *mem_blk, int nid,
bool check_nid)
{
int ret;
unsigned long pfn, sect_start_pfn, sect_end_pfn;
if (!mem_blk)
return -EFAULT;
mem_blk->nid = nid;
if (!node_online(nid))
return 0;
......@@ -425,11 +428,18 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
continue;
}
page_nid = get_nid_for_pfn(pfn);
if (page_nid < 0)
continue;
if (page_nid != nid)
continue;
/*
* We need to check if page belongs to nid only for the boot
* case, during hotplug we know that all pages in the memory