Commit d381c547 authored by Michal Hocko, committed by Linus Torvalds

mm: only report isolation failures when offlining memory

Heiko has complained that his log is swamped by warnings from
has_unmovable_pages

[   20.536664] page dumped because: has_unmovable_pages
[   20.536792] page:000003d081ff4080 count:1 mapcount:0 mapping:000000008ff88600 index:0x0 compound_mapcount: 0
[   20.536794] flags: 0x3fffe0000010200(slab|head)
[   20.536795] raw: 03fffe0000010200 0000000000000100 0000000000000200 000000008ff88600
[   20.536796] raw: 0000000000000000 0020004100000000 ffffffff00000001 0000000000000000
[   20.536797] page dumped because: has_unmovable_pages
[   20.536814] page:000003d0823b0000 count:1 mapcount:0 mapping:0000000000000000 index:0x0
[   20.536815] flags: 0x7fffe0000000000()
[   20.536817] raw: 07fffe0000000000 0000000000000100 0000000000000200 0000000000000000
[   20.536818] raw: 0000000000000000 0000000000000000 ffffffff00000001 0000000000000000

which are not triggered by memory hotplug but rather by the CMA allocator.
The original idea behind dumping the page state...
parent 2932c8b0
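
The patch replaces the single skip_hwpoisoned_pages boolean with a small flags bitmask (SKIP_HWPOISON, REPORT_FAILURE), so only the memory offlining path opts into the page dump while the CMA path stays quiet. As a rough illustration of that pattern only -- a stand-alone user-space sketch, not the kernel code -- the flag-gated reporting looks like this:

/*
 * Stand-alone sketch of the bool -> flags-bitmask conversion used by the
 * patch; the names mirror the kernel ones but nothing here is kernel code.
 */
#include <stdio.h>
#include <stdbool.h>

#define SKIP_HWPOISON	0x1	/* ignore hwpoison pages */
#define REPORT_FAILURE	0x2	/* report details about the isolation failure */

struct page { bool hwpoison; bool movable; };

/*
 * Returns true if an unmovable page is found.  The page is only dumped when
 * the caller asked for a report, so CMA-style callers no longer spam the log.
 */
static bool has_unmovable_pages(struct page *pages, int count, int flags)
{
	for (int i = 0; i < count; i++) {
		if ((flags & SKIP_HWPOISON) && pages[i].hwpoison)
			continue;
		if (pages[i].movable)
			continue;
		if (flags & REPORT_FAILURE)
			printf("page %d dumped because: unmovable page\n", i);
		return true;
	}
	return false;
}

int main(void)
{
	struct page range[2] = { { .movable = true }, { .movable = false } };

	/* CMA-style caller: isolation failures are expected, stay quiet. */
	has_unmovable_pages(range, 2, 0);

	/* Offlining-style caller: skip hwpoisoned pages and report failures. */
	has_unmovable_pages(range, 2, SKIP_HWPOISON | REPORT_FAILURE);
	return 0;
}

In the diff below, __offline_pages() passes SKIP_HWPOISON | REPORT_FAILURE, while alloc_contig_range() passes 0, which is what silences the CMA-triggered dumps.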
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -30,8 +30,11 @@ static inline bool is_migrate_isolate(int migratetype)
 }
 #endif
 
+#define SKIP_HWPOISON	0x1
+#define REPORT_FAILURE	0x2
+
 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
-			 int migratetype, bool skip_hwpoisoned_pages);
+			 int migratetype, int flags);
 void set_pageblock_migratetype(struct page *page, int migratetype);
 int move_freepages_block(struct zone *zone, struct page *page,
 				int migratetype, int *num_movable);
@@ -44,10 +47,14 @@ int move_freepages_block(struct zone *zone, struct page *page,
  * For isolating all pages in the range finally, the caller have to
  * free all pages in the range. test_page_isolated() can be used for
  * test it.
+ *
+ * The following flags are allowed (they can be combined in a bit mask)
+ * SKIP_HWPOISON - ignore hwpoison pages
+ * REPORT_FAILURE - report details about the failure to isolate the range
  */
 int
 start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
-			 unsigned migratetype, bool skip_hwpoisoned_pages);
+			 unsigned migratetype, int flags);
 
 /*
  * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1226,7 +1226,7 @@ static bool is_pageblock_removable_nolock(struct page *page)
 	if (!zone_spans_pfn(zone, pfn))
 		return false;
 
-	return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true);
+	return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON);
 }
 
 /* Checks if this range of memory is likely to be hot-removable. */
@@ -1577,7 +1577,8 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
 	/* set above range as isolated */
 	ret = start_isolate_page_range(start_pfn, end_pfn,
-				       MIGRATE_MOVABLE, true);
+				       MIGRATE_MOVABLE,
+				       SKIP_HWPOISON | REPORT_FAILURE);
 	if (ret) {
 		mem_hotplug_done();
 		reason = "failure to isolate range";
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7767,8 +7767,7 @@ void *__init alloc_large_system_hash(const char *tablename,
  * race condition. So you can't expect this function should be exact.
  */
 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
-			 int migratetype,
-			 bool skip_hwpoisoned_pages)
+			 int migratetype, int flags)
 {
 	unsigned long pfn, iter, found;
 
@@ -7842,7 +7841,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		 * The HWPoisoned page may be not in buddy system, and
 		 * page_count() is not 0.
 		 */
-		if (skip_hwpoisoned_pages && PageHWPoison(page))
+		if ((flags & SKIP_HWPOISON) && PageHWPoison(page))
 			continue;
 
 		if (__PageMovable(page))
@@ -7869,7 +7868,8 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 	return false;
 unmovable:
 	WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
-	dump_page(pfn_to_page(pfn+iter), "unmovable page");
+	if (flags & REPORT_FAILURE)
+		dump_page(pfn_to_page(pfn+iter), "unmovable page");
 	return true;
 }
 
@@ -7996,8 +7996,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	 */
 
 	ret = start_isolate_page_range(pfn_max_align_down(start),
-				       pfn_max_align_up(end), migratetype,
-				       false);
+				       pfn_max_align_up(end), migratetype, 0);
 	if (ret)
 		return ret;
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -15,8 +15,7 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/page_isolation.h>
 
-static int set_migratetype_isolate(struct page *page, int migratetype,
-				bool skip_hwpoisoned_pages)
+static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
 {
 	struct zone *zone;
 	unsigned long flags, pfn;
@@ -60,8 +59,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype,
 	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
 	 * We just check MOVABLE pages.
 	 */
-	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
-				 skip_hwpoisoned_pages))
+	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags))
 		ret = 0;
 
 	/*
@@ -185,7 +183,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
 * prevents two threads from simultaneously working on overlapping ranges.
 */
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
-			     unsigned migratetype, bool skip_hwpoisoned_pages)
+			     unsigned migratetype, int flags)
 {
 	unsigned long pfn;
 	unsigned long undo_pfn;
@@ -199,7 +197,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 			pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
 		if (page &&
-		    set_migratetype_isolate(page, migratetype, skip_hwpoisoned_pages)) {
+		    set_migratetype_isolate(page, migratetype, flags)) {
 			undo_pfn = pfn;
 			goto undo;
 		}