Commit 309381fe authored by Sasha Levin, committed by Linus Torvalds

mm: dump page when hitting a VM_BUG_ON using VM_BUG_ON_PAGE



Most of the VM_BUG_ON assertions are performed on a page.  Usually, when
one of these assertions fails we'll get a BUG_ON with a call stack and
the registers.

I've recently noticed, based on requests to add a small piece of code that
dumps the page to various VM_BUG_ON sites, that the page dump is quite
useful to people debugging issues in mm.

This patch adds VM_BUG_ON_PAGE(cond, page), which, beyond doing what
VM_BUG_ON() does, also dumps the page before executing the actual
BUG_ON.
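
For illustration, the conversion pattern applied throughout this patch
looks like the following (a minimal sketch; the PageLocked() check is just
one example condition taken from the hunks below):

	/* Before: a failed assertion reports only registers and a call stack. */
	VM_BUG_ON(!PageLocked(page));

	/* After: dump_page() prints the struct page state before BUG() fires. */
	VM_BUG_ON_PAGE(!PageLocked(page), page);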

[akpm@linux-foundation.org: fix up includes]
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e3bba3c3
@@ -108,8 +108,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 
 static inline void get_head_page_multiple(struct page *page, int nr)
 {
-	VM_BUG_ON(page != compound_head(page));
-	VM_BUG_ON(page_count(page) == 0);
+	VM_BUG_ON_PAGE(page != compound_head(page), page);
+	VM_BUG_ON_PAGE(page_count(page) == 0, page);
 	atomic_add(nr, &page->_count);
 	SetPageReferenced(page);
 }
@@ -135,7 +135,7 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
 	head = pte_page(pte);
 	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 	do {
-		VM_BUG_ON(compound_head(page) != head);
+		VM_BUG_ON_PAGE(compound_head(page) != head, page);
 		pages[*nr] = page;
 		if (PageTail(page))
 			get_huge_page_tail(page);
@@ -212,7 +212,7 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
 	head = pte_page(pte);
 	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 	do {
-		VM_BUG_ON(compound_head(page) != head);
+		VM_BUG_ON_PAGE(compound_head(page) != head, page);
 		pages[*nr] = page;
 		if (PageTail(page))
 			get_huge_page_tail(page);
......
 #ifndef __LINUX_GFP_H
 #define __LINUX_GFP_H
 
+#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/stddef.h>
 #include <linux/linkage.h>
......
@@ -2,6 +2,7 @@
 #define _LINUX_HUGETLB_H
 
 #include <linux/mm_types.h>
+#include <linux/mmdebug.h>
 #include <linux/fs.h>
 #include <linux/hugetlb_inline.h>
 #include <linux/cgroup.h>
@@ -354,7 +355,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 
 static inline struct hstate *page_hstate(struct page *page)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 	return size_to_hstate(PAGE_SIZE << compound_order(page));
 }
......
@@ -15,6 +15,7 @@
 #ifndef _LINUX_HUGETLB_CGROUP_H
 #define _LINUX_HUGETLB_CGROUP_H
 
+#include <linux/mmdebug.h>
 #include <linux/res_counter.h>
 
 struct hugetlb_cgroup;
@@ -28,7 +29,7 @@ struct hugetlb_cgroup;
 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 
 	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
 		return NULL;
@@ -38,7 +39,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
 static inline
 int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 
 	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
 		return -1;
......
@@ -5,6 +5,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/mmdebug.h>
 #include <linux/gfp.h>
 #include <linux/bug.h>
 #include <linux/list.h>
@@ -303,7 +304,7 @@ static inline int get_freepage_migratetype(struct page *page)
  */
 static inline int put_page_testzero(struct page *page)
 {
-	VM_BUG_ON(atomic_read(&page->_count) == 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
 	return atomic_dec_and_test(&page->_count);
 }
@@ -364,7 +365,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(PageSlab(page));
+	VM_BUG_ON_PAGE(PageSlab(page), page);
 	bit_spin_lock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -372,7 +373,7 @@ static inline void compound_lock(struct page *page)
 static inline void compound_unlock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(PageSlab(page));
+	VM_BUG_ON_PAGE(PageSlab(page), page);
 	bit_spin_unlock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -447,7 +448,7 @@ static inline bool __compound_tail_refcounted(struct page *page)
  */
 static inline bool compound_tail_refcounted(struct page *page)
 {
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	return __compound_tail_refcounted(page);
 }
@@ -456,9 +457,9 @@ static inline void get_huge_page_tail(struct page *page)
 	/*
 	 * __split_huge_page_refcount() cannot run from under us.
 	 */
-	VM_BUG_ON(!PageTail(page));
-	VM_BUG_ON(page_mapcount(page) < 0);
-	VM_BUG_ON(atomic_read(&page->_count) != 0);
+	VM_BUG_ON_PAGE(!PageTail(page), page);
+	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
 	if (compound_tail_refcounted(page->first_page))
 		atomic_inc(&page->_mapcount);
 }
@@ -474,7 +475,7 @@ static inline void get_page(struct page *page)
 	 * Getting a normal page or the head of a compound page
 	 * requires to already have an elevated page->_count.
 	 */
-	VM_BUG_ON(atomic_read(&page->_count) <= 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 	atomic_inc(&page->_count);
 }
@@ -511,13 +512,13 @@ static inline int PageBuddy(struct page *page)
 
 static inline void __SetPageBuddy(struct page *page)
 {
-	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
+	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
 	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 }
 
 static inline void __ClearPageBuddy(struct page *page)
 {
-	VM_BUG_ON(!PageBuddy(page));
+	VM_BUG_ON_PAGE(!PageBuddy(page), page);
 	atomic_set(&page->_mapcount, -1);
 }
@@ -1401,7 +1402,7 @@ static inline bool ptlock_init(struct page *page)
 	 * slab code uses page->slab_cache and page->first_page (for tail
 	 * pages), which share storage with page->ptl.
 	 */
-	VM_BUG_ON(*(unsigned long *)&page->ptl);
+	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
 	if (!ptlock_alloc(page))
 		return false;
 	spin_lock_init(ptlock_ptr(page));
@@ -1492,7 +1493,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
 static inline void pgtable_pmd_page_dtor(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(page->pmd_huge_pte);
+	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
 #endif
 	ptlock_free(page);
 }
@@ -2029,10 +2030,6 @@ extern void shake_page(struct page *p, int access);
 extern atomic_long_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
-extern void dump_page(struct page *page, char *reason);
-extern void dump_page_badflags(struct page *page, char *reason,
-			       unsigned long badflags);
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 extern void clear_huge_page(struct page *page,
 			    unsigned long addr,
......
 #ifndef LINUX_MM_DEBUG_H
 #define LINUX_MM_DEBUG_H 1
 
+struct page;
+
+extern void dump_page(struct page *page, char *reason);
+extern void dump_page_badflags(struct page *page, char *reason,
+			       unsigned long badflags);
+
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
+#define VM_BUG_ON_PAGE(cond, page) \
+	do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0)
 #else
 #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
 #endif
 
 #ifdef CONFIG_DEBUG_VIRTUAL
......
@@ -412,7 +412,7 @@ static inline void ClearPageCompound(struct page *page)
  */
 static inline int PageTransHuge(struct page *page)
 {
-	VM_BUG_ON(PageTail(page));
+	VM_BUG_ON_PAGE(PageTail(page), page);
 	return PageHead(page);
 }
@@ -460,25 +460,25 @@ static inline int PageTransTail(struct page *page)
  */
 static inline int PageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	return PageActive(page);
 }
 
 static inline void SetPageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	SetPageActive(page);
 }
 
 static inline void __ClearPageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	__ClearPageActive(page);
 }
 
 static inline void ClearPageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	ClearPageActive(page);
 }
......
@@ -162,7 +162,7 @@ static inline int page_cache_get_speculative(struct page *page)
 	 * disabling preempt, and hence no need for the "speculative get" that
 	 * SMP requires.
 	 */
-	VM_BUG_ON(page_count(page) == 0);
+	VM_BUG_ON_PAGE(page_count(page) == 0, page);
 	atomic_inc(&page->_count);
 
 #else
@@ -175,7 +175,7 @@ static inline int page_cache_get_speculative(struct page *page)
 		return 0;
 	}
 #endif
-	VM_BUG_ON(PageTail(page));
+	VM_BUG_ON_PAGE(PageTail(page), page);
 
 	return 1;
 }
@@ -191,14 +191,14 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 # ifdef CONFIG_PREEMPT_COUNT
 	VM_BUG_ON(!in_atomic());
 # endif
-	VM_BUG_ON(page_count(page) == 0);
+	VM_BUG_ON_PAGE(page_count(page) == 0, page);
 	atomic_add(count, &page->_count);
 
 #else
 	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
 		return 0;
 #endif
-	VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
 
 	return 1;
 }
@@ -210,7 +210,7 @@ static inline int page_freeze_refs(struct page *page, int count)
 
 static inline void page_unfreeze_refs(struct page *page, int count)
 {
-	VM_BUG_ON(page_count(page) != 0);
+	VM_BUG_ON_PAGE(page_count(page) != 0, page);
 	VM_BUG_ON(count == 0);
 
 	atomic_set(&page->_count, count);
......
 #ifndef __LINUX_PERCPU_H
 #define __LINUX_PERCPU_H
 
+#include <linux/mmdebug.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
......
@@ -237,7 +237,7 @@ int __cleancache_get_page(struct page *page)
 		goto out;
 	}
 
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
 	if (fake_pool_id < 0)
 		goto out;
@@ -279,7 +279,7 @@ void __cleancache_put_page(struct page *page)
 		return;
 	}
 
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	fake_pool_id = page->mapping->host->i_sb->cleancache_poolid;
 	if (fake_pool_id < 0)
 		return;
@@ -318,7 +318,7 @@ void __cleancache_invalidate_page(struct address_space *mapping,
 		if (pool_id < 0)
 			return;
 
-		VM_BUG_ON(!PageLocked(page));
+		VM_BUG_ON_PAGE(!PageLocked(page), page);
 		if (cleancache_get_key(mapping->host, &key) >= 0) {
 			cleancache_ops->invalidate_page(pool_id,
 					key, page->index);
......
@@ -601,7 +601,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (__isolate_lru_page(page, mode) != 0)
 			continue;
 
-		VM_BUG_ON(PageTransCompound(page));
+		VM_BUG_ON_PAGE(PageTransCompound(page), page);
 
 		/* Successfully isolated */
 		cc->finished_update_migrate = true;
......
@@ -409,9 +409,9 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
 	int error;
 
-	VM_BUG_ON(!PageLocked(old));
-	VM_BUG_ON(!PageLocked(new));
-	VM_BUG_ON(new->mapping);
+	VM_BUG_ON_PAGE(!PageLocked(old), old);
+	VM_BUG_ON_PAGE(!PageLocked(new), new);
+	VM_BUG_ON_PAGE(new->mapping, new);
 
 	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 	if (!error) {
@@ -461,8 +461,8 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 {
 	int error;
 
-	VM_BUG_ON(!PageLocked(page));
-	VM_BUG_ON(PageSwapBacked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
 
 	error = mem_cgroup_cache_charge(page, current->mm,
 					gfp_mask & GFP_RECLAIM_MASK);
@@ -607,7 +607,7 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
  */
 void unlock_page(struct page *page)
 {
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	clear_bit_unlock(PG_locked, &page->flags);
 	smp_mb__after_clear_bit();
 	wake_up_page(page, PG_locked);
@@ -760,7 +760,7 @@ struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
 			page_cache_release(page);
 			goto repeat;
 		}
-		VM_BUG_ON(page->index != offset);
+		VM_BUG_ON_PAGE(page->index != offset, page);
 	}
 	return page;
 }
@@ -1656,7 +1656,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		put_page(page);
 		goto retry_find;
 	}
-	VM_BUG_ON(page->index != offset);
+	VM_BUG_ON_PAGE(page->index != offset, page);
 
 	/*
 	 * We have a locked page in the page cache, now we need to check
......
@@ -712,7 +712,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	pgtable_t pgtable;
 	spinlock_t *ptl;
 
-	VM_BUG_ON(!PageCompound(page));
+	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
 	pgtable = pte_alloc_one(mm, haddr);
 	if (unlikely(!pgtable))
 		return VM_FAULT_OOM;
@@ -893,7 +893,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		goto out;
 	}
 	src_page = pmd_page(pmd);
-	VM_BUG_ON(!PageHead(src_page));
+	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
 	get_page(src_page);
 	page_dup_rmap(src_page);
 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
@@ -1067,7 +1067,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	ptl = pmd_lock(mm, pmd);
 	if (unlikely(!pmd_same(*pmd, orig_pmd)))
 		goto out_free_pages;
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 
 	pmdp_clear_flush(vma, haddr, pmd);
 	/* leave pmd empty until pte is filled */
@@ -1133,7 +1133,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_unlock;
 
 	page = pmd_page(orig_pmd);
-	VM_BUG_ON(!PageCompound(page) || !PageHead(page));
+	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
 	if (page_mapcount(page) == 1) {
 		pmd_t entry;
 		entry = pmd_mkyoung(orig_pmd);
@@ -1211,7 +1211,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
 		put_huge_zero_page();
 	} else {
-		VM_BUG_ON(!PageHead(page));
+		VM_BUG_ON_PAGE(!PageHead(page), page);
 		page_remove_rmap(page);
 		put_page(page);
 	}
@@ -1249,7 +1249,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		goto out;
 
 	page = pmd_page(*pmd);
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	if (flags & FOLL_TOUCH) {
 		pmd_t _pmd;
 		/*
@@ -1274,7 +1274,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		}
 	}
 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
-	VM_BUG_ON(!PageCompound(page));
+	VM_BUG_ON_PAGE(!PageCompound(page), page);
 	if (flags & FOLL_GET)
 		get_page_foll(page);
@@ -1432,9 +1432,9 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	} else {
 		page = pmd_page(orig_pmd);
 		page_remove_rmap(page);
-		VM_BUG_ON(page_mapcount(page) < 0);
+		VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
 		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
-		VM_BUG_ON(!PageHead(page));
+		VM_BUG_ON_PAGE(!PageHead(page), page);
 		atomic_long_dec(&tlb->mm->nr_ptes);
 		spin_unlock(ptl);
 		tlb_remove_page(tlb, page);
@@ -2176,9 +2176,9 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		if (unlikely(!page))
 			goto out;
 
-		VM_BUG_ON(PageCompound(page));
-		BUG_ON(!PageAnon(page));
-		VM_BUG_ON(!PageSwapBacked(page));
+		VM_BUG_ON_PAGE(PageCompound(page), page);
+		VM_BUG_ON_PAGE(!PageAnon(page), page);
+		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
 		/* cannot use mapcount: can't collapse if there's a gup pin */
 		if (page_count(page) != 1)
@@ -2201,8 +2201,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		}
 		/* 0 stands for page_is_file_cache(page) == false */
 		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
-		VM_BUG_ON(!PageLocked(page));
-		VM_BUG_ON(PageLRU(page));
+		VM_BUG_ON_PAGE(!PageLocked(page), page);
+		VM_BUG_ON_PAGE(PageLRU(page), page);
 
 		/* If there is no mapped pte young don't collapse the page */
 		if (pte_young(pteval) || PageReferenced(page) ||
@@ -2232,7 +2232,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 		} else {
 			src_page = pte_page(pteval);
 			copy_user_highpage(page, src_page, address, vma);
-			VM_BUG_ON(page_mapcount(src_page) != 1);
+			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
 			release_pte_page(src_page);
 			/*
 			 * ptl mostly unnecessary, but preempt has to
@@ -2311,7 +2311,7 @@ static struct page
 		       struct vm_area_struct *vma, unsigned long address,
 		       int node)
 {
-	VM_BUG_ON(*hpage);
+	VM_BUG_ON_PAGE(*hpage, *hpage);
 	/*
 	 * Allocate the page while the vma is still valid and under
 	 * the mmap_sem read mode so there is no memory allocation
@@ -2580,7 +2580,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 		 */
 		node = page_to_nid(page);
 		khugepaged_node_load[node]++;
-		VM_BUG_ON(PageCompound(page));
+		VM_BUG_ON_PAGE(PageCompound(page), page);
 		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
 			goto out_unmap;
 		/* cannot use mapcount: can't collapse if there's a gup pin */
@@ -2876,7 +2876,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
 		return;
 	}
 	page = pmd_page(*pmd);
-	VM_BUG_ON(!page_count(page));
+	VM_BUG_ON_PAGE(!page_count(page), page);
 	get_page(page);
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
......
@@ -584,7 +584,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 				1 << PG_active | 1 << PG_reserved |
 				1 << PG_private | 1 << PG_writeback);
 	}
-	VM_BUG_ON(hugetlb_cgroup_from_page(page));
+	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
 	set_compound_page_dtor(page, NULL);
 	set_page_refcounted(page);
 	arch_release_hugepage(page);
@@ -1089,7 +1089,7 @@ static int gather_surplus_pages(struct hstate *h, int delta)
 		 * no users -- drop the buddy allocator's reference.
 		 */
 		put_page_testzero(page);
-		VM_BUG_ON(page_count(page));
+		VM_BUG_ON_PAGE(page_count(page), page);
 		enqueue_huge_page(h, page);
 	}
 free:
@@ -3503,7 +3503,7 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
 
 bool isolate_huge_page(struct page *page, struct list_head *list)
 {
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	if (!get_page_unless_zero(page))
 		return false;
 	spin_lock(&hugetlb_lock);
@@ -3514,7 +3514,7 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
 
 void putback_active_hugepage(struct page *page)
 {
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	spin_lock(&hugetlb_lock);
 	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
 	spin_unlock(&hugetlb_lock);
@@ -3523,7 +3523,7 @@ void putback_active_hugepage(struct page *page)
 
 bool is_hugepage_active(struct page *page)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 	/*
 	 * This function can be called for a tail page because the caller,
 	 * scan_movable_pages, scans through a given pfn-range which typically
......
@@ -390,7 +390,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 	if (hugetlb_cgroup_disabled())
 		return;
 
-	VM_BUG_ON(!PageHuge(oldhpage));
+	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
 	spin_lock(&hugetlb_lock);
 	h_cg = hugetlb_cgroup_from_page(oldhpage);
 	set_hugetlb_cgroup(oldhpage, NULL);
......
@@ -27,8 +27,8 @@ static inline void set_page_count(struct page *page, int v)
  */
 static inline void set_page_refcounted(struct page *page)
 {
-	VM_BUG_ON(PageTail(page));
-	VM_BUG_ON(atomic_read(&page->_count));
+	VM_BUG_ON_PAGE(PageTail(page), page);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
 	set_page_count(page, 1);
 }
@@ -46,7 +46,7 @@ static inline void __get_page_tail_foll(struct page *page,
 	 * speculative page access (like in
 	 * page_cache_get_speculative()) on tail pages.
 	 */
-	VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page);
 	if (get_page_head)
 		atomic_inc(&page->first_page->_count);
 	get_huge_page_tail(page);
@@ -71,7 +71,7 @@ static inline void get_page_foll(struct page *page)
 		 * Getting a normal page or the head of a compound page
 		 * requires to already have an elevated page->_count.
 		 */
-		VM_BUG_ON(atomic_read(&page->_count) <= 0);
+		VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -173,7 +173,7 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
 				      struct page *page)
 {
-	VM_BUG_ON(PageLRU(page));
+	VM_BUG_ON_PAGE(PageLRU(page), page);
 	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
 		return 0;
......
@@ -1898,13 +1898,13 @@ int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 	int ret = SWAP_AGAIN;
 	int search_new_forks = 0;
 
-	VM_BUG_ON(!PageKsm(page));
+	VM_BUG_ON_PAGE(!PageKsm(page), page);
 
 	/*
 	 * Rely on the page lock to protect against concurrent modifications
 	 * to that page's node of the stable tree.
 	 */
-	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
 	stable_node = page_stable_node(page);
 	if (!stable_node)
@@ -1958,13 +1958,13 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
 	struct stable_node *stable_node;