Commit 77ba9062 authored by Aaron Lu's avatar Aaron Lu Committed by Linus Torvalds

mm/free_pcppages_bulk: update pcp->count inside

Matthew Wilcox found that all callers of free_pcppages_bulk() currently
update pcp->count immediately after so it's natural to do it inside
free_pcppages_bulk().

No functionality or performance change is expected from this patch.

Link: http://lkml.kernel.org/r/20180301062845.26038-2-aaron.lu@intel.com
Signed-off-by: Aaron Lu <aaron.lu@intel.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kemi Wang <kemi.wang@intel.com>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bc3106b2
...@@ -1112,6 +1112,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, ...@@ -1112,6 +1112,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
page = list_last_entry(list, struct page, lru); page = list_last_entry(list, struct page, lru);
/* must delete as __free_one_page list manipulates */ /* must delete as __free_one_page list manipulates */
list_del(&page->lru); list_del(&page->lru);
pcp->count--;
mt = get_pcppage_migratetype(page); mt = get_pcppage_migratetype(page);
/* MIGRATE_ISOLATE page should not go to pcplists */ /* MIGRATE_ISOLATE page should not go to pcplists */
...@@ -2495,10 +2496,8 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) ...@@ -2495,10 +2496,8 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
local_irq_save(flags); local_irq_save(flags);
batch = READ_ONCE(pcp->batch); batch = READ_ONCE(pcp->batch);
to_drain = min(pcp->count, batch); to_drain = min(pcp->count, batch);
if (to_drain > 0) { if (to_drain > 0)
free_pcppages_bulk(zone, to_drain, pcp); free_pcppages_bulk(zone, to_drain, pcp);
pcp->count -= to_drain;
}
local_irq_restore(flags); local_irq_restore(flags);
} }
#endif #endif
...@@ -2520,10 +2519,8 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) ...@@ -2520,10 +2519,8 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
pset = per_cpu_ptr(zone->pageset, cpu); pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp; pcp = &pset->pcp;
if (pcp->count) { if (pcp->count)
free_pcppages_bulk(zone, pcp->count, pcp); free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
local_irq_restore(flags); local_irq_restore(flags);
} }
...@@ -2747,7 +2744,6 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn) ...@@ -2747,7 +2744,6 @@ static void free_unref_page_commit(struct page *page, unsigned long pfn)
if (pcp->count >= pcp->high) { if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch); unsigned long batch = READ_ONCE(pcp->batch);
free_pcppages_bulk(zone, batch, pcp); free_pcppages_bulk(zone, batch, pcp);
pcp->count -= batch;
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment