Commit 63803222 authored by Kemi Wang's avatar Kemi Wang Committed by Linus Torvalds

mm: consider the number in local CPUs when reading NUMA stats

To avoid deviation, the per cpu number of NUMA stats in
vm_numa_stat_diff[] is included when a user *reads* the NUMA stats.

Since the NUMA stats are not read by users frequently, and the kernel does not
need them to make decisions, it is not a problem to make the readers
more expensive.

Link: http://lkml.kernel.org/r/1503568801-21305-4-git-send-email-kemi.wang@intel.com
Signed-off-by: default avatarKemi Wang <kemi.wang@intel.com>
Reported-by: default avatarJesper Dangaard Brouer <brouer@redhat.com>
Acked-by: default avatarMel Gorman <mgorman@techsingularity.net>
Cc: Aaron Lu <aaron.lu@intel.com>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Tim Chen <tim.c.chen@intel.com>
Cc: Ying Huang <ying.huang@intel.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 1d90ca89
...@@ -125,10 +125,14 @@ static inline unsigned long global_numa_state(enum numa_stat_item item) ...@@ -125,10 +125,14 @@ static inline unsigned long global_numa_state(enum numa_stat_item item)
return x; return x;
} }
static inline unsigned long zone_numa_state(struct zone *zone, static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
enum numa_stat_item item) enum numa_stat_item item)
{ {
long x = atomic_long_read(&zone->vm_numa_stat[item]); long x = atomic_long_read(&zone->vm_numa_stat[item]);
int cpu;
for_each_online_cpu(cpu)
x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];
return x; return x;
} }
......
...@@ -897,6 +897,10 @@ unsigned long sum_zone_node_page_state(int node, ...@@ -897,6 +897,10 @@ unsigned long sum_zone_node_page_state(int node,
return count; return count;
} }
/*
* Determine the per node value of a numa stat item. To avoid deviation,
* the per cpu stat number in vm_numa_stat_diff[] is also included.
*/
unsigned long sum_zone_numa_state(int node, unsigned long sum_zone_numa_state(int node,
enum numa_stat_item item) enum numa_stat_item item)
{ {
...@@ -905,7 +909,7 @@ unsigned long sum_zone_numa_state(int node, ...@@ -905,7 +909,7 @@ unsigned long sum_zone_numa_state(int node,
unsigned long count = 0; unsigned long count = 0;
for (i = 0; i < MAX_NR_ZONES; i++) for (i = 0; i < MAX_NR_ZONES; i++)
count += zone_numa_state(zones + i, item); count += zone_numa_state_snapshot(zones + i, item);
return count; return count;
} }
...@@ -1536,7 +1540,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, ...@@ -1536,7 +1540,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
seq_printf(m, "\n %-12s %lu", seq_printf(m, "\n %-12s %lu",
vmstat_text[i + NR_VM_ZONE_STAT_ITEMS], vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
zone_numa_state(zone, i)); zone_numa_state_snapshot(zone, i));
#endif #endif
seq_printf(m, "\n pagesets"); seq_printf(m, "\n pagesets");
...@@ -1792,6 +1796,7 @@ static bool need_update(int cpu) ...@@ -1792,6 +1796,7 @@ static bool need_update(int cpu)
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2); BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
#endif #endif
/* /*
* The fast way of checking if there are any vmstat diffs. * The fast way of checking if there are any vmstat diffs.
* This works because the diffs are byte sized items. * This works because the diffs are byte sized items.
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment