/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 *  Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>

#include "internal.h"

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
*/
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);

/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_node_stat);

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
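
/*
 * Worked example (illustrative numbers): with low_wmark_pages() -
 * min_wmark_pages() == 512 pages and 8 online CPUs the pressure
 * threshold is max(1, 512 / 8) = 64, well under the 125 cap; with 256
 * CPUs it drops to max(1, 512 / 256) = 2, so the accumulated per-cpu
 * drift can never hide a breach of the min watermark.
 */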

int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
 	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20 		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->managed_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
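
/*
 * Worked example (illustrative, derived from the formula above): a zone
 * with roughly 0.9 GB of managed memory gives mem = 7 units of 128 MB,
 * so on a single-CPU machine the threshold is
 * 2 * fls(1) * (1 + fls(7)) = 2 * 1 * 4 = 8, matching the first row of
 * the sample table.
 */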

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int cpu;
	int threshold;

	/* Zero current pgdat thresholds */
	for_each_online_pgdat(pgdat) {
		for_each_online_cpu(cpu) {
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
		}
	}

	for_each_populated_zone(zone) {
		struct pglist_data *pgdat = zone->zone_pgdat;
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu) {
			int pgdat_threshold;

			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

			/* Base nodestat threshold on the largest populated zone. */
			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
				= max(threshold, pgdat_threshold);
		}

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}
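
/*
 * Example of the drift check above (illustrative numbers): with 64
 * online CPUs and a threshold of 125 the per-cpu diffs can hide up to
 * 64 * 125 = 8000 free pages.  If low_wmark - min_wmark is only 1000
 * pages that drift could mask a breach of the min watermark, so
 * percpu_drift_mark is set to high_wmark_pages(zone) + 8000 and
 * free-page checks near the watermarks can fall back to a more careful
 * count below that mark.
 */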

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   long delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
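
/*
 * Example of the differential scheme above (illustrative numbers): with
 * a stat_threshold of 32, repeated __mod_zone_page_state(zone, item, 5)
 * calls only touch the per-cpu vm_stat_diff until the running value
 * exceeds 32; at that point the accumulated delta is folded into the
 * zone and global atomic counters and the per-cpu counter restarts at 0.
 */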

void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
				long delta)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		node_page_state_add(x, pgdat, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_node_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}

void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v + overstep, pgdat, item);
		__this_cpu_write(*p, -overstep);
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __inc_node_page_state(struct page *page, enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__inc_node_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < - t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < - t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v - overstep, pgdat, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void __dec_node_page_state(struct page *page, enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__dec_node_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *     0       No overstepping
 *     1       Overstepping half of threshold
 *     -1      Overstepping minus half of threshold
*/
static inline void mod_zone_state(struct zone *zone,
       enum zone_stat_item item, long delta, int overstep_mode)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}
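
/*
 * Overstep example (illustrative numbers): with a threshold of 32 and
 * overstep_mode == 1, an increment that takes the per-cpu counter to 33
 * flushes 33 + 16 = 49 into the zone counter and leaves the per-cpu
 * counter at -16, so the next 48 increments can again stay local.
 */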

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	mod_zone_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);

static inline void mod_node_state(struct pglist_data *pgdat,
       enum node_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to node counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a node.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to node counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		node_page_state_add(z, pgdat, item);
}

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
					long delta)
{
	mod_node_state(pgdat, item, delta, 0);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	mod_node_state(pgdat, item, 1, 1);
}

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_node_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_state);

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
					long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_node_page_state(pgdat, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;
	struct pglist_data *pgdat;

	pgdat = page_pgdat(page);
	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_node_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_node_page_state);
#endif

/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
static int fold_diff(int *zone_diff, int *node_diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (zone_diff[i]) {
			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
			changes++;
	}

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		if (node_diff[i]) {
			atomic_long_add(node_diff[i], &vm_node_stat[i]);
			changes++;
	}
	return changes;
}

/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(bool do_pagesets)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
	int changes = 0;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset __percpu *p = zone->pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
			if (v) {

				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				__this_cpu_write(p->expire, 3);
#endif
			}
		}
#ifdef CONFIG_NUMA
		if (do_pagesets) {
			cond_resched();
			/*
			 * Deal with draining the remote pageset of this
			 * processor
			 *
			 * Check if there are pages remaining in this pageset;
			 * if not then there is nothing to expire.
			 */
			if (!__this_cpu_read(p->expire) ||
			       !__this_cpu_read(p->pcp.count))
				continue;

			/*
			 * We never drain zones local to this processor.
			 */
			if (zone_to_nid(zone) == numa_node_id()) {
				__this_cpu_write(p->expire, 0);
				continue;
			}

			if (__this_cpu_dec_return(p->expire))
				continue;

			if (__this_cpu_read(p->pcp.count)) {
				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
				changes++;
			}
		}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
		}
	}

	changes += fold_diff(global_zone_diff, global_node_diff);
	return changes;
}

/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				int v;

				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
			}
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
			if (p->vm_node_stat_diff[i]) {
				int v;

				v = p->vm_node_stat_diff[i];
				p->vm_node_stat_diff[i] = 0;
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
	}

	fold_diff(global_zone_diff, global_node_diff);
}

/*
 * This is only called if !populated_zone(zone), which implies no other users of
 * pset->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
{
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (pset->vm_stat_diff[i]) {
			int v = pset->vm_stat_diff[i];
			pset->vm_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_stat[i]);
			atomic_long_add(v, &vm_zone_stat[i]);
		}
}
#endif

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
unsigned long sum_zone_node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_page_state(zones + i, item);

	return count;
}

/*
 * Determine the per node value of a stat item.
 */
unsigned long node_page_state(struct pglist_data *pgdat,
				enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
#endif

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
}
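
/*
 * Worked example (illustrative numbers): for an order-2 request
 * (requested = 4) against 400 free pages split into 400 order-0 blocks
 * and no suitable block, the index is
 * 1000 - (1000 + 400 * 1000 / 4) / 400 = 1000 - 252 = 748, i.e. ~0.75,
 * pointing at external fragmentation rather than a plain lack of memory.
 */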

/* Same as __fragmentation_index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

const char * const vmstat_text[] = {
	/* enum zone_stat_item counters */
	"nr_free_pages",
	"nr_zone_inactive_anon",
	"nr_zone_active_anon",
	"nr_zone_inactive_file",
	"nr_zone_active_file",
	"nr_zone_unevictable",
	"nr_zone_write_pending",
	"nr_mlock",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_kernel_stack",
	"nr_bounce",
#if IS_ENABLED(CONFIG_ZSMALLOC)
	"nr_zspages",
#endif
#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif
	"nr_free_cma",

	/* Node-based counters */
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_isolated_anon",
	"nr_isolated_file",
	"workingset_refault",
	"workingset_activate",
	"workingset_nodereclaim",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_writeback_temp",
	"nr_shmem",
	"nr_shmem_hugepages",
	"nr_shmem_pmdmapped",
	"nr_anon_transparent_hugepages",
	"nr_unstable",
	"nr_vmscan_write",
	"nr_vmscan_immediate_reclaim",
	"nr_dirtied",
	"nr_written",

	/* enum writeback_stat_item counters */
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",

#ifdef CONFIG_VM_EVENT_COUNTERS
	/* enum vm_event_item counters */
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")
	TEXTS_FOR_ZONES("allocstall")
	TEXTS_FOR_ZONES("pgskip")

	"pgfree",
	"pgactivate",
	"pgdeactivate",
	"pglazyfree",

	"pgfault",
	"pgmajfault",
	"pglazyfreed",

	"pgrefill",
	"pgsteal_kswapd",
	"pgsteal_direct",
	"pgscan_kswapd",
	"pgscan_direct",
	"pgscan_direct_throttle",

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"pageoutrun",

	"pgrotated",

	"drop_pagecache",
	"drop_slab",
	"oom_kill",

#ifdef CONFIG_NUMA_BALANCING
	"numa_pte_updates",
	"numa_huge_pte_updates",
	"numa_hint_faults",
	"numa_hint_faults_local",
	"numa_pages_migrated",
#endif
#ifdef CONFIG_MIGRATION
	"pgmigrate_success",
	"pgmigrate_fail",
#endif
#ifdef CONFIG_COMPACTION
	"compact_migrate_scanned",
	"compact_free_scanned",
	"compact_isolated",
	"compact_stall",
	"compact_fail",
	"compact_success",
	"compact_daemon_wake",
	"compact_daemon_migrate_scanned",
	"compact_daemon_free_scanned",
#endif

#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	"thp_fault_alloc",
	"thp_fault_fallback",
	"thp_collapse_alloc",
	"thp_collapse_alloc_failed",
	"thp_file_alloc",
	"thp_file_mapped",
	"thp_split_page",
	"thp_split_page_failed",
	"thp_deferred_split_page",
	"thp_split_pmd",
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	"thp_split_pud",
#endif
	"thp_zero_page_alloc",
	"thp_zero_page_alloc_failed",
#endif
#ifdef CONFIG_MEMORY_BALLOON
	"balloon_inflate",
	"balloon_deflate",
#ifdef CONFIG_BALLOON_COMPACTION
	"balloon_migrate",
#endif
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
	"nr_tlb_remote_flush",
	"nr_tlb_remote_flush_received",
#endif /* CONFIG_SMP */
	"nr_tlb_local_flush_all",
	"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */

#ifdef CONFIG_DEBUG_VM_VMACACHE
	"vmacache_find_calls",
	"vmacache_find_hits",
	"vmacache_full_flushes",
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */


#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
     defined(CONFIG_PROC_FS)
static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * Walk zones in a node and print using a callback.
 * If @assert_populated is true, only use callback for zones that are populated.
 */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		bool assert_populated,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (assert_populated && !populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, true, frag_show_print);
	return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		if (page_zone(page) != zone)
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, true, pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * Print out the number of pageblocks for each migratetype that contain pages
 * of other types. This gives an indication of how well fallbacks are being
 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
 * to determine what is going on
 */
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
{
#ifdef CONFIG_PAGE_OWNER
	int mtype;

	if (!static_branch_unlikely(&page_owner_inited))
		return;

	drain_all_pages(NULL);

	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	pagetypeinfo_showmixedcount(m, pgdat);

	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations buddyinfo_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_operations = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *compare = &pgdat->node_zones[zid];

		if (populated_zone(compare))
			return zone == compare;
	}

	return false;
}

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	if (is_zone_first_populated(pgdat, zone)) {
		seq_printf(m, "\n  per-node stats");
		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			seq_printf(m, "\n      %-12s %lu",
				vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
				node_page_state(pgdat, i));
		}
	}
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu"
		   "\n        managed  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone->managed_pages);

	seq_printf(m,
		   "\n        protection: (%ld",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
	seq_putc(m, ')');

	/* If unpopulated, no other information is useful */
	if (!populated_zone(zone)) {
		seq_putc(m, '\n');
		return;
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n      %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m, "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  node_unreclaimable:  %u"
		   "\n  start_pfn:           %lu"
		   "\n  node_inactive_ratio: %u",
		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
		   zone->zone_start_pfn,
		   zone->zone_pgdat->inactive_ratio);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.  All zones are printed regardless
 * of whether they are populated or not: lowmem_reserve_ratio operates on the
 * set of all zones and userspace would not be aware of such zones if they are
 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).