Commit ca79b0c2 authored by Arun KS, committed by Linus Torvalds

mm: convert totalram_pages and totalhigh_pages variables to atomic

totalram_pages and totalhigh_pages are converted to atomic variables, accessed through static inline functions.

The main motivation was that managed_page_count_lock handling was
complicating things.  It was discussed at length here:
https://lore.kernel.org/patchwork/patch/995739/#1181785
So it seemed better to remove the lock and convert the variables to
atomics, with preventing potential store-to-read tearing as a bonus.
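
A minimal sketch of the resulting pattern (illustrative only; the
commit's actual definitions live in include/linux/mm.h):

	/* Sketch: counter becomes an atomic_long_t behind inline helpers */
	extern atomic_long_t _totalram_pages;

	static inline unsigned long totalram_pages(void)
	{
		/* atomic_long_read() cannot tear against a concurrent store */
		return (unsigned long)atomic_long_read(&_totalram_pages);
	}

	static inline void totalram_pages_add(long count)
	{
		atomic_long_add(count, &_totalram_pages);
	}

Callers that used to read the variable directly now call
totalram_pages(), as the hunks below show.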

[akpm@linux-foundation.org: coding style fixes]
Link: http://lkml.kernel.org/r/1542090790-21750-4-git-send-email-arunks@codeaurora.org

Signed-off-by: Arun KS <arunks@codeaurora.org>
Suggested-by: Michal Hocko <mhocko@suse.com>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9705bea5
@@ -1235,7 +1235,7 @@ void __init kmem_cache_init(void)
 	 * page orders on machines with more than 32MB of memory if
 	 * not overridden on the command line.
 	 */
-	if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
+	if (!slab_max_order_set && totalram_pages() > (32 << 20) >> PAGE_SHIFT)
 		slab_max_order = SLAB_MAX_ORDER_HI;

 	/* Bootstrap is tricky, because several objects are allocated
@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);
  */
 void __init swap_setup(void)
 {
-	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
+	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

 	/* Use a smaller cluster for small-memory machines */
 	if (megs < 16)
@@ -593,7 +593,7 @@ unsigned long vm_commit_limit(void)
 	if (sysctl_overcommit_kbytes)
 		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
 	else
-		allowed = ((totalram_pages - hugetlb_total_pages())
+		allowed = ((totalram_pages() - hugetlb_total_pages())
 			   * sysctl_overcommit_ratio / 100);
 	allowed += total_swap_pages;
@@ -1634,7 +1634,7 @@ void *vmap(struct page **pages, unsigned int count,

 	might_sleep();

-	if (count > totalram_pages)
+	if (count > totalram_pages())
 		return NULL;

 	size = (unsigned long)count << PAGE_SHIFT;
@@ -1739,7 +1739,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	unsigned long real_size = size;

 	size = PAGE_ALIGN(size);
-	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
+	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
 		goto fail;

 	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
@@ -549,7 +549,7 @@ static int __init workingset_init(void)
 	 * double the initial memory by using totalram_pages as-is.
 	 */
 	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
-	max_order = fls_long(totalram_pages - 1);
+	max_order = fls_long(totalram_pages() - 1);
 	if (max_order > timestamp_bits)
 		bucket_order = max_order - timestamp_bits;
 	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
@@ -219,8 +219,8 @@ static const struct zpool_ops zswap_zpool_ops = {

 static bool zswap_is_full(void)
 {
-	return totalram_pages * zswap_max_pool_percent / 100 <
-			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
+	return totalram_pages() * zswap_max_pool_percent / 100 <
+			DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 }

 static void zswap_update_total_size(void)
@@ -1131,7 +1131,7 @@ EXPORT_SYMBOL_GPL(dccp_debug);
 static int __init dccp_init(void)
 {
 	unsigned long goal;
-	unsigned long nr_pages = totalram_pages;
+	unsigned long nr_pages = totalram_pages();
 	int ehash_order, bhash_order, i;
 	int rc;
@@ -1866,7 +1866,7 @@ void __init dn_route_init(void)
 	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
 	add_timer(&dn_route_timer);

-	goal = totalram_pages >> (26 - PAGE_SHIFT);
+	goal = totalram_pages() >> (26 - PAGE_SHIFT);

 	for(order = 0; (1UL << order) < goal; order++)
 		/* NOTHING */;
@@ -1000,7 +1000,7 @@ static int __net_init tcp_net_metrics_init(struct net *net)
 	slots = tcpmhash_entries;
 	if (!slots) {
-		if (totalram_pages >= 128 * 1024)
+		if (totalram_pages() >= 128 * 1024)
 			slots = 16 * 1024;
 		else
 			slots = 8 * 1024;
@@ -2248,7 +2248,7 @@ static __always_inline unsigned int total_extension_size(void)
 int nf_conntrack_init_start(void)
 {
-	unsigned long nr_pages = totalram_pages;
+	unsigned long nr_pages = totalram_pages();
 	int max_factor = 8;
 	int ret = -ENOMEM;
 	int i;
@@ -274,7 +274,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
 	struct xt_hashlimit_htable *hinfo;
 	const struct seq_operations *ops;
 	unsigned int size, i;
-	unsigned long nr_pages = totalram_pages;
+	unsigned long nr_pages = totalram_pages();
 	int ret;

 	if (cfg->size) {
@@ -1368,7 +1368,7 @@ static __init int sctp_init(void)
 	int status = -EINVAL;
 	unsigned long goal;
 	unsigned long limit;
-	unsigned long nr_pages = totalram_pages;
+	unsigned long nr_pages = totalram_pages();
 	int max_share;
 	int order;
 	int num_entries;
@@ -106,7 +106,7 @@ void ima_add_kexec_buffer(struct kimage *image)
 	kexec_segment_size = ALIGN(ima_get_binary_runtime_size() +
 				   PAGE_SIZE / 2, PAGE_SIZE);
 	if ((kexec_segment_size == ULONG_MAX) ||
-	    ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages / 2)) {
+	    ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages() / 2)) {
 		pr_err("Binary measurement list too large.\n");
 		return;
 	}