#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Function returns the source string if it is in the .rodata section;
 * otherwise it falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
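
/*
 * Example (illustrative sketch, not part of this file; the struct and
 * function names are hypothetical): the usual pairing of kstrdup_const()
 * with kfree_const().  When the caller passes a string literal living in
 * .rodata, no allocation happens at all:
 *
 *	struct widget {
 *		const char *name;
 *	};
 *
 *	static int widget_set_name(struct widget *w, const char *name)
 *	{
 *		w->name = kstrdup_const(name, GFP_KERNEL);
 *		return w->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void widget_release(struct widget *w)
 *	{
 *		kfree_const(w->name);	// no-op for .rodata strings
 *	}
 */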

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
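
/*
 * Example (illustrative, hypothetical buffer): kstrndup() is handy for
 * taking a bounded copy of a field that may not be NUL-terminated
 * within its full length:
 *
 *	char label[16];			// possibly unterminated
 *	char *copy = kstrndup(label, sizeof(label), GFP_KERNEL);
 *
 *	if (!copy)
 *		return -ENOMEM;
 *	...
 *	kfree(copy);
 */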

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
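
/*
 * Example (illustrative sketch; names are hypothetical): duplicating a
 * caller-supplied blob so the copy outlives the caller's buffer:
 *
 *	static void *save_config(const void *blob, size_t size)
 *	{
 *		return kmemdup(blob, size, GFP_KERNEL);	// NULL on failure
 *	}
 */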

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
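
/*
 * Example (illustrative, hypothetical ioctl handler): failure is
 * reported via ERR_PTR(), not NULL, so callers must test with IS_ERR():
 *
 *	void *buf = memdup_user(uptr, len);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */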

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
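
/*
 * Example (illustrative): copying a pathname-sized string from user
 * space; the result is either NUL-terminated or an ERR_PTR():
 *
 *	char *name = strndup_user(uname, PATH_MAX);
 *
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 */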

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
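
/*
 * Example (illustrative, hypothetical write handler): the _nul variant
 * is convenient when the user data will be parsed as a string:
 *
 *	static ssize_t foo_write(struct file *file, const char __user *ubuf,
 *				 size_t count, loff_t *ppos)
 *	{
 *		char *kbuf = memdup_user_nul(ubuf, count);
 *
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *		// ... parse kbuf ...
 *		kfree(kbuf);
 *		return count;
 *	}
 */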

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return
 * with no pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
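
/*
 * Example (illustrative sketch; NR_PIN is a hypothetical constant):
 * pinning a user buffer for I/O without holding mmap_sem in the caller.
 * A short pin must still be released page by page:
 *
 *	struct page *pages[NR_PIN];
 *	int got = get_user_pages_fast(uaddr, NR_PIN, 1, pages);
 *
 *	if (got < 0)
 *		return got;
 *	// ... use pages[0..got) ...
 *	while (got--)
 *		put_page(pages[got]);
 */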

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
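
/*
 * Example (illustrative, hypothetical driver code): since errors come
 * back as negative values stuffed into an unsigned long, callers check
 * the result with IS_ERR_VALUE():
 *
 *	unsigned long addr = vm_mmap(file, 0, size, PROT_READ,
 *				     MAP_SHARED, 0);
 *
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;
 */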

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
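
/*
 * Example (illustrative sketch): kvfree() pairs with the common
 * kmalloc()-with-vmalloc-fallback allocation pattern, freeing with
 * whichever allocator the pointer came from:
 *
 *	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *
 *	if (!p)
 *		p = vmalloc(size);
 *	...
 *	kvfree(p);	// calls vfree() or kfree() as appropriate
 */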

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, it returns true if any subpage of the compound
 * page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
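
/*
 * Worked example (illustrative numbers, assuming 4K pages): with
 * overcommit_ratio = 50, 8 GiB of RAM (2097152 pages, no hugetlb
 * pages) and 2 GiB of swap (524288 pages), the limit is
 * 2097152 * 50 / 100 + 524288 = 1572864 pages, i.e. 6 GiB.
 */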

/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables, since it can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages out; they are not available for
		 * anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
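
/*
 * Example (illustrative): since the copy is not guaranteed to be
 * NUL-terminated, callers typically terminate it themselves:
 *
 *	char buf[256];
 *	int n = get_cmdline(task, buf, sizeof(buf) - 1);
 *
 *	buf[n] = '\0';
 */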