Commit e8b098fc authored by Mike Rapoport, committed by Linus Torvalds
parent 002843de
mm/cma.c
@@ -165,6 +165,9 @@ core_initcall(cma_init_reserved_areas);
* @base: Base address of the reserved area
* @size: Size of the reserved area (in bytes)
* @order_per_bit: Order of pages represented by one bit on bitmap.
* @name: The name of the area. If this parameter is NULL, the name of
* the area will be set to "cmaN", where N is a running counter of
* used areas.
* @res_cma: Pointer to store the created cma region.
*
* This function creates custom contiguous area from already reserved memory.
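An illustrative sketch of a caller for the function documented above: registering an already-reserved block (e.g. carved out of memblock by platform code) as a CMA area. The "camera" name, camera_cma, and the base/size values are made up; the signature is the v4.17-era one this hunk documents.

#include <linux/cma.h>

static struct cma *camera_cma;	/* hypothetical area */

static int __init camera_cma_setup(phys_addr_t base, phys_addr_t size)
{
	/* order_per_bit = 0: one bitmap bit per page. */
	int ret = cma_init_reserved_mem(base, size, 0, "camera",
					&camera_cma);

	if (ret)
		pr_err("camera: CMA registration failed: %d\n", ret);
	return ret;
}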
@@ -227,6 +230,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
* @alignment: Alignment for the CMA area, should be power of 2 or zero
* @order_per_bit: Order of pages represented by one bit on bitmap.
* @fixed: hint about where to place the reserved area
* @name: The name of the area. See function cma_init_reserved_mem()
* @res_cma: Pointer to store the created cma region.
*
* This function reserves memory from early allocator. It should be
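For the early-reservation path (the context above is truncated by the hunk), a sketch of how board code might call cma_declare_contiguous(). Passing base = 0 and fixed = false lets CMA pick the placement; "board-dma" and dma_cma are made-up names.

#include <linux/cma.h>

static struct cma *dma_cma;	/* hypothetical area */

static void __init board_reserve(void)
{
	int ret = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
					 "board-dma", &dma_cma);

	if (ret)
		pr_warn("board-dma: CMA reservation failed: %d\n", ret);
}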
@@ -390,6 +394,7 @@ static inline void cma_debug_show_areas(struct cma *cma) { }
* @cma: Contiguous memory region for which the allocation is performed.
* @count: Requested number of pages.
* @align: Requested alignment of pages (in PAGE_SIZE order).
* @gfp_mask: GFP mask to use during compaction
*
* This function allocates pages from a specific contiguous memory area.
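And an allocation round trip against such an area, exercising the gfp_mask parameter this hunk documents (dma_cma as above is hypothetical):

/* 16 contiguous pages, 2^0-page alignment. */
struct page *page = cma_alloc(dma_cma, 16, 0, GFP_KERNEL);

if (page) {
	/* ... use the physically contiguous range ... */
	cma_release(dma_cma, page, 16);
}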
mm/compaction.c
@@ -576,6 +576,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
/**
* isolate_freepages_range() - isolate free pages.
* @cc: Compaction control structure.
* @start_pfn: The first PFN to start isolating.
* @end_pfn: The one-past-last PFN.
*
mm/kmemleak.c
@@ -1187,6 +1187,11 @@ EXPORT_SYMBOL(kmemleak_no_scan);
/**
* kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
* address argument
* @phys: physical address of the object
* @size: size of the object
* @min_count: minimum number of references to this object.
* See kmemleak_alloc()
* @gfp: kmalloc() flags used for kmemleak internal memory allocations
*/
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
gfp_t gfp)
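A sketch of the typical caller, modelled on what mm/memblock.c does around this kernel version: after reserving a block behind kmemleak's back, register it by physical address. limit, size and align are placeholders.

phys_addr_t found = memblock_find_in_range(0, limit, size, align);

if (found) {
	memblock_reserve(found, size);
	/* min_count = 0: scanned, but never reported as a leak. */
	kmemleak_alloc_phys(found, size, 0, 0);
}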
@@ -1199,6 +1204,9 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
/**
* kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
* physical address argument
* @phys: physical address of the beginning of, or a location inside, an
* object. This also represents the start of the range to be freed
* @size: size to be unregistered
*/
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
@@ -1210,6 +1218,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
/**
* kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
* address argument
* @phys: physical address of the object
*/
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
@@ -1221,6 +1230,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
/**
* kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
* address argument
* @phys: physical address of the object
*/
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
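To round out the set, an illustrative use of the companion helpers on hypothetical base, head_size and table_base addresses:

/* Track an early allocation kmemleak cannot see on its own;
 * min_count = 1: report it if no references are ever found. */
kmemleak_alloc_phys(base, size, 1, 0);

/* Part of it is later handed back to the allocator. */
kmemleak_free_part_phys(base, head_size);

/* A buffer referenced only by hardware: don't scan or report it. */
kmemleak_ignore_phys(table_base);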
mm/memory_hotplug.c
@@ -541,6 +541,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms,
* @zone: zone from which pages need to be removed
* @phys_start_pfn: starting pageframe (must be aligned to start of a section)
* @nr_pages: number of pages to remove (must be multiple of section size)
* @altmap: alternative device page map or %NULL if default memmap is used
*
* Generic helper function to remove section mappings and sysfs entries
* for the section of the memory we are removing. Caller needs to make
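A sketch of the arch-side caller, roughly what x86 does at this point in time; with an altmap the leading pfns are backed by device memory, hence the offset. Treat it as illustrative, not the exact arch implementation.

int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);

	/* Skip pfns whose memmap lives in the device itself. */
	if (altmap)
		page += vmem_altmap_offset(altmap);

	return __remove_pages(page_zone(page), start_pfn, nr_pages,
			      altmap);
}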
@@ -1044,6 +1045,7 @@ static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
/**
* try_online_node - online a node if offlined
* @nid: the node ID
*
* called by cpu_up() to online a node without onlined memory.
*/
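A sketch of that cpu_up() usage: make sure the CPU's node is online before the CPU itself comes up (cpu is a placeholder).

int nid = cpu_to_node(cpu);
int ret = try_online_node(nid);

if (ret)
	return ret;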
@@ -1804,6 +1806,7 @@ static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
/**
* try_offline_node
* @nid: the node ID
*
* Offline a node if all memory sections and cpus of the node are removed.
*
@@ -1847,6 +1850,9 @@ EXPORT_SYMBOL(try_offline_node);
/**
* remove_memory
* @nid: the node ID
* @start: physical address of the region to remove
* @size: size of the region to remove
*
* NOTE: The caller must call lock_device_hotplug() to serialize hotplug
* and online/offline operations before this call, as required by
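A minimal caller honouring the locking rule stated above; nid, start and size are placeholders supplied by the hotplug driver:

lock_device_hotplug();
remove_memory(nid, start, size);
unlock_device_hotplug();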
mm/oom_kill.c
@@ -185,6 +185,8 @@ static bool is_dump_unreclaim_slabs(void)
* oom_badness - heuristic function to determine which candidate task to kill
* @p: task struct of the task whose badness score we calculate
* @totalpages: total present RAM allowed for page allocation
* @memcg: task's memory controller, if constrained
* @nodemask: nodemask passed to page allocator for mempolicy ooms
*
* The heuristic for determining which task to kill is made to be as simple and
* predictable as possible. The goal is to return the highest value for the
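A sketch of how a selection loop uses this score, in the spirit of the OOM killer's task scan; totalpages comes from the surrounding context, and memcg/nodemask are NULL for a global, unconstrained OOM:

struct task_struct *p, *chosen = NULL;
unsigned long chosen_points = 0;

rcu_read_lock();
for_each_process(p) {
	unsigned long points = oom_badness(p, NULL, NULL, totalpages);

	/* Keep the task with the highest badness score. */
	if (points > chosen_points) {
		chosen_points = points;
		chosen = p;
	}
}
rcu_read_unlock();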
mm/pagewalk.c
@@ -258,6 +258,9 @@ static int __walk_page_range(unsigned long start, unsigned long end,
/**
* walk_page_range - walk page table with caller specific callbacks
* @start: start address of the virtual address range
* @end: end address of the virtual address range
* @walk: mm_walk structure defining the callbacks and the target address space
*
* Recursively walk the page table tree of the process represented by @walk->mm
* within the virtual address range [@start, @end). During walking, we can do
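A sketch against the v4.17-era API, where the callbacks and the target mm live in struct mm_walk itself: count the present PTEs in [start, end). mm, start and end are placeholders, and the caller must hold mmap_sem.

static int count_pte(pte_t *pte, unsigned long addr,
		     unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

unsigned long count = 0;
struct mm_walk walk = {
	.pte_entry = count_pte,
	.mm	   = mm,
	.private   = &count,
};

down_read(&mm->mmap_sem);
walk_page_range(start, end, &walk);
up_read(&mm->mmap_sem);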
mm/rmap.c
@@ -1171,6 +1171,7 @@ void page_add_new_anon_rmap(struct page *page,
/**
* page_add_file_rmap - add pte mapping to a file page
* @page: the page to add the mapping to
* @compound: charge the page as compound or small page
*
* The caller needs to hold the pte lock.
*/
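An illustrative fault-path fragment in the spirit of alloc_set_pte(): with the page table lock held, map a small file page and add the reverse mapping. vma, vmf, page and entry come from the surrounding fault handler.

flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
page_add_file_rmap(page, false);	/* small page, not compound */
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);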
mm/zsmalloc.c
@@ -851,6 +851,7 @@ static struct page *get_next_page(struct page *page)
/**
* obj_to_location - get (<page>, <obj_idx>) from encoded object value
* @obj: the encoded object value
* @page: the page the object resides in
* @obj_idx: object index
*/
@@ -1301,6 +1302,7 @@ EXPORT_SYMBOL_GPL(zs_get_total_pages);
* zs_map_object - get address of allocated object from handle.
* @pool: pool from which the object was allocated
* @handle: handle returned from zs_malloc
* @mm: mapping mode to use
*
* Before using an object allocated from zs_malloc, it must be mapped using
* this function. When done with the object, it must be unmapped using
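A round-trip sketch for the mapping mode parameter documented above: map for write, copy the payload in, unmap. pool, buf and len are placeholders.

unsigned long handle = zs_malloc(pool, len, GFP_KERNEL);

if (handle) {
	void *dst = zs_map_object(pool, handle, ZS_MM_WO);

	memcpy(dst, buf, len);
	zs_unmap_object(pool, handle);
}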