Commit af3fd26f authored by Vivek Das Mohapatra's avatar Vivek Das Mohapatra
Browse files

malloc/free: reorganise wrapper code in advance of realloc changes

glibc voodoo moved to its own header to make it easy to drop once
we don't need it any more

malloc/free cluster of functions moved into their own sub-struct
pointer so they don't perturb the main capsule namespace struct
layout if/when they change (as we expect them to).

abstract out the arcane memory checks so that the new realloc
wrapper can share them with the free wrapper
parent d598d1db
......@@ -98,6 +98,7 @@ libcapsule_la_SOURCES = capsule/capsule-dlmopen.c \
capsule/capsule-relocate.c \
capsule/capsule-init.c \
capsule/capsule-private.h \
capsule/capsule-malloc.h \
capsule/capsule-wrappers.c \
utils/dump.c \
utils/process-pt-dynamic.c \
......
......@@ -306,10 +306,12 @@ _capsule_load (const capsule cap,
if( !ret )
goto cleanup;
// =====================================================================
// stash any free/malloc/etc implementations _before_ we overwrite them:
// (currently just free)
if( cap->ns->free == NULL )
cap->ns->free = dlsym( ret, "free" );
// (only really need to deal with functions that deal with pre-alloc'd
// pointers, ie free and realloc)
if( cap->ns->mem->free == NULL )
cap->ns->mem->free = dlsym( ret, "free" );
// TODO: failure in the dlopen fixup phase should probably be fatal:
if( ret != NULL && // no errors so far
......
......@@ -134,19 +134,22 @@ get_namespace (const char *default_prefix, const char *soname)
ns->prefix = strdup( prefix );
ns->exclusions = ptr_list_alloc( 4 );
ns->exports = ptr_list_alloc( 4 );
ns->mem = calloc( 1, sizeof(capsule_memory) );
ptr_list_push_ptr( namespaces, ns );
return ns;
}
// items with names starting with '-' are initialised by hand
// and will be skipped in the automatic section below:
static capsule_item alloc_func[] =
{
{ "-" , (capsule_addr) NULL },
{ "-" , (capsule_addr) NULL },
{ "-dlopen" , (capsule_addr) NULL },
{ "-free" , (capsule_addr) NULL },
{ "-realloc" , (capsule_addr) NULL },
{ "malloc" , (capsule_addr) &_capsule_original_malloc },
{ "calloc" , (capsule_addr) &_capsule_original_calloc },
{ "realloc" , (capsule_addr) &_capsule_original_realloc },
{ "posix_memalign", (capsule_addr) &_capsule_original_pmalign },
{ NULL }
};
......
#pragma once
#define CAPSULE_MALLOC_EXTRA_CHECKS
#define DEBUG_MALLOC_VOODOO 0
#if DEBUG_MALLOC_VOODOO
#include <stdio.h>
#include <sys/param.h>
// can't printf inside malloc/free et al
// can't jump to an fputs from a different libc either
typedef int (*fputsfunc) (const char *buf, FILE *s);
static fputsfunc wf = NULL;
#define CAN_DEBUG_ALLOCS (wf != NULL)
// Render PTR as zero-suppressed hex and emit it via the captured fputs
// pointer (wf) — printf and friends are off-limits inside malloc/free
// wrappers, and we can't jump into a different libc's stdio either.
static void dump_ptr (const char *label, const void *ptr)
{
    char ptrbuf[ (sizeof(void *) * 2) + 1 ];
    unsigned long x = (unsigned long) ptr;
    static const char pattern[] = "0123456789abcdef";
    // use sizeof(void *) throughout: the original mixed sizeof(long)
    // (loop bound) with sizeof(void *) (buffer size) — identical on
    // LP64 glibc targets, but keep them consistent anyway:
    const size_t nibbles = sizeof(void *) * 2;
    size_t start = nibbles;   // index of first non-zero digit, if any

    ptrbuf[ sizeof(ptrbuf) - 1 ] = '\0';

    for( size_t i = 0; i < nibbles; i++ )
    {
        // 0xf, not 0b01111: binary literals are a GCC extension pre-C23
        size_t c = (x >> ((nibbles - 1 - i) * 4)) & 0xf;

        ptrbuf[ i ] = pattern[ c ];

        if( (start == nibbles) && (ptrbuf[i] != '0') )
            start = i;
    }

    // keep one leading zero (or a single "0" when ptr is NULL);
    // unsigned arithmetic, so clamp explicitly instead of MAX():
    start = (start > 0) ? start - 1 : 0;

    wf( label , stderr );
    wf( ": <0x", stderr );
    wf( ptrbuf + start, stderr );
    wf( ">\n" , stderr );
}
#define FETCH_FPUTS() \
if(UNLIKELY(!wf)) wf = (fputsfunc) dlsym(cap->dl_handle, "fputs")
#else
#define dump_ptr(a,b)
#define FETCH_FPUTS()
#define CAN_DEBUG_ALLOCS 0
#endif
////////////////////////////////////////////////////////////////////////////
// copy some voodoo out of libc.
// it is to be hoped that this is a temporary hack but, well…
#define SIZE_SZ (sizeof(size_t))
/* Mirror of glibc's struct malloc_chunk (malloc/malloc.c).  The field
   order and sizes must match the glibc build the capsule runs against;
   we only ever *read* the metadata (the size bits) through this, never
   mutate it.  NOTE(review): tied to a specific glibc layout — confirm
   against the glibc version actually deployed. */
struct malloc_chunk
{
size_t prev_size; /* Size of previous chunk (if free). */
size_t size; /* Size in bytes, including overhead. */
struct malloc_chunk* fd; /* double links -- used only if free. */
struct malloc_chunk* bk;
struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
struct malloc_chunk* bk_nextsize;
};
/* convenience alias used by the chunk2mem/mem2chunk macros below */
typedef struct malloc_chunk* mchunkptr;
#define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
#define IS_MMAPPED 0x2
#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
////////////////////////////////////////////////////////////////////////////
// extra voodoo. not clear if we need this:
#ifdef CAPSULE_MALLOC_EXTRA_CHECKS
#define PREV_INUSE 0x1
/* similarly for non-main-arena chunks */
#define NON_MAIN_ARENA 0x4
#define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
typedef struct malloc_chunk *mfastbinptr;
typedef int mutex_t;
#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
/* offset 2 to use otherwise unindexable first 2 bins */
#define fastbin_index(sz) \
((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
/* The maximum fastbin request size we support */
#define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
#define MALLOC_ALIGNMENT (2 *SIZE_SZ < __alignof__ (long double) ? \
__alignof__ (long double) : 2 *SIZE_SZ)
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
/* The smallest size we can malloc is an aligned minimal chunk */
#define MINSIZE \
(unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
#define request2size(req) \
(((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
MINSIZE : \
((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
#define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
#define NBINS 128
#define BINMAPSHIFT 5
#define BITSPERMAP (1U << BINMAPSHIFT)
#define BINMAPSIZE (NBINS / BITSPERMAP)
/* Mirror of glibc's struct malloc_state — the per-arena header from
   malloc/malloc.c.  Only flags, top and system_mem are consulted by the
   pointer checks here, but the full layout is reproduced so that their
   offsets come out right.  NOTE(review): must track the glibc version
   in use — verify on glibc upgrades. */
struct malloc_state
{
/* Serialize access. */
mutex_t mutex;
/* Flags (formerly in max_fast). */
int flags;
/* Fastbins */
mfastbinptr fastbinsY[NFASTBINS];
/* Base of the topmost chunk -- not otherwise kept in a bin */
mchunkptr top;
/* The remainder from the most recent split of a small request */
mchunkptr last_remainder;
/* Normal bins packed as described above */
mchunkptr bins[NBINS * 2 - 2];
/* Bitmap of bins */
unsigned int binmap[BINMAPSIZE];
/* Linked list */
struct malloc_state *next;
/* Linked list for free arenas. Access to this field is serialized
by free_list_lock in arena.c. */
struct malloc_state *next_free;
/* Number of threads attached to this arena. 0 if the arena is on
the free list. Access to this field is serialized by
free_list_lock in arena.c. */
size_t attached_threads;
/* Memory allocated from the system in this arena. */
size_t system_mem;
size_t max_system_mem;
};
typedef struct malloc_state *mstate;
/* Mirror of glibc's heap_info (malloc/arena.c): header of a secondary
   (non-main) arena heap.  heap_for_ptr() below relies on these heaps
   being HEAP_MAX_SIZE-aligned to recover the owning arena from a raw
   pointer. */
typedef struct _heap_info
{
mstate ar_ptr; /* Arena for this heap. */
struct _heap_info *prev; /* Previous heap. */
size_t size; /* Current size in bytes. */
size_t mprotect_size; /* Size in bytes that has been mprotected
PROT_READ|PROT_WRITE. */
/* Make sure the following data is properly aligned, particularly
that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
MALLOC_ALIGNMENT. */
char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
} heap_info;
#ifndef DEFAULT_MMAP_THRESHOLD_MAX
/* For 32-bit platforms we cannot increase the maximum mmap
threshold much because it is also the minimum value for the
maximum heap size and its alignment. Going above 512k (i.e., 1M
for new heaps) wastes too much address space. */
# if __WORDSIZE == 32
# define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
# else
# define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
# endif
#endif
#define HEAP_MIN_SIZE (32 * 1024)
#ifndef HEAP_MAX_SIZE
# ifdef DEFAULT_MMAP_THRESHOLD_MAX
# define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
# else
# define HEAP_MAX_SIZE (1024 * 1024)
# endif
#endif
#define heap_for_ptr(ptr) \
((heap_info *) ((unsigned long) (ptr) & ~(HEAP_MAX_SIZE - 1)))
#define arena_for_chunk(ptr) \
(chunk_non_main_arena (ptr) ? heap_for_ptr (ptr)->ar_ptr : NULL)
#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
typedef struct malloc_state *mstate;
#define NONCONTIGUOUS_BIT (2U)
#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
#define chunksize(p) ((p)->size & ~(SIZE_BITS))
#endif // CAPSULE_MALLOC_EXTRA_CHECKS
////////////////////////////////////////////////////////////////////////////
......@@ -15,15 +15,24 @@ typedef void * (*callocfunc) (size_t nmem, size_t size);
typedef void * (*rallocfunc) (void *ptr, size_t size);
typedef int (*palignfunc) (void **memptr, size_t alignment, size_t size);
// Per-namespace stash of allocator entry points found *inside* the
// capsule (dlsym'd from the dlmopen'd libc in _capsule_load), kept in
// their own sub-struct so the capsule_namespace layout stays stable
// when this cluster grows:
typedef struct _capsule_memory
{
freefunc free;             // capsule libc's free(); the only one called so far
rallocfunc realloc;        // NOTE(review): not yet assigned in the visible code
mallocfunc malloc; // UNUSED
callocfunc calloc; // UNUSED
palignfunc posix_memalign; // UNUSED
} capsule_memory;
// Everything we track about one dlmopen namespace; shared by all
// capsule DSOs opened into the same Lmid_t:
typedef struct _capsule_namespace
{
    Lmid_t          ns;               // dlmopen namespace. LM_ID_NEWLM to create
    const char     *prefix;           // default library tree prefix, eg /host
    ptr_list       *exclusions;       // sonames to ignore
    ptr_list       *exports;          // sonames to expose/export
    char          **combined_exclude; // combined exclude & export lists from all
    char          **combined_export;  // capsules DSOs sharing the same namespace
    capsule_memory *mem;              // allocator implementations from inside the capsule
} capsule_namespace;
struct _capsule
......
#include "capsule/capsule.h"
#include "capsule/capsule-private.h"
#include "capsule/capsule-malloc.h"
#include "utils/utils.h"
#include "utils/ld-libs.h"
......@@ -271,44 +272,45 @@ cleanup:
return res;
}
////////////////////////////////////////////////////////////////////////////
// Checks borrowed from glibc free()'s sanity tests: given a pointer
// that is neither from the main heap nor mmapped, decide whether it
// plausibly belongs to the *outer* (vanilla) libc's secondary arenas.
// Returns 1 if it looks like a vanilla-libc chunk, 0 otherwise.
#ifdef CAPSULE_MALLOC_EXTRA_CHECKS
static inline int chunk_is_vanilla (mchunkptr p, void *ptr)
{
    mstate av = arena_for_chunk (p);

    (void) ptr; // reserved for further checks; currently unused

    // arena_for_chunk can't find the main arena... but if this pointer
    // is from the _main_ main arena then I think it would have been
    // trapped by the heap check in capsule_shim_free already, so
    // this did not come from the main instance of libc:
    if( LIKELY(!av) )
        return 0;

    size_t size = chunksize (p);
    mchunkptr nextchunk = chunk_at_offset(p, size);

    // invalid next size (fast):
    if( UNLIKELY( nextchunk->size <= 2 * SIZE_SZ ) ||
        UNLIKELY( chunksize( nextchunk ) >= av->system_mem) )
        return 0;

    // double free or corruption (out):
    if( UNLIKELY( contiguous(av) &&
                  (char *)nextchunk >= (char *)av->top + chunksize( av->top )) )
        return 0;

    return 1;
}
#else
// no glibc internals available: never claim a chunk as vanilla.
// NOTE: the original omitted the return type (implicit int is invalid
// since C99); also silence unused-parameter warnings explicitly:
static inline int chunk_is_vanilla (mchunkptr p, void *ptr)
{
    (void) p;
    (void) ptr;
    return 0;
}
#endif
void
capsule_shim_free (const capsule cap, void *ptr)
static int address_within_main_heap (ElfW(Addr) addr)
{
static ElfW(Addr) base = (ElfW(Addr)) NULL;
ElfW(Addr) top;
if( !ptr )
return;
ElfW(Addr) top = (ElfW(Addr)) sbrk( 0 );
top = (ElfW(Addr)) sbrk( 0 );
// past the end of the heap:
if( top <= addr )
return 0;
if( base == (ElfW(Addr)) NULL )
{
......@@ -316,28 +318,43 @@ capsule_shim_free (const capsule cap, void *ptr)
base = top - (ElfW(Addr)) mi.arena;
}
// it's from the main heap, comes from the vanilla libc outside
// the capsule
if( (base < (ElfW(Addr)) ptr) && ((ElfW(Addr)) ptr < top) )
// address is below heap base
// ∴ either a mmapped address, non-malloc'd memory
// or an address from a secondary arena
if( base > addr )
return 0;
return 1;
}
/**
 * capsule_shim_free:
 * @cap: the capsule whose free() was invoked
 * @ptr: the pointer being freed (may be NULL)
 *
 * free() replacement installed over the capsule's DSOs: work out which
 * libc a pointer came from and hand it to that libc's free().
 */
void
capsule_shim_free (const capsule cap, void *ptr)
{
    if( !ptr )
        return;

    // from the main heap: ie from the vanilla libc outside the capsule
    if( address_within_main_heap( (ElfW(Addr)) ptr ) )
    {
        free( ptr );
        return;
    }

    mchunkptr p = mem2chunk( ptr );

    // mmapped pointer/chunk: can't tell whose this is but since we
    // override the malloc/free cluster as early as possible we're
    // kind of hoping we don't have any of these from inside the capsule
    //
    // we'd only have such a pointer if the libraries we dlmopen() into
    // the capsule allocated large chunks of memory in their initialiser(s):
    if( chunk_is_mmapped( p ) || chunk_is_vanilla( p, ptr ) )
    {
        free( ptr );
        return;
    }

    // doesn't look like a valid pointer to the main libc,
    // pass it to the capsule libc and hope for the best:
    cap->ns->mem->free( ptr );
}
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment