Android patches - lzham_core.h config & workaround for missing malloc_usable_size() #10

Open: wants to merge 1 commit into master.
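The heart of the lzham_mem.cpp change is manual size tracking: each block is over-allocated by LZHAM_MIN_ALLOC_ALIGNMENT bytes, and the requested size plus its bitwise complement (a cheap corruption check) are stamped just below the pointer handed back to the caller. A minimal standalone sketch of that header-stamping idea, using an illustrative 16-byte header constant and hypothetical names rather than anything taken from the patch:

#include <cassert>
#include <cstdio>
#include <cstdlib>

// Illustrative header size; the patch uses LZHAM_MIN_ALLOC_ALIGNMENT instead.
// Must be >= 2 * sizeof(size_t) so both header words fit below the user pointer.
static const size_t kHeaderSize = 16;

static void* sized_malloc(size_t size)
{
   unsigned char* q = static_cast<unsigned char*>(std::malloc(kHeaderSize + size));
   if (!q)
      return NULL;

   unsigned char* p = q + kHeaderSize;
   reinterpret_cast<size_t*>(p)[-1] = size;    // stored request size
   reinterpret_cast<size_t*>(p)[-2] = ~size;   // complement, detects header corruption
   return p;
}

static size_t sized_msize(void* p)
{
   const size_t num = reinterpret_cast<size_t*>(p)[-1];
   const size_t check = reinterpret_cast<size_t*>(p)[-2];
   return (num == ~check) ? num : 0;           // 0 signals a trashed header
}

static void sized_free(void* p)
{
   if (p)
      std::free(static_cast<unsigned char*>(p) - kHeaderSize);
}

int main()
{
   void* p = sized_malloc(1000);
   assert(p && sized_msize(p) == 1000);
   std::printf("tracked size: %zu\n", sized_msize(p));
   sized_free(p);
   return 0;
}

The complement word lets the size query return 0 instead of a garbage value when the header has been overwritten.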
37 changes: 36 additions & 1 deletion lzhamdecomp/lzham_core.h
@@ -165,7 +165,7 @@
#error TODO: Unknown Apple target
#endif

#elif defined(__linux__) && (defined(__i386__) || defined(__x86_64__)) && !defined(LZHAM_ANSI_CPLUSPLUS)
#elif defined(__linux__) && !defined(ANDROID) && (defined(__i386__) || defined(__x86_64__)) && !defined(LZHAM_ANSI_CPLUSPLUS)
// --- Generic GCC/clang path for x86/x64, clang or GCC, Linux, OSX, FreeBSD or NetBSD, pthreads for threading, GCC built-ins for atomic ops.
#define LZHAM_PLATFORM_PC 1

@@ -199,6 +199,41 @@
#define LZHAM_FORCE_INLINE inline __attribute__((__always_inline__,__gnu_inline__))
#endif

#define LZHAM_NOTE_UNUSED(x) (void)x
#elif defined(ANDROID) && !defined(LZHAM_ANSI_CPLUSPLUS)
// Generic GCC path for Android, GCC built-ins for atomic ops. Basically identical to iOS path.
// Pthreads disabled because the spin lock appears to be missing.
#define LZHAM_PLATFORM_PC 0

#if defined(_WIN64) || defined(__MINGW64__) || defined(_LP64) || defined(__LP64__)
#define LZHAM_PLATFORM_PC_X64 0
#define LZHAM_64BIT_POINTERS 1
#define LZHAM_CPU_HAS_64BIT_REGISTERS 1
#else
#define LZHAM_PLATFORM_PC_X86 0
#define LZHAM_64BIT_POINTERS 0
#define LZHAM_CPU_HAS_64BIT_REGISTERS 0
#endif

#define LZHAM_USE_UNALIGNED_INT_LOADS 0

#if __BIG_ENDIAN__
#define LZHAM_BIG_ENDIAN_CPU 1
#else
#define LZHAM_LITTLE_ENDIAN_CPU 1
#endif

#define LZHAM_USE_PTHREADS_API 0
#define LZHAM_USE_GCC_ATOMIC_BUILTINS 1

#define LZHAM_RESTRICT

#if defined(__clang__)
#define LZHAM_FORCE_INLINE inline
#else
#define LZHAM_FORCE_INLINE inline __attribute__((__always_inline__,__gnu_inline__))
#endif

#define LZHAM_NOTE_UNUSED(x) (void)x
#else
#warning Building as vanilla ANSI-C/C++, multi-threaded compression is disabled! Please configure lzhamdecomp/lzham_core.h.
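A note on the guard added above: it keys off ANDROID, which ndk-build and the NDK's CMake toolchain normally inject, while the compiler itself only predefines __ANDROID__ when targeting Android. A hypothetical, more defensive form of the same guard, assuming the build system might not pass -DANDROID (illustrative only, not what this commit adds):

// Accept either the build-system-defined ANDROID or the compiler-defined __ANDROID__.
#elif (defined(ANDROID) || defined(__ANDROID__)) && !defined(LZHAM_ANSI_CPLUSPLUS)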
95 changes: 83 additions & 12 deletions lzhamdecomp/lzham_mem.cpp
@@ -17,12 +17,83 @@ using namespace lzham;

#define LZHAM_MEM_STATS 0

#ifndef LZHAM_USE_WIN32_API
#ifndef __APPLE__
#define _msize malloc_usable_size
#else
#define _msize malloc_size
#endif
#if !defined( ANDROID )

#define allocate( size ) malloc( size )
#define reallocate( p, size ) realloc( p, size )
#define deallocate( p ) free( p )
#define getAllocationSize( p ) _msize( p )

#ifndef LZHAM_USE_WIN32_API
#if !defined(__APPLE__) && !defined(ANDROID)
#define getAllocationSize( p ) malloc_usable_size( p )
#else
#define getAllocationSize( p ) malloc_size( p )
#endif
#else
#define getAllocationSize( p ) _msize( p )
#endif

#else

// Android does not have an API any more for discovering true allocation size, so we need to patch in that data ourselves.
static void* allocate( size_t size )
{
   uint8* q = static_cast<uint8*>(malloc(LZHAM_MIN_ALLOC_ALIGNMENT + size));
   if (!q)
      return NULL;

   uint8* p = q + LZHAM_MIN_ALLOC_ALIGNMENT;
   reinterpret_cast<size_t*>(p)[-1] = size;
   reinterpret_cast<size_t*>(p)[-2] = ~size;

   return p;
}

static void deallocate( void* p )
{
   if( p != NULL )
   {
      const size_t num = reinterpret_cast<size_t*>(p)[-1];
      const size_t num_check = reinterpret_cast<size_t*>(p)[-2];
      LZHAM_ASSERT(num && (num == ~num_check));
      if (num == ~num_check)
      {
         free(reinterpret_cast<uint8*>(p) - LZHAM_MIN_ALLOC_ALIGNMENT);
      }
   }
}

static size_t getAllocationSize( void* p )
{
   const size_t num = reinterpret_cast<size_t*>(p)[-1];
   const size_t num_check = reinterpret_cast<size_t*>(p)[-2];
   LZHAM_ASSERT(num && (num == ~num_check));
   if (num == ~num_check)
      return num;

   return 0;
}

static void* reallocate( void* p, size_t size )
{
   if( size == 0 )
   {
      deallocate( p );
      return NULL;
   }

   // realloc() must be given the original base pointer, not the user pointer,
   // which sits LZHAM_MIN_ALLOC_ALIGNMENT bytes into the block.
   uint8* base = (p != NULL) ? (reinterpret_cast<uint8*>(p) - LZHAM_MIN_ALLOC_ALIGNMENT) : NULL;
   uint8* q = static_cast<uint8*>(realloc( base, LZHAM_MIN_ALLOC_ALIGNMENT + size ));
   if (!q)
      return NULL;

   uint8* newp = q + LZHAM_MIN_ALLOC_ALIGNMENT;
   reinterpret_cast<size_t*>(newp)[-1] = size;
   reinterpret_cast<size_t*>(newp)[-2] = ~size;

   return newp;
}

#endif

namespace lzham
@@ -86,15 +157,15 @@ namespace lzham

if (!p)
{
p_new = malloc(size);
p_new = allocate(size);
LZHAM_ASSERT( (reinterpret_cast<ptr_bits_t>(p_new) & (LZHAM_MIN_ALLOC_ALIGNMENT - 1)) == 0 );

if (pActual_size)
*pActual_size = p_new ? _msize(p_new) : 0;
*pActual_size = p_new ? getAllocationSize(p_new) : 0;
}
else if (!size)
{
free(p);
deallocate(p);
p_new = NULL;

if (pActual_size)
@@ -117,7 +188,7 @@
}
else if (movable)
{
p_new = realloc(p, size);
p_new = reallocate(p, size);

if (p_new)
{
@@ -127,7 +198,7 @@
}

if (pActual_size)
*pActual_size = _msize(p_final_block);
*pActual_size = getAllocationSize(p_final_block);
}

return p_new;
@@ -136,7 +207,7 @@
static size_t lzham_default_msize(void* p, void* pUser_data)
{
LZHAM_NOTE_UNUSED(pUser_data);
return p ? _msize(p) : 0;
return p ? getAllocationSize(p) : 0;
}

static lzham_realloc_func g_pRealloc = lzham_default_realloc;
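On the non-Android paths the patch keeps the platform's own query: malloc_usable_size on glibc, malloc_size on Apple, _msize on Win32. These can legitimately report more than was requested, since allocators round block sizes up, and lzham_default_realloc forwards that rounded-up figure through pActual_size. A small standalone check of that behaviour, assuming a glibc system where malloc_usable_size is declared in <malloc.h>:

#include <malloc.h>   // malloc_usable_size() on glibc
#include <cstdio>
#include <cstdlib>

int main()
{
   void* p = std::malloc(100);
   if (!p)
      return 1;

   // The allocator may hand back a larger block than requested; LZHAM reports
   // this usable size to callers via pActual_size so the slack is not wasted.
   std::printf("requested 100, usable %zu\n", malloc_usable_size(p));

   std::free(p);
   return 0;
}

On Apple targets the same experiment would swap in <malloc/malloc.h> and malloc_size.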