| author | Jonathan Peyton <jonathan.l.peyton@intel.com> | 2016-03-29 20:10:00 +0000 |
| --- | --- | --- |
| committer | Jonathan Peyton <jonathan.l.peyton@intel.com> | 2016-03-29 20:10:00 +0000 |
| commit | ee2f96c79b920ab3375e20281805eeff5d93ebc4 (patch) | |
| tree | b313ec5072e32d35e07b23088d2450371e45c37d /openmp/runtime/src | |
| parent | a58563d8c9d8be3f1c6257f010582e162b0b9fe4 (diff) | |
| download | bcm5719-llvm-ee2f96c79b920ab3375e20281805eeff5d93ebc4.tar.gz bcm5719-llvm-ee2f96c79b920ab3375e20281805eeff5d93ebc4.zip | |
Fix incorrect indention in kmp_alloc.c
llvm-svn: 264777
Diffstat (limited to 'openmp/runtime/src')
| -rw-r--r-- | openmp/runtime/src/kmp_alloc.c | 134 |
1 file changed, 61 insertions, 73 deletions
```diff
diff --git a/openmp/runtime/src/kmp_alloc.c b/openmp/runtime/src/kmp_alloc.c
index 21518c94a73..668c8b556df 100644
--- a/openmp/runtime/src/kmp_alloc.c
+++ b/openmp/runtime/src/kmp_alloc.c
@@ -1427,8 +1427,7 @@ void *
 kmpc_malloc( size_t size )
 {
     void * ptr;
-    ptr = bget( __kmp_entry_thread(), (bufsize) size );
-
+    ptr = bget( __kmp_entry_thread(), (bufsize) size );
     return ptr;
 }
 
@@ -1436,8 +1435,7 @@ void *
 kmpc_calloc( size_t nelem, size_t elsize )
 {
     void * ptr;
-    ptr = bgetz( __kmp_entry_thread(), (bufsize) (nelem * elsize) );
-
+    ptr = bgetz( __kmp_entry_thread(), (bufsize) (nelem * elsize) );
     return ptr;
 }
 
@@ -1445,19 +1443,17 @@ void *
 kmpc_realloc( void * ptr, size_t size )
 {
     void * result = NULL;
-
-    if ( ptr == NULL ) {
-        // If pointer is NULL, realloc behaves like malloc.
-        result = bget( __kmp_entry_thread(), (bufsize) size );
-    } else if ( size == 0 ) {
-        // If size is 0, realloc behaves like free.
-        // The thread must be registered by the call to kmpc_malloc() or kmpc_calloc() before.
-        // So it should be safe to call __kmp_get_thread(), not __kmp_entry_thread().
-        brel( __kmp_get_thread(), ptr );
-    } else {
-        result = bgetr( __kmp_entry_thread(), ptr, (bufsize) size );
-    }; // if
-
+    if ( ptr == NULL ) {
+        // If pointer is NULL, realloc behaves like malloc.
+        result = bget( __kmp_entry_thread(), (bufsize) size );
+    } else if ( size == 0 ) {
+        // If size is 0, realloc behaves like free.
+        // The thread must be registered by the call to kmpc_malloc() or kmpc_calloc() before.
+        // So it should be safe to call __kmp_get_thread(), not __kmp_entry_thread().
+        brel( __kmp_get_thread(), ptr );
+    } else {
+        result = bgetr( __kmp_entry_thread(), ptr, (bufsize) size );
+    }; // if
     return result;
 }
 
@@ -1470,9 +1466,9 @@ kmpc_free( void * ptr )
         return;
     }; // if
     if ( ptr != NULL ) {
-        kmp_info_t *th = __kmp_get_thread();
-        __kmp_bget_dequeue( th );    /* Release any queued buffers */
-        brel( th, ptr );
+        kmp_info_t *th = __kmp_get_thread();
+        __kmp_bget_dequeue( th );    /* Release any queued buffers */
+        brel( th, ptr );
     };
 }
 
@@ -1489,7 +1485,7 @@ ___kmp_thread_malloc( kmp_info_t *th, size_t size KMP_SRC_LOC_DECL )
         (int) size KMP_SRC_LOC_PARM ) );
-    ptr = bget( th, (bufsize) size );
+    ptr = bget( th, (bufsize) size );
     KE_TRACE( 30, ( "<- __kmp_thread_malloc() returns %p\n", ptr ) );
     return ptr;
 }
@@ -1505,7 +1501,7 @@ ___kmp_thread_calloc( kmp_info_t *th, size_t nelem, size_t elsize KMP_SRC_LOC_DE
         (int) elsize KMP_SRC_LOC_PARM ) );
-    ptr = bgetz( th, (bufsize) (nelem * elsize) );
+    ptr = bgetz( th, (bufsize) (nelem * elsize) );
     KE_TRACE( 30, ( "<- __kmp_thread_calloc() returns %p\n", ptr ) );
     return ptr;
 }
@@ -1520,7 +1516,7 @@ ___kmp_thread_realloc( kmp_info_t *th, void *ptr, size_t size KMP_SRC_LOC_DECL )
         (int) size KMP_SRC_LOC_PARM ) );
-    ptr = bgetr( th, ptr, (bufsize) size );
+    ptr = bgetr( th, ptr, (bufsize) size );
     KE_TRACE( 30, ( "<- __kmp_thread_realloc() returns %p\n", ptr ) );
     return ptr;
 }
@@ -1535,8 +1531,8 @@ ___kmp_thread_free( kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL )
         KMP_SRC_LOC_PARM ) );
     if ( ptr != NULL ) {
-        __kmp_bget_dequeue( th );    /* Release any queued buffers */
-        brel( th, ptr );
+        __kmp_bget_dequeue( th );    /* Release any queued buffers */
+        brel( th, ptr );
     }
     KE_TRACE( 30, ( "<- __kmp_thread_free()\n" ) );
 }
 
@@ -1602,11 +1598,11 @@ ___kmp_allocate_align( size_t size, size_t alignment KMP_SRC_LOC_DECL )
     descr.size_aligned = size;
     descr.size_allocated = descr.size_aligned + sizeof( kmp_mem_descr_t ) + alignment;
 
-    #if KMP_DEBUG
-        descr.ptr_allocated = _malloc_src_loc( descr.size_allocated, _file_, _line_ );
-    #else
+#if KMP_DEBUG
+    descr.ptr_allocated = _malloc_src_loc( descr.size_allocated, _file_, _line_ );
+#else
     descr.ptr_allocated = malloc_src_loc( descr.size_allocated KMP_SRC_LOC_PARM );
-    #endif
+#endif
     KE_TRACE( 10, (
         " malloc( %d ) returned %p\n",
         (int) descr.size_allocated,
@@ -1638,11 +1634,10 @@ ___kmp_allocate_align( size_t size, size_t alignment KMP_SRC_LOC_DECL )
     KMP_DEBUG_ASSERT( addr_descr + sizeof( kmp_mem_descr_t ) == addr_aligned );
     KMP_DEBUG_ASSERT( addr_aligned + descr.size_aligned <= addr_allocated + descr.size_allocated );
     KMP_DEBUG_ASSERT( addr_aligned % alignment == 0 );
-
-    #ifdef KMP_DEBUG
-        memset( descr.ptr_allocated, 0xEF, descr.size_allocated );
-        // Fill allocated memory block with 0xEF.
-    #endif
+#ifdef KMP_DEBUG
+    memset( descr.ptr_allocated, 0xEF, descr.size_allocated );
+    // Fill allocated memory block with 0xEF.
+#endif
     memset( descr.ptr_aligned, 0x00, descr.size_aligned );
         // Fill the aligned memory block (which is intended for using by caller) with 0x00. Do not
         // put this filling under KMP_DEBUG condition! Many callers expect zeroed memory. (Padding
@@ -1653,7 +1648,6 @@ ___kmp_allocate_align( size_t size, size_t alignment KMP_SRC_LOC_DECL )
     KE_TRACE( 25, ( "<- ___kmp_allocate_align() returns %p\n", descr.ptr_aligned ) );
     return descr.ptr_aligned;
-
 } // func ___kmp_allocate_align
 
@@ -1666,13 +1660,11 @@ ___kmp_allocate_align( size_t size, size_t alignment KMP_SRC_LOC_DECL )
 void *
 ___kmp_allocate( size_t size KMP_SRC_LOC_DECL )
 {
-
     void * ptr;
     KE_TRACE( 25, ( "-> __kmp_allocate( %d ) called from %s:%d\n", (int) size KMP_SRC_LOC_PARM ) );
-    ptr = ___kmp_allocate_align( size, __kmp_align_alloc KMP_SRC_LOC_PARM );
+    ptr = ___kmp_allocate_align( size, __kmp_align_alloc KMP_SRC_LOC_PARM );
     KE_TRACE( 25, ( "<- __kmp_allocate() returns %p\n", ptr ) );
     return ptr;
-
 } // func ___kmp_allocate
 
 #if (BUILD_MEMORY==FIRST_TOUCH)
@@ -1726,7 +1718,7 @@ ___kmp_page_allocate( size_t size KMP_SRC_LOC_DECL )
         (int) size KMP_SRC_LOC_PARM ) );
-    ptr = ___kmp_allocate_align( size, page_size KMP_SRC_LOC_PARM );
+    ptr = ___kmp_allocate_align( size, page_size KMP_SRC_LOC_PARM );
     KE_TRACE( 25, ( "<- __kmp_page_allocate( %d ) returns %p\n", (int) size, ptr ) );
     return ptr;
 } // ___kmp_page_allocate
@@ -1738,49 +1730,45 @@ ___kmp_page_allocate( size_t size KMP_SRC_LOC_DECL )
 void
 ___kmp_free( void * ptr KMP_SRC_LOC_DECL )
 {
+    kmp_mem_descr_t descr;
+    kmp_uintptr_t addr_allocated;    // Address returned by malloc().
+    kmp_uintptr_t addr_aligned;      // Aligned address passed by caller.
 
-    kmp_mem_descr_t descr;
-    kmp_uintptr_t addr_allocated;    // Address returned by malloc().
-    kmp_uintptr_t addr_aligned;      // Aligned address passed by caller.
-
-    KE_TRACE( 25, ( "-> __kmp_free( %p ) called from %s:%d\n", ptr KMP_SRC_LOC_PARM ) );
-    KMP_ASSERT( ptr != NULL );
-
-    descr = * ( kmp_mem_descr_t *) ( (kmp_uintptr_t) ptr - sizeof( kmp_mem_descr_t ) );
+    KE_TRACE( 25, ( "-> __kmp_free( %p ) called from %s:%d\n", ptr KMP_SRC_LOC_PARM ) );
+    KMP_ASSERT( ptr != NULL );
 
-    KE_TRACE( 26, ( " __kmp_free: "
-        "ptr_allocated=%p, size_allocated=%d, "
-        "ptr_aligned=%p, size_aligned=%d\n",
-        descr.ptr_allocated, (int) descr.size_allocated,
-        descr.ptr_aligned, (int) descr.size_aligned ));
+    descr = * ( kmp_mem_descr_t *) ( (kmp_uintptr_t) ptr - sizeof( kmp_mem_descr_t ) );
 
-    addr_allocated = (kmp_uintptr_t) descr.ptr_allocated;
-    addr_aligned = (kmp_uintptr_t) descr.ptr_aligned;
+    KE_TRACE( 26, ( " __kmp_free: "
+        "ptr_allocated=%p, size_allocated=%d, "
+        "ptr_aligned=%p, size_aligned=%d\n",
+        descr.ptr_allocated, (int) descr.size_allocated,
+        descr.ptr_aligned, (int) descr.size_aligned ));
 
-    KMP_DEBUG_ASSERT( addr_aligned % CACHE_LINE == 0 );
-    KMP_DEBUG_ASSERT( descr.ptr_aligned == ptr );
-    KMP_DEBUG_ASSERT( addr_allocated + sizeof( kmp_mem_descr_t ) <= addr_aligned );
-    KMP_DEBUG_ASSERT( descr.size_aligned < descr.size_allocated );
-    KMP_DEBUG_ASSERT( addr_aligned + descr.size_aligned <= addr_allocated + descr.size_allocated );
+    addr_allocated = (kmp_uintptr_t) descr.ptr_allocated;
+    addr_aligned = (kmp_uintptr_t) descr.ptr_aligned;
 
-    #ifdef KMP_DEBUG
-        memset( descr.ptr_allocated, 0xEF, descr.size_allocated );
-        // Fill memory block with 0xEF, it helps catch using freed memory.
-    #endif
+    KMP_DEBUG_ASSERT( addr_aligned % CACHE_LINE == 0 );
+    KMP_DEBUG_ASSERT( descr.ptr_aligned == ptr );
+    KMP_DEBUG_ASSERT( addr_allocated + sizeof( kmp_mem_descr_t ) <= addr_aligned );
+    KMP_DEBUG_ASSERT( descr.size_aligned < descr.size_allocated );
+    KMP_DEBUG_ASSERT( addr_aligned + descr.size_aligned <= addr_allocated + descr.size_allocated );
 
-    #ifndef LEAK_MEMORY
-        KE_TRACE( 10, ( " free( %p )\n", descr.ptr_allocated ) );
-        # ifdef KMP_DEBUG
-            _free_src_loc( descr.ptr_allocated, _file_, _line_ );
-        # else
-            free_src_loc( descr.ptr_allocated KMP_SRC_LOC_PARM );
-        # endif
-    #endif
+    #ifdef KMP_DEBUG
+        memset( descr.ptr_allocated, 0xEF, descr.size_allocated );
+        // Fill memory block with 0xEF, it helps catch using freed memory.
+    #endif
+    #ifndef LEAK_MEMORY
+        KE_TRACE( 10, ( " free( %p )\n", descr.ptr_allocated ) );
+        # ifdef KMP_DEBUG
+            _free_src_loc( descr.ptr_allocated, _file_, _line_ );
+        # else
+            free_src_loc( descr.ptr_allocated KMP_SRC_LOC_PARM );
+        # endif
+    #endif
 
     KMP_MB();
-
     KE_TRACE( 25, ( "<- __kmp_free() returns\n" ) );
-
 } // func ___kmp_free
 
 /* ------------------------------------------------------------------------ */
```
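For orientation only (not part of the commit): a minimal sketch of how the kmpc_* allocation entry points touched by this patch are typically used. The behavior follows the comments visible in kmpc_realloc() above; the prototypes are written out here as assumptions for a self-contained example rather than quoted from the runtime headers.

```c
#include <stddef.h>

/* Assumed prototypes of the entry points shown in the diff; in real code
   they come from the OpenMP runtime, not from local declarations. */
void *kmpc_malloc( size_t size );
void *kmpc_calloc( size_t nelem, size_t elsize );
void *kmpc_realloc( void *ptr, size_t size );
void  kmpc_free( void *ptr );

static void example( void )
{
    /* Allocate from the calling thread's bget pool. */
    double *a = (double *) kmpc_malloc( 64 * sizeof( double ) );

    /* With a NULL pointer, kmpc_realloc() behaves like kmpc_malloc(). */
    double *b = (double *) kmpc_realloc( NULL, 32 * sizeof( double ) );

    /* With size 0, kmpc_realloc() behaves like a free of the buffer. */
    kmpc_realloc( b, 0 );

    kmpc_free( a );
}
```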