author     jason <jason@138bc75d-0d04-0410-961f-82ee72b054a4>   1998-09-02 17:25:15 +0000
committer  jason <jason@138bc75d-0d04-0410-961f-82ee72b054a4>   1998-09-02 17:25:15 +0000
commit     79f8bd54b87824aefef05ecc6431fb51215b1af7 (patch)
tree       f1b8986118d98ea46eb8767eba9ca67b12541d4f /libstdc++/stl/stl_alloc.h
parent     0df40fee7af4306861fad8917697f25d7e2b4ea8 (diff)
download   ppe42-gcc-79f8bd54b87824aefef05ecc6431fb51215b1af7.tar.gz
           ppe42-gcc-79f8bd54b87824aefef05ecc6431fb51215b1af7.zip
* algorithm alloc.h defalloc.h hash_map.h hash_set.h iterator memory
  pthread_alloc pthread_alloc.h rope ropeimpl.h stl_algo.h stl_algobase.h
  stl_alloc.h stl_bvector.h stl_config.h stl_construct.h stl_deque.h
  stl_function.h stl_hash_fun.h stl_hash_map.h stl_hash_set.h stl_hashtable.h
  stl_heap.h stl_iterator.h stl_list.h stl_map.h stl_multimap.h stl_multiset.h
  stl_numeric.h stl_pair.h stl_queue.h stl_raw_storage_iter.h stl_relops.h
  stl_rope.h stl_set.h stl_slist.h stl_stack.h stl_tempbuf.h stl_tree.h
  stl_uninitialized.h stl_vector.h tempbuf.h type_traits.h:
  Update to SGI STL 3.11.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@22190 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libstdc++/stl/stl_alloc.h')
-rw-r--r--   libstdc++/stl/stl_alloc.h   888
1 files changed, 612 insertions, 276 deletions
diff --git a/libstdc++/stl/stl_alloc.h b/libstdc++/stl/stl_alloc.h
index 2c3de40f61b..3a773b4bbc0 100644
--- a/libstdc++/stl/stl_alloc.h
+++ b/libstdc++/stl/stl_alloc.h
@@ -40,15 +40,12 @@
#if 0
# include <new>
-# define __THROW_BAD_ALLOC throw bad_alloc
+# define __THROW_BAD_ALLOC throw bad_alloc()
#elif !defined(__THROW_BAD_ALLOC)
# include <iostream.h>
# define __THROW_BAD_ALLOC cerr << "out of memory" << endl; exit(1)
#endif
-#ifndef __ALLOC
-# define __ALLOC alloc
-#endif
#ifdef __STL_WIN32THREADS
# include <windows.h>
#endif
@@ -72,9 +69,9 @@
// lock. Performance may not be adequate.
# include <pthread.h>
# define __NODE_ALLOCATOR_LOCK \
- if (threads) pthread_mutex_lock(&__node_allocator_lock)
+ if (threads) pthread_mutex_lock(&_S_node_allocator_lock)
# define __NODE_ALLOCATOR_UNLOCK \
- if (threads) pthread_mutex_unlock(&__node_allocator_lock)
+ if (threads) pthread_mutex_unlock(&_S_node_allocator_lock)
# define __NODE_ALLOCATOR_THREADS true
# define __VOLATILE volatile // Needed at -O3 on SGI
# endif
@@ -82,9 +79,9 @@
// The lock needs to be initialized by constructing an allocator
// objects of the right type. We do that here explicitly for alloc.
# define __NODE_ALLOCATOR_LOCK \
- EnterCriticalSection(&__node_allocator_lock)
+ EnterCriticalSection(&_S_node_allocator_lock)
# define __NODE_ALLOCATOR_UNLOCK \
- LeaveCriticalSection(&__node_allocator_lock)
+ LeaveCriticalSection(&_S_node_allocator_lock)
# define __NODE_ALLOCATOR_THREADS true
# define __VOLATILE volatile // may not be needed
# endif /* WIN32THREADS */
@@ -100,9 +97,9 @@
// would be cleaner but fails with certain levels of standard
// conformance.
# define __NODE_ALLOCATOR_LOCK if (threads && __us_rsthread_malloc) \
- { __lock(&__node_allocator_lock); }
+ { _S_lock(&_S_node_allocator_lock); }
# define __NODE_ALLOCATOR_UNLOCK if (threads && __us_rsthread_malloc) \
- { __unlock(&__node_allocator_lock); }
+ { _S_unlock(&_S_node_allocator_lock); }
# define __NODE_ALLOCATOR_THREADS true
# define __VOLATILE volatile // Needed at -O3 on SGI
# endif
@@ -131,100 +128,100 @@ __STL_BEGIN_NAMESPACE
# endif
#endif
-template <int inst>
+template <int __inst>
class __malloc_alloc_template {
private:
-static void *oom_malloc(size_t);
-
-static void *oom_realloc(void *, size_t);
+ static void* _S_oom_malloc(size_t);
+ static void* _S_oom_realloc(void*, size_t);
#ifndef __STL_STATIC_TEMPLATE_MEMBER_BUG
- static void (* __malloc_alloc_oom_handler)();
+ static void (* __malloc_alloc_oom_handler)();
#endif
public:
-static void * allocate(size_t n)
-{
- void *result = malloc(n);
- if (0 == result) result = oom_malloc(n);
- return result;
-}
+ static void* allocate(size_t __n)
+ {
+ void* __result = malloc(__n);
+ if (0 == __result) __result = _S_oom_malloc(__n);
+ return __result;
+ }
-static void deallocate(void *p, size_t /* n */)
-{
- free(p);
-}
+ static void deallocate(void* __p, size_t /* __n */)
+ {
+ free(__p);
+ }
-static void * reallocate(void *p, size_t /* old_sz */, size_t new_sz)
-{
- void * result = realloc(p, new_sz);
- if (0 == result) result = oom_realloc(p, new_sz);
- return result;
-}
+ static void* reallocate(void* __p, size_t /* old_sz */, size_t __new_sz)
+ {
+ void* __result = realloc(__p, __new_sz);
+ if (0 == __result) __result = _S_oom_realloc(__p, __new_sz);
+ return __result;
+ }
-static void (* set_malloc_handler(void (*f)()))()
-{
- void (* old)() = __malloc_alloc_oom_handler;
- __malloc_alloc_oom_handler = f;
- return(old);
-}
+ static void (* __set_malloc_handler(void (*__f)()))()
+ {
+ void (* __old)() = __malloc_alloc_oom_handler;
+ __malloc_alloc_oom_handler = __f;
+ return(__old);
+ }
};
// malloc_alloc out-of-memory handling
#ifndef __STL_STATIC_TEMPLATE_MEMBER_BUG
-template <int inst>
-void (* __malloc_alloc_template<inst>::__malloc_alloc_oom_handler)() = 0;
+template <int __inst>
+void (* __malloc_alloc_template<__inst>::__malloc_alloc_oom_handler)() = 0;
#endif
-template <int inst>
-void * __malloc_alloc_template<inst>::oom_malloc(size_t n)
+template <int __inst>
+void*
+__malloc_alloc_template<__inst>::_S_oom_malloc(size_t __n)
{
- void (* my_malloc_handler)();
- void *result;
+ void (* __my_malloc_handler)();
+ void* __result;
for (;;) {
- my_malloc_handler = __malloc_alloc_oom_handler;
- if (0 == my_malloc_handler) { __THROW_BAD_ALLOC; }
- (*my_malloc_handler)();
- result = malloc(n);
- if (result) return(result);
+ __my_malloc_handler = __malloc_alloc_oom_handler;
+ if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
+ (*__my_malloc_handler)();
+ __result = malloc(__n);
+ if (__result) return(__result);
}
}
-template <int inst>
-void * __malloc_alloc_template<inst>::oom_realloc(void *p, size_t n)
+template <int __inst>
+void* __malloc_alloc_template<__inst>::_S_oom_realloc(void* __p, size_t __n)
{
- void (* my_malloc_handler)();
- void *result;
+ void (* __my_malloc_handler)();
+ void* __result;
for (;;) {
- my_malloc_handler = __malloc_alloc_oom_handler;
- if (0 == my_malloc_handler) { __THROW_BAD_ALLOC; }
- (*my_malloc_handler)();
- result = realloc(p, n);
- if (result) return(result);
+ __my_malloc_handler = __malloc_alloc_oom_handler;
+ if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
+ (*__my_malloc_handler)();
+ __result = realloc(__p, __n);
+ if (__result) return(__result);
}
}
typedef __malloc_alloc_template<0> malloc_alloc;
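
For illustration only (not part of this patch): installing an out-of-memory
handler on the malloc-based allocator works like set_new_handler -- allocate()
keeps calling the installed handler and retrying malloc until one of them
gives up. The handler body below is hypothetical.

    #include <cstdlib>

    static void __release_caches()              // hypothetical application callback
    {
        // Free application-owned memory here; abort if nothing can be freed.
        abort();
    }

    void __oom_handler_demo()
    {
        malloc_alloc::__set_malloc_handler(&__release_caches);
        void* __p = malloc_alloc::allocate(64);  // retries via the handler on failure
        malloc_alloc::deallocate(__p, 64);
    }
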
-template<class T, class Alloc>
+template<class _Tp, class _Alloc>
class simple_alloc {
public:
- static T *allocate(size_t n)
- { return 0 == n? 0 : (T*) Alloc::allocate(n * sizeof (T)); }
- static T *allocate(void)
- { return (T*) Alloc::allocate(sizeof (T)); }
- static void deallocate(T *p, size_t n)
- { if (0 != n) Alloc::deallocate(p, n * sizeof (T)); }
- static void deallocate(T *p)
- { Alloc::deallocate(p, sizeof (T)); }
+ static _Tp* allocate(size_t __n)
+ { return 0 == __n ? 0 : (_Tp*) _Alloc::allocate(__n * sizeof (_Tp)); }
+ static _Tp* allocate(void)
+ { return (_Tp*) _Alloc::allocate(sizeof (_Tp)); }
+ static void deallocate(_Tp* __p, size_t __n)
+ { if (0 != __n) _Alloc::deallocate(__p, __n * sizeof (_Tp)); }
+ static void deallocate(_Tp* __p)
+ { _Alloc::deallocate(__p, sizeof (_Tp)); }
};
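
For illustration (not part of this patch): simple_alloc only rescales a
byte-oriented allocator into element counts, so typical use looks like the
sketch below, assuming the malloc_alloc typedef above.

    typedef simple_alloc<int, malloc_alloc> _Int_alloc;   // illustrative name

    void __simple_alloc_demo()
    {
        int* __buf = _Int_alloc::allocate(10);   // forwards 10 * sizeof(int) bytes
        _Int_alloc::deallocate(__buf, 10);       // element count must match the allocation
    }
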
// Allocator adaptor to check size arguments for debugging.
@@ -232,41 +229,40 @@ public:
// NDEBUG, but it's far better to just use the underlying allocator
// instead when no checking is desired.
// There is some evidence that this can confuse Purify.
-template <class Alloc>
+template <class _Alloc>
class debug_alloc {
private:
-enum {extra = 8}; // Size of space used to store size. Note
+ enum {_S_extra = 8}; // Size of space used to store size. Note
// that this must be large enough to preserve
// alignment.
public:
-static void * allocate(size_t n)
-{
- char *result = (char *)Alloc::allocate(n + extra);
- *(size_t *)result = n;
- return result + extra;
-}
-
-static void deallocate(void *p, size_t n)
-{
- char * real_p = (char *)p - extra;
- assert(*(size_t *)real_p == n);
- Alloc::deallocate(real_p, n + extra);
-}
+ static void* allocate(size_t __n)
+ {
+ char* __result = (char*)_Alloc::allocate(__n + _S_extra);
+ *(size_t*)__result = __n;
+ return __result + _S_extra;
+ }
-static void * reallocate(void *p, size_t old_sz, size_t new_sz)
-{
- char * real_p = (char *)p - extra;
- assert(*(size_t *)real_p == old_sz);
- char * result = (char *)
- Alloc::reallocate(real_p, old_sz + extra, new_sz + extra);
- *(size_t *)result = new_sz;
- return result + extra;
-}
+ static void deallocate(void* __p, size_t __n)
+ {
+ char* __real_p = (char*)__p - _S_extra;
+ assert(*(size_t*)__real_p == __n);
+ _Alloc::deallocate(__real_p, __n + _S_extra);
+ }
+ static void* reallocate(void* __p, size_t __old_sz, size_t __new_sz)
+ {
+ char* __real_p = (char*)__p - _S_extra;
+ assert(*(size_t*)__real_p == __old_sz);
+ char* __result = (char*)
+ _Alloc::reallocate(__real_p, __old_sz + _S_extra, __new_sz + _S_extra);
+ *(size_t*)__result = __new_sz;
+ return __result + _S_extra;
+ }
};
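
For illustration (not part of this patch): debug_alloc stores the requested
size in the _S_extra bytes in front of each block and asserts on deallocation
that the caller handed back the same size.

    typedef debug_alloc<malloc_alloc> _Checked_alloc;      // illustrative name

    void __debug_alloc_demo()
    {
        void* __p = _Checked_alloc::allocate(32);  // actually obtains 32 + 8 bytes
        _Checked_alloc::deallocate(__p, 32);       // asserts if 32 differs from the stored size
    }
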
@@ -286,10 +282,10 @@ typedef malloc_alloc single_client_alloc;
// DISAPPEAR in the future. Clients should just use alloc for now.
//
// Important implementation properties:
-// 1. If the client request an object of size > __MAX_BYTES, the resulting
+// 1. If the client requests an object of size > _MAX_BYTES, the resulting
// object will be obtained directly from malloc.
// 2. In all other cases, we allocate an object of size exactly
-// ROUND_UP(requested_size). Thus the client has enough size
+// _S_round_up(requested_size). Thus the client has enough size
// information that we can return the object to the proper free list
// without permanently losing part of the object.
//
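
For concreteness (not part of this patch), the rounding this comment refers
to: with 8-byte alignment a request is padded to the next multiple of 8 and
mapped onto one of 16 free lists. A standalone check of the arithmetic used
by _S_round_up and _S_freelist_index, assuming _ALIGN == 8 and _MAX_BYTES == 128:

    #include <assert.h>
    #include <stddef.h>

    static size_t __round_up(size_t __bytes)       { return (__bytes + 8 - 1) & ~((size_t) 8 - 1); }
    static size_t __freelist_index(size_t __bytes) { return (__bytes + 8 - 1) / 8 - 1; }

    void __rounding_demo()
    {
        assert(__round_up(1)  == 8);           // smallest request uses the 8-byte list
        assert(__round_up(13) == 16);          // padded up to the next multiple of 8
        assert(__freelist_index(16)  == 1);    // second of the 16 free lists
        assert(__freelist_index(128) == 15);   // largest size served from a free list
    }
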
@@ -305,9 +301,9 @@ typedef malloc_alloc single_client_alloc;
// different types, limiting the utility of this approach.
#ifdef __SUNPRO_CC
// breaks if we make these template class members:
- enum {__ALIGN = 8};
- enum {__MAX_BYTES = 128};
- enum {__NFREELISTS = __MAX_BYTES/__ALIGN};
+ enum {_ALIGN = 8};
+ enum {_MAX_BYTES = 128};
+ enum {_NFREELISTS = _MAX_BYTES/_ALIGN};
#endif
template <bool threads, int inst>
@@ -317,123 +313,124 @@ private:
// Really we should use static const int x = N
// instead of enum { x = N }, but few compilers accept the former.
# ifndef __SUNPRO_CC
- enum {__ALIGN = 8};
- enum {__MAX_BYTES = 128};
- enum {__NFREELISTS = __MAX_BYTES/__ALIGN};
+ enum {_ALIGN = 8};
+ enum {_MAX_BYTES = 128};
+ enum {_NFREELISTS = _MAX_BYTES/_ALIGN};
# endif
- static size_t ROUND_UP(size_t bytes) {
- return (((bytes) + __ALIGN-1) & ~(__ALIGN - 1));
- }
+ static size_t
+ _S_round_up(size_t __bytes)
+ { return (((__bytes) + _ALIGN-1) & ~(_ALIGN - 1)); }
+
__PRIVATE:
- union obj {
- union obj * free_list_link;
- char client_data[1]; /* The client sees this. */
+ union _Obj {
+ union _Obj* _M_free_list_link;
+ char _M_client_data[1]; /* The client sees this. */
};
private:
# ifdef __SUNPRO_CC
- static obj * __VOLATILE free_list[];
+ static _Obj* __VOLATILE _S_free_list[];
// Specifying a size results in duplicate def for 4.1
# else
- static obj * __VOLATILE free_list[__NFREELISTS];
+ static _Obj* __VOLATILE _S_free_list[_NFREELISTS];
# endif
- static size_t FREELIST_INDEX(size_t bytes) {
- return (((bytes) + __ALIGN-1)/__ALIGN - 1);
+ static size_t _S_freelist_index(size_t __bytes) {
+ return (((__bytes) + _ALIGN-1)/_ALIGN - 1);
}
- // Returns an object of size n, and optionally adds to size n free list.
- static void *refill(size_t n);
+ // Returns an object of size __n, and optionally adds to size __n free list.
+ static void* _S_refill(size_t __n);
// Allocates a chunk for nobjs of size "size". nobjs may be reduced
// if it is inconvenient to allocate the requested number.
- static char *chunk_alloc(size_t size, int &nobjs);
+ static char* _S_chunk_alloc(size_t __size, int& __nobjs);
// Chunk allocation state.
- static char *start_free;
- static char *end_free;
- static size_t heap_size;
+ static char* _S_start_free;
+ static char* _S_end_free;
+ static size_t _S_heap_size;
# ifdef __STL_SGI_THREADS
- static volatile unsigned long __node_allocator_lock;
- static void __lock(volatile unsigned long *);
- static inline void __unlock(volatile unsigned long *);
+ static volatile unsigned long _S_node_allocator_lock;
+ static void _S_lock(volatile unsigned long*);
+ static inline void _S_unlock(volatile unsigned long*);
# endif
# ifdef __STL_PTHREADS
- static pthread_mutex_t __node_allocator_lock;
+ static pthread_mutex_t _S_node_allocator_lock;
# endif
# ifdef __STL_WIN32THREADS
- static CRITICAL_SECTION __node_allocator_lock;
- static bool __node_allocator_lock_initialized;
+ static CRITICAL_SECTION _S_node_allocator_lock;
+ static bool _S_node_allocator_lock_initialized;
public:
__default_alloc_template() {
// This assumes the first constructor is called before threads
// are started.
- if (!__node_allocator_lock_initialized) {
- InitializeCriticalSection(&__node_allocator_lock);
- __node_allocator_lock_initialized = true;
+ if (!_S_node_allocator_lock_initialized) {
+ InitializeCriticalSection(&_S_node_allocator_lock);
+ _S_node_allocator_lock_initialized = true;
}
}
private:
# endif
- class lock {
+ class _Lock {
public:
- lock() { __NODE_ALLOCATOR_LOCK; }
- ~lock() { __NODE_ALLOCATOR_UNLOCK; }
+ _Lock() { __NODE_ALLOCATOR_LOCK; }
+ ~_Lock() { __NODE_ALLOCATOR_UNLOCK; }
};
- friend class lock;
+ friend class _Lock;
public:
- /* n must be > 0 */
- static void * allocate(size_t n)
+ /* __n must be > 0 */
+ static void* allocate(size_t __n)
{
- obj * __VOLATILE * my_free_list;
- obj * __RESTRICT result;
+ _Obj* __VOLATILE* __my_free_list;
+ _Obj* __RESTRICT __result;
- if (n > (size_t) __MAX_BYTES) {
- return(malloc_alloc::allocate(n));
+ if (__n > (size_t) _MAX_BYTES) {
+ return(malloc_alloc::allocate(__n));
}
- my_free_list = free_list + FREELIST_INDEX(n);
+ __my_free_list = _S_free_list + _S_freelist_index(__n);
// Acquire the lock here with a constructor call.
// This ensures that it is released in exit or during stack
// unwinding.
# ifndef _NOTHREADS
/*REFERENCED*/
- lock lock_instance;
+ _Lock __lock_instance;
# endif
- result = *my_free_list;
- if (result == 0) {
- void *r = refill(ROUND_UP(n));
- return r;
+ __result = *__my_free_list;
+ if (__result == 0) {
+ void* __r = _S_refill(_S_round_up(__n));
+ return __r;
}
- *my_free_list = result -> free_list_link;
- return (result);
+ *__my_free_list = __result -> _M_free_list_link;
+ return (__result);
};
- /* p may not be 0 */
- static void deallocate(void *p, size_t n)
+ /* __p may not be 0 */
+ static void deallocate(void* __p, size_t __n)
{
- obj *q = (obj *)p;
- obj * __VOLATILE * my_free_list;
+ _Obj* __q = (_Obj*)__p;
+ _Obj* __VOLATILE* __my_free_list;
- if (n > (size_t) __MAX_BYTES) {
- malloc_alloc::deallocate(p, n);
+ if (__n > (size_t) _MAX_BYTES) {
+ malloc_alloc::deallocate(__p, __n);
return;
}
- my_free_list = free_list + FREELIST_INDEX(n);
+ __my_free_list = _S_free_list + _S_freelist_index(__n);
// acquire lock
# ifndef _NOTHREADS
/*REFERENCED*/
- lock lock_instance;
+ _Lock __lock_instance;
# endif /* _NOTHREADS */
- q -> free_list_link = *my_free_list;
- *my_free_list = q;
+ __q -> _M_free_list_link = *__my_free_list;
+ *__my_free_list = __q;
// lock is released here
}
- static void * reallocate(void *p, size_t old_sz, size_t new_sz);
+ static void* reallocate(void* __p, size_t __old_sz, size_t __new_sz);
} ;
@@ -446,228 +443,246 @@ typedef __default_alloc_template<false, 0> single_client_alloc;
/* the malloc heap too much. */
/* We assume that size is properly aligned. */
/* We hold the allocation lock. */
-template <bool threads, int inst>
+template <bool __threads, int __inst>
char*
-__default_alloc_template<threads, inst>::chunk_alloc(size_t size, int& nobjs)
+__default_alloc_template<__threads, __inst>::_S_chunk_alloc(size_t __size,
+ int& __nobjs)
{
- char * result;
- size_t total_bytes = size * nobjs;
- size_t bytes_left = end_free - start_free;
-
- if (bytes_left >= total_bytes) {
- result = start_free;
- start_free += total_bytes;
- return(result);
- } else if (bytes_left >= size) {
- nobjs = bytes_left/size;
- total_bytes = size * nobjs;
- result = start_free;
- start_free += total_bytes;
- return(result);
+ char* __result;
+ size_t __total_bytes = __size * __nobjs;
+ size_t __bytes_left = _S_end_free - _S_start_free;
+
+ if (__bytes_left >= __total_bytes) {
+ __result = _S_start_free;
+ _S_start_free += __total_bytes;
+ return(__result);
+ } else if (__bytes_left >= __size) {
+ __nobjs = (int)(__bytes_left/__size);
+ __total_bytes = __size * __nobjs;
+ __result = _S_start_free;
+ _S_start_free += __total_bytes;
+ return(__result);
} else {
- size_t bytes_to_get = 2 * total_bytes + ROUND_UP(heap_size >> 4);
+ size_t __bytes_to_get =
+ 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
// Try to make use of the left-over piece.
- if (bytes_left > 0) {
- obj * __VOLATILE * my_free_list =
- free_list + FREELIST_INDEX(bytes_left);
+ if (__bytes_left > 0) {
+ _Obj* __VOLATILE* __my_free_list =
+ _S_free_list + _S_freelist_index(__bytes_left);
- ((obj *)start_free) -> free_list_link = *my_free_list;
- *my_free_list = (obj *)start_free;
+ ((_Obj*)_S_start_free) -> _M_free_list_link = *__my_free_list;
+ *__my_free_list = (_Obj*)_S_start_free;
}
- start_free = (char *)malloc(bytes_to_get);
- if (0 == start_free) {
- int i;
- obj * __VOLATILE * my_free_list, *p;
+ _S_start_free = (char*)malloc(__bytes_to_get);
+ if (0 == _S_start_free) {
+ size_t __i;
+ _Obj* __VOLATILE* __my_free_list;
+ _Obj* __p;
// Try to make do with what we have. That can't
// hurt. We do not try smaller requests, since that tends
// to result in disaster on multi-process machines.
- for (i = size; i <= __MAX_BYTES; i += __ALIGN) {
- my_free_list = free_list + FREELIST_INDEX(i);
- p = *my_free_list;
- if (0 != p) {
- *my_free_list = p -> free_list_link;
- start_free = (char *)p;
- end_free = start_free + i;
- return(chunk_alloc(size, nobjs));
+ for (__i = __size; __i <= _MAX_BYTES; __i += _ALIGN) {
+ __my_free_list = _S_free_list + _S_freelist_index(__i);
+ __p = *__my_free_list;
+ if (0 != __p) {
+ *__my_free_list = __p -> _M_free_list_link;
+ _S_start_free = (char*)__p;
+ _S_end_free = _S_start_free + __i;
+ return(_S_chunk_alloc(__size, __nobjs));
// Any leftover piece will eventually make it to the
// right free list.
}
}
- end_free = 0; // In case of exception.
- start_free = (char *)malloc_alloc::allocate(bytes_to_get);
+ _S_end_free = 0; // In case of exception.
+ _S_start_free = (char*)malloc_alloc::allocate(__bytes_to_get);
// This should either throw an
// exception or remedy the situation. Thus we assume it
// succeeded.
}
- heap_size += bytes_to_get;
- end_free = start_free + bytes_to_get;
- return(chunk_alloc(size, nobjs));
+ _S_heap_size += __bytes_to_get;
+ _S_end_free = _S_start_free + __bytes_to_get;
+ return(_S_chunk_alloc(__size, __nobjs));
}
}
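
For illustration (not part of this patch): the growth policy above requests
twice what the current refill needs plus a rounded-up sixteenth of everything
handed out so far, so chunks get larger as the heap grows. A worked example
of that arithmetic with made-up numbers:

    #include <assert.h>
    #include <stddef.h>

    void __chunk_size_demo()
    {
        size_t __size = 16, __nobjs = 20, __heap_size = 4096;           // illustrative values
        size_t __total_bytes  = __size * __nobjs;                       // 320
        size_t __round_up_16  = ((__heap_size >> 4) + 7) & ~(size_t) 7; // 256
        size_t __bytes_to_get = 2 * __total_bytes + __round_up_16;      // 640 + 256
        assert(__bytes_to_get == 896);
    }
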
-/* Returns an object of size n, and optionally adds to size n free list.*/
-/* We assume that n is properly aligned. */
+/* Returns an object of size __n, and optionally adds to size __n free list.*/
+/* We assume that __n is properly aligned. */
/* We hold the allocation lock. */
-template <bool threads, int inst>
-void* __default_alloc_template<threads, inst>::refill(size_t n)
+template <bool __threads, int __inst>
+void*
+__default_alloc_template<__threads, __inst>::_S_refill(size_t __n)
{
- int nobjs = 20;
- char * chunk = chunk_alloc(n, nobjs);
- obj * __VOLATILE * my_free_list;
- obj * result;
- obj * current_obj, * next_obj;
- int i;
+ int __nobjs = 20;
+ char* __chunk = _S_chunk_alloc(__n, __nobjs);
+ _Obj* __VOLATILE* __my_free_list;
+ _Obj* __result;
+ _Obj* __current_obj;
+ _Obj* __next_obj;
+ int __i;
- if (1 == nobjs) return(chunk);
- my_free_list = free_list + FREELIST_INDEX(n);
+ if (1 == __nobjs) return(__chunk);
+ __my_free_list = _S_free_list + _S_freelist_index(__n);
/* Build free list in chunk */
- result = (obj *)chunk;
- *my_free_list = next_obj = (obj *)(chunk + n);
- for (i = 1; ; i++) {
- current_obj = next_obj;
- next_obj = (obj *)((char *)next_obj + n);
- if (nobjs - 1 == i) {
- current_obj -> free_list_link = 0;
+ __result = (_Obj*)__chunk;
+ *__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
+ for (__i = 1; ; __i++) {
+ __current_obj = __next_obj;
+ __next_obj = (_Obj*)((char*)__next_obj + __n);
+ if (__nobjs - 1 == __i) {
+ __current_obj -> _M_free_list_link = 0;
break;
} else {
- current_obj -> free_list_link = next_obj;
+ __current_obj -> _M_free_list_link = __next_obj;
}
}
- return(result);
+ return(__result);
}
template <bool threads, int inst>
void*
-__default_alloc_template<threads, inst>::reallocate(void *p,
- size_t old_sz,
- size_t new_sz)
+__default_alloc_template<threads, inst>::reallocate(void* __p,
+ size_t __old_sz,
+ size_t __new_sz)
{
- void * result;
- size_t copy_sz;
+ void* __result;
+ size_t __copy_sz;
- if (old_sz > (size_t) __MAX_BYTES && new_sz > (size_t) __MAX_BYTES) {
- return(realloc(p, new_sz));
+ if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES) {
+ return(realloc(__p, __new_sz));
}
- if (ROUND_UP(old_sz) == ROUND_UP(new_sz)) return(p);
- result = allocate(new_sz);
- copy_sz = new_sz > old_sz? old_sz : new_sz;
- memcpy(result, p, copy_sz);
- deallocate(p, old_sz);
- return(result);
+ if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return(__p);
+ __result = allocate(__new_sz);
+ __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
+ memcpy(__result, __p, __copy_sz);
+ deallocate(__p, __old_sz);
+ return(__result);
}
#ifdef __STL_PTHREADS
- template <bool threads, int inst>
+ template <bool __threads, int __inst>
pthread_mutex_t
- __default_alloc_template<threads, inst>::__node_allocator_lock
+ __default_alloc_template<__threads, __inst>::_S_node_allocator_lock
= PTHREAD_MUTEX_INITIALIZER;
#endif
#ifdef __STL_WIN32THREADS
- template <bool threads, int inst> CRITICAL_SECTION
- __default_alloc_template<threads, inst>::__node_allocator_lock;
-
- template <bool threads, int inst> bool
- __default_alloc_template<threads, inst>::__node_allocator_lock_initialized
+ template <bool __threads, int __inst>
+ CRITICAL_SECTION
+ __default_alloc_template<__threads, __inst>::
+ _S_node_allocator_lock;
+
+ template <bool __threads, int __inst>
+ bool
+ __default_alloc_template<__threads, __inst>::
+ _S_node_allocator_lock_initialized
= false;
#endif
#ifdef __STL_SGI_THREADS
__STL_END_NAMESPACE
#include <mutex.h>
-#include <time.h>
+#include <time.h> /* XXX should use <ctime> */
__STL_BEGIN_NAMESPACE
// Somewhat generic lock implementations. We need only test-and-set
// and some way to sleep. These should work with both SGI pthreads
// and sproc threads. They may be useful on other systems.
-template <bool threads, int inst>
+template <bool __threads, int __inst>
volatile unsigned long
-__default_alloc_template<threads, inst>::__node_allocator_lock = 0;
+__default_alloc_template<__threads, __inst>::_S_node_allocator_lock = 0;
#if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) || defined(__GNUC__)
# define __test_and_set(l,v) test_and_set(l,v)
#endif
-template <bool threads, int inst>
+template <bool __threads, int __inst>
void
-__default_alloc_template<threads, inst>::__lock(volatile unsigned long *lock)
+__default_alloc_template<__threads, __inst>::
+ _S_lock(volatile unsigned long* __lock)
{
- const unsigned low_spin_max = 30; // spin cycles if we suspect uniprocessor
- const unsigned high_spin_max = 1000; // spin cycles for multiprocessor
- static unsigned spin_max = low_spin_max;
- unsigned my_spin_max;
- static unsigned last_spins = 0;
- unsigned my_last_spins;
- static struct timespec ts = {0, 1000};
- unsigned junk;
-# define __ALLOC_PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
- int i;
-
- if (!__test_and_set((unsigned long *)lock, 1)) {
+ const unsigned __low_spin_max = 30; // spins if we suspect uniprocessor
+ const unsigned __high_spin_max = 1000; // spins for multiprocessor
+ static unsigned __spin_max = __low_spin_max;
+ unsigned __my_spin_max;
+ static unsigned __last_spins = 0;
+ unsigned __my_last_spins;
+ unsigned __junk;
+# define __ALLOC_PAUSE \
+ __junk *= __junk; __junk *= __junk; __junk *= __junk; __junk *= __junk
+ int __i;
+
+ if (!__test_and_set((unsigned long*)__lock, 1)) {
return;
}
- my_spin_max = spin_max;
- my_last_spins = last_spins;
- for (i = 0; i < my_spin_max; i++) {
- if (i < my_last_spins/2 || *lock) {
+ __my_spin_max = __spin_max;
+ __my_last_spins = __last_spins;
+ for (__i = 0; __i < __my_spin_max; __i++) {
+ if (__i < __my_last_spins/2 || *__lock) {
__ALLOC_PAUSE;
continue;
}
- if (!__test_and_set((unsigned long *)lock, 1)) {
+ if (!__test_and_set((unsigned long*)__lock, 1)) {
// got it!
// Spinning worked. Thus we're probably not being scheduled
// against the other process with which we were contending.
// Thus it makes sense to spin longer the next time.
- last_spins = i;
- spin_max = high_spin_max;
+ __last_spins = __i;
+ __spin_max = __high_spin_max;
return;
}
}
// We are probably being scheduled against the other process. Sleep.
- spin_max = low_spin_max;
- for (;;) {
- if (!__test_and_set((unsigned long *)lock, 1)) {
+ __spin_max = __low_spin_max;
+ for (__i = 0 ;; ++__i) {
+ struct timespec __ts;
+ int __log_nsec = __i + 6;
+
+ if (!__test_and_set((unsigned long *)__lock, 1)) {
return;
}
- nanosleep(&ts, 0);
+ if (__log_nsec > 27) __log_nsec = 27;
+ /* Max sleep is 2**27 nsec, about 134 msec */
+ __ts.tv_sec = 0;
+ __ts.tv_nsec = 1 << __log_nsec;
+ nanosleep(&__ts, 0);
}
}
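
For illustration (not part of this patch): the sleep loop added above backs
off exponentially -- retry __i sleeps 2**(__i + 6) nanoseconds, clamped at
2**27 ns (about 134 ms). A small check of that schedule:

    #include <assert.h>

    static long __sleep_nsec(int __i)           // duration used on retry number __i
    {
        int __log_nsec = __i + 6;
        if (__log_nsec > 27) __log_nsec = 27;   // cap the exponent as the lock code does
        return 1L << __log_nsec;
    }

    void __backoff_demo()
    {
        assert(__sleep_nsec(0)  == 64);         // first sleep: 64 ns
        assert(__sleep_nsec(10) == 65536);      // grows geometrically
        assert(__sleep_nsec(30) == 134217728);  // clamped at 2**27 ns thereafter
    }
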
-template <bool threads, int inst>
+template <bool __threads, int __inst>
inline void
-__default_alloc_template<threads, inst>::__unlock(volatile unsigned long *lock)
+__default_alloc_template<__threads, __inst>::_S_unlock(
+ volatile unsigned long* __lock)
{
# if defined(__GNUC__) && __mips >= 3
asm("sync");
- *lock = 0;
+ *__lock = 0;
# elif __mips >= 3 && (defined (_ABIN32) || defined(_ABI64))
- __lock_release(lock);
+ __lock_release(__lock);
# else
- *lock = 0;
+ *__lock = 0;
// This is not sufficient on many multiprocessors, since
// writes to protected variables and the lock may be reordered.
# endif
}
#endif
-template <bool threads, int inst>
-char *__default_alloc_template<threads, inst>::start_free = 0;
+template <bool __threads, int __inst>
+char* __default_alloc_template<__threads, __inst>::_S_start_free = 0;
-template <bool threads, int inst>
-char *__default_alloc_template<threads, inst>::end_free = 0;
+template <bool __threads, int __inst>
+char* __default_alloc_template<__threads, __inst>::_S_end_free = 0;
-template <bool threads, int inst>
-size_t __default_alloc_template<threads, inst>::heap_size = 0;
+template <bool __threads, int __inst>
+size_t __default_alloc_template<__threads, __inst>::_S_heap_size = 0;
-template <bool threads, int inst>
-__default_alloc_template<threads, inst>::obj * __VOLATILE
-__default_alloc_template<threads, inst> ::free_list[
+template <bool __threads, int __inst>
+__default_alloc_template<__threads, __inst>::_Obj* __VOLATILE
+__default_alloc_template<__threads, __inst> ::_S_free_list[
# ifdef __SUNPRO_CC
- __NFREELISTS
+ _NFREELISTS
# else
- __default_alloc_template<threads, inst>::__NFREELISTS
+ __default_alloc_template<__threads, __inst>::_NFREELISTS
# endif
] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
// The 16 zeros are necessary to make version 4.1 of the SunPro
@@ -683,6 +698,327 @@ __default_alloc_template<threads, inst> ::free_list[
#endif /* ! __USE_MALLOC */
+// This implements allocators as specified in the C++ standard.
+//
+// Note that standard-conforming allocators use many language features
+// that are not yet widely implemented. In particular, they rely on
+// member templates, partial specialization, partial ordering of function
+// templates, the typename keyword, and the use of the template keyword
+// to refer to a template member of a dependent type.
+
+#ifdef __STL_USE_STD_ALLOCATORS
+
+template <class _Tp>
+class allocator {
+ typedef alloc _Alloc; // The underlying allocator.
+public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp* pointer;
+ typedef const _Tp* const_pointer;
+ typedef _Tp& reference;
+ typedef const _Tp& const_reference;
+ typedef _Tp value_type;
+
+ template <class _Tp1> struct rebind {
+ typedef allocator<_Tp1> other;
+ };
+
+ allocator() __STL_NOTHROW {}
+ allocator(const allocator&) __STL_NOTHROW {}
+ template <class _Tp1> allocator(const allocator<_Tp1>&) __STL_NOTHROW {}
+ ~allocator() __STL_NOTHROW {}
+
+ pointer address(reference __x) const { return &__x; }
+ const_pointer address(const_reference __x) const { return &__x; }
+
+ // __n is permitted to be 0. The C++ standard says nothing about what
+ // the return value is when __n == 0.
+ _Tp* allocate(size_type __n, const void* = 0) {
+ return __n != 0 ? static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)))
+ : 0;
+ }
+
+ // __p is not permitted to be a null pointer.
+ void deallocate(pointer __p, size_type __n)
+ { _Alloc::deallocate(__p, __n * sizeof(_Tp)); }
+
+ size_type max_size() const __STL_NOTHROW
+ { return size_t(-1) / sizeof(_Tp); }
+
+ void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
+ void destroy(pointer __p) { __p->~_Tp(); }
+};
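
For illustration (not part of this patch): typical standard-style use of the
allocator class just added -- allocate() returns raw storage, while construct()
and destroy() manage object lifetime separately.

    void __allocator_demo()
    {
        allocator<int> __a;
        int* __p = __a.allocate(3);     // raw storage for three ints
        __a.construct(__p, 42);         // placement-new 42 into the first slot
        __a.destroy(__p);               // run the destructor explicitly
        __a.deallocate(__p, 3);         // the count must match the allocation
    }
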
+
+template<>
+class allocator<void> {
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef void* pointer;
+ typedef const void* const_pointer;
+ typedef void value_type;
+
+ template <class _Tp1> struct rebind {
+ typedef allocator<_Tp1> other;
+ };
+};
+
+
+template <class _T1, class _T2>
+inline bool operator==(const allocator<_T1>&, const allocator<_T2>&)
+{
+ return true;
+}
+
+template <class _T1, class _T2>
+inline bool operator!=(const allocator<_T1>&, const allocator<_T2>&)
+{
+ return false;
+}
+
+// Allocator adaptor to turn an SGI-style allocator (e.g. alloc, malloc_alloc)
+// into a standard-conforming allocator. Note that this adaptor does
+// *not* assume that all objects of the underlying alloc class are
+// identical, nor does it assume that all of the underlying alloc's
+// member functions are static member functions. Note, also, that
+// __allocator<_Tp, alloc> is essentially the same thing as allocator<_Tp>.
+
+template <class _Tp, class _Alloc>
+struct __allocator {
+ _Alloc __underlying_alloc;
+
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp* pointer;
+ typedef const _Tp* const_pointer;
+ typedef _Tp& reference;
+ typedef const _Tp& const_reference;
+ typedef _Tp value_type;
+
+ template <class _Tp1> struct rebind {
+ typedef __allocator<_Tp1, _Alloc> other;
+ };
+
+ __allocator() __STL_NOTHROW {}
+ __allocator(const __allocator& __a) __STL_NOTHROW
+ : __underlying_alloc(__a.__underlying_alloc) {}
+ template <class _Tp1>
+ __allocator(const __allocator<_Tp1, _Alloc>& __a) __STL_NOTHROW
+ : __underlying_alloc(__a.__underlying_alloc) {}
+ ~__allocator() __STL_NOTHROW {}
+
+ pointer address(reference __x) const { return &__x; }
+ const_pointer address(const_reference __x) const { return &__x; }
+
+ // __n is permitted to be 0.
+ _Tp* allocate(size_type __n, const void* = 0) {
+ return __n != 0
+ ? static_cast<_Tp*>(__underlying_alloc.allocate(__n * sizeof(_Tp)))
+ : 0;
+ }
+
+ // __p is not permitted to be a null pointer.
+ void deallocate(pointer __p, size_type __n)
+ { __underlying_alloc.deallocate(__p, __n * sizeof(_Tp)); }
+
+ size_type max_size() const __STL_NOTHROW
+ { return size_t(-1) / sizeof(_Tp); }
+
+ void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
+ void destroy(pointer __p) { __p->~_Tp(); }
+};
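
For illustration (not part of this patch): the adaptor stores the wrapped
allocator by value, so it also works when the underlying alloc keeps
per-instance state; over a static SGI allocator it behaves like allocator<_Tp>.

    void __adaptor_demo()
    {
        __allocator<int, malloc_alloc> __a;   // essentially allocator<int> over malloc_alloc
        int* __p = __a.allocate(4);
        __a.deallocate(__p, 4);
    }
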
+
+template <class _Alloc>
+class __allocator<void, _Alloc> {
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef void* pointer;
+ typedef const void* const_pointer;
+ typedef void value_type;
+
+ template <class _Tp1> struct rebind {
+ typedef __allocator<_Tp1, _Alloc> other;
+ };
+};
+
+template <class _Tp, class _Alloc>
+inline bool operator==(const __allocator<_Tp, _Alloc>& __a1,
+ const __allocator<_Tp, _Alloc>& __a2)
+{
+ return __a1.__underlying_alloc == __a2.__underlying_alloc;
+}
+
+#ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
+template <class _Tp, class _Alloc>
+inline bool operator!=(const __allocator<_Tp, _Alloc>& __a1,
+ const __allocator<_Tp, _Alloc>& __a2)
+{
+ return __a1.__underlying_alloc != __a2.__underlying_alloc;
+}
+#endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */
+
+// Comparison operators for all of the predefined SGI-style allocators.
+// This ensures that __allocator<malloc_alloc> (for example) will
+// work correctly.
+
+template <int inst>
+inline bool operator==(const __malloc_alloc_template<inst>&,
+ const __malloc_alloc_template<inst>&)
+{
+ return true;
+}
+
+#ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
+template <int __inst>
+inline bool operator!=(const __malloc_alloc_template<__inst>&,
+ const __malloc_alloc_template<__inst>&)
+{
+ return false;
+}
+#endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */
+
+template <bool __threads, int __inst>
+inline bool operator==(const __default_alloc_template<__threads, __inst>&,
+ const __default_alloc_template<__threads, __inst>&)
+{
+ return true;
+}
+
+#ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
+template <bool __threads, int __inst>
+inline bool operator!=(const __default_alloc_template<__threads, __inst>&,
+ const __default_alloc_template<__threads, __inst>&)
+{
+ return false;
+}
+#endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */
+
+template <class _Alloc>
+inline bool operator==(const debug_alloc<_Alloc>&,
+ const debug_alloc<_Alloc>&) {
+ return true;
+}
+
+#ifdef __STL_FUNCTION_TMPL_PARTIAL_ORDER
+template <class _Alloc>
+inline bool operator!=(const debug_alloc<_Alloc>&,
+ const debug_alloc<_Alloc>&) {
+ return false;
+}
+#endif /* __STL_FUNCTION_TMPL_PARTIAL_ORDER */
+
+// Another allocator adaptor: _Alloc_traits. This serves two
+// purposes. First, make it possible to write containers that can use
+// either SGI-style allocators or standard-conforming allocators.
+// Second, provide a mechanism so that containers can query whether or
+// not the allocator has distinct instances. If not, the container
+// can avoid wasting a word of memory to store an empty object.
+
+// This adaptor uses partial specialization. The general case of
+// _Alloc_traits<_Tp, _Alloc> assumes that _Alloc is a
+// standard-conforming allocator, possibly with non-equal instances
+// and non-static members. (It still behaves correctly even if _Alloc
+// has static members and if all instances are equal. Refinements
+// affect performance, not correctness.)
+
+// There are always two members: allocator_type, which is a standard-
+// conforming allocator type for allocating objects of type _Tp, and
+// _S_instanceless, a static const member of type bool. If
+// _S_instanceless is true, this means that there is no difference
+// between any two instances of type allocator_type. Furthermore, if
+// _S_instanceless is true, then _Alloc_traits has one additional
+// member: _Alloc_type. This type encapsulates allocation and
+// deallocation of objects of type _Tp through a static interface; it
+// has two member functions, whose signatures are
+// static _Tp* allocate(size_t)
+// static void deallocate(_Tp*, size_t)
+
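
For illustration (not part of this patch): a container consuming these traits
would query _S_instanceless and, when it is true, allocate through the static
_Alloc_type interface instead of storing an allocator object.

    template <class _Tp, class _Alloc>
    _Tp* __traits_allocate_one()                            // hypothetical helper
    {
        typedef _Alloc_traits<_Tp, _Alloc> _Traits;
        typedef typename _Traits::_Alloc_type _Static_alloc; // present when _S_instanceless
        return _Static_alloc::allocate(1);
    }
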
+// The fully general version.
+
+template <class _Tp, class _Allocator>
+struct _Alloc_traits
+{
+ static const bool _S_instanceless = false;
+ typedef typename _Allocator::__STL_TEMPLATE rebind<_Tp>::other
+ allocator_type;
+};
+
+template <class _Tp, class _Allocator>
+const bool _Alloc_traits<_Tp, _Allocator>::_S_instanceless;
+
+// The version for the default allocator.
+
+template <class _Tp, class _Tp1>
+struct _Alloc_traits<_Tp, allocator<_Tp1> >
+{
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, alloc> _Alloc_type;
+ typedef allocator<_Tp> allocator_type;
+};
+
+// Versions for the predefined SGI-style allocators.
+
+template <class _Tp, int __inst>
+struct _Alloc_traits<_Tp, __malloc_alloc_template<__inst> >
+{
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
+ typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
+};
+
+template <class _Tp, bool __threads, int __inst>
+struct _Alloc_traits<_Tp, __default_alloc_template<__threads, __inst> >
+{
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, __default_alloc_template<__threads, __inst> >
+ _Alloc_type;
+ typedef __allocator<_Tp, __default_alloc_template<__threads, __inst> >
+ allocator_type;
+};
+
+template <class _Tp, class _Alloc>
+struct _Alloc_traits<_Tp, debug_alloc<_Alloc> >
+{
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, debug_alloc<_Alloc> > _Alloc_type;
+ typedef __allocator<_Tp, debug_alloc<_Alloc> > allocator_type;
+};
+
+// Versions for the __allocator adaptor used with the predefined
+// SGI-style allocators.
+
+template <class _Tp, class _Tp1, int __inst>
+struct _Alloc_traits<_Tp,
+ __allocator<_Tp1, __malloc_alloc_template<__inst> > >
+{
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
+ typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
+};
+
+template <class _Tp, class _Tp1, bool __thr, int __inst>
+struct _Alloc_traits<_Tp,
+ __allocator<_Tp1,
+ __default_alloc_template<__thr, __inst> > >
+{
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, __default_alloc_template<__thr,__inst> >
+ _Alloc_type;
+ typedef __allocator<_Tp, __default_alloc_template<__thr,__inst> >
+ allocator_type;
+};
+
+template <class _Tp, class _Tp1, class _Alloc>
+struct _Alloc_traits<_Tp, __allocator<_Tp1, debug_alloc<_Alloc> > >
+{
+ static const bool _S_instanceless = true;
+ typedef simple_alloc<_Tp, debug_alloc<_Alloc> > _Alloc_type;
+ typedef __allocator<_Tp, debug_alloc<_Alloc> > allocator_type;
+};
+
+
+#endif /* __STL_USE_STD_ALLOCATORS */
+
#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
#pragma reset woff 1174
#endif