stl_alloc.h: Deprecate all 'reallocate' memfns.

2002-06-27  Phil Edwards  <pme@gcc.gnu.org>

	* include/bits/stl_alloc.h:  Deprecate all 'reallocate' memfns.
	* docs/html/ext/howto.html:  Update allocator notes.

From-SVN: r55044
Phil Edwards 2002-06-27 22:09:02 +00:00
parent 5ce49b4b08
commit 07a6e20be9
3 changed files with 276 additions and 245 deletions
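
For code outside the library that called the SGI-style reallocate members directly, the practical effect of this commit is that those members disappear unless _GLIBCPP_DEPRECATED is defined. A minimal sketch of keeping such a translation unit building against 3.2, assuming (as the new @note in stl_alloc.h says) that defining the macro before any library include is sufficient, and that <memory> still pulls in bits/stl_alloc.h as it does in the 3.x headers; the allocator choice and sizes are purely illustrative:

    // Keep the deprecated extension visible under 3.2; it is slated for
    // removal in 3.3, so this is a stopgap, not a fix.
    #define _GLIBCPP_DEPRECATED 1

    #include <memory>    // brings in bits/stl_alloc.h on GCC 3.x

    int main()
    {
      typedef std::__malloc_alloc_template<0> alloc_type;

      void* p = alloc_type::allocate(64);
      // Deprecated in 3.2, gone in 3.3 -- prefer allocate/copy/deallocate.
      p = alloc_type::reallocate(p, 64, 128);
      alloc_type::deallocate(p, 128);
      return 0;
    }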

ChangeLog

@@ -1,3 +1,8 @@
2002-06-27 Phil Edwards <pme@gcc.gnu.org>
* include/bits/stl_alloc.h: Deprecate all 'reallocate' memfns.
* docs/html/ext/howto.html: Update allocator notes.
2002-06-26 Benjamin Kosnik <bkoz@redhat.com>
* configure.in (INTERFACE): Remove.

docs/html/ext/howto.html

@@ -37,7 +37,8 @@
<ul>
<li><a href="#1">Ropes and trees and hashes, oh my!</a>
<li><a href="#2">Added members and types</a>
<li><a href="#3">Allocators</a>
<li><a href="#3">Allocators (versions 3.0, 3.1, 3.2)</a>
<li><a href="#6">Allocators (version 3.3)</a>
<li><a href="#4">Compile-time checks</a>
<li><a href="#5">LWG Issues</a>
</ul>
@@ -154,7 +155,7 @@
</p>
<hr>
<h2><a name="3">Allocators</a></h2>
<h2><a name="3">Allocators (versions 3.0, 3.1, 3.2)</a></h2>
<p>Thread-safety, space efficiency, high speed, portability... this is a
mess. Where to begin?
</p>
@@ -220,17 +221,18 @@
</p>
<h3>Available allocators in namespace std</h3>
<p>First I'll describe the situation as it exists for the code which
was released in GCC 3.1. Then I'll
describe the differences for 3.0.x, which will not change much in
this respect.
was released in GCC 3.1 and 3.2. Then I'll describe the differences
for 3.0. The allocator classes also have source documentation,
which is described <a href="../documentation.html#4">here</a> (you
will need to retrieve the maintainer-level docs, as almost none of
these entities are in the ISO standard).
</p>
<p>As a general rule of thumb, users are not allowed to use names which
begin with an underscore. This means that to be portable between
compilers, none of the following may be used in your program directly.
(If you decide to be unportable, then you're free to do what you want,
but it's not our fault if stuff breaks.) They are presented here for
information for maintainers and contributors in addition to users, but
we will probably make them available for users in 3.2 somehow.
information for maintainers and contributors in addition to users.
</p>
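
To make the underscore rule concrete, a small sketch assuming the GCC 3.1/3.2 headers; the container is arbitrary, and the second typedef is exactly the kind of library-internal spelling the paragraph above warns against relying on:

    #include <memory>
    #include <vector>

    int main()
    {
      // Portable: only the ISO names.
      std::vector<int, std::allocator<int> > ok(10, 42);

      // Not portable: naming libstdc++ internals directly.  Useful for
      // maintainers and the curious, but may break elsewhere or later.
      typedef std::__allocator<int, std::__malloc_alloc_template<0> > ext_alloc;
      std::vector<int, ext_alloc> risky(10, 42);
      return 0;
    }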
<p>These classes are always available:
<ul>
@@ -301,7 +303,7 @@
<li><code>__single_client_alloc</code> are all typedef'd to
<code>__malloc_alloc_template</code>.
<li><code>__default_alloc_template</code> is no longer available.
At all. Anywhere. <!-- might change? -->
At all. Anywhere.
</ol>
</p>
<h3>Writing your own allocators</h3>
@@ -359,7 +361,13 @@
can affect the 3.0.x allocators. Do not use them. Those macros have
been completely removed for 3.1.
</p>
<p>More notes as we remember them...
<p>Return <a href="#top">to top of page</a> or
<a href="../faq/index.html">to the FAQ</a>.
</p>
<hr>
<h2><a name="6">Allocators (version 3.3)</a></h2>
<p>Changes are coming...
</p>
<p>Return <a href="#top">to top of page</a> or
<a href="../faq/index.html">to the FAQ</a>.
@@ -540,6 +548,7 @@
</dl></p>
<p>Return <a href="#top">to top of page</a> or
<a href="../faq/index.html">to the FAQ</a>.
</p>
<!-- ####################################################### -->

include/bits/stl_alloc.h

@@ -74,6 +74,10 @@
* into a "standard" one.
* @endif
*
* @note The @c reallocate member functions have been deprecated for 3.2
* and will be removed in 3.3. You must define @c _GLIBCPP_DEPRECATED
* to make this visible in 3.2; see c++config.h.
*
* The canonical description of these classes is in docs/html/ext/howto.html
* or online at http://gcc.gnu.org/onlinedocs/libstdc++/ext/howto.html#3
*/
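
The note above names the deprecation but not a replacement; the non-deprecated way to get the same effect is allocate/copy/deallocate, which is what the pool allocator's own reallocate does further down in this file (except that it also short-circuits when the rounded sizes match and falls back to realloc for large blocks). A hedged sketch of a user-side helper, hypothetical name and all, written against the static interface these allocator classes share:

    #include <cstddef>   // std::size_t
    #include <cstring>   // std::memcpy

    // Hypothetical helper, not part of libstdc++: emulate the deprecated
    // reallocate member using only allocate() and deallocate().
    template<typename _Alloc>
      void*
      reallocate_by_copy(void* p, std::size_t old_sz, std::size_t new_sz)
      {
        void* result = _Alloc::allocate(new_sz);
        std::size_t copy_sz = new_sz > old_sz ? old_sz : new_sz;
        std::memcpy(result, p, copy_sz);
        _Alloc::deallocate(p, old_sz);
        return result;
      }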
@@ -101,7 +105,7 @@ namespace std
static void*
allocate(size_t __n)
{ return ::operator new(__n); }
static void
deallocate(void* __p, size_t)
{ ::operator delete(__p); }
@@ -122,36 +126,40 @@ namespace std
{
private:
static void* _S_oom_malloc(size_t);
#ifdef _GLIBCPP_DEPRECATED
static void* _S_oom_realloc(void*, size_t);
#endif
static void (* __malloc_alloc_oom_handler)();
public:
static void*
allocate(size_t __n)
{
void* __result = malloc(__n);
if (0 == __result) __result = _S_oom_malloc(__n);
return __result;
void* __result = malloc(__n);
if (0 == __result) __result = _S_oom_malloc(__n);
return __result;
}
static void
deallocate(void* __p, size_t /* __n */)
{ free(__p); }
#ifdef _GLIBCPP_DEPRECATED
static void*
reallocate(void* __p, size_t /* old_sz */, size_t __new_sz)
{
void* __result = realloc(__p, __new_sz);
if (0 == __result)
__result = _S_oom_realloc(__p, __new_sz);
return __result;
void* __result = realloc(__p, __new_sz);
if (0 == __result)
__result = _S_oom_realloc(__p, __new_sz);
return __result;
}
#endif
static void (* __set_malloc_handler(void (*__f)()))()
{
void (* __old)() = __malloc_alloc_oom_handler;
__malloc_alloc_oom_handler = __f;
return(__old);
void (* __old)() = __malloc_alloc_oom_handler;
__malloc_alloc_oom_handler = __f;
return(__old);
}
};
@@ -161,42 +169,45 @@ namespace std
template<int __inst>
void*
__malloc_alloc_template<__inst>::_S_oom_malloc(size_t __n)
__malloc_alloc_template<__inst>::
_S_oom_malloc(size_t __n)
{
void (* __my_malloc_handler)();
void* __result;
for (;;)
{
__my_malloc_handler = __malloc_alloc_oom_handler;
if (0 == __my_malloc_handler)
std::__throw_bad_alloc();
(*__my_malloc_handler)();
__result = malloc(__n);
if (__result)
return(__result);
}
}
template<int __inst>
void*
__malloc_alloc_template<__inst>::
_S_oom_realloc(void* __p, size_t __n)
{
void (* __my_malloc_handler)();
void* __result;
for (;;)
{
__my_malloc_handler = __malloc_alloc_oom_handler;
if (0 == __my_malloc_handler)
for (;;)
{
__my_malloc_handler = __malloc_alloc_oom_handler;
if (0 == __my_malloc_handler)
std::__throw_bad_alloc();
(*__my_malloc_handler)();
__result = realloc(__p, __n);
if (__result)
return(__result);
}
}
(*__my_malloc_handler)();
__result = malloc(__n);
if (__result)
return(__result);
}
}
#ifdef _GLIBCPP_DEPRECATED
template<int __inst>
void*
__malloc_alloc_template<__inst>::
_S_oom_realloc(void* __p, size_t __n)
{
void (* __my_malloc_handler)();
void* __result;
for (;;)
{
__my_malloc_handler = __malloc_alloc_oom_handler;
if (0 == __my_malloc_handler)
std::__throw_bad_alloc();
(*__my_malloc_handler)();
__result = realloc(__p, __n);
if (__result)
return(__result);
}
}
#endif
// Determines the underlying allocator choice for the node allocator.
@@ -259,41 +270,43 @@ namespace std
private:
// Size of space used to store size. Note that this must be
// large enough to preserve alignment.
enum {_S_extra = 8};
enum {_S_extra = 8};
public:
static void*
allocate(size_t __n)
{
char* __result = (char*)_Alloc::allocate(__n + (int) _S_extra);
*(size_t*)__result = __n;
return __result + (int) _S_extra;
char* __result = (char*)_Alloc::allocate(__n + (int) _S_extra);
*(size_t*)__result = __n;
return __result + (int) _S_extra;
}
static void
deallocate(void* __p, size_t __n)
{
char* __real_p = (char*)__p - (int) _S_extra;
assert(*(size_t*)__real_p == __n);
_Alloc::deallocate(__real_p, __n + (int) _S_extra);
char* __real_p = (char*)__p - (int) _S_extra;
assert(*(size_t*)__real_p == __n);
_Alloc::deallocate(__real_p, __n + (int) _S_extra);
}
#ifdef _GLIBCPP_DEPRECATED
static void*
reallocate(void* __p, size_t __old_sz, size_t __new_sz)
{
char* __real_p = (char*)__p - (int) _S_extra;
assert(*(size_t*)__real_p == __old_sz);
char* __result = (char*)
_Alloc::reallocate(__real_p, __old_sz + (int) _S_extra,
__new_sz + (int) _S_extra);
*(size_t*)__result = __new_sz;
return __result + (int) _S_extra;
char* __real_p = (char*)__p - (int) _S_extra;
assert(*(size_t*)__real_p == __old_sz);
char* __result = (char*)
_Alloc::reallocate(__real_p, __old_sz + (int) _S_extra,
__new_sz + (int) _S_extra);
*(size_t*)__result = __new_sz;
return __result + (int) _S_extra;
}
#endif
};
#ifdef __USE_MALLOC
typedef __mem_interface __alloc;
typedef __mem_interface __single_client_alloc;
@@ -336,114 +349,116 @@ namespace std
enum {_ALIGN = 8};
enum {_MAX_BYTES = 128};
enum {_NFREELISTS = _MAX_BYTES / _ALIGN};
union _Obj
{
union _Obj* _M_free_list_link;
char _M_client_data[1]; // The client sees this.
union _Obj* _M_free_list_link;
char _M_client_data[1]; // The client sees this.
};
static _Obj* volatile _S_free_list[_NFREELISTS];
// Chunk allocation state.
static char* _S_start_free;
static char* _S_end_free;
static size_t _S_heap_size;
static _STL_mutex_lock _S_node_allocator_lock;
static size_t
_S_round_up(size_t __bytes)
{ return (((__bytes) + (size_t) _ALIGN-1) & ~((size_t) _ALIGN - 1)); }
static size_t
_S_freelist_index(size_t __bytes)
{ return (((__bytes) + (size_t)_ALIGN-1)/(size_t)_ALIGN - 1); }
// Returns an object of size __n, and optionally adds to size __n
// free list.
static void*
_S_refill(size_t __n);
// Allocates a chunk for nobjs of size size. nobjs may be reduced
// if it is inconvenient to allocate the requested number.
static char*
_S_chunk_alloc(size_t __size, int& __nobjs);
// It would be nice to use _STL_auto_lock here. But we need a
// test whether threads are in use.
struct _Lock
{
_Lock() { if (__threads) _S_node_allocator_lock._M_acquire_lock(); }
~_Lock() { if (__threads) _S_node_allocator_lock._M_release_lock(); }
_Lock() { if (__threads) _S_node_allocator_lock._M_acquire_lock(); }
~_Lock() { if (__threads) _S_node_allocator_lock._M_release_lock(); }
} __attribute__ ((__unused__));
friend struct _Lock;
public:
// __n must be > 0
static void*
allocate(size_t __n)
{
void* __ret = 0;
if (__n > (size_t) _MAX_BYTES)
__ret = __mem_interface::allocate(__n);
else
{
_Obj* volatile* __my_free_list = _S_free_list
+ _S_freelist_index(__n);
// Acquire the lock here with a constructor call. This
// ensures that it is released in exit or during stack
// unwinding.
_Lock __lock_instance;
_Obj* __restrict__ __result = *__my_free_list;
if (__result == 0)
__ret = _S_refill(_S_round_up(__n));
else
{
*__my_free_list = __result -> _M_free_list_link;
__ret = __result;
}
}
return __ret;
void* __ret = 0;
if (__n > (size_t) _MAX_BYTES)
__ret = __mem_interface::allocate(__n);
else
{
_Obj* volatile* __my_free_list = _S_free_list
+ _S_freelist_index(__n);
// Acquire the lock here with a constructor call. This
// ensures that it is released in exit or during stack
// unwinding.
_Lock __lock_instance;
_Obj* __restrict__ __result = *__my_free_list;
if (__result == 0)
__ret = _S_refill(_S_round_up(__n));
else
{
*__my_free_list = __result -> _M_free_list_link;
__ret = __result;
}
}
return __ret;
};
// __p may not be 0
static void
deallocate(void* __p, size_t __n)
{
if (__n > (size_t) _MAX_BYTES)
__mem_interface::deallocate(__p, __n);
else
{
_Obj* volatile* __my_free_list = _S_free_list
+ _S_freelist_index(__n);
_Obj* __q = (_Obj*)__p;
// Acquire the lock here with a constructor call. This
// ensures that it is released in exit or during stack
// unwinding.
_Lock __lock_instance;
__q -> _M_free_list_link = *__my_free_list;
*__my_free_list = __q;
}
if (__n > (size_t) _MAX_BYTES)
__mem_interface::deallocate(__p, __n);
else
{
_Obj* volatile* __my_free_list = _S_free_list
+ _S_freelist_index(__n);
_Obj* __q = (_Obj*)__p;
// Acquire the lock here with a constructor call. This
// ensures that it is released in exit or during stack
// unwinding.
_Lock __lock_instance;
__q -> _M_free_list_link = *__my_free_list;
*__my_free_list = __q;
}
}
#ifdef _GLIBCPP_DEPRECATED
static void*
reallocate(void* __p, size_t __old_sz, size_t __new_sz);
#endif
};
template<bool __threads, int __inst>
inline bool
operator==(const __default_alloc_template<__threads,__inst>&,
const __default_alloc_template<__threads,__inst>&)
const __default_alloc_template<__threads,__inst>&)
{ return true; }
template<bool __threads, int __inst>
inline bool
operator!=(const __default_alloc_template<__threads,__inst>&,
const __default_alloc_template<__threads,__inst>&)
const __default_alloc_template<__threads,__inst>&)
{ return false; }
@@ -458,69 +473,69 @@ namespace std
char* __result;
size_t __total_bytes = __size * __nobjs;
size_t __bytes_left = _S_end_free - _S_start_free;
if (__bytes_left >= __total_bytes)
{
__result = _S_start_free;
_S_start_free += __total_bytes;
return(__result);
}
{
__result = _S_start_free;
_S_start_free += __total_bytes;
return(__result);
}
else if (__bytes_left >= __size)
{
__nobjs = (int)(__bytes_left/__size);
__total_bytes = __size * __nobjs;
__result = _S_start_free;
_S_start_free += __total_bytes;
return(__result);
}
{
__nobjs = (int)(__bytes_left/__size);
__total_bytes = __size * __nobjs;
__result = _S_start_free;
_S_start_free += __total_bytes;
return(__result);
}
else
{
size_t __bytes_to_get =
2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
// Try to make use of the left-over piece.
if (__bytes_left > 0)
{
_Obj* volatile* __my_free_list =
_S_free_list + _S_freelist_index(__bytes_left);
((_Obj*)_S_start_free) -> _M_free_list_link = *__my_free_list;
*__my_free_list = (_Obj*)_S_start_free;
}
_S_start_free = (char*) __mem_interface::allocate(__bytes_to_get);
if (0 == _S_start_free)
{
size_t __i;
_Obj* volatile* __my_free_list;
_Obj* __p;
// Try to make do with what we have. That can't hurt. We
// do not try smaller requests, since that tends to result
// in disaster on multi-process machines.
__i = __size;
for (; __i <= (size_t) _MAX_BYTES; __i += (size_t) _ALIGN)
{
__my_free_list = _S_free_list + _S_freelist_index(__i);
__p = *__my_free_list;
if (0 != __p)
{
*__my_free_list = __p -> _M_free_list_link;
_S_start_free = (char*)__p;
_S_end_free = _S_start_free + __i;
return(_S_chunk_alloc(__size, __nobjs));
// Any leftover piece will eventually make it to the
// right free list.
}
}
_S_end_free = 0; // In case of exception.
_S_start_free = (char*)__mem_interface::allocate(__bytes_to_get);
// This should either throw an exception or remedy the situation.
// Thus we assume it succeeded.
}
_S_heap_size += __bytes_to_get;
_S_end_free = _S_start_free + __bytes_to_get;
return(_S_chunk_alloc(__size, __nobjs));
}
{
size_t __bytes_to_get =
2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
// Try to make use of the left-over piece.
if (__bytes_left > 0)
{
_Obj* volatile* __my_free_list =
_S_free_list + _S_freelist_index(__bytes_left);
((_Obj*)_S_start_free) -> _M_free_list_link = *__my_free_list;
*__my_free_list = (_Obj*)_S_start_free;
}
_S_start_free = (char*) __mem_interface::allocate(__bytes_to_get);
if (0 == _S_start_free)
{
size_t __i;
_Obj* volatile* __my_free_list;
_Obj* __p;
// Try to make do with what we have. That can't hurt. We
// do not try smaller requests, since that tends to result
// in disaster on multi-process machines.
__i = __size;
for (; __i <= (size_t) _MAX_BYTES; __i += (size_t) _ALIGN)
{
__my_free_list = _S_free_list + _S_freelist_index(__i);
__p = *__my_free_list;
if (0 != __p)
{
*__my_free_list = __p -> _M_free_list_link;
_S_start_free = (char*)__p;
_S_end_free = _S_start_free + __i;
return(_S_chunk_alloc(__size, __nobjs));
// Any leftover piece will eventually make it to the
// right free list.
}
}
_S_end_free = 0; // In case of exception.
_S_start_free = (char*)__mem_interface::allocate(__bytes_to_get);
// This should either throw an exception or remedy the situation.
// Thus we assume it succeeded.
}
_S_heap_size += __bytes_to_get;
_S_end_free = _S_start_free + __bytes_to_get;
return(_S_chunk_alloc(__size, __nobjs));
}
}
// Returns an object of size __n, and optionally adds to "size
// __n"'s free list. We assume that __n is properly aligned. We
@@ -536,30 +551,31 @@ namespace std
_Obj* __current_obj;
_Obj* __next_obj;
int __i;
if (1 == __nobjs)
return(__chunk);
return(__chunk);
__my_free_list = _S_free_list + _S_freelist_index(__n);
// Build free list in chunk.
__result = (_Obj*)__chunk;
*__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
for (__i = 1; ; __i++)
{
__current_obj = __next_obj;
__next_obj = (_Obj*)((char*)__next_obj + __n);
if (__nobjs - 1 == __i)
{
__current_obj -> _M_free_list_link = 0;
break;
}
else
__current_obj -> _M_free_list_link = __next_obj;
}
{
__current_obj = __next_obj;
__next_obj = (_Obj*)((char*)__next_obj + __n);
if (__nobjs - 1 == __i)
{
__current_obj -> _M_free_list_link = 0;
break;
}
else
__current_obj -> _M_free_list_link = __next_obj;
}
return(__result);
}
#ifdef _GLIBCPP_DEPRECATED
template<bool threads, int inst>
void*
__default_alloc_template<threads, inst>::
@@ -567,17 +583,18 @@ namespace std
{
void* __result;
size_t __copy_sz;
if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES)
return(realloc(__p, __new_sz));
return(realloc(__p, __new_sz));
if (_S_round_up(__old_sz) == _S_round_up(__new_sz))
return(__p);
return(__p);
__result = allocate(__new_sz);
__copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
memcpy(__result, __p, __copy_sz);
deallocate(__p, __old_sz);
return(__result);
}
#endif
template<bool __threads, int __inst>
_STL_mutex_lock
@@ -630,40 +647,40 @@ namespace std
typedef _Tp& reference;
typedef const _Tp& const_reference;
typedef _Tp value_type;
template<typename _Tp1>
struct rebind
{ typedef allocator<_Tp1> other; };
template<typename _Tp1>
struct rebind
{ typedef allocator<_Tp1> other; };
allocator() throw() {}
allocator(const allocator&) throw() {}
template<typename _Tp1>
template<typename _Tp1>
allocator(const allocator<_Tp1>&) throw() {}
~allocator() throw() {}
pointer
pointer
address(reference __x) const { return &__x; }
const_pointer
const_pointer
address(const_reference __x) const { return &__x; }
// __n is permitted to be 0. The C++ standard says nothing about what
// the return value is when __n == 0.
_Tp*
allocate(size_type __n, const void* = 0)
{
return __n != 0
? static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp))) : 0;
return __n != 0
? static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp))) : 0;
}
// __p is not permitted to be a null pointer.
void
deallocate(pointer __p, size_type __n)
{ _Alloc::deallocate(__p, __n * sizeof(_Tp)); }
size_type
max_size() const throw() { return size_t(-1) / sizeof(_Tp); }
void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
void destroy(pointer __p) { __p->~_Tp(); }
};
@@ -677,12 +694,12 @@ namespace std
typedef void* pointer;
typedef const void* const_pointer;
typedef void value_type;
template<typename _Tp1>
struct rebind
template<typename _Tp1>
struct rebind
{ typedef allocator<_Tp1> other; };
};
template<typename _T1, typename _T2>
inline bool
@@ -710,7 +727,7 @@ namespace std
struct __allocator
{
_Alloc __underlying_alloc;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef _Tp* pointer;
@@ -718,10 +735,10 @@ namespace std
typedef _Tp& reference;
typedef const _Tp& const_reference;
typedef _Tp value_type;
template<typename _Tp1>
struct rebind
{ typedef __allocator<_Tp1, _Alloc> other; };
template<typename _Tp1>
struct rebind
{ typedef __allocator<_Tp1, _Alloc> other; };
__allocator() throw() {}
__allocator(const __allocator& __a) throw()
@@ -733,10 +750,10 @@ namespace std
~__allocator() throw() {}
pointer
pointer
address(reference __x) const { return &__x; }
const_pointer
const_pointer
address(const_reference __x) const { return &__x; }
// __n is permitted to be 0.
@@ -756,10 +773,10 @@ namespace std
size_type
max_size() const throw() { return size_t(-1) / sizeof(_Tp); }
void
void
construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
void
void
destroy(pointer __p) { __p->~_Tp(); }
};
@@ -771,22 +788,22 @@ namespace std
typedef void* pointer;
typedef const void* const_pointer;
typedef void value_type;
template<typename _Tp1>
struct rebind
{ typedef __allocator<_Tp1, _Alloc> other; };
template<typename _Tp1>
struct rebind
{ typedef __allocator<_Tp1, _Alloc> other; };
};
template<typename _Tp, typename _Alloc>
inline bool
operator==(const __allocator<_Tp,_Alloc>& __a1,
const __allocator<_Tp,_Alloc>& __a2)
const __allocator<_Tp,_Alloc>& __a2)
{ return __a1.__underlying_alloc == __a2.__underlying_alloc; }
template<typename _Tp, typename _Alloc>
inline bool
operator!=(const __allocator<_Tp, _Alloc>& __a1,
const __allocator<_Tp, _Alloc>& __a2)
operator!=(const __allocator<_Tp, _Alloc>& __a1,
const __allocator<_Tp, _Alloc>& __a2)
{ return __a1.__underlying_alloc != __a2.__underlying_alloc; }
@@ -797,14 +814,14 @@ namespace std
*/
template<int inst>
inline bool
operator==(const __malloc_alloc_template<inst>&,
const __malloc_alloc_template<inst>&)
operator==(const __malloc_alloc_template<inst>&,
const __malloc_alloc_template<inst>&)
{ return true; }
template<int __inst>
inline bool
operator!=(const __malloc_alloc_template<__inst>&,
const __malloc_alloc_template<__inst>&)
operator!=(const __malloc_alloc_template<__inst>&,
const __malloc_alloc_template<__inst>&)
{ return false; }
template<typename _Alloc>
@@ -863,7 +880,7 @@ namespace std
static const bool _S_instanceless = false;
typedef typename _Allocator::template rebind<_Tp>::other allocator_type;
};
template<typename _Tp, typename _Allocator>
const bool _Alloc_traits<_Tp, _Allocator>::_S_instanceless;
@@ -913,7 +930,7 @@ namespace std
/// "SGI" style allocators.
template<typename _Tp, typename _Tp1, int __inst>
struct _Alloc_traits<_Tp,
__allocator<_Tp1, __malloc_alloc_template<__inst> > >
__allocator<_Tp1, __malloc_alloc_template<__inst> > >
{
static const bool _S_instanceless = true;
typedef __simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
@@ -953,4 +970,4 @@ namespace std
#endif
} // namespace std
#endif
#endif