In libobjc/:
2010-12-18  Nicola Pero  <nicola.pero@meta-innovation.com>

	* class.c: Tidied up comments and indentation.  No code changes.
	* error.c: Same.
	* exception.c: Same.
	* init.c: Same.
	* ivars.c: Same.
	* memory.c: Same.
	* objc-foreach.c: Same.
	* objc-sync.c: Same.
	* objects.c: Same.
	* protocols.c: Same.
	* sarray.c: Same.
	* thr.c: Same.

From-SVN: r168022

commit 575584a982 (parent f21fe68470)
13 changed files with 504 additions and 611 deletions
libobjc/ChangeLog
@@ -1,3 +1,18 @@
+2010-12-18  Nicola Pero  <nicola.pero@meta-innovation.com>
+
+	* class.c: Tidied up comments and indentation.  No code changes.
+	* error.c: Same.
+	* exception.c: Same.
+	* init.c: Same.
+	* ivars.c: Same.
+	* memory.c: Same.
+	* objc-foreach.c: Same.
+	* objc-sync.c: Same.
+	* objects.c: Same.
+	* protocols.c: Same.
+	* sarray.c: Same.
+	* thr.c: Same.
+
 2010-12-17  Nicola Pero  <nicola.pero@meta-innovation.com>
 
 	* init.c: Include objc/runtime.h and objc-private/module-abi-8.h
libobjc/class.c
@@ -26,11 +26,9 @@ a copy of the GCC Runtime Library Exception along with this program;
 see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
 <http://www.gnu.org/licenses/>.  */

-/*
-  The code in this file critically affects class method invocation
-  speed.  This long preamble comment explains why, and the issues
-  involved.
-
+/* The code in this file critically affects class method invocation
+   speed.  This long preamble comment explains why, and the issues
+   involved.
+
 One of the traditional weaknesses of the GNU Objective-C runtime is
 that class method invocations are slow.  The reason is that when you
@@ -44,7 +42,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see

 objc_get_class returns the class pointer corresponding to the string
 `NSArray'; and because of the lookup, the operation is more
-complicated and slow than a simple instance method invocation.
+complicated and slow than a simple instance method invocation.

 Most high performance Objective-C code (using the GNU Objc runtime)
 I had the opportunity to read (or write) work around this problem by
@@ -61,7 +59,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
 In this case, you always perform a class lookup (the first one), but
 then all the [arrayClass new] methods run exactly as fast as an
 instance method invocation.  It helps if you have many class method
-invocations to the same class.
+invocations to the same class.

 The long-term solution to this problem would be to modify the
 compiler to output tables of class pointers corresponding to all the
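A minimal sketch of the caching workaround the preamble describes (the
NSArray name and the loop are illustrative, not part of the commit):

    /* Perform the string-based class lookup once, then reuse the
       cached pointer: each [arrayClass new] is then dispatched like
       an ordinary method send, with no name lookup.  */
    Class arrayClass = objc_get_class ("NSArray");
    int i;

    for (i = 0; i < 10000; i++)
      [arrayClass new];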
@@ -70,14 +68,14 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
 to perform precisely as fast as instance method invocations, because
 no class lookup would be involved.  I think the Apple Objective-C
 runtime uses this technique.  Doing this involves synchronized
-modifications in the runtime and in the compiler.
+modifications in the runtime and in the compiler.

 As a first medicine to the problem, I [NP] have redesigned and
 rewritten the way the runtime is performing class lookup.  This
 doesn't give as much speed as the other (definitive) approach, but
 at least a class method invocation now takes approximately 4.5 times
 an instance method invocation on my machine (it would take approx 12
-times before the rewriting), which is a lot better.
+times before the rewriting), which is a lot better.

 One of the main reason the new class lookup is so faster is because
 I implemented it in a way that can safely run multithreaded without
@@ -97,11 +95,11 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
 #include <string.h> /* For memset */

 /* We use a table which maps a class name to the corresponding class
- * pointer.  The first part of this file defines this table, and
- * functions to do basic operations on the table.  The second part of
- * the file implements some higher level Objective-C functionality for
- * classes by using the functions provided in the first part to manage
- * the table. */
+   pointer.  The first part of this file defines this table, and
+   functions to do basic operations on the table.  The second part of
+   the file implements some higher level Objective-C functionality for
+   classes by using the functions provided in the first part to manage
+   the table. */

 /**
 ** Class Table Internals
@@ -145,7 +143,7 @@ static class_node_ptr class_table_array[CLASS_TABLE_SIZE];
 static objc_mutex_t __class_table_lock = NULL;

 /* CLASS_TABLE_HASH is how we compute the hash of a class name.  It is
-   a macro - *not* a function - arguments *are* modified directly.
+   a macro - *not* a function - arguments *are* modified directly.

    INDEX should be a variable holding an int;
    HASH should be a variable holding an int;
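For illustration only, a name hash of this general shape (hypothetical
macro name and mixing function; it also assumes CLASS_TABLE_SIZE is a
power of two, which the masking below requires — the real
CLASS_TABLE_HASH is defined right after this hunk in class.c):

    /* Illustrative: fold each byte of the class name into HASH and
       mask it down to a table index.  Like the real macro, it updates
       its INDEX/HASH arguments in place, so they must be plain int
       variables.  */
    #define EXAMPLE_CLASS_TABLE_HASH(INDEX, HASH, CLASS_NAME)          \
      do                                                               \
        {                                                              \
          HASH = 0;                                                    \
          for (INDEX = 0; CLASS_NAME[INDEX] != '\0'; INDEX++)          \
            HASH = (HASH << 4) ^ (HASH >> 28) ^ CLASS_NAME[INDEX];     \
          HASH &= (CLASS_TABLE_SIZE - 1);                              \
        }                                                              \
      while (0)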
@@ -176,7 +174,8 @@ class_table_setup (void)
 }


-/* Insert a class in the table (used when a new class is registered). */
+/* Insert a class in the table (used when a new class is
+   registered). */
 static void
 class_table_insert (const char *class_name, Class class_pointer)
 {
@@ -221,18 +220,15 @@ class_table_replace (Class old_class_pointer, Class new_class_pointer)
     {
       hash++;
       if (hash < CLASS_TABLE_SIZE)
-        {
-          node = class_table_array[hash];
-        }
+        node = class_table_array[hash];
     }
   else
     {
       Class class1 = node->pointer;

       if (class1 == old_class_pointer)
-        {
-          node->pointer = new_class_pointer;
-        }
+        node->pointer = new_class_pointer;

       node = node->next;
     }
@@ -267,9 +263,7 @@ class_table_get_safe (const char *class_name)
       for (i = 0; i < length; i++)
        {
          if ((node->name)[i] != class_name[i])
-           {
-             break;
-           }
+           break;
        }

       if (i == length)
@@ -309,9 +303,7 @@ class_table_next (struct class_table_enumerator **e)
       next = class_table_array[enumerator->hash];
     }
   else
-    {
-      next = enumerator->node->next;
-    }
+    next = enumerator->node->next;

   if (next != NULL)
     {
@@ -385,18 +377,16 @@ class_table_print_histogram (void)
        {
          printf ("%4d:", i + 1);
          for (j = 0; j < counter; j++)
-           {
-             printf ("X");
-           }
+           printf ("X");

          printf ("\n");
          counter = 0;
        }
     }
   printf ("%4d:", i + 1);
   for (j = 0; j < counter; j++)
-    {
-      printf ("X");
-    }
+    printf ("X");

   printf ("\n");
 }
 #endif /* DEBUGGING FUNCTIONS */
@@ -409,7 +399,7 @@ class_table_print_histogram (void)
    should be via the class_table_* functions. */

 /* This is a hook which is called by objc_get_class and
-   objc_lookup_class if the runtime is not able to find the class.
+   objc_lookup_class if the runtime is not able to find the class.
    This may e.g. try to load in the class using dynamic loading.

    This hook was a public, global variable in the Traditional GNU
@@ -558,9 +548,7 @@ objc_getClassList (Class *returnValue, int maxNumberOfClassesToReturn)
          if (count < maxNumberOfClassesToReturn)
            returnValue[count] = node->pointer;
          else
-           {
-             return count;
-           }
+           return count;
        }
       count++;
       node = node->next;
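A caller-side usage sketch of objc_getClassList (buffer handling and
names illustrative; it assumes the objc/runtime.h API this file
implements, where a NULL buffer makes the function return the total
count):

    #include <stdio.h>
    #include <stdlib.h>
    #include <objc/runtime.h>

    static void
    print_all_classes (void)
    {
      /* First ask for the count, then fetch the class pointers.  */
      int count = objc_getClassList (NULL, 0);
      Class *list = malloc (count * sizeof (Class));
      int i, n = objc_getClassList (list, count);

      for (i = 0; i < n; i++)
        printf ("%s\n", class_getName (list[i]));
      free (list);
    }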
@@ -869,20 +857,16 @@ __objc_update_classes_with_methods (struct objc_method *method_a, struct objc_me
              /* If the method is one of the ones we are looking
                 for, update the implementation. */
              if (method == method_a)
-               {
-                 sarray_at_put_safe (class->dtable,
-                                     (sidx) method_a->method_name->sel_id,
-                                     method_a->method_imp);
-               }
+               sarray_at_put_safe (class->dtable,
+                                   (sidx) method_a->method_name->sel_id,
+                                   method_a->method_imp);

              if (method == method_b)
                {
                  if (method_b != NULL)
-                   {
-                     sarray_at_put_safe (class->dtable,
-                                         (sidx) method_b->method_name->sel_id,
-                                         method_b->method_imp);
-                   }
+                   sarray_at_put_safe (class->dtable,
+                                       (sidx) method_b->method_name->sel_id,
+                                       method_b->method_imp);
                }
            }

libobjc/error.c
@@ -28,8 +28,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see

 /* __USE_FIXED_PROTOTYPES__ used to be required to get prototypes for
    malloc, free, etc. on some platforms.  It is unclear if we still
-   need it, but it can't hurt.
-*/
+   need it, but it can't hurt. */
 #define __USE_FIXED_PROTOTYPES__
 #include <stdlib.h>
 #include <stdio.h>
libobjc/exception.c
@@ -33,15 +33,13 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see

 /* This hook allows libraries to sepecify special actions when an
    exception is thrown without a handler in place.  This is deprecated
-   in favour of objc_set_uncaught_exception_handler ().
-*/
+   in favour of objc_set_uncaught_exception_handler (). */
 void (*_objc_unexpected_exception) (id exception); /* !T:SAFE */


 /* 'is_kind_of_exception_matcher' is our default exception matcher -
    it determines if the object 'exception' is of class 'catch_class',
-   or of a subclass.
-*/
+   or of a subclass. */
 static int
 is_kind_of_exception_matcher (Class catch_class, id exception)
 {
@@ -49,9 +47,8 @@ is_kind_of_exception_matcher (Class catch_class, id exception)
   if (catch_class == Nil)
     return 1;

-  /* If exception is nil (eg, @throw nil;), then it can only be catched
-   * by a catch-all (eg, @catch (id object)).
-   */
+  /* If exception is nil (eg, @throw nil;), then it can only be
+     catched by a catch-all (eg, @catch (id object)). */
   if (exception != nil)
     {
       Class c;
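The matcher's body is cut off by the hunk above; a sketch of the same
superclass-chain walk, assuming the object_getClass()/
class_getSuperclass() accessors from this runtime's objc/runtime.h:

    static int
    example_matcher (Class catch_class, id exception)
    {
      Class c;

      if (catch_class == Nil)
        return 1;                /* A catch-all matches everything.  */

      if (exception == nil)
        return 0;                /* @throw nil; needs a catch-all.  */

      /* Walk from the object's class up to the root, looking for
         catch_class or one of its subclasses' ancestors.  */
      for (c = object_getClass (exception); c != Nil;
           c = class_getSuperclass (c))
        if (c == catch_class)
          return 1;

      return 0;
    }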
@@ -114,19 +111,18 @@ static const _Unwind_Exception_Class __objc_exception_class

 /* This is the object that is passed around by the Objective C runtime
    to represent the exception in flight. */
-
 struct ObjcException
 {
   /* This bit is needed in order to interact with the unwind runtime. */
   struct _Unwind_Exception base;

-  /* The actual object we want to throw. Note: must come immediately after
-     unwind header. */
+  /* The actual object we want to throw.  Note: must come immediately
+     after unwind header. */
   id value;

 #ifdef __ARM_EABI_UNWINDER__
-  /* Note: we use the barrier cache defined in the unwind control block for
-     ARM EABI. */
+  /* Note: we use the barrier cache defined in the unwind control
+     block for ARM EABI. */
 #else
   /* Cache some internal unwind data between phase 1 and phase 2. */
   _Unwind_Ptr landingPad;
@@ -156,14 +152,16 @@ parse_lsda_header (struct _Unwind_Context *context, const unsigned char *p,

   info->Start = (context ? _Unwind_GetRegionStart (context) : 0);

-  /* Find @LPStart, the base to which landing pad offsets are relative. */
+  /* Find @LPStart, the base to which landing pad offsets are
+     relative. */
   lpstart_encoding = *p++;
   if (lpstart_encoding != DW_EH_PE_omit)
     p = read_encoded_value (context, lpstart_encoding, p, &info->LPStart);
   else
     info->LPStart = info->Start;

-  /* Find @TType, the base of the handler and exception spec type data. */
+  /* Find @TType, the base of the handler and exception spec type
+     data. */
   info->ttype_encoding = *p++;
   if (info->ttype_encoding != DW_EH_PE_omit)
     {
@@ -222,7 +220,8 @@ get_ttype_entry (struct lsda_header_info *info, _Unwind_Word i)
 #endif

 /* Using a different personality function name causes link failures
-   when trying to mix code using different exception handling models. */
+   when trying to mix code using different exception handling
+   models. */
 #ifdef SJLJ_EXCEPTIONS
 #define PERSONALITY_FUNCTION __gnu_objc_personality_sj0
 #define __builtin_eh_return_data_regno(x) x
@@ -294,14 +293,14 @@ PERSONALITY_FUNCTION (int version,
        }
       actions |= state & _US_FORCE_UNWIND;

-      /* TODO: Foreign exceptions need some attention (e.g. rethrowing doesn't
-        work). */
+      /* TODO: Foreign exceptions need some attention (e.g. rethrowing
+        doesn't work). */
       foreign_exception = 0;

-      /* The dwarf unwinder assumes the context structure holds things like the
-        function and LSDA pointers.  The ARM implementation caches these in
-        the exception header (UCB).  To avoid rewriting everything we make the
-        virtual IP register point at the UCB. */
+      /* The dwarf unwinder assumes the context structure holds things
+        like the function and LSDA pointers.  The ARM implementation
+        caches these in the exception header (UCB).  To avoid rewriting
+        everything we make the virtual IP register point at the UCB. */
       ip = (_Unwind_Ptr) ue_header;
       _Unwind_SetGR (context, 12, ip);

@@ -351,8 +350,8 @@ PERSONALITY_FUNCTION (int version,
 #ifdef SJLJ_EXCEPTIONS
   /* The given "IP" is an index into the call-site table, with two
      exceptions -- -1 means no-action, and 0 means terminate.  But
-     since we're using uleb128 values, we've not got random access
-     to the array. */
+     since we're using uleb128 values, we've not got random access to
+     the array. */
   if ((int) ip < 0)
     return _URC_CONTINUE_UNWIND;
   else
@@ -373,13 +372,15 @@ PERSONALITY_FUNCTION (int version,
          goto found_something;
        }
 #else
-  /* Search the call-site table for the action associated with this IP. */
+  /* Search the call-site table for the action associated with this
+     IP. */
   while (p < info.action_table)
     {
       _Unwind_Ptr cs_start, cs_len, cs_lp;
       _uleb128_t cs_action;

-      /* Note that all call-site encodings are "absolute" displacements. */
+      /* Note that all call-site encodings are "absolute"
+        displacements. */
       p = read_encoded_value (0, info.call_site_encoding, p, &cs_start);
       p = read_encoded_value (0, info.call_site_encoding, p, &cs_len);
       p = read_encoded_value (0, info.call_site_encoding, p, &cs_lp);
@@ -400,8 +401,8 @@ PERSONALITY_FUNCTION (int version,
 #endif /* SJLJ_EXCEPTIONS */

   /* If ip is not present in the table, C++ would call terminate. */
-  /* ??? As with Java, it's perhaps better to tweek the LSDA to
-     that no-action is mapped to no-entry. */
+  /* ??? As with Java, it's perhaps better to tweek the LSDA to that
+     no-action is mapped to no-entry. */
   CONTINUE_UNWINDING;

  found_something:
@@ -410,8 +411,8 @@ PERSONALITY_FUNCTION (int version,

   if (landing_pad == 0)
     {
-      /* If ip is present, and has a null landing pad, there are
-        no cleanups or handlers to be run. */
+      /* If ip is present, and has a null landing pad, there are no
+        cleanups or handlers to be run. */
     }
   else if (action_record == 0)
     {
@@ -438,14 +439,14 @@ PERSONALITY_FUNCTION (int version,
            }

          /* During forced unwinding, we only run cleanups.  With a
-            foreign exception class, we have no class info to match. */
+            foreign exception class, we have no class info to
+            match. */
          else if ((actions & _UA_FORCE_UNWIND) || foreign_exception)
            ;

          else if (ar_filter > 0)
            {
              /* Positive filter values are handlers. */

              Class catch_type = get_ttype_entry (&info, ar_filter);

              if ((*__objc_exception_matcher) (catch_type, xh->value))
@@ -476,7 +477,8 @@ PERSONALITY_FUNCTION (int version,
   if (!saw_handler)
     CONTINUE_UNWINDING;

-  /* For domestic exceptions, we cache data from phase 1 for phase 2. */
+  /* For domestic exceptions, we cache data from phase 1 for phase
+     2. */
   if (!foreign_exception)
     {
 #ifdef __ARM_EABI_UNWINDER__
@@ -531,16 +533,14 @@ objc_exception_throw (id exception)
 #endif

   /* No exception handler was installed.  Call the uncaught exception
-     handler if any is defined.
-  */
+     handler if any is defined. */
   if (__objc_uncaught_exception_handler != 0)
     {
       (*__objc_uncaught_exception_handler) (exception);
     }

   /* As a last resort support the old, deprecated way of setting an
-     uncaught exception handler.
-  */
+     uncaught exception handler. */
   if (_objc_unexpected_exception != 0)
     {
       (*_objc_unexpected_exception) (exception);
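A hedged usage sketch of the uncaught-exception hook referenced above
(handler body and header location are illustrative assumptions; the
function name objc_set_uncaught_exception_handler comes from this
file's own comments):

    #include <stdio.h>
    #include <stdlib.h>
    #include <objc/objc.h>

    /* Hypothetical last-resort handler: log the exception object and
       abort cleanly instead of crashing inside the unwinder.  */
    static void
    my_last_resort (id exception)
    {
      fprintf (stderr, "uncaught Objective-C exception %p\n",
               (void *) exception);
      abort ();
    }

    /* Somewhere during program startup:  */
    objc_set_uncaught_exception_handler (my_last_resort);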
libobjc/init.c (115 changed lines)
@@ -38,15 +38,17 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
    __objc_protocols_register_selectors() */
 #include "objc-private/accessors.h" /* For __objc_accessors_init() */

-/* The version number of this runtime.  This must match the number
+/* The version number of this runtime.  This must match the number
    defined in gcc (objc-act.c). */
 #define OBJC_VERSION 8
 #define PROTOCOL_VERSION 2

-/* This list contains all modules currently loaded into the runtime. */
+/* This list contains all modules currently loaded into the
+   runtime. */
 static struct objc_list *__objc_module_list = 0; /* !T:MUTEX */

-/* This list contains all proto_list's not yet assigned class links. */
+/* This list contains all proto_list's not yet assigned class
+   links. */
 static struct objc_list *unclaimed_proto_list = 0; /* !T:MUTEX */

 /* List of unresolved static instances. */
@@ -95,7 +97,8 @@ extern SEL
 __sel_register_typed_name (const char *name, const char *types,
                           struct objc_selector *orig, BOOL is_const);

-/* Sends +load to all classes and categories in certain situations. */
+/* Sends +load to all classes and categories in certain
+   situations. */
 static void objc_send_load (void);

 /* Inserts all the classes defined in module in a tree of classes that
@@ -145,20 +148,19 @@ static cache_ptr __objc_load_methods = NULL;

    This function returns the superclass of a class in both cases, and
    can be used to build the determine the class relationships while
-   building the tree.
-*/
+   building the tree. */
 static Class class_superclass_of_class (Class class)
 {
   char *super_class_name;

   /* If the class links have been resolved, use the resolved
-   * links. */
+     links. */
   if (CLS_ISRESOLV (class))
     return class->super_class;

   /* Else, 'class' has not yet been resolved.  This means that its
-   * super_class pointer is really the name of the super class (rather
-   * than a pointer to the actual superclass). */
+     super_class pointer is really the name of the super class (rather
+     than a pointer to the actual superclass). */
   super_class_name = (char *)class->super_class;

   /* Return Nil for a root class. */
@@ -175,7 +177,6 @@ static Class class_superclass_of_class (Class class)
    `bottom_class'.  The classes in this tree are super classes of
    `bottom_class'. `subclasses' member of each tree node point to the
    next subclass tree node. */
-
 static objc_class_tree *
 create_tree_of_subclasses_inherited_from (Class bottom_class, Class upper)
 {
@@ -213,7 +214,6 @@ create_tree_of_subclasses_inherited_from (Class bottom_class, Class upper)
    part of the classes hierarchy described by `tree'. This function is
    private to objc_tree_insert_class (), you should not call it
    directly. */
-
 static objc_class_tree *
 __objc_tree_insert_class (objc_class_tree *tree, Class class)
 {
@@ -224,15 +224,15 @@ __objc_tree_insert_class (objc_class_tree *tree, Class class)
     return create_tree_of_subclasses_inherited_from (class, NULL);
   else if (class == tree->class)
     {
-      /* `class' has been already inserted */
+      /* `class' has been already inserted. */
       DEBUG_PRINTF ("1. class %s was previously inserted\n", class->name);
       return tree;
     }
   else if (class_superclass_of_class (class) == tree->class)
     {
-      /* If class is a direct subclass of tree->class then add class to the
-        list of subclasses. First check to see if it wasn't already
-        inserted. */
+      /* If class is a direct subclass of tree->class then add class
+        to the list of subclasses.  First check to see if it wasn't
+        already inserted. */
       struct objc_list *list = tree->subclasses;
       objc_class_tree *node;

@@ -249,7 +249,8 @@ __objc_tree_insert_class (objc_class_tree *tree, Class class)
          list = list->tail;
        }

-      /* Create a new node class and insert it into the list of subclasses */
+      /* Create a new node class and insert it into the list of
+        subclasses. */
       node = objc_calloc (1, sizeof (objc_class_tree));
       node->class = class;
       tree->subclasses = list_cons (node, tree->subclasses);
@@ -258,8 +259,8 @@ __objc_tree_insert_class (objc_class_tree *tree, Class class)
     }
   else
     {
-      /* The class is not a direct subclass of tree->class.  Search for
-        class's superclasses in the list of subclasses. */
+      /* The class is not a direct subclass of tree->class.  Search
+        for class's superclasses in the list of subclasses. */
       struct objc_list *subclasses = tree->subclasses;

       /* Precondition: the class must be a subclass of tree->class;
@@ -298,7 +299,6 @@ __objc_tree_insert_class (objc_class_tree *tree, Class class)
 }

 /* This function inserts `class' in the right tree hierarchy classes. */
-
 static void
 objc_tree_insert_class (Class class)
 {
@@ -328,7 +328,6 @@ objc_tree_insert_class (Class class)
 }

 /* Traverse tree in preorder. Used to send +load. */
-
 static void
 objc_preorder_traverse (objc_class_tree *tree,
                        int level,
@@ -342,7 +341,6 @@ objc_preorder_traverse (objc_class_tree *tree,
 }

 /* Traverse tree in postorder. Used to destroy a tree. */
-
 static void
 objc_postorder_traverse (objc_class_tree *tree,
                         int level,
@@ -356,7 +354,6 @@ objc_postorder_traverse (objc_class_tree *tree,
 }

 /* Used to print a tree class hierarchy. */
-
 #ifdef DEBUG
 static void
 __objc_tree_print (objc_class_tree *tree, int level)
@@ -374,7 +371,6 @@ __objc_tree_print (objc_class_tree *tree, int level)
    reverse order assures the +load of class is executed first and then
    +load of categories because of the way in which categories are
    added to the class methods. */
-
 static void
 __objc_send_message_in_list (struct objc_method_list *method_list, Class class, SEL op)
 {
@@ -383,7 +379,7 @@ __objc_send_message_in_list (struct objc_method_list *method_list, Class class,
   if (! method_list)
     return;

-  /* First execute the `op' message in the following method lists */
+  /* First execute the `op' message in the following method lists. */
   __objc_send_message_in_list (method_list->method_next, class, op);

   /* Search the method list. */
@@ -394,13 +390,13 @@ __objc_send_message_in_list (struct objc_method_list *method_list, Class class,
       if (mth->method_name && sel_eq (mth->method_name, op)
          && ! objc_hash_is_key_in_hash (__objc_load_methods, mth->method_imp))
        {
-         /* Add this method into the +load hash table */
+         /* Add this method into the +load hash table. */
          objc_hash_add (&__objc_load_methods,
                         mth->method_imp,
                         mth->method_imp);

          DEBUG_PRINTF ("sending +load in class: %s\n", class->name);

          /* The method was found and wasn't previously executed. */
          (*mth->method_imp) ((id)class, mth->method_name);
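For context, the machinery above exists so that user code like the
following runs at module registration time (class name illustrative;
ordering guarantees are the ones this file's comments describe:
classes before their categories):

    #include <stdio.h>
    #include <objc/Object.h>

    /* Hypothetical user class: the runtime sends +load exactly once
       per implementation as its module is loaded.  */
    @interface MyClass : Object
    @end

    @implementation MyClass
    + (void) load
    {
      printf ("MyClass loaded\n");
    }
    @end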
@@ -432,7 +428,6 @@ __objc_destroy_class_tree_node (objc_class_tree *tree,

 /* This is used to check if the relationship between two classes
    before the runtime completely installs the classes. */
-
 static BOOL
 class_is_subclass_of_class (Class class, Class superclass)
 {
@@ -451,7 +446,6 @@ class_is_subclass_of_class (Class class, Class superclass)
 static struct objc_list *unresolved_classes = 0;

 /* Extern function used to reference the Object class. */
-
 extern void __objc_force_linking (void);

 void
@@ -463,7 +457,6 @@ __objc_force_linking (void)

 /* Run through the statics list, removing modules as soon as all its
    statics have been initialized. */
-
 static void
 objc_init_statics (void)
 {
@@ -516,8 +509,9 @@ objc_init_statics (void)
            }
          else
            {
-             /* Other static instances (typically constant strings) are
-                easier as we just fix up their class pointers. */
+             /* Other static instances (typically constant
+                strings) are easier as we just fix up their class
+                pointers. */
              for (inst = &statics->instances[0]; *inst; inst++)
                (*inst)->class_pointer = class;
            }
@@ -535,7 +529,7 @@ objc_init_statics (void)
     }

   objc_mutex_unlock (__objc_runtime_mutex);
-} /* objc_init_statics */
+}

 /* This function is called by constructor functions generated for each
    module compiled. (_GLOBAL_$I$...) The purpose of this function is
@@ -565,18 +559,18 @@ __objc_exec_class (struct objc_module *module)
   /* The table of selector references for this module. */
   SEL selectors = symtab->refs;

   /* dummy counter. */
   int i;

   DEBUG_PRINTF ("received module: %s\n", module->name);

-  /* check gcc version */
+  /* Check gcc version. */
   init_check_module_version (module);

-  /* On the first call of this routine, initialize some data structures. */
+  /* On the first call of this routine, initialize some data
+     structures. */
   if (! previous_constructors)
     {
-      /* Initialize thread-safe system */
+      /* Initialize thread-safe system. */
       __objc_init_thread_system ();
       __objc_runtime_threads_alive = 1;
       __objc_runtime_mutex = objc_mutex_allocate ();
@@ -594,7 +588,8 @@ __objc_exec_class (struct objc_module *module)
       previous_constructors = 1;
     }

-  /* Save the module pointer for later processing. (not currently used) */
+  /* Save the module pointer for later processing. (not currently
+     used). */
   objc_mutex_lock (__objc_runtime_mutex);
   __objc_module_list = list_cons (module, __objc_module_list);

@@ -606,15 +601,17 @@ __objc_exec_class (struct objc_module *module)
          const char *name, *type;
          name = (char *) selectors[i].sel_id;
          type = (char *) selectors[i].sel_types;
-         /* Constructors are constant static data so we can safely store
-            pointers to them in the runtime structures. is_const == YES */
+         /* Constructors are constant static data so we can safely
+            store pointers to them in the runtime
+            structures. is_const == YES. */
          __sel_register_typed_name (name, type,
                                     (struct objc_selector *) &(selectors[i]),
                                     YES);
        }
     }

-  /* Parse the classes in the load module and gather selector information. */
+  /* Parse the classes in the load module and gather selector
+     information. */
   DEBUG_PRINTF ("gathering selectors from module: %s\n", module->name);
   for (i = 0; i < symtab->cls_def_cnt; ++i)
     {
@@ -626,14 +623,14 @@ __objc_exec_class (struct objc_module *module)
       assert (CLS_ISMETA (class->class_pointer));
       DEBUG_PRINTF ("phase 1, processing class: %s\n", class->name);

-      /* Initialize the subclass list to be NULL.
-        In some cases it isn't and this crashes the program. */
+      /* Initialize the subclass list to be NULL.  In some cases it
+        isn't and this crashes the program. */
       class->subclass_list = NULL;

       __objc_init_class (class);

-      /* Check to see if the superclass is known in this point.  If it's not
-        add the class to the unresolved_classes list. */
+      /* Check to see if the superclass is known in this point.  If
+        it's not add the class to the unresolved_classes list. */
       if (superclass && ! objc_getClass (superclass))
        unresolved_classes = list_cons (class, unresolved_classes);
     }
@@ -644,7 +641,8 @@ __objc_exec_class (struct objc_module *module)
       struct objc_category *category = symtab->defs[i + symtab->cls_def_cnt];
       Class class = objc_getClass (category->class_name);

-      /* If the class for the category exists then append its methods. */
+      /* If the class for the category exists then append its
+        methods. */
       if (class)
        {

@@ -673,8 +671,8 @@ __objc_exec_class (struct objc_module *module)
        }
       else
        {
-         /* The object to which the category methods belong can't be found.
-            Save the information. */
+         /* The object to which the category methods belong can't be
+            found.  Save the information. */
          unclaimed_categories = list_cons (category, unclaimed_categories);
        }
     }
@@ -684,8 +682,8 @@ __objc_exec_class (struct objc_module *module)
   if (uninitialized_statics)
     objc_init_statics ();

-  /* Scan the unclaimed category hash.  Attempt to attach any unclaimed
-     categories to objects. */
+  /* Scan the unclaimed category hash.  Attempt to attach any
+     unclaimed categories to objects. */
   for (cell = &unclaimed_categories; *cell; )
     {
       struct objc_category *category = (*cell)->head;
@@ -794,8 +792,7 @@ objc_send_load (void)
 static void
 __objc_create_classes_tree (struct objc_module *module)
 {
-  /* The runtime mutex is locked in this point */
-
+  /* The runtime mutex is locked at this point */
   struct objc_symtab *symtab = module->symtab;
   int i;

@@ -812,8 +809,7 @@ __objc_create_classes_tree (struct objc_module *module)
 static void
 __objc_call_callback (struct objc_module *module)
 {
-  /* The runtime mutex is locked in this point. */
-
+  /* The runtime mutex is locked at this point. */
   struct objc_symtab *symtab = module->symtab;
   int i;

@@ -842,7 +838,6 @@ __objc_call_callback (struct objc_module *module)
 }

 /* Sanity check the version of gcc used to compile `module'. */
-
 static void
 init_check_module_version (struct objc_module *module)
 {
@@ -864,7 +859,7 @@ __objc_init_class (Class class)
   __objc_register_selectors_from_class (class);
   __objc_register_selectors_from_class ((Class) class->class_pointer);

-  /* Install the fake dispatch tables */
+  /* Install the fake dispatch tables. */
   __objc_install_premature_dtable (class);
   __objc_install_premature_dtable (class->class_pointer);

@@ -888,7 +883,7 @@ __objc_init_protocol (struct objc_protocol *protocol)

   if (((size_t)protocol->class_pointer) == PROTOCOL_VERSION)
     {
-      /* Assign class pointer */
+      /* Assign class pointer. */
       protocol->class_pointer = proto_class;

       /* Register all the selectors in the protocol with the runtime.
@@ -907,7 +902,7 @@ __objc_init_protocol (struct objc_protocol *protocol)
         name. */
       __objc_protocols_add_protocol (protocol->protocol_name, protocol);

-      /* Init super protocols */
+      /* Init super protocols. */
       __objc_init_protocols (protocol->protocol_list);
     }
   else if (protocol->class_pointer != proto_class)
@@ -941,7 +936,7 @@ __objc_init_protocols (struct objc_protocol_list *protos)
     }

 #if 0
   assert (protos->next == 0); /* only single ones allowed */
+  assert (protos->next == 0); /* Only single ones allowed. */
 #endif

   for (i = 0; i < protos->count; i++)
@@ -956,11 +951,9 @@ __objc_init_protocols (struct objc_protocol_list *protos)
 static void
 __objc_class_add_protocols (Class class, struct objc_protocol_list *protos)
 {
-  /* Well... */
   if (! protos)
     return;

-  /* Add it... */
   protos->next = class->protocols;
   class->protocols = protos;
 }
libobjc/ivars.c
@@ -47,9 +47,7 @@ class_getInstanceVariable (Class class_, const char *name)
          struct objc_ivar *ivar = &(ivars->ivar_list[i]);

          if (!strcmp (ivar->ivar_name, name))
-           {
-             return ivar;
-           }
+           return ivar;
        }
     }
   class_ = class_getSuperclass (class_);
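A caller-side usage sketch ("MyClass" and "count" are illustrative; it
assumes the Ivar typedef and ivar_getOffset() accessor from the
modern objc/runtime.h API this file implements):

    #include <stdio.h>
    #include <objc/runtime.h>

    /* Look up an instance variable by name and read its byte
       offset within instances of the class.  */
    Ivar ivar = class_getInstanceVariable (objc_getClass ("MyClass"),
                                           "count");
    if (ivar != NULL)
      printf ("offset = %ld\n", (long) ivar_getOffset (ivar));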
@@ -83,10 +81,8 @@ object_getIndexedIvars (id object)
   if (object == nil)
     return NULL;
   else
-    {
-      return (void *)(((char *)object)
-                      + object->class_pointer->instance_size);
-    }
+    return (void *)(((char *)object)
+                    + object->class_pointer->instance_size);
 }

 struct objc_ivar *
@@ -203,9 +199,7 @@ struct objc_ivar ** class_copyIvarList (Class class_, unsigned int *numberOfRetu

   /* Copy the ivars. */
   for (i = 0; i < count; i++)
-    {
-      returnValue[i] = &(ivar_list->ivar_list[i]);
-    }
+    returnValue[i] = &(ivar_list->ivar_list[i]);

   returnValue[i] = NULL;
 }
@@ -243,9 +237,7 @@ class_addIvar (Class class_, const char * ivar_name, size_t size,
       struct objc_ivar *ivar = &(ivars->ivar_list[i]);

       if (strcmp (ivar->ivar_name, ivar_name) == 0)
-        {
-          return NO;
-        }
+        return NO;
     }
 }

libobjc/memory.c
@@ -24,11 +24,9 @@ a copy of the GCC Runtime Library Exception along with this program;
 see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
 <http://www.gnu.org/licenses/>.  */

-/*
-  This file includes the standard functions for memory allocation and
-  disposal.  Users should use these functions in their ObjC programs
-  so that they work properly with garbage collectors.
-*/
+/* This file includes the standard functions for memory allocation and
+   disposal.  Users should use these functions in their ObjC programs
+   so that they work properly with garbage collectors. */

 /* TODO: Turn these into macros or inline functions. */

@@ -37,8 +35,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see

 /* __USE_FIXED_PROTOTYPES__ used to be required to get prototypes for
    malloc, free, etc. on some platforms.  It is unclear if we still
-   need it, but it can't hurt.
-*/
+   need it, but it can't hurt. */
 #define __USE_FIXED_PROTOTYPES__
 #include <stdlib.h>

@@ -163,11 +160,8 @@ objc_valloc (size_t size)

 #endif /* !OBJC_WITH_GC */

-/*
-  Hook functions for memory allocation and disposal.  Deprecated
-  and currently unused.
-*/
-
+/* Hook functions for memory allocation and disposal.  Deprecated and
+   currently unused. */
 void *(*_objc_malloc) (size_t) = malloc;
 void *(*_objc_atomic_malloc) (size_t) = malloc;
 void *(*_objc_valloc) (size_t) = malloc;
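A small usage sketch of these wrappers (sizes illustrative; the
declaring header is an assumption — historically the objc_malloc()
family has been exposed through the runtime's public headers):

    /* With garbage collection enabled, objc_malloc() returns
       collector-visible memory and objc_atomic_malloc() returns
       pointer-free memory; with GC off both reduce to malloc.  */
    void **slots = objc_malloc (16 * sizeof (void *));
    char *bytes  = objc_atomic_malloc (128);

    objc_free (bytes);
    objc_free (slots);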
libobjc/objc-foreach.c
@@ -22,11 +22,9 @@ a copy of the GCC Runtime Library Exception along with this program;
 see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
 <http://www.gnu.org/licenses/>.  */

-/*
-  This file implements objc_enumeration_mutation() and
-  objc_set_enumeration_mutation_handler(), the two functions required
-  to handle mutations during a fast enumeration.
-*/
+/* This file implements objc_enumeration_mutation() and
+   objc_set_enumeration_mutation_handler(), the two functions required
+   to handle mutations during a fast enumeration. */
 #include "objc-private/common.h"
 #include "objc-private/error.h" /* For _objc_abort() */
 #include "objc/runtime.h" /* For objc_enumerationMutation() and objc_set_enumeration_mutation_handler() */
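A hedged sketch of installing a custom mutation handler (the function
name comes from the comment above; the handler is assumed to receive
the mutated collection as its single id argument, and the body is
illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <objc/objc.h>

    static void
    my_mutation_handler (id collection)
    {
      fprintf (stderr, "collection %p mutated during fast enumeration\n",
               (void *) collection);
      abort ();
    }

    /* During program startup:  */
    objc_set_enumeration_mutation_handler (my_mutation_handler);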
libobjc/objc-sync.c
@@ -22,16 +22,14 @@ a copy of the GCC Runtime Library Exception along with this program;
 see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
 <http://www.gnu.org/licenses/>.  */

-/*
-  This file implements objc_sync_enter() and objc_sync_exit(), the
-  two functions required to support @synchronized().
+/* This file implements objc_sync_enter() and objc_sync_exit(), the
+   two functions required to support @synchronized().

-  objc_sync_enter(object) needs to get a recursive lock associated
-  with 'object', and lock it.
-
-  objc_sync_exit(object) needs to get the recursive lock associated
-  with 'object', and unlock it.
-*/
+   objc_sync_enter(object) needs to get a recursive lock associated
+   with 'object', and lock it.
+
+   objc_sync_exit(object) needs to get the recursive lock associated
+   with 'object', and unlock it. */

 /* To avoid the overhead of continuously allocating and deallocating
    locks, we implement a pool of locks.  When a lock is needed for an
@@ -61,18 +59,15 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
    which is already held by the current thread without having to use
    any protection lock or synchronization mechanism.  It can so detect
    recursive locks/unlocks, and transform them into no-ops that
-   require no actual locking or synchronization mechanisms at all.
-*/
+   require no actual locking or synchronization mechanisms at all. */

 /* You can disable the thread-local cache (most likely to benchmark
    the code with and without it) by compiling with
-   -DSYNC_CACHE_DISABLE, or commenting out the following line.
-*/
+   -DSYNC_CACHE_DISABLE, or commenting out the following line. */
 /* #define SYNC_CACHE_DISABLE */

 /* If thread-local storage is not available, automatically disable the
-   cache.
-*/
+   cache. */
 #ifndef HAVE_TLS
 # define SYNC_CACHE_DISABLE
 #endif
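What the pool ultimately supports, seen from the caller's side
(illustrative; the compiler's actual @synchronized expansion also
covers exceptional exits):

    /* @synchronized (lock) { ... } expands, roughly, into calls to
       the two functions implemented in this file.  */
    id lock = some_object;              /* illustrative */

    if (objc_sync_enter (lock) == OBJC_SYNC_SUCCESS)
      {
        /* ... critical section ... */
        objc_sync_exit (lock);
      }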
@@ -85,13 +80,11 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see

 /* We have 32 pools of locks, each of them protected by its own
    protection lock.  It's tempting to increase this number to reduce
-   contention; but in our tests it is high enough.
-*/
+   contention; but in our tests it is high enough. */
 #define SYNC_NUMBER_OF_POOLS 32

 /* Given an object, it determines which pool contains the associated
-   lock.
-*/
+   lock. */
 #define SYNC_OBJECT_HASH(OBJECT) ((((size_t)OBJECT >> 8) ^ (size_t)OBJECT) & (SYNC_NUMBER_OF_POOLS - 1))

 /* The locks protecting each pool. */
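As a worked example of the macro: with SYNC_NUMBER_OF_POOLS == 32, an
object at (illustrative) address 0x1234 hashes to
((0x1234 >> 8) ^ 0x1234) & 31 = (0x12 ^ 0x1234) & 0x1f
= 0x1226 & 0x1f = 6, so its lock is looked up (or created) in pool 6,
under that pool's protection lock.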
@@ -126,8 +119,8 @@ typedef struct lock_node
      because in that case you know that node->usage_count can't get to
      zero until you release the lock.  It is valid to have usage_count
      == 0 and object != nil; in that case, the lock is not currently
-     being used, but is still currently associated with the object.
-  */
+     being used, but is still currently associated with the
+     object. */
   id object;

   /* This is a counter reserved for use by the thread currently
@@ -143,21 +136,18 @@ typedef struct lock_node
      require any synchronization with other threads, since it's
      protected by the node->lock itself) instead of the usage_count
      (which requires locking the pool protection lock).  And it can
-     skip the call to objc_mutex_lock/unlock too.
-  */
+     skip the call to objc_mutex_lock/unlock too. */
   unsigned int recursive_usage_count;
 } *lock_node_ptr;


 /* The pools of locks.  Each of them is a linked list of lock_nodes.
-   In the list we keep both unlocked and locked nodes.
-*/
+   In the list we keep both unlocked and locked nodes. */
 static lock_node_ptr sync_pool_array[SYNC_NUMBER_OF_POOLS];

 #ifndef SYNC_CACHE_DISABLE
 /* We store a cache of locks acquired by each thread in thread-local
-   storage.
-*/
+   storage. */
 static __thread lock_node_ptr *lock_cache = NULL;

 /* This is a conservative implementation that uses a static array of
@@ -176,8 +166,7 @@ static __thread lock_node_ptr *lock_cache = NULL;
    first 8 get the speed benefits of the cache, but the cache remains
    always small, fast and predictable.

-   SYNC_CACHE_SIZE is the size of the lock cache for each thread.
-*/
+   SYNC_CACHE_SIZE is the size of the lock cache for each thread. */
 #define SYNC_CACHE_SIZE 8
 #endif /* SYNC_CACHE_DISABLE */

@@ -217,23 +206,20 @@ objc_sync_enter (id object)
   lock_node_ptr unused_node;

   if (object == nil)
-    {
-      return OBJC_SYNC_SUCCESS;
-    }
+    return OBJC_SYNC_SUCCESS;

 #ifndef SYNC_CACHE_DISABLE
   if (lock_cache == NULL)
     {
       /* Note that this calloc only happen only once per thread, the
-        very first time a thread does a objc_sync_enter().
-      */
+        very first time a thread does a objc_sync_enter(). */
       lock_cache = objc_calloc (SYNC_CACHE_SIZE, sizeof (lock_node_ptr));
     }

   /* Check the cache to see if we have a record of having already
      locked the lock corresponding to this object.  While doing so,
-     keep track of the first free cache node in case we need it later.
-  */
+     keep track of the first free cache node in case we need it
+     later. */
   node = NULL;
   free_cache_slot = -1;

@@ -246,9 +232,7 @@ objc_sync_enter (id object)
       if (locked_node == NULL)
        {
          if (free_cache_slot == -1)
-           {
-             free_cache_slot = i;
-           }
+           free_cache_slot = i;
        }
       else if (locked_node->object == object)
        {
@@ -261,26 +245,22 @@ objc_sync_enter (id object)
   if (node != NULL)
     {
       /* We found the lock.  Increase recursive_usage_count, which is
-        protected by node->lock, which we already hold.
-      */
+        protected by node->lock, which we already hold. */
       node->recursive_usage_count++;

       /* There is no need to actually lock anything, since we already
        hold the lock.  Correspondingly, objc_sync_exit() will just
-       decrease recursive_usage_count and do nothing to unlock.
-      */
+       decrease recursive_usage_count and do nothing to unlock. */
       return OBJC_SYNC_SUCCESS;
     }
 #endif /* SYNC_CACHE_DISABLE */

   /* The following is the standard lookup for the lock in the standard
-     pool lock.  It requires a pool protection lock.
-  */
+     pool lock.  It requires a pool protection lock. */
   hash = SYNC_OBJECT_HASH(object);

   /* Search for an existing lock for 'object'.  While searching, make
-     note of any unused lock if we find any.
-  */
+     note of any unused lock if we find any. */
   unused_node = NULL;

   objc_mutex_lock (sync_pool_protection_locks[hash]);
@@ -298,9 +278,7 @@ objc_sync_enter (id object)
 #ifndef SYNC_CACHE_DISABLE
       /* Put it in the cache. */
       if (free_cache_slot != -1)
-       {
-         lock_cache[free_cache_slot] = node;
-       }
+       lock_cache[free_cache_slot] = node;
 #endif

       /* Lock it. */
@@ -329,9 +307,7 @@ objc_sync_enter (id object)

 #ifndef SYNC_CACHE_DISABLE
       if (free_cache_slot != -1)
-       {
-         lock_cache[free_cache_slot] = unused_node;
-       }
+       lock_cache[free_cache_slot] = unused_node;
 #endif

       objc_mutex_lock (unused_node->lock);
@@ -357,9 +333,7 @@ objc_sync_enter (id object)

 #ifndef SYNC_CACHE_DISABLE
   if (free_cache_slot != -1)
-    {
-      lock_cache[free_cache_slot] = new_node;
-    }
+    lock_cache[free_cache_slot] = new_node;
 #endif

   objc_mutex_lock (new_node->lock);
@@ -375,9 +349,7 @@ objc_sync_exit (id object)
   lock_node_ptr node;

   if (object == nil)
-    {
-      return OBJC_SYNC_SUCCESS;
-    }
+    return OBJC_SYNC_SUCCESS;

 #ifndef SYNC_CACHE_DISABLE
   if (lock_cache != NULL)
@@ -399,7 +371,6 @@ objc_sync_exit (id object)
       /* Note that, if a node was found in the cache, the variable i
         now holds the index where it was found, which will be used to
         remove it from the cache. */
-
       if (node != NULL)
        {
          if (node->recursive_usage_count > 0)
@@ -413,8 +384,8 @@ objc_sync_exit (id object)
              hash = SYNC_OBJECT_HASH(object);

              /* TODO: If we had atomic increase/decrease operations
-                with memory barriers, we could avoid the lock here!
-             */
+                with memory barriers, we could avoid the lock
+                here! */
              objc_mutex_lock (sync_pool_protection_locks[hash]);
              node->usage_count--;
              /* Normally, we do not reset object to nil here.  We'll
@@ -430,8 +401,7 @@ objc_sync_exit (id object)
                 object from being released.  In that case, we remove
                 it (TODO: maybe we should avoid using the garbage
                 collector at all ?  Nothing is ever deallocated in
-                this file).
-             */
+                this file). */
 #if OBJC_WITH_GC
              node->object = nil;
 #endif
@@ -442,8 +412,7 @@ objc_sync_exit (id object)
                 objc_mutex_unlock (node->lock), the pool is unlocked
                 so other threads may allocate this same lock to
                 another object (!).  This is not a problem, but it is
-                curious.
-             */
+                curious. */
              objc_mutex_unlock (node->lock);

              /* Remove the node from the cache. */
@@ -476,9 +445,7 @@ objc_sync_exit (id object)
          objc_mutex_unlock (node->lock);

          /* No need to remove the node from the cache, since it
-            wasn't found in the cache when we looked for it!
-         */
-
+            wasn't found in the cache when we looked for it! */
          return OBJC_SYNC_SUCCESS;
        }

libobjc/objects.c
@@ -123,11 +123,8 @@ object_setClass (id object, Class class_)
     }
 }

-/*
-  Hook functions for memory allocation and disposal.  Deprecated
-  and currently unused.
-*/
-
+/* Hook functions for memory allocation and disposal.  Deprecated and
+   currently unused. */
 id (*_objc_object_alloc) (Class) = 0;
 id (*_objc_object_dispose) (id) = 0;
 id (*_objc_object_copy) (id) = 0;
libobjc/protocols.c
@@ -69,9 +69,7 @@ __objc_protocols_add_protocol (const char *name, Protocol *object)
      Objective-C programs while trying to catch a problem that has
      never been seen in practice, so we don't do it. */
   if (! objc_hash_is_key_in_hash (__protocols_hashtable, name))
-    {
-      objc_hash_add (&__protocols_hashtable, name, object);
-    }
+    objc_hash_add (&__protocols_hashtable, name, object);

   objc_mutex_unlock (__protocols_hashtable_lock);
 }
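The table populated above backs lookups like this one (protocol name
illustrative; objc_getProtocol() and protocol_getName() are assumed
from the modern objc/runtime.h API of this runtime):

    #include <stdio.h>
    #include <objc/runtime.h>

    /* Retrieve a registered protocol by name from the hash table
       that __objc_protocols_add_protocol() fills in.  */
    Protocol *copying = objc_getProtocol ("NSCopying");

    if (copying != NULL)
      printf ("found protocol %s\n", protocol_getName (copying));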
libobjc/sarray.c (298 changed lines)
@@ -59,38 +59,41 @@ sarray_remove_garbage (void)
   vp = first_free_data;
   first_free_data = NULL;

-  while (vp) {
-    np = *vp;
-    objc_free (vp);
-    vp = np;
-  }
+  while (vp)
+    {
+      np = *vp;
+      objc_free (vp);
+      vp = np;
+    }

   objc_mutex_unlock (__objc_runtime_mutex);
 }

-/* Free a block of dynamically allocated memory.  If we are in multi-threaded
-   mode, it is ok to free it.  If not, we add it to the garbage heap to be
-   freed later. */
-
+/* Free a block of dynamically allocated memory.  If we are in
+   multi-threaded mode, it is ok to free it.  If not, we add it to the
+   garbage heap to be freed later. */
 static void
 sarray_free_garbage (void *vp)
 {
   objc_mutex_lock (__objc_runtime_mutex);

-  if (__objc_runtime_threads_alive == 1) {
-    objc_free (vp);
-    if (first_free_data)
-      sarray_remove_garbage ();
-  }
-  else {
-    *(void **)vp = first_free_data;
-    first_free_data = vp;
-  }
-
+  if (__objc_runtime_threads_alive == 1)
+    {
+      objc_free (vp);
+      if (first_free_data)
+       sarray_remove_garbage ();
+    }
+  else
+    {
+      *(void **)vp = first_free_data;
+      first_free_data = vp;
+    }

   objc_mutex_unlock (__objc_runtime_mutex);
 }

-/* sarray_at_put : copies data in such a way as to be thread reader safe. */
+/* sarray_at_put copies data in such a way as to be thread reader
+   safe. */
 void
 sarray_at_put (struct sarray *array, sidx index, void *element)
 {
@@ -134,63 +137,63 @@ sarray_at_put (struct sarray *array, sidx index, void *element)
 #endif

   if ((*the_bucket)->elems[eoffset] == element)
-    return;            /* great! we just avoided a lazy copy */
+    return;            /* Great! we just avoided a lazy copy. */

 #ifdef OBJC_SPARSE3

-  /* First, perform lazy copy/allocation of index if needed */
-
-  if ((*the_index) == array->empty_index) {
-
-    /* The index was previously empty, allocate a new */
-    new_index = (struct sindex *) objc_malloc (sizeof (struct sindex));
-    memcpy (new_index, array->empty_index, sizeof (struct sindex));
-    new_index->version.version = array->version.version;
-    *the_index = new_index;            /* Prepared for install. */
-    the_bucket = &((*the_index)->buckets[boffset]);
-
-    nindices += 1;
-  } else if ((*the_index)->version.version != array->version.version) {
-
-    /* This index must be lazy copied */
-    struct sindex *old_index = *the_index;
-    new_index = (struct sindex *) objc_malloc (sizeof (struct sindex));
-    memcpy (new_index, old_index, sizeof (struct sindex));
-    new_index->version.version = array->version.version;
-    *the_index = new_index;            /* Prepared for install. */
-    the_bucket = &((*the_index)->buckets[boffset]);
-
-    nindices += 1;
-  }
+  /* First, perform lazy copy/allocation of index if needed. */
+
+  if ((*the_index) == array->empty_index)
+    {
+      /* The index was previously empty, allocate a new. */
+      new_index = (struct sindex *) objc_malloc (sizeof (struct sindex));
+      memcpy (new_index, array->empty_index, sizeof (struct sindex));
+      new_index->version.version = array->version.version;
+      *the_index = new_index;          /* Prepared for install. */
+      the_bucket = &((*the_index)->buckets[boffset]);
+
+      nindices += 1;
+    }
+  else if ((*the_index)->version.version != array->version.version)
+    {
+      /* This index must be lazy copied. */
+      struct sindex *old_index = *the_index;
+      new_index = (struct sindex *) objc_malloc (sizeof (struct sindex));
+      memcpy (new_index, old_index, sizeof (struct sindex));
+      new_index->version.version = array->version.version;
+      *the_index = new_index;          /* Prepared for install. */
+      the_bucket = &((*the_index)->buckets[boffset]);
+
+      nindices += 1;
+    }

 #endif /* OBJC_SPARSE3 */

-  /* next, perform lazy allocation/copy of the bucket if needed */
-
-  if ((*the_bucket) == array->empty_bucket) {
-
-    /* The bucket was previously empty (or something like that), */
-    /* allocate a new.  This is the effect of `lazy' allocation */
-    new_bucket = (struct sbucket *) objc_malloc (sizeof (struct sbucket));
-    memcpy ((void *) new_bucket, (const void *) array->empty_bucket,
-           sizeof (struct sbucket));
-    new_bucket->version.version = array->version.version;
-    *the_bucket = new_bucket;          /* Prepared for install. */
-
-    nbuckets += 1;
-
-  } else if ((*the_bucket)->version.version != array->version.version) {
-
-    /* Perform lazy copy. */
-    struct sbucket *old_bucket = *the_bucket;
-    new_bucket = (struct sbucket *) objc_malloc (sizeof (struct sbucket));
-    memcpy (new_bucket, old_bucket, sizeof (struct sbucket));
-    new_bucket->version.version = array->version.version;
-    *the_bucket = new_bucket;          /* Prepared for install. */
-
-    nbuckets += 1;
-
-  }
+  /* Next, perform lazy allocation/copy of the bucket if needed. */
+  if ((*the_bucket) == array->empty_bucket)
+    {
+      /* The bucket was previously empty (or something like that),
+        allocate a new.  This is the effect of `lazy' allocation. */
+      new_bucket = (struct sbucket *) objc_malloc (sizeof (struct sbucket));
+      memcpy ((void *) new_bucket, (const void *) array->empty_bucket,
+             sizeof (struct sbucket));
+      new_bucket->version.version = array->version.version;
+      *the_bucket = new_bucket;        /* Prepared for install. */
+
+      nbuckets += 1;
+    }
+  else if ((*the_bucket)->version.version != array->version.version)
+    {
+      /* Perform lazy copy. */
+      struct sbucket *old_bucket = *the_bucket;
+      new_bucket = (struct sbucket *) objc_malloc (sizeof (struct sbucket));
+      memcpy (new_bucket, old_bucket, sizeof (struct sbucket));
+      new_bucket->version.version = array->version.version;
+      *the_bucket = new_bucket;        /* Prepared for install. */
+
+      nbuckets += 1;
+    }
   (*the_bucket)->elems[eoffset] = element;
 }

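The reader-safety idea behind the lazy copy above, in miniature
(illustrative sketch, not the runtime's code): a writer never mutates
a shared bucket in place; it copies, updates the copy, and publishes
it with a single pointer store, so a concurrent lock-free reader
always sees either the complete old bucket or the complete new one.

    #include <stdlib.h>
    #include <string.h>

    struct bucket { void *elems[8]; };

    static void
    put_copy_on_write (struct bucket **slot, int i, void *value)
    {
      struct bucket *copy = malloc (sizeof (struct bucket));

      memcpy (copy, *slot, sizeof (struct bucket));
      copy->elems[i] = value;
      *slot = copy;   /* One pointer store publishes the copy; the old
                         bucket must be reclaimed later, as
                         sarray_free_garbage() does above.  */
    }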
@@ -217,16 +220,16 @@ sarray_new (int size, void *default_element)

   assert (size > 0);

-  /* Allocate core array */
+  /* Allocate core array. */
   arr = (struct sarray *) objc_malloc (sizeof (struct sarray));
   arr->version.version = 0;

-  /* Initialize members */
+  /* Initialize members. */
 #ifdef OBJC_SPARSE3
   arr->capacity = num_indices*INDEX_CAPACITY;
   new_indices = (struct sindex **)
     objc_malloc (sizeof (struct sindex *) * num_indices);


   arr->empty_index = (struct sindex *) objc_malloc (sizeof (struct sindex));
   arr->empty_index->version.version = 0;

@@ -279,10 +282,9 @@ sarray_new (int size, void *default_element)
 }


-/* Reallocate the sparse array to hold `newsize' entries
-   Note: We really allocate and then free.  We have to do this to ensure that
-   any concurrent readers notice the update. */
-
+/* Reallocate the sparse array to hold `newsize' entries Note: We
+   really allocate and then free.  We have to do this to ensure that
+   any concurrent readers notice the update. */
 void
 sarray_realloc (struct sarray *array, int newsize)
 {
@ -308,31 +310,29 @@ sarray_realloc (struct sarray *array, int newsize)
  assert (newsize > 0);

  /* The size is the same, just ignore the request */
  /* The size is the same, just ignore the request. */
  if (rounded_size <= array->capacity)
    return;

  assert (array->ref_count == 1);	/* stop if lazy copied... */

  /* We are asked to extend the array -- allocate new bucket table, */
  /* and insert empty_bucket in newly allocated places. */
  /* We are asked to extend the array -- allocate new bucket table,
     and insert empty_bucket in newly allocated places. */
  if (rounded_size > array->capacity)
    {
#ifdef OBJC_SPARSE3
      new_max_index += 4;
      rounded_size = (new_max_index + 1) * INDEX_CAPACITY;

#else /* OBJC_SPARSE2 */
      new_max_index += 4;
      rounded_size = (new_max_index + 1) * BUCKET_SIZE;
#endif

      /* update capacity */
      /* Update capacity. */
      array->capacity = rounded_size;

#ifdef OBJC_SPARSE3
      /* alloc to force re-read by any concurrent readers. */
      /* Alloc to force re-read by any concurrent readers. */
      old_indices = array->indices;
      new_indices = (struct sindex **)
	objc_malloc ((new_max_index + 1) * sizeof (struct sindex *));
@ -342,34 +342,35 @@ sarray_realloc (struct sarray *array, int newsize)
	objc_malloc ((new_max_index + 1) * sizeof (struct sbucket *));
#endif

      /* copy buckets below old_max_index (they are still valid) */
      for (counter = 0; counter <= old_max_index; counter++ ) {
      /* Copy buckets below old_max_index (they are still valid). */
      for (counter = 0; counter <= old_max_index; counter++ )
	{
#ifdef OBJC_SPARSE3
	new_indices[counter] = old_indices[counter];
	  new_indices[counter] = old_indices[counter];
#else /* OBJC_SPARSE2 */
	new_buckets[counter] = old_buckets[counter];
	  new_buckets[counter] = old_buckets[counter];
#endif
      }
	}

#ifdef OBJC_SPARSE3
      /* reset entries above old_max_index to empty_bucket */
      /* Reset entries above old_max_index to empty_bucket. */
      for (counter = old_max_index + 1; counter <= new_max_index; counter++)
	new_indices[counter] = array->empty_index;
#else /* OBJC_SPARSE2 */
      /* reset entries above old_max_index to empty_bucket */
      /* Reset entries above old_max_index to empty_bucket. */
      for (counter = old_max_index + 1; counter <= new_max_index; counter++)
	new_buckets[counter] = array->empty_bucket;
#endif

#ifdef OBJC_SPARSE3
      /* install the new indices */
      /* Install the new indices. */
      array->indices = new_indices;
#else /* OBJC_SPARSE2 */
      array->buckets = new_buckets;
#endif

#ifdef OBJC_SPARSE3
      /* free the old indices */
      /* Free the old indices. */
      sarray_free_garbage (old_indices);
#else /* OBJC_SPARSE2 */
      sarray_free_garbage (old_buckets);
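The "really allocate and then free" note above is a publish-then-reclaim pattern: the new index/bucket table is built completely, installed with a single pointer store, and only then is the old table handed to sarray_free_garbage, so lock-free readers that already fetched the old pointer keep dereferencing valid memory. A sketch of the same ordering, using hypothetical names (table_grow, deferred_free) rather than the real runtime entry points:

    #include <stdlib.h>
    #include <string.h>

    struct table { size_t size; void **slots; };

    /* Stand-in for sarray_free_garbage(): the real runtime defers the
       free until no reader can still hold the old pointer; a direct
       free() is only acceptable in this single-threaded model.  */
    static void deferred_free (void *p) { free (p); }

    static void
    table_grow (struct table **published, size_t newsize)
    {
      struct table *old = *published;
      struct table *new_t = malloc (sizeof (struct table));

      new_t->size = newsize;
      new_t->slots = calloc (newsize, sizeof (void *));
      memcpy (new_t->slots, old->slots, old->size * sizeof (void *));

      /* Single pointer store: a concurrent reader sees either the old
         or the new table, never a half-updated one.  */
      *published = new_t;

      deferred_free (old->slots);
      deferred_free (old);
    }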
@ -382,7 +383,6 @@ sarray_realloc (struct sarray *array, int newsize)
/* Free a sparse array allocated with sarray_new */

void
sarray_free (struct sarray *array) {
#ifdef OBJC_SPARSE3
@ -405,75 +405,77 @@ sarray_free (struct sarray *array) {
  old_buckets = array->buckets;
#endif

  /* Free all entries that do not point to empty_bucket */
  for (counter = 0; counter <= old_max_index; counter++ ) {
  /* Free all entries that do not point to empty_bucket. */
  for (counter = 0; counter <= old_max_index; counter++ )
    {
#ifdef OBJC_SPARSE3
    struct sindex *idx = old_indices[counter];
    if ((idx != array->empty_index) &&
	(idx->version.version == array->version.version)) {
      int c2;
      for (c2 = 0; c2 < INDEX_SIZE; c2++) {
	struct sbucket *bkt = idx->buckets[c2];
	if ((bkt != array->empty_bucket) &&
	    (bkt->version.version == array->version.version))
	  {
	    sarray_free_garbage (bkt);
	    nbuckets -= 1;
	  }
      }
      sarray_free_garbage (idx);
      struct sindex *idx = old_indices[counter];
      if ((idx != array->empty_index)
	  && (idx->version.version == array->version.version))
	{
	  int c2;
	  for (c2 = 0; c2 < INDEX_SIZE; c2++)
	    {
	      struct sbucket *bkt = idx->buckets[c2];
	      if ((bkt != array->empty_bucket)
		  && (bkt->version.version == array->version.version))
		{
		  sarray_free_garbage (bkt);
		  nbuckets -= 1;
		}
	    }
	  sarray_free_garbage (idx);
	  nindices -= 1;
	}
#else /* OBJC_SPARSE2 */
      struct sbucket *bkt = old_buckets[counter];
      if ((bkt != array->empty_bucket)
	  && (bkt->version.version == array->version.version))
	{
	  sarray_free_garbage (bkt);
	  nbuckets -= 1;
	}
#endif
    }

#ifdef OBJC_SPARSE3
  /* Free empty_index. */
  if (array->empty_index->version.version == array->version.version)
    {
      sarray_free_garbage (array->empty_index);
      nindices -= 1;
    }
#else /* OBJC_SPARSE2 */
    struct sbucket *bkt = old_buckets[counter];
    if ((bkt != array->empty_bucket) &&
	(bkt->version.version == array->version.version))
      {
	sarray_free_garbage (bkt);
	nbuckets -= 1;
      }
#endif
  }

#ifdef OBJC_SPARSE3
  /* free empty_index */
  if (array->empty_index->version.version == array->version.version) {
    sarray_free_garbage (array->empty_index);
    nindices -= 1;
  }
#endif

  /* free empty_bucket */
  if (array->empty_bucket->version.version == array->version.version) {
    sarray_free_garbage (array->empty_bucket);
    nbuckets -= 1;
  }
  /* Free empty_bucket. */
  if (array->empty_bucket->version.version == array->version.version)
    {
      sarray_free_garbage (array->empty_bucket);
      nbuckets -= 1;
    }
  idxsize -= (old_max_index + 1);
  narrays -= 1;

#ifdef OBJC_SPARSE3
  /* free bucket table */
  /* Free bucket table. */
  sarray_free_garbage (array->indices);

#else
  /* free bucket table */
  /* Free bucket table. */
  sarray_free_garbage (array->buckets);

#endif

  /* If this is a copy of another array, we free it (which might just
   * decrement its reference count so it will be freed when no longer in use).
   */
     decrement its reference count so it will be freed when no longer
     in use). */
  if (array->is_copy_of)
    sarray_free (array->is_copy_of);

  /* free array */
  /* Free array. */
  sarray_free_garbage (array);
}

/* This is a lazy copy. Only the core of the structure is actually */
/* copied. */

/* This is a lazy copy. Only the core of the structure is actually
   copied. */
struct sarray *
sarray_lazy_copy (struct sarray *oarr)
{

@ -487,7 +489,7 @@ sarray_lazy_copy (struct sarray *oarr)

  struct sbucket **new_buckets;
#endif

  /* Allocate core array */
  /* Allocate core array. */
  arr = (struct sarray *) objc_malloc (sizeof (struct sarray)); /* !!! */
  arr->version.version = oarr->version.version + 1;
#ifdef OBJC_SPARSE3

@ -500,13 +502,13 @@ sarray_lazy_copy (struct sarray *oarr)

  arr->capacity = oarr->capacity;

#ifdef OBJC_SPARSE3
  /* Copy bucket table */
  /* Copy bucket table. */
  new_indices = (struct sindex **)
    objc_malloc (sizeof (struct sindex *) * num_indices);
  memcpy (new_indices, oarr->indices, sizeof (struct sindex *) * num_indices);
  arr->indices = new_indices;
#else
  /* Copy bucket table */
  /* Copy bucket table. */
  new_buckets = (struct sbucket **)
    objc_malloc (sizeof (struct sbucket *) * num_indices);
  memcpy (new_buckets, oarr->buckets, sizeof (struct sbucket *) * num_indices);
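sarray_lazy_copy, whose tail is shown above, is what makes sharing dispatch tables cheap: only the top-level table and the version number are duplicated, and every bucket stays shared until a write with a mismatching version triggers the per-bucket copy from the first hunk of this file. A hedged usage sketch; the objc-private/sarray.h header name is an assumption, while the functions and signatures are the ones visible in this diff:

    /* Assumed include; the declarations live in libobjc's private headers. */
    #include "objc-private/sarray.h"

    void
    example (void)
    {
      /* 64 slots, NULL as the default element (see sarray_new above).  */
      struct sarray *orig = sarray_new (64, NULL);

      /* Shares every bucket/index with `orig'; only the top-level table
         is copied and the version number bumped.  */
      struct sarray *copy = sarray_lazy_copy (orig);

      /* A later sarray_at_put() on `copy' would duplicate just the
         bucket it touches, because of the version mismatch.  */

      /* sarray_free() on the copy also releases the reference the copy
         holds on `orig' (see the is_copy_of handling above).  */
      sarray_free (copy);
    }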
libobjc/thr.c
@ -47,24 +47,23 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
/* Global exit status. */
int __objc_thread_exit_status = 0;

/* Flag which lets us know if we ever became multi threaded */
/* Flag which lets us know if we ever became multi threaded. */
int __objc_is_multi_threaded = 0;

/* The hook function called when the runtime becomes multi threaded */
/* The hook function called when the runtime becomes multi
   threaded. */
objc_thread_callback _objc_became_multi_threaded = NULL;

/*
  Use this to set the hook function that will be called when the
  runtime initially becomes multi threaded.
  The hook function is only called once, meaning only when the
  2nd thread is spawned, not for each and every thread.
/* Use this to set the hook function that will be called when the
   runtime initially becomes multi threaded. The hook function is
   only called once, meaning only when the 2nd thread is spawned, not
   for each and every thread.

  It returns the previous hook function or NULL if there is none.
   It returns the previous hook function or NULL if there is none.

  A program outside of the runtime could set this to some function so
  it can be informed; for example, the GNUstep Base Library sets it
  so it can implement the NSBecomingMultiThreaded notification.
*/
   A program outside of the runtime could set this to some function so
   it can be informed; for example, the GNUstep Base Library sets it
   so it can implement the NSBecomingMultiThreaded notification. */
objc_thread_callback objc_set_thread_callback (objc_thread_callback func)
{
  objc_thread_callback temp = _objc_became_multi_threaded;

@ -72,26 +71,22 @@ objc_thread_callback objc_set_thread_callback (objc_thread_callback func)

  return temp;
}
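Given the hook machinery above, installing a callback is a one-liner. A sketch, assuming GCC's installed objc/thr.h header declares objc_set_thread_callback as the code above defines it:

    #include <objc/thr.h>

    static void
    became_multi_threaded (void)
    {
      /* Runs exactly once, when the second thread starts; GNUstep Base
         uses a hook like this for NSBecomingMultiThreaded.  */
    }

    void
    install_hook (void)
    {
      /* The previous hook (or NULL) is returned; chain to it if another
         library installed its own notification first.  */
      objc_thread_callback previous
        = objc_set_thread_callback (became_multi_threaded);
      (void) previous;
    }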
/*
  Private functions
/* Private functions.

   These functions are utilized by the runtime, but they are not
   considered part of the public interface. */

  These functions are utilized by the frontend, but they are not
  considered part of the public interface.
*/

/* Initialize the threads subsystem. */
/* Initialize the threads subsystem. */
int
__objc_init_thread_system(void)
{
  return __gthread_objc_init_thread_system ();
}

/*
  First function called in a thread, starts everything else.
/* First function called in a thread, starts everything else.

  This function is passed to the backend by objc_thread_detach
  as the starting function for a new thread.
*/
   This function is passed to the backend by objc_thread_detach as the
   starting function for a new thread. */
struct __objc_thread_start_state
{
  SEL selector;
@ -103,459 +98,419 @@ static void __attribute__((noreturn))
__objc_thread_detach_function (struct __objc_thread_start_state *istate)
{
  /* Valid state? */
  if (istate) {
    id (*imp) (id, SEL, id);
    SEL selector = istate->selector;
    id object = istate->object;
    id argument = istate->argument;
  if (istate)
    {
      id (*imp) (id, SEL, id);
      SEL selector = istate->selector;
      id object = istate->object;
      id argument = istate->argument;

      /* Don't need anymore so free it. */
      objc_free (istate);

    /* Don't need anymore so free it */
    objc_free (istate);

    /* Clear out the thread local storage */
    objc_thread_set_data (NULL);

    /* Check to see if we just became multi threaded */
    if (! __objc_is_multi_threaded)
      {
	__objc_is_multi_threaded = 1;

	/* Call the hook function */
	if (_objc_became_multi_threaded != NULL)
	  (*_objc_became_multi_threaded) ();
      }

    /* Call the method */
    if ((imp = (id (*) (id, SEL, id))objc_msg_lookup (object, selector)))
      /* Clear out the thread local storage. */
      objc_thread_set_data (NULL);

      /* Check to see if we just became multi threaded. */
      if (! __objc_is_multi_threaded)
	{
	  __objc_is_multi_threaded = 1;

	  /* Call the hook function. */
	  if (_objc_became_multi_threaded != NULL)
	    (*_objc_became_multi_threaded) ();
	}

      /* Call the method. */
      if ((imp = (id (*) (id, SEL, id))objc_msg_lookup (object, selector)))
	(*imp) (object, selector, argument);
      else
	{
	  /* FIXME: Should we abort here ? */
	  _objc_abort ("objc_thread_detach called with bad selector.\n");
	}
    }
  else
    {
      /* FIXME: Should we abort here ? */
      _objc_abort ("objc_thread_detach called with bad selector.\n");
    }
  }
  else
    {
      /* FIXME: Should we abort here ? */
      _objc_abort ("objc_thread_detach called with NULL state.\n");
    }

  /* Exit the thread */

  /* Exit the thread. */
  objc_thread_exit ();

  /* Make sure compiler detects no return. */
  __builtin_trap ();
}

/*
  Frontend functions
/* Public functions.

  These functions constitute the public interface to the Objective-C thread
  and mutex functionality.
*/
   These functions constitute the public interface to the Objective-C
   thread and mutex functionality. */

/* Frontend thread functions */

/*
  Detach a new thread of execution and return its id. Returns NULL if fails.
  Thread is started by sending message with selector to object. Message
  takes a single argument.
*/
/* Detach a new thread of execution and return its id. Returns NULL
   if fails. Thread is started by sending message with selector to
   object. Message takes a single argument. */
objc_thread_t
objc_thread_detach (SEL selector, id object, id argument)
{
  struct __objc_thread_start_state *istate;
  objc_thread_t thread_id = NULL;

  /* Allocate the state structure */
  if (! (istate = (struct __objc_thread_start_state *)
	 objc_malloc (sizeof (*istate))))
  /* Allocate the state structure. */
  if (!(istate = (struct __objc_thread_start_state *)objc_malloc
	(sizeof (*istate))))
    return NULL;

  /* Initialize the state structure */

  /* Initialize the state structure. */
  istate->selector = selector;
  istate->object = object;
  istate->argument = argument;

  /* lock access */
  /* Lock access. */
  objc_mutex_lock (__objc_runtime_mutex);

  /* Call the backend to spawn the thread */
  /* Call the backend to spawn the thread. */
  if ((thread_id = __gthread_objc_thread_detach ((void *)__objc_thread_detach_function,
						 istate)) == NULL)
    {
      /* failed! */
      /* Failed! */
      objc_mutex_unlock (__objc_runtime_mutex);
      objc_free (istate);
      return NULL;
    }

  /* Increment our thread counter */
  /* Increment our thread counter. */
  __objc_runtime_threads_alive++;
  objc_mutex_unlock (__objc_runtime_mutex);

  return thread_id;
}
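Putting objc_thread_detach together with the start-state plumbing above, spawning a thread that sends one message looks like the sketch below (objc/runtime.h providing sel_registerName and objc/thr.h providing the thread API are assumptions about the installed headers):

    #include <objc/runtime.h>
    #include <objc/thr.h>

    /* Start a thread that sends -performTask: to `worker' with `arg'.  */
    objc_thread_t
    start_worker (id worker, id arg)
    {
      SEL selector = sel_registerName ("performTask:");
      objc_thread_t tid = objc_thread_detach (selector, worker, arg);

      /* NULL means the state allocation or the backend spawn failed,
         and everything has already been cleaned up (see above).  */
      return tid;
    }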
/* Set the current thread's priority. */
/* Set the current thread's priority. */
int
objc_thread_set_priority (int priority)
{
  /* Call the backend */
  return __gthread_objc_thread_set_priority (priority);
}

/* Return the current thread's priority. */
/* Return the current thread's priority. */
int
objc_thread_get_priority (void)
{
  /* Call the backend */
  return __gthread_objc_thread_get_priority ();
}

/*
  Yield our process time to another thread. Any BUSY waiting that is done
  by a thread should use this function to make sure that other threads can
  make progress even on a lazy uniprocessor system.
*/
/* Yield our process time to another thread. Any BUSY waiting that is
   done by a thread should use this function to make sure that other
   threads can make progress even on a lazy uniprocessor system. */
void
objc_thread_yield (void)
{
  /* Call the backend */
  __gthread_objc_thread_yield ();
}

/*
  Terminate the current tread. Doesn't return.
  Actually, if it failed returns -1.
*/
/* Terminate the current tread. Doesn't return. Actually, if it
   failed returns -1. */
int
objc_thread_exit (void)
{
  /* Decrement our counter of the number of threads alive */
  /* Decrement our counter of the number of threads alive. */
  objc_mutex_lock (__objc_runtime_mutex);
  __objc_runtime_threads_alive--;
  objc_mutex_unlock (__objc_runtime_mutex);

  /* Call the backend to terminate the thread */
  /* Call the backend to terminate the thread. */
  return __gthread_objc_thread_exit ();
}

/*
  Returns an integer value which uniquely describes a thread. Must not be
  NULL which is reserved as a marker for "no thread".
*/
/* Returns an integer value which uniquely describes a thread. Must
   not be NULL which is reserved as a marker for "no thread". */
objc_thread_t
objc_thread_id (void)
{
  /* Call the backend */
  return __gthread_objc_thread_id ();
}

/*
  Sets the thread's local storage pointer.
  Returns 0 if successful or -1 if failed.
*/
/* Sets the thread's local storage pointer. Returns 0 if successful
   or -1 if failed. */
int
objc_thread_set_data (void *value)
{
  /* Call the backend */
  return __gthread_objc_thread_set_data (value);
}

/*
  Returns the thread's local storage pointer. Returns NULL on failure.
*/
/* Returns the thread's local storage pointer. Returns NULL on
   failure. */
void *
objc_thread_get_data (void)
{
  /* Call the backend */
  return __gthread_objc_thread_get_data ();
}

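The thread-local storage pair above stores a single opaque pointer per thread; a typical use is stashing a per-thread context struct. A sketch with invented names (worker_ctx, stash_context, fetch_context):

    #include <objc/thr.h>

    struct worker_ctx { int id; void *scratch; };

    int
    stash_context (struct worker_ctx *ctx)
    {
      /* 0 on success, -1 on failure, exactly as documented above.  */
      return objc_thread_set_data (ctx);
    }

    struct worker_ctx *
    fetch_context (void)
    {
      /* NULL if nothing was stored (or on failure).  */
      return (struct worker_ctx *) objc_thread_get_data ();
    }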
/* Frontend mutex functions */
/* Public mutex functions */

/*
  Allocate a mutex. Return the mutex pointer if successful or NULL if the
  allocation failed for any reason.
*/
/* Allocate a mutex. Return the mutex pointer if successful or NULL
   if the allocation failed for any reason. */
objc_mutex_t
objc_mutex_allocate (void)
{
  objc_mutex_t mutex;

  /* Allocate the mutex structure */
  /* Allocate the mutex structure. */
  if (! (mutex = (objc_mutex_t)objc_malloc (sizeof (struct objc_mutex))))
    return NULL;

  /* Call backend to create the mutex */
  /* Call backend to create the mutex. */
  if (__gthread_objc_mutex_allocate (mutex))
    {
      /* failed! */
      /* Failed! */
      objc_free (mutex);
      return NULL;
    }

  /* Initialize mutex */
  /* Initialize mutex. */
  mutex->owner = NULL;
  mutex->depth = 0;
  return mutex;
}

/*
  Deallocate a mutex. Note that this includes an implicit mutex_lock to
  insure that no one else is using the lock. It is legal to deallocate
  a lock if we have a lock on it, but illegal to deallocate a lock held
  by anyone else.
  Returns the number of locks on the thread. (1 for deallocate).
*/
/* Deallocate a mutex. Note that this includes an implicit mutex_lock
   to insure that no one else is using the lock. It is legal to
   deallocate a lock if we have a lock on it, but illegal to
   deallocate a lock held by anyone else. Returns the number of locks
   on the thread. (1 for deallocate). */
int
objc_mutex_deallocate (objc_mutex_t mutex)
{
  int depth;

  /* Valid mutex? */
  /* Valid mutex? */
  if (! mutex)
    return -1;

  /* Acquire lock on mutex */
  /* Acquire lock on mutex. */
  depth = objc_mutex_lock (mutex);

  /* Call backend to destroy mutex */
  /* Call backend to destroy mutex. */
  if (__gthread_objc_mutex_deallocate (mutex))
    return -1;

  /* Free the mutex structure */
  /* Free the mutex structure. */
  objc_free (mutex);

  /* Return last depth */
  /* Return last depth. */
  return depth;
}

/*
  Grab a lock on a mutex. If this thread already has a lock on this mutex
  then we increment the lock count. If another thread has a lock on the
  mutex we block and wait for the thread to release the lock.
  Returns the lock count on the mutex held by this thread.
*/
/* Grab a lock on a mutex. If this thread already has a lock on this
   mutex then we increment the lock count. If another thread has a
   lock on the mutex we block and wait for the thread to release the
   lock. Returns the lock count on the mutex held by this thread. */
int
objc_mutex_lock (objc_mutex_t mutex)
{
  objc_thread_t thread_id;
  int status;

  /* Valid mutex? */
  /* Valid mutex? */
  if (! mutex)
    return -1;

  /* If we already own the lock then increment depth */
  /* If we already own the lock then increment depth. */
  thread_id = __gthread_objc_thread_id ();
  if (mutex->owner == thread_id)
    return ++mutex->depth;

  /* Call the backend to lock the mutex */
  /* Call the backend to lock the mutex. */
  status = __gthread_objc_mutex_lock (mutex);

  /* Failed? */
  /* Failed? */
  if (status)
    return status;

  /* Successfully locked the thread */
  /* Successfully locked the thread. */
  mutex->owner = thread_id;
  return mutex->depth = 1;
}

/*
  Try to grab a lock on a mutex. If this thread already has a lock on
  this mutex then we increment the lock count and return it. If another
  thread has a lock on the mutex returns -1.
*/
/* Try to grab a lock on a mutex. If this thread already has a lock
   on this mutex then we increment the lock count and return it. If
   another thread has a lock on the mutex returns -1. */
int
objc_mutex_trylock (objc_mutex_t mutex)
{
  objc_thread_t thread_id;
  int status;

  /* Valid mutex? */
  /* Valid mutex? */
  if (! mutex)
    return -1;

  /* If we already own the lock then increment depth */
  /* If we already own the lock then increment depth. */
  thread_id = __gthread_objc_thread_id ();
  if (mutex->owner == thread_id)
    return ++mutex->depth;

  /* Call the backend to try to lock the mutex */
  /* Call the backend to try to lock the mutex. */
  status = __gthread_objc_mutex_trylock (mutex);

  /* Failed? */
  /* Failed? */
  if (status)
    return status;

  /* Successfully locked the thread */
  /* Successfully locked the thread. */
  mutex->owner = thread_id;
  return mutex->depth = 1;
}

/*
  Unlocks the mutex by one level.
  Decrements the lock count on this mutex by one.
  If the lock count reaches zero, release the lock on the mutex.
  Returns the lock count on the mutex.
  It is an error to attempt to unlock a mutex which this thread
  doesn't hold in which case return -1 and the mutex is unaffected.
*/
/* Unlocks the mutex by one level. Decrements the lock count on this
   mutex by one. If the lock count reaches zero, release the lock on
   the mutex. Returns the lock count on the mutex. It is an error to
   attempt to unlock a mutex which this thread doesn't hold in which
   case return -1 and the mutex is unaffected. */
int
objc_mutex_unlock (objc_mutex_t mutex)
{
  objc_thread_t thread_id;
  int status;

  /* Valid mutex? */
  /* Valid mutex? */
  if (! mutex)
    return -1;

  /* If another thread owns the lock then abort */
  /* If another thread owns the lock then abort. */
  thread_id = __gthread_objc_thread_id ();
  if (mutex->owner != thread_id)
    return -1;

  /* Decrement depth and return */
  /* Decrement depth and return. */
  if (mutex->depth > 1)
    return --mutex->depth;

  /* Depth down to zero so we are no longer the owner */
  /* Depth down to zero so we are no longer the owner. */
  mutex->depth = 0;
  mutex->owner = NULL;

  /* Have the backend unlock the mutex */
  /* Have the backend unlock the mutex. */
  status = __gthread_objc_mutex_unlock (mutex);

  /* Failed? */
  /* Failed? */
  if (status)
    return status;

  return 0;
}
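The owner/depth fields make these mutexes recursive: re-locking by the owning thread only bumps depth, and the backend mutex is released only when depth returns to zero. A sketch of the resulting call pattern, with return values as documented in the functions above:

    #include <objc/thr.h>

    void
    recursive_lock_demo (void)
    {
      objc_mutex_t m = objc_mutex_allocate ();

      objc_mutex_lock (m);        /* returns 1: first acquisition          */
      objc_mutex_lock (m);        /* returns 2: owner re-entry, no block   */
      objc_mutex_unlock (m);      /* depth back to 1, still owned          */
      objc_mutex_unlock (m);      /* depth 0, backend mutex released       */

      objc_mutex_deallocate (m);  /* implicit lock; returns last depth (1) */
    }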
/* Frontend condition mutex functions */
/* Public condition mutex functions */

/*
  Allocate a condition. Return the condition pointer if successful or NULL
  if the allocation failed for any reason.
*/
/* Allocate a condition. Return the condition pointer if successful
   or NULL if the allocation failed for any reason. */
objc_condition_t
objc_condition_allocate (void)
{
  objc_condition_t condition;

  /* Allocate the condition mutex structure */
  /* Allocate the condition mutex structure. */
  if (! (condition =
	 (objc_condition_t) objc_malloc (sizeof (struct objc_condition))))
    return NULL;

  /* Call the backend to create the condition mutex */
  /* Call the backend to create the condition mutex. */
  if (__gthread_objc_condition_allocate (condition))
    {
      /* failed! */
      /* Failed! */
      objc_free (condition);
      return NULL;
    }

  /* Success! */
  /* Success! */
  return condition;
}

/*
  Deallocate a condition. Note that this includes an implicit
  condition_broadcast to insure that waiting threads have the opportunity
  to wake. It is legal to dealloc a condition only if no other
  thread is/will be using it. Here we do NOT check for other threads
  waiting but just wake them up.
*/
/* Deallocate a condition. Note that this includes an implicit
   condition_broadcast to insure that waiting threads have the
   opportunity to wake. It is legal to dealloc a condition only if no
   other thread is/will be using it. Here we do NOT check for other
   threads waiting but just wake them up. */
int
objc_condition_deallocate (objc_condition_t condition)
{
  /* Broadcast the condition */
  /* Broadcast the condition. */
  if (objc_condition_broadcast (condition))
    return -1;

  /* Call the backend to destroy */
  /* Call the backend to destroy. */
  if (__gthread_objc_condition_deallocate (condition))
    return -1;

  /* Free the condition mutex structure */
  /* Free the condition mutex structure. */
  objc_free (condition);

  return 0;
}

/*
  Wait on the condition unlocking the mutex until objc_condition_signal ()
  or objc_condition_broadcast () are called for the same condition. The
  given mutex *must* have the depth set to 1 so that it can be unlocked
  here, so that someone else can lock it and signal/broadcast the condition.
  The mutex is used to lock access to the shared data that make up the
  "condition" predicate.
*/
/* Wait on the condition unlocking the mutex until
   objc_condition_signal () or objc_condition_broadcast () are called
   for the same condition. The given mutex *must* have the depth set
   to 1 so that it can be unlocked here, so that someone else can lock
   it and signal/broadcast the condition. The mutex is used to lock
   access to the shared data that make up the "condition"
   predicate. */
int
objc_condition_wait (objc_condition_t condition, objc_mutex_t mutex)
{
  objc_thread_t thread_id;

  /* Valid arguments? */
  /* Valid arguments? */
  if (! mutex || ! condition)
    return -1;

  /* Make sure we are owner of mutex */
  /* Make sure we are owner of mutex. */
  thread_id = __gthread_objc_thread_id ();
  if (mutex->owner != thread_id)
    return -1;

  /* Cannot be locked more than once */
  /* Cannot be locked more than once. */
  if (mutex->depth > 1)
    return -1;

  /* Virtually unlock the mutex */
  /* Virtually unlock the mutex. */
  mutex->depth = 0;
  mutex->owner = (objc_thread_t)NULL;

  /* Call the backend to wait */
  /* Call the backend to wait. */
  __gthread_objc_condition_wait (condition, mutex);

  /* Make ourselves owner of the mutex */
  /* Make ourselves owner of the mutex. */
  mutex->owner = thread_id;
  mutex->depth = 1;

  return 0;
}

/*
  Wake up all threads waiting on this condition. It is recommended that
  the called would lock the same mutex as the threads in objc_condition_wait
  before changing the "condition predicate" and make this call and unlock it
  right away after this call.
*/
/* Wake up all threads waiting on this condition. It is recommended
   that the called would lock the same mutex as the threads in
   objc_condition_wait before changing the "condition predicate" and
   make this call and unlock it right away after this call. */
int
objc_condition_broadcast (objc_condition_t condition)
{
  /* Valid condition mutex? */
  /* Valid condition mutex? */
  if (! condition)
    return -1;

  return __gthread_objc_condition_broadcast (condition);
}

/*
  Wake up one thread waiting on this condition. It is recommended that
  the called would lock the same mutex as the threads in objc_condition_wait
  before changing the "condition predicate" and make this call and unlock it
  right away after this call.
*/
/* Wake up one thread waiting on this condition. It is recommended
   that the called would lock the same mutex as the threads in
   objc_condition_wait before changing the "condition predicate" and
   make this call and unlock it right away after this call. */
int
objc_condition_signal (objc_condition_t condition)
{
  /* Valid condition mutex? */
  /* Valid condition mutex? */
  if (! condition)
    return -1;
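objc_condition_wait requires the mutex depth to be exactly 1, as documented above, which gives the classic predicate-loop shape. A sketch, where the `ready' flag and function names are invented for illustration:

    #include <objc/thr.h>

    static objc_mutex_t lock;      /* from objc_mutex_allocate()     */
    static objc_condition_t cond;  /* from objc_condition_allocate() */
    static int ready;

    void
    waiter (void)
    {
      objc_mutex_lock (lock);
      while (!ready)                        /* Re-check after every wakeup. */
        objc_condition_wait (cond, lock);   /* Unlocks, sleeps, re-locks.   */
      objc_mutex_unlock (lock);
    }

    void
    notifier (void)
    {
      objc_mutex_lock (lock);               /* Same mutex as the waiters.  */
      ready = 1;
      objc_condition_signal (cond);         /* Or _broadcast to wake all.  */
      objc_mutex_unlock (lock);
    }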
@ -591,4 +546,3 @@ objc_thread_remove (void)
  objc_mutex_unlock (__objc_runtime_mutex);
}

/* End of File */