Alchemy Viewer / Commits / 39638b0d

Commit 39638b0d, authored 12 years ago by simon

    Convert LLThreadSafeRefCount back to atomic ref counting. Reviewed by Kelly

Parent: b49f6e1e
Showing 1 changed file with 9 additions and 81 deletions:

indra/llcommon/llthread.h (+9, −81)
@@ -225,7 +225,6 @@ void LLThread::unlockData()
 // see llmemory.h for LLPointer<> definition
-#if (1) // Old code - see comment below
 class LL_COMMON_API LLThreadSafeRefCount
 {
 public:
@@ -243,99 +242,28 @@ class LL_COMMON_API LLThreadSafeRefCount
 	LLThreadSafeRefCount(const LLThreadSafeRefCount&);
 	LLThreadSafeRefCount& operator=(const LLThreadSafeRefCount& ref)
 	{
-		if (sMutex)
-		{
-			sMutex->lock();
-		}
 		mRef = 0;
-		if (sMutex)
-		{
-			sMutex->unlock();
-		}
 		return *this;
 	}
 
 	void ref()
 	{
-		if (sMutex) sMutex->lock();
 		mRef++;
-		if (sMutex) sMutex->unlock();
 	}
 
-	S32 unref()
+	void unref()
 	{
 		llassert(mRef >= 1);
-		if (sMutex) sMutex->lock();
-		S32 res = --mRef;
-		if (sMutex) sMutex->unlock();
-		if (0 == res)
-		{
-			delete this;
-			return 0;
+		if ((--mRef) == 0)	// See note in llapr.h on atomic decrement operator return value.
+		{
+			// If we hit zero, the caller should be the only smart pointer owning the object and we can delete it.
+			// It is technically possible for a vanilla pointer to mess this up, or another thread to
+			// jump in, find this object, create another smart pointer and end up dangling, but if
+			// the code is that bad and not thread-safe, it's trouble already.
+			delete this;
 		}
-		return res;
 	}
 
-	S32 getNumRefs() const
-	{
-		return mRef;
-	}
-
-private:
-	S32 mRef;
-};
-
-#else
-// New code - This was from https://bitbucket.org/lindenlab/viewer-cat/commits/b03bb43e4ead57f904cb3c1e9745dc8460de6efc
-// and attempts
-class LL_COMMON_API LLThreadSafeRefCount
-{
-public:
-	static void initThreadSafeRefCount();		// creates sMutex
-	static void cleanupThreadSafeRefCount();	// destroys sMutex
-
-private:
-	static LLMutex* sMutex;
-
-protected:
-	virtual ~LLThreadSafeRefCount();	// use unref()
-
-public:
-	LLThreadSafeRefCount();
-	LLThreadSafeRefCount(const LLThreadSafeRefCount&);
-	LLThreadSafeRefCount& operator=(const LLThreadSafeRefCount& ref)
-	{
-		mRef = 0;
-		return *this;
-	}
-
-	void ref()
-	{
-		mRef++;
-	}
-
-	S32 unref()
-	{
-		llassert(mRef >= 1);
-		bool time_to_die = (mRef == 1);
-		if (time_to_die)
-		{
-			if (sMutex) sMutex->lock();
-			// Looks redundant, but is very much not
-			// We need to check again once we've acquired the lock
-			// so that two threads who get into the if in parallel
-			// don't both attempt to the delete.
-			//
-			mRef--;
-			// Simon: why not if (mRef == 1) delete this; ? There still seems to be a window where mRef could be modified
-			if (mRef == 0)
-				delete this;
-			if (sMutex) sMutex->unlock();
-			return 0;
-		}
-		return --mRef;
-	}
-
 	S32 getNumRefs() const
 	{
 		const S32 currentVal = mRef.CurrentValue();
@@ -345,7 +273,7 @@ class LL_COMMON_API LLThreadSafeRefCount
 private:
 	LLAtomic32< S32 > mRef;
 };
-#endif // new code
 
 /**
  * intrusive pointer support for LLThreadSafeRefCount
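The retained implementation leans on one property of atomic decrements: only a single thread can observe the counter reaching zero, so the zero test and the delete need no lock. As a rough illustration of that idiom outside the viewer codebase, here is a minimal sketch using plain std::atomic rather than LLAtomic32; the RefCounted and Widget names are invented for this sketch and are not part of the commit or of llthread.h.

// Minimal sketch of atomic intrusive reference counting (illustrative only, not viewer code).
#include <atomic>
#include <cstdio>

class RefCounted
{
public:
    void ref()
    {
        // Taking a reference needs no ordering guarantees, so relaxed is enough.
        mRef.fetch_add(1, std::memory_order_relaxed);
    }

    void unref()
    {
        // fetch_sub returns the value *before* the decrement, so exactly one
        // caller sees 1 here. That caller is the last owner and may delete the
        // object, mirroring the "if ((--mRef) == 0) delete this;" branch in the diff.
        if (mRef.fetch_sub(1, std::memory_order_acq_rel) == 1)
        {
            delete this;
        }
    }

protected:
    virtual ~RefCounted() = default;   // force deletion to go through unref()

private:
    std::atomic<int> mRef{0};
};

struct Widget : public RefCounted
{
    void hello() const { std::puts("still alive"); }
};

int main()
{
    Widget* w = new Widget();
    w->ref();        // first owner
    w->ref();        // second owner
    w->hello();
    w->unref();      // count 2 -> 1, object survives
    w->unref();      // count 1 -> 0, object deletes itself
    return 0;
}

Because the decrement and the zero test are one indivisible read-modify-write, there is no window between checking the count and deleting, which is the race the time_to_die variant in the removed #else block guarded with a mutex and a second check.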