
List:       helix-common-cvs
Subject:    [Common-cvs] include atomicbase.h,1.52,1.53
From:       dcollins () helixcommunity ! org
Date:       2010-10-29 15:12:54
Message-ID: 201010291513.o9TFDdWU007332 () mailer ! progressive-comp ! com

Update of /cvsroot/common/include
In directory cvs01.internal.helixcommunity.org:/tmp/cvs-serv5857

Modified Files:
	atomicbase.h 
Log Message:
Synopsis
========
Adds 64-bit atomic operators for the server's 64-bit platforms

Branches: HEAD
Reviewer: Chytanya


Description
===========

This adds 64-bit atomic operators for Linux/x86_64, Windows/x86_64 and
Solaris SPARC/64-bit server builds.  These are primarily needed to
implement certain RSS counters in the server.  Most counters will
continue to be 32-bit, but a few need to be expanded (which will be in
separate CRs).

The Solaris and Windows platforms use their respective systems' intrinsic
routines; Linux uses inline assembly.
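
For illustration, here is a minimal sketch of how a 64-bit server counter
might use the new operators.  The counter and the surrounding functions are
hypothetical (not part of this change); only the HXAtomic*UINT64 operators
and atomicbase.h come from this commit:

    #include "hxtypes.h"      // UINT32/UINT64 typedefs (assumed include)
    #include "atomicbase.h"   // provides the HXAtomic* operators

    static UINT64 g_ullBytesServed = 0;   // hypothetical 64-bit counter

    // Called concurrently from many threads; the add is performed
    // atomically on the supported 64-bit platforms.
    void OnPacketSent(UINT32 ulPacketSize)
    {
        HXAtomicAddUINT64(&g_ullBytesServed, (UINT64)ulPacketSize);
    }

    // Returns the value after incrementing, e.g. for a sequence number.
    UINT64 NextSequenceNumber(UINT64* pCounter)
    {
        return HXAtomicIncRetUINT64(pCounter);
    }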


Files Affected
==============

common/include/atomicbase.h


Testing Performed
=================

Unit Tests:
- N/A

Integration Tests:
- Tested with a driver that extensively exercises each operator in various ways
  (a hypothetical sketch of this kind of check appears after this section)


Leak Tests:
- N/A

Performance Tests:
- N/A

Platforms Tested: linux-rhel5-x86_64
Builds Verified: linux-rhel5-x86_64, sunos-5.10-sparc64, win-x86_64-vc10
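
For reference, a hypothetical sketch of the kind of check such a driver
performs (not the actual test code): several threads each atomically
increment a shared UINT64, and the final value must equal the exact total,
which a plain non-atomic increment would not guarantee.

    #include <pthread.h>
    #include <stdio.h>
    #include "hxtypes.h"      // UINT64 typedef (assumed include)
    #include "atomicbase.h"

    #define NUM_THREADS 8
    #define ITERATIONS  1000000

    static UINT64 g_ullCounter = 0;

    static void* worker(void* arg)
    {
        int i;
        (void)arg;
        for (i = 0; i < ITERATIONS; i++)
            HXAtomicIncUINT64(&g_ullCounter);   // atomic increment under test
        return NULL;
    }

    int main(void)
    {
        pthread_t threads[NUM_THREADS];
        int i;
        for (i = 0; i < NUM_THREADS; i++)
            pthread_create(&threads[i], NULL, worker, NULL);
        for (i = 0; i < NUM_THREADS; i++)
            pthread_join(threads[i], NULL);

        printf("expected %llu, got %llu\n",
               (unsigned long long)NUM_THREADS * ITERATIONS,
               (unsigned long long)g_ullCounter);
        return (g_ullCounter == (UINT64)NUM_THREADS * ITERATIONS) ? 0 : 1;
    }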


QA Hints
========
- N/A


Index: atomicbase.h
===================================================================
RCS file: /cvsroot/common/include/atomicbase.h,v
retrieving revision 1.52
retrieving revision 1.53
diff -u -d -r1.52 -r1.53
--- atomicbase.h	17 Sep 2010 18:04:45 -0000	1.52
+++ atomicbase.h	29 Oct 2010 15:12:52 -0000	1.53
@@ -93,7 +93,7 @@
  ***********************************************************************
  *
  * TODO:
- *   Add INT64 versions
+ *   Add INT64 versions for more platforms which can support this.
  *   Obsolete the 0x80000000-based Solaris implementation entirely.
  *
  ***********************************************************************/
@@ -105,7 +105,8 @@
  * Sun Solaris / SPARC (Native compiler)
  *
  * Implementation Notes:
- * This uses inline assembly from server/common/util/platform/solaris/atomicops.il
+ * This uses inline assembly from common/util/platform/solaris/atomicops.il
+ * (formerly in server/common/util/platform/solaris/atomicops.il)
  * Note: Sparc/gcc is in include/atomicbase.h
  */
 #if defined (_SOLARIS) && !defined (__GNUC__) && !defined (_SOLARISX86)
@@ -140,6 +141,26 @@
 inline INT32 HXAtomicAddRetINT32(INT32* p, INT32 n) { return HXAtomicAddRetUINT32((UINT32*)p, (UINT32)n); }
 inline INT32 HXAtomicSubRetINT32(INT32* p, INT32 n) { return HXAtomicSubRetUINT32((UINT32*)p, (UINT32)n); }
 
+// Use Sun's atomic operators for 64-bits (for now)
+#include <atomic.h>
+#define HXAtomicIncUINT64(p)      atomic_add_64((uint64_t *)(p), 1)
+#define HXAtomicDecUINT64(p)      atomic_add_64((uint64_t *)(p),-1)
+#define HXAtomicIncRetUINT64(p)   atomic_add_64_nv((uint64_t *)(p), 1)
+#define HXAtomicDecRetUINT64(p)   atomic_add_64_nv((uint64_t *)(p), -1)
+#define HXAtomicAddUINT64(p,n)    atomic_add_64((uint64_t *)(p),(int64_t)(n))
+#define HXAtomicSubUINT64(p,n)    atomic_add_64((uint64_t *)(p),(int64_t)(-n))
+#define HXAtomicAddRetUINT64(p,n) atomic_add_64_nv((uint64_t *)(p),(int64_t)(n))
+#define HXAtomicSubRetUINT64(p,n) atomic_add_64_nv((uint64_t *)(p),(int64_t)(-n))
+
+#define HXAtomicIncINT64(p)       atomic_add_64((uint64_t *)(p), 1)
+#define HXAtomicDecINT64(p)       atomic_add_64((uint64_t *)(p),-1)
+#define HXAtomicIncRetINT64(p)    atomic_add_64_nv((uint64_t *)(p), 1)
+#define HXAtomicDecRetINT64(p)    atomic_add_64_nv((uint64_t *)(p), -1)
+#define HXAtomicAddINT64(p,n)     atomic_add_64((uint64_t *)(p),(int64_t)(n))
+#define HXAtomicSubINT64(p,n)     atomic_add_64((uint64_t *)(p),(int64_t)(-n))
+#define HXAtomicAddRetINT64(p,n)  atomic_add_64_nv((uint64_t *)(p),(int64_t)(n))
+#define HXAtomicSubRetINT64(p,n)  atomic_add_64_nv((uint64_t *)(p),(int64_t)(-n))
+
 /***********************************************************************
  * Sun Solaris / x86 (Native compiler)
  *
@@ -404,17 +425,30 @@
  *
  * Implementation Notes:
  *    Uses compiler intrinsics rather than inline assembly since
- *    the latter is not supported for 64-bit systems.
- *    There are currently no subtraction intrinsics, so the argument is negated and added.
- *    Enable these specific compiler intrinisics regardless of the optimization level.
+ *    the latter is not yet supported by Visual Studio for 64-bit systems.
+ *
+ *    There are currently no subtraction intrinsics, so the argument is negated
+ *    and added.
+ *
+ *    Enable these specific compiler intrinsics regardless of the
+ *    optimization level.
+ *
+ *    The *Add intrinsics return the previous value, to which we add the delta
+ *    and return the new value.
+ *
  */
+
 #elif defined (_WIN64)
 
 #include <intrin.h>
 #pragma intrinsic(_InterlockedIncrement)
 #pragma intrinsic(_InterlockedDecrement)
 #pragma intrinsic(_InterlockedExchangeAdd)
+#pragma intrinsic(_InterlockedIncrement64)
+#pragma intrinsic(_InterlockedDecrement64)
+#pragma intrinsic(_InterlockedExchangeAdd64)
 
+//32-BIT:
 static __inline void HXAtomicIncUINT32(UINT32* p)               { _InterlockedIncrement((long*)p); }
 static __inline void HXAtomicDecUINT32(UINT32* p)               { _InterlockedDecrement((long*)p); }
 static __inline void HXAtomicAddUINT32(UINT32* p, UINT32 n)     { _InterlockedExchangeAdd((long*)p, (long)n); }
@@ -424,14 +458,33 @@
 static __inline INT32 HXAtomicAddRetUINT32(UINT32* p, UINT32 n) { return (n + _InterlockedExchangeAdd((long*)p, (long)n)); }
 static __inline INT32 HXAtomicSubRetUINT32(UINT32* p, UINT32 n) { return (n + _InterlockedExchangeAdd((long*)p, (-(long)n))); }
 
-static __inline void HXAtomicIncINT32(INT32* p)              { _InterlockedIncrement((long*)p); }
-static __inline void HXAtomicDecINT32(INT32* p)              { _InterlockedDecrement((long*)p); }
-static __inline void HXAtomicAddINT32(INT32* p, INT32 n)     { _InterlockedExchangeAdd((long*)p, (long)n); }
-static __inline void HXAtomicSubINT32(INT32* p, INT32 n)     { _InterlockedExchangeAdd((long*)p, (-(long)n)); }
-static __inline INT32 HXAtomicIncRetINT32(INT32* p)          { return _InterlockedIncrement((long*)p); }
-static __inline INT32 HXAtomicDecRetINT32(INT32* p)          { return _InterlockedDecrement((long*)p); }
-static __inline INT32 HXAtomicAddRetINT32(INT32* p, INT32 n) { return (n + _InterlockedExchangeAdd((long*)p, (long)n)); }
-static __inline INT32 HXAtomicSubRetINT32(INT32* p, INT32 n) { return (n + _InterlockedExchangeAdd((long*)p, (-(long)n))); }
+static __inline void HXAtomicIncINT32(INT32* p)                 { _InterlockedIncrement((long*)p); }
+static __inline void HXAtomicDecINT32(INT32* p)                 { _InterlockedDecrement((long*)p); }
+static __inline void HXAtomicAddINT32(INT32* p, INT32 n)        { _InterlockedExchangeAdd((long*)p, (long)n); }
+static __inline void HXAtomicSubINT32(INT32* p, INT32 n)        { _InterlockedExchangeAdd((long*)p, (-(long)n)); }
+static __inline INT32 HXAtomicIncRetINT32(INT32* p)             { return _InterlockedIncrement((long*)p); }
+static __inline INT32 HXAtomicDecRetINT32(INT32* p)             { return _InterlockedDecrement((long*)p); }
+static __inline INT32 HXAtomicAddRetINT32(INT32* p, INT32 n)    { return (n + _InterlockedExchangeAdd((long*)p, (long)n)); }
+static __inline INT32 HXAtomicSubRetINT32(INT32* p, INT32 n)    { return (n + _InterlockedExchangeAdd((long*)p, (-(long)n))); }
+
+//64-BIT:
+static __inline void HXAtomicIncUINT64(UINT64* p)               { _InterlockedIncrement64((long long*)p); }
+static __inline void HXAtomicDecUINT64(UINT64* p)               { _InterlockedDecrement64((long long*)p); }
+static __inline void HXAtomicAddUINT64(UINT64* p, UINT64 n)     { _InterlockedExchangeAdd64((long long*)p, (long long)n); }
+static __inline void HXAtomicSubUINT64(UINT64* p, UINT64 n)     { _InterlockedExchangeAdd64((long long*)p, (-(long long)n)); }
+static __inline INT64 HXAtomicIncRetUINT64(UINT64* p)           { return _InterlockedIncrement64((long long*)p); }
+static __inline INT64 HXAtomicDecRetUINT64(UINT64* p)           { return _InterlockedDecrement64((long long*)p); }
+static __inline INT64 HXAtomicAddRetUINT64(UINT64* p, UINT64 n) { return (n + _InterlockedExchangeAdd64((long long*)p, (long long)n)); }
+static __inline INT64 HXAtomicSubRetUINT64(UINT64* p, UINT64 n) { return (n + _InterlockedExchangeAdd64((long long*)p, (-(long long)n))); }
+
+static __inline void HXAtomicIncINT64(INT64* p)                 { _InterlockedIncrement64((long long*)p); }
+static __inline void HXAtomicDecINT64(INT64* p)                 { _InterlockedDecrement64((long long*)p); }
+static __inline void HXAtomicAddINT64(INT64* p, INT64 n)        { _InterlockedExchangeAdd64((long long*)p, (long long)n); }
+static __inline void HXAtomicSubINT64(INT64* p, INT64 n)        { _InterlockedExchangeAdd64((long long*)p, (-(long long)n)); }
+static __inline INT64 HXAtomicIncRetINT64(INT64* p)             { return _InterlockedIncrement64((long long*)p); }
+static __inline INT64 HXAtomicDecRetINT64(INT64* p)             { return _InterlockedDecrement64((long long*)p); }
+static __inline INT64 HXAtomicAddRetINT64(INT64* p, INT64 n)    { return (n + _InterlockedExchangeAdd64((long long*)p, (long long)n)); }
+static __inline INT64 HXAtomicSubRetINT64(INT64* p, INT64 n)    { return (n + _InterlockedExchangeAdd64((long long*)p, (-(long long)n))); }
 
 
 /***********************************************************************
@@ -617,7 +670,7 @@
 HXAtomicIncUINT32(UINT32* pNum)
 {
     __asm__ __volatile__(
-        "lock incl (%0);"                // atomically add 1 to *pNum
+        "lock incl (%0);"                // atomically increment *pNum
         : /* no output */
         : "r" (pNum)
         : "cc", "memory"
@@ -629,7 +682,7 @@
 HXAtomicDecUINT32(UINT32* pNum)
 {
     __asm__ __volatile__(
-        "lock decl (%0);"                // atomically add -1 to *pNum
+        "lock decl (%0);"                // atomically decrement *pNum
         : /* no output */
         : "r" (pNum)
         : "cc", "memory"
@@ -683,7 +736,7 @@
 HXAtomicSubUINT32(UINT32* pNum, UINT32 ulNum)
 {
     __asm__ __volatile__(
-        "lock subl %1, (%0);"            // atomically add ulNum to *pNum
+        "lock subl %1, (%0);"            // atomically subtract ulNum from *pNum
         : /* no output */
         : "r" (pNum), "r" (ulNum)
         : "cc", "memory"
@@ -747,7 +800,7 @@
 HXAtomicIncUINT32(UINT32* pNum)
 {
     __asm__ __volatile__(
-        "lock incl (%%rax);"             // atomically add 1 to *pNum
+        "lock incl (%%rax);"             // atomically increment *pNum
         : /* no output */
         : "a" (pNum)
         : "cc", "memory"
@@ -759,7 +812,7 @@
 HXAtomicDecUINT32(UINT32* pNum)
 {
     __asm__ __volatile__(
-        "lock decl (%%rax);"             // atomically add -1 to *pNum
+        "lock decl (%%rax);"             // atomically decrement *pNum
         : /* no output */
         : "a" (pNum)
         : "cc", "memory"
@@ -790,7 +843,7 @@
         "lock xaddl %%ebx, (%%rax);"     // atomically add -1 to *pNum
         "     decl  %%ebx;"              // old value in %%ebx, decrement it
         : "=b" (ulRet)
-        : "a" (pNum), "b" (-1)
+        : "a" (pNum), "b" (0xffffffff)
         : "cc", "memory"
         );
     return ulRet;
@@ -813,7 +866,7 @@
 HXAtomicSubUINT32(UINT32* pNum, UINT32 ulNum)
 {
     __asm__ __volatile__(
-        "lock subl %%ebx, (%%rax);"      // atomically add ulNum to *pNum
+        "lock subl %%ebx, (%%rax);"      // atomically subtract ulNum from *pNum
         : /* no output */
         : "a" (pNum), "b" (ulNum)
         : "cc", "memory"
@@ -853,6 +906,120 @@
 }
 
 
+/* 64-BIT: Increment by 1 */
+static __inline__ void
+HXAtomicIncUINT64(UINT64* pNum)
+{
+    __asm__ __volatile__(
+        "lock incq (%%rax);"             // atomically increment *pNum
+        : /* no output */
+        : "a" (pNum)
+        : "cc", "memory"
+        );
+}
+
+/* 64-BIT: Decrement by 1 */
+static __inline__ void
+HXAtomicDecUINT64(UINT64* pNum)
+{
+    __asm__ __volatile__(
+        "lock decq (%%rax);"             // atomically decrement *pNum
+        : /* no output */
+        : "a" (pNum)
+        : "cc", "memory"
+        );
+}
+
+/* 64-BIT: Increment by 1 and return new value */
+static __inline__ UINT64
+HXAtomicIncRetUINT64(UINT64* pNum)
+{
+    volatile UINT64 ullRet;
+    __asm__ __volatile__(
+        "lock xaddq %%rbx, (%%rax);"     // atomically add 1 to *pNum
+        "     incq   %%rbx;"             // old value in %%ebx, increment it
+        : "=b" (ullRet)
+        : "a" (pNum), "b" (0x1)
+        : "cc", "memory"
+        );
+    return ullRet;
+}
+
+/* 64-BIT: Decrement by 1 and return new value */
+static __inline__ UINT64
+HXAtomicDecRetUINT64(UINT64* pNum)
+{   
+    volatile UINT64 ullRet;
+    __asm__ __volatile__(
+        "lock xaddq %%rbx, (%%rax);"     // atomically add -1 to *pNum
+        "     decq  %%rbx;"              // old value in %%ebx, decrement it
+        : "=b" (ullRet)
+        : "a" (pNum), "b" (0xffffffffffffffff)
+        : "cc", "memory"
+        );
+    return ullRet;
+}
+
+/* 64-BIT: Add n */
+static __inline__ void
+HXAtomicAddUINT64(UINT64* pNum, UINT64 ullNum)
+{
+    __asm__ __volatile__(
+        "lock addq %%rbx, (%%rax);"      // atomically add ullNum to *pNum
+        : /* no output */
+        : "a" (pNum), "b" (ullNum)
+        : "cc", "memory"
+        );
+}
+
+/* 64-BIT: Subtract n */
+static __inline__ void
+HXAtomicSubUINT64(UINT64* pNum, UINT64 ullNum)
+{
+    __asm__ __volatile__(
+        "lock subq %%rbx, (%%rax);"      // atomically add ullNum to *pNum
+        : /* no output */
+        : "a" (pNum), "b" (ullNum)
+        : "cc", "memory"
+        );
+}
+
+/* 64-BIT: Add n and return new value */
+static __inline__ UINT64
+HXAtomicAddRetUINT64(UINT64* pNum, UINT64 ullNum)
+{
+    volatile UINT64 ullRet;
+    __asm__ __volatile__(
+        "     movq  %%rbx, %%rcx;"       // copy ullNum into %0
+        "lock xaddq %%rcx, (%%rax);"     // atomically add ullNum to *pNum
+        "     addq  %%rbx, %%rcx;"       // old value in %%ecx, add ullNum
+        : "=c" (ullRet)
+        : "a" (pNum), "b" (ullNum), "c" (0)
+        : "cc", "memory"
+        );
+    return ullRet;
+}
+
+/* 64-BIT: Subtract n and return new value */
+static __inline__ UINT64
+HXAtomicSubRetUINT64(UINT64* pNum, UINT64 ullNum) 
+{   
+    volatile UINT64 ullRet;
+    __asm__ __volatile__(
+        "     subq  %%rbx, %%rcx;"       // negate ullNum, saving in %0
+        "lock xaddq %%rcx, (%%rax);"     // atomically add -(ullNum) to *pNum
+        "     subq  %%rbx, %%rcx;"       // old value in %%ecx, subtract ullNum
+        : "=c" (ullRet)
+        : "a" (pNum), "b" (ullNum), "c" (0)
+        : "cc", "memory"
+        );
+    return ullRet;
+}
+
+
+
+
+//32-BIT:
 static __inline__ void HXAtomicIncINT32(INT32* p)              { HXAtomicIncUINT32((UINT32*)p); }
 static __inline__ void HXAtomicDecINT32(INT32* p)              { HXAtomicDecUINT32((UINT32*)p); }
 static __inline__ void HXAtomicAddINT32(INT32* p, INT32 n)     { HXAtomicAddUINT32((UINT32*)p, (UINT32)n); }
@@ -861,7 +1028,15 @@
 static __inline__ INT32 HXAtomicDecRetINT32(INT32* p)          { return HXAtomicDecRetUINT32((UINT32*)p); }
 static __inline__ INT32 HXAtomicAddRetINT32(INT32* p, INT32 n) { return HXAtomicAddRetUINT32((UINT32*)p, (UINT32)n); }
 static __inline__ INT32 HXAtomicSubRetINT32(INT32* p, INT32 n) { return HXAtomicSubRetUINT32((UINT32*)p, (UINT32)n); }
-
+//64-BIT:
+static __inline__ void HXAtomicIncINT64(INT64* p)              { HXAtomicIncUINT64((UINT64*)p); }
+static __inline__ void HXAtomicDecINT64(INT64* p)              { HXAtomicDecUINT64((UINT64*)p); }
+static __inline__ void HXAtomicAddINT64(INT64* p, INT64 n)     { HXAtomicAddUINT64((UINT64*)p, (UINT64)n); }
+static __inline__ void HXAtomicSubINT64(INT64* p, INT64 n)     { HXAtomicSubUINT64((UINT64*)p, (UINT64)n); }
+static __inline__ INT64 HXAtomicIncRetINT64(INT64* p)          { return HXAtomicIncRetUINT64((UINT64*)p); }
+static __inline__ INT64 HXAtomicDecRetINT64(INT64* p)          { return HXAtomicDecRetUINT64((UINT64*)p); }
+static __inline__ INT64 HXAtomicAddRetINT64(INT64* p, INT64 n) { return HXAtomicAddRetUINT64((UINT64*)p, (UINT64)n); }
+static __inline__ INT64 HXAtomicSubRetINT64(INT64* p, INT64 n) { return HXAtomicSubRetUINT64((UINT64*)p, (UINT64)n); }
 
 
 /***********************************************************************


_______________________________________________
Common-cvs mailing list
Common-cvs@helixcommunity.org
http://lists.helixcommunity.org/mailman/listinfo/common-cvs

