Leaked source code of windows server 2003
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

289 lines
10 KiB

  1. #ifndef _SPINLOCK_HPP_
  2. #define _SPINLOCK_HPP_
  3. // Ruler
  4. // 1 2 3 4 5 6 7 8
  5. //345678901234567890123456789012345678901234567890123456789012345678901234567890
  6. /********************************************************************/
  7. /* */
  8. /* The standard layout. */
  9. /* */
  10. /* The standard layout for 'hpp' files for this code is as */
  11. /* follows: */
  12. /* */
  13. /* 1. Include files. */
  14. /* 2. Constants exported from the class. */
  15. /* 3. Data structures exported from the class. */
  16. /* 4. Forward references to other data structures. */
  17. /* 5. Class specifications (including inline functions). */
  18. /* 6. Additional large inline functions. */
  19. /* */
  20. /* Any portion that is not required is simply omitted. */
  21. /* */
  22. /********************************************************************/
  23. #include "Global.hpp"
  24. #include "Environment.hpp"
  25. /********************************************************************/
  26. /* */
  27. /* Constants exported from the class. */
  28. /* */
  29. /* The spinlock constants indicate when the lock is open and */
  30. /* when it is closed. */
  31. /* */
  32. /********************************************************************/
// Value exchanged into the lock word while a thread holds the lock.
CONST LONG LockClosed = 1;
// Value of the lock word when the lock is free to be claimed.
CONST LONG LockOpen = 0;
  35. /********************************************************************/
  36. /* */
  37. /* Spinlock and Semaphore locking. */
  38. /* */
  39. /* This class provides a very conservative locking scheme. */
  40. /* The assumption behind the code is that locks will be */
  41. /* held for a very short time. When a lock is taken a memory */
  42. /* location is exchanged. All other threads that want this */
  43. /* lock wait by spinning and sometimes sleeping on a semaphore */
  44. /* until it becomes free again. The only other choice is not */
  45. /* to wait at all and move on to do something else. This */
  46. /* module should normally be used in conjunction with cache */
  47. /* aligned memory in minimize cache line misses. */
  48. /* */
  49. /********************************************************************/
class SPINLOCK : public ENVIRONMENT
    {
        //
        // Private data.
        //
        // 'MaxSpins' and 'MaxUsers' are fixed at construction
        // (defaults 4096 and 256).  Presumably 'MaxSpins' bounds
        // the busy-wait iterations in 'WaitForLock' and 'MaxUsers'
        // sizes the semaphore — confirm against the '.cpp' file.
        //
        SBIT32                            MaxSpins;
        SBIT32                            MaxUsers;
#ifdef ENABLE_RECURSIVE_LOCKS
        //
        // 'Owner' records the id of the thread currently holding
        // the lock and 'Recursive' counts that thread's nested
        // claims (see 'ClaimLock' / 'ReleaseLock').
        //
        SBIT32                            Owner;
        SBIT32                            Recursive;
#endif
        //
        // 'Semaphore' is the kernel object waiters block on,
        // 'Spinlock' is the lock word itself ('LockOpen' or
        // 'LockClosed') and 'Waiting' counts threads waiting on
        // the semaphore (tested in 'ReleaseLock' before waking).
        //
        HANDLE                            Semaphore;
        VOLATILE SBIT32                   Spinlock;
        VOLATILE SBIT32                   Waiting;
#ifdef ENABLE_LOCK_STATISTICS
        //
        // Counters for debugging builds.
        //
        VOLATILE SBIT32                   TotalLocks;
        VOLATILE SBIT32                   TotalSleeps;
        VOLATILE SBIT32                   TotalSpins;
        VOLATILE SBIT32                   TotalTimeouts;
        VOLATILE SBIT32                   TotalWaits;
#endif
    public:
        //
        // Public functions.
        //
        SPINLOCK( SBIT32 NewMaxSpins = 4096, SBIT32 NewMaxUsers = 256 );

        INLINE BOOLEAN ClaimLock( SBIT32 Sleep = INFINITE );

        INLINE VOID ReleaseLock( VOID );

        ~SPINLOCK( VOID );

    private:
        //
        // Private functions.
        //
        INLINE BOOLEAN ClaimSpinlock( VOLATILE SBIT32 *Spinlock );

        INLINE VOID DeleteExclusiveOwner( VOID );

        INLINE VOID NewExclusiveOwner( SBIT32 NewOwner );

        VOID UpdateSemaphore( VOID );

        BOOLEAN WaitForLock( SBIT32 Sleep );

        VOID WakeAllSleepers( VOID );

        //
        // Disabled operations.
        //
        // Copying a lock makes no sense, so the copy constructor
        // and assignment operator are declared private and never
        // defined.
        //
        SPINLOCK( CONST SPINLOCK & Copy );

        VOID operator=( CONST SPINLOCK & Copy );
    };
  98. /********************************************************************/
  99. /* */
  100. /* A guaranteed atomic exchange. */
  101. /* */
  102. /* An attempt is made to claim the spinlock. This action is */
  103. /* guaranteed to be atomic. */
  104. /* */
  105. /********************************************************************/
  106. INLINE BOOLEAN SPINLOCK::ClaimSpinlock( VOLATILE SBIT32 *Spinlock )
  107. {
  108. return
  109. (
  110. AtomicCompareExchange( Spinlock,LockClosed,LockOpen )
  111. ==
  112. LockOpen
  113. );
  114. }
/********************************************************************/
/*                                                                  */
/*   Claim the spinlock.                                            */
/*                                                                  */
/*   Claim the lock if available else wait or exit.                 */
/*   Returns 'True' when the lock was acquired and 'False'          */
/*   when the caller declined to sleep or the wait timed out.       */
/*                                                                  */
/********************************************************************/
INLINE BOOLEAN SPINLOCK::ClaimLock( SBIT32 Sleep )
    {
#ifdef ENABLE_RECURSIVE_LOCKS
    REGISTER SBIT32 ThreadId = GetThreadId();

    //
    // We may already own the spin lock.  If so
    // we increment the recursive count.  If not
    // we have to wait.  (Only the owning thread can
    // observe its own id in 'Owner', so this
    // unsynchronized read suffices for the recursion
    // test.)
    //
    if ( Owner != ThreadId )
        {
#endif
        //
        // Claim the spinlock.
        //
        if ( ! ClaimSpinlock( & Spinlock ) )
            {
            //
            // We have to wait.  If we are not
            // allowed to sleep or we have timed
            // out then exit.
            //
            // 'Sleep' defaults to INFINITE; zero means
            // try once and give up immediately.
            // (Presumably a millisecond timeout in the
            // style of a Win32 wait — confirm against
            // 'WaitForLock' in the '.cpp' file.)
            //
            if ( (Sleep == 0) || (! WaitForLock( Sleep )) )
                { return False; }
            }
#ifdef ENABLE_RECURSIVE_LOCKS
        //
        // Register the new owner of the lock.
        //
        NewExclusiveOwner( ThreadId );
        }
    else
        { Recursive ++; }
#endif
#ifdef ENABLE_LOCK_STATISTICS
    //
    // Update the statistics.
    //
    (VOID) AtomicIncrement( & TotalLocks );
#endif

    return True;
    }
  164. #ifdef ENABLE_RECURSIVE_LOCKS
  165. /********************************************************************/
  166. /* */
  167. /* New exclusive owner. */
  168. /* */
  169. /* Delete the exclusive lock owner information. */
  170. /* */
  171. /********************************************************************/
  172. INLINE VOID SPINLOCK::DeleteExclusiveOwner( VOID )
  173. {
  174. #ifdef DEBUGGING
  175. if ( Owner != NULL )
  176. {
  177. #endif
  178. Owner = NULL;
  179. #ifdef DEBUGGING
  180. }
  181. else
  182. { Failure( "Sharelock has no owner in DeleteExclusiveOwner" ); }
  183. #endif
  184. }
  185. /********************************************************************/
  186. /* */
  187. /* New exclusive owner. */
  188. /* */
  189. /* Register new exclusive lock owner information. */
  190. /* */
  191. /********************************************************************/
  192. INLINE VOID SPINLOCK::NewExclusiveOwner( SBIT32 NewOwner )
  193. {
  194. #ifdef DEBUGGING
  195. if ( Owner == NULL )
  196. {
  197. #endif
  198. Owner = NewOwner;
  199. #ifdef DEBUGGING
  200. }
  201. else
  202. { Failure( "Already exclusive in NewExclusiveOwner" ); }
  203. #endif
  204. }
  205. #endif
  206. /********************************************************************/
  207. /* */
  208. /* Release the spinlock. */
  209. /* */
  210. /* Release the lock and if needed wakeup any sleepers. */
  211. /* */
  212. /********************************************************************/
  213. INLINE VOID SPINLOCK::ReleaseLock( VOID )
  214. {
  215. #ifdef ENABLE_RECURSIVE_LOCKS
  216. //
  217. // When we have recursive lock calls we do not
  218. // release the lock until we have exited to the
  219. // top level.
  220. //
  221. if ( Recursive <= 0 )
  222. {
  223. //
  224. // Delete the exclusive owner information.
  225. //
  226. DeleteExclusiveOwner();
  227. #endif
  228. #ifdef DEBUGGING
  229. //
  230. // Release the spinlock.
  231. //
  232. if ( AtomicExchange( & Spinlock, LockOpen ) == LockClosed )
  233. {
  234. #else
  235. (VOID) AtomicExchange( & Spinlock, LockOpen );
  236. #endif
  237. //
  238. // Wakeup anyone who is asleep waiting.
  239. //
  240. if ( Waiting > 0 )
  241. { WakeAllSleepers(); }
  242. #ifdef DEBUGGING
  243. }
  244. else
  245. { Failure( "Spinlock released by not held in ReleaseLock" ); }
  246. #endif
  247. #ifdef ENABLE_RECURSIVE_LOCKS
  248. }
  249. else
  250. { Recursive --; }
  251. #endif
  252. }
  253. #endif