Leaked source code of windows server 2003
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

503 lines
16 KiB

  1. #ifndef _SHARELOCK_HPP_
  2. #define _SHARELOCK_HPP_
  3. // Ruler
  4. // 1 2 3 4 5 6 7 8
  5. //345678901234567890123456789012345678901234567890123456789012345678901234567890
  6. /********************************************************************/
  7. /* */
  8. /* The standard layout. */
  9. /* */
  10. /* The standard layout for 'hpp' files for this code is as */
  11. /* follows: */
  12. /* */
  13. /* 1. Include files. */
  14. /* 2. Constants exported from the class. */
  15. /* 3. Data structures exported from the class. */
  16. /* 4. Forward references to other data structures. */
  17. /* 5. Class specifications (including inline functions). */
  18. /* 6. Additional large inline functions. */
  19. /* */
  20. /* Any portion that is not required is simply omitted. */
  21. /* */
  22. /********************************************************************/
  23. #include "Global.hpp"
  24. #include "Environment.hpp"
  25. /********************************************************************/
  26. /* */
  27. /* Sharelock and Semaphore locking. */
  28. /* */
  29. /* This class provides a very conservative locking scheme. */
  30. /* The assumption behind the code is that locks will be */
  31. /* held for a very short time. A lock can be obtained in */
  32. /* either exclusive mode or shared mode. If the lock is not */
  33. /* available the caller waits by spinning or if that fails */
  34. /* by sleeping. */
  35. /* */
  36. /********************************************************************/
class SHARELOCK : public ENVIRONMENT
{
    //
    // Private data.
    //
    SBIT32 MaxSpins;                        // Spin iterations before giving up and sleeping.
    SBIT32 MaxUsers;                        // Cap on concurrent holders of the share lock.
    VOLATILE SBIT32 ExclusiveUsers;         // Number of threads claiming/holding exclusivity.
    VOLATILE SBIT32 TotalUsers;             // Total holders (shared + exclusive) of the lock.
    HANDLE NormalSemaphore;                 // Semaphore that waiters sleep on.
    VOLATILE SBIT32 NormalWaiting;          // Count of sleepers on 'NormalSemaphore'.
    HANDLE PrioritySemaphore;               // Second semaphore; presumably used to wake
                                            // exclusive waiters ahead of shared ones — the
                                            // wait/wake logic lives in the .cpp, so confirm there.
    VOLATILE SBIT32 PriorityWaiting;        // Count of sleepers on 'PrioritySemaphore'.
#ifdef ENABLE_RECURSIVE_LOCKS
    SBIT32 Owner;                           // Thread id of the current exclusive owner (NULL if none).
    SBIT32 Recursive;                       // Depth of nested claims by 'Owner' beyond the first.
#endif
#ifdef ENABLE_LOCK_STATISTICS
    //
    // Counters for debugging builds.
    //
    VOLATILE SBIT32 TotalExclusiveLocks;
    VOLATILE SBIT32 TotalShareLocks;
    VOLATILE SBIT32 TotalSleeps;
    VOLATILE SBIT32 TotalSpins;
    VOLATILE SBIT32 TotalTimeouts;
    VOLATILE SBIT32 TotalWaits;
#endif
public:
    //
    // Public functions.
    //
    // 'Sleep' parameters are timeouts (default INFINITE); the claim/upgrade
    // calls return False only when the wait times out or sleeping is refused.
    //
    SHARELOCK( SBIT32 NewMaxSpins = 4096, SBIT32 NewMaxUsers = 256 );
    INLINE VOID ChangeExclusiveLockToSharedLock( VOID );
    INLINE BOOLEAN ChangeSharedLockToExclusiveLock( SBIT32 Sleep = INFINITE );
    INLINE BOOLEAN ClaimExclusiveLock( SBIT32 Sleep = INFINITE );
    INLINE BOOLEAN ClaimShareLock( SBIT32 Sleep = INFINITE );
    INLINE VOID ReleaseExclusiveLock( VOID );
    INLINE VOID ReleaseShareLock( VOID );
    BOOLEAN UpdateMaxSpins( SBIT32 NewMaxSpins );
    BOOLEAN UpdateMaxUsers( SBIT32 NewMaxUsers );
    ~SHARELOCK( VOID );
    //
    // Public inline functions.
    //
    // Snapshot of the current holder count (racy by nature).
    //
    INLINE SBIT32 ActiveUsers( VOID )
        { return (SBIT32) TotalUsers; }
private:
    //
    // Private functions.
    //
    INLINE VOID DeleteExclusiveOwner( VOID );
    INLINE VOID NewExclusiveOwner( SBIT32 NewOwner );
    BOOLEAN SleepWaitingForLock
        (
        HANDLE *Semaphore,
        SBIT32 Sleep,
        VOLATILE SBIT32 *Waiting
        );
    BOOLEAN UpdateSemaphore( HANDLE *Semaphore );
    BOOLEAN WaitForExclusiveLock( SBIT32 Sleep );
    BOOLEAN WaitForShareLock( SBIT32 Sleep );
    VOID WakeAllSleepers( VOID );
    //
    // Disabled operations.
    //
    // Copying a lock makes no sense: declared but never defined.
    //
    SHARELOCK( CONST SHARELOCK & Copy );
    VOID operator=( CONST SHARELOCK & Copy );
};
  106. /********************************************************************/
  107. /* */
  108. /* Change an exclusive lock to a shared lock. */
  109. /* */
  110. /* Downgrade the existing exclusive lock to a shared lock. */
  111. /* */
  112. /********************************************************************/
INLINE VOID SHARELOCK::ChangeExclusiveLockToSharedLock( VOID )
{
#ifdef ENABLE_RECURSIVE_LOCKS
    //
    // When we have recursive lock calls we do not
    // release the lock until we have exited to the
    // top level.
    //
    // NOTE(review): when 'Recursive > 0' this call is a
    // no-op (the lock stays exclusive) and 'Recursive' is
    // not decremented — presumably intentional, since a
    // downgrade is not a release; confirm against callers.
    //
    if ( Recursive <= 0 )
    {
        //
        // Delete the exclusive owner information.
        //
        DeleteExclusiveOwner();
#endif
        //
        // Simply decrement the exclusive count.
        // This allows the lock to be shared.
        // ('TotalUsers' is untouched: we still count
        // as one holder, now in shared mode.)
        //
        (VOID) AtomicDecrement( & ExclusiveUsers );
#ifdef ENABLE_RECURSIVE_LOCKS
    }
#endif
#ifdef ENABLE_LOCK_STATISTICS
    //
    // Update the statistics.
    //
    (VOID) AtomicIncrement( & TotalShareLocks );
#endif
}
  143. /********************************************************************/
  144. /* */
  145. /* Change a shared lock to an exclusive lock. */
  146. /* */
  147. /* Upgrade the existing shared lock to an exclusive lock. */
  148. /* */
  149. /********************************************************************/
INLINE BOOLEAN SHARELOCK::ChangeSharedLockToExclusiveLock( SBIT32 Sleep )
{
#ifdef ENABLE_RECURSIVE_LOCKS
    REGISTER SBIT32 ThreadId = GetThreadId();

    //
    // We may already own an exclusive lock.  If so
    // there is nothing to upgrade; otherwise we have
    // to wait.
    //
    // NOTE(review): unlike the Claim calls, this path does
    // not increment 'Recursive' when we already own the
    // lock — verify this matches ReleaseExclusiveLock's
    // expectations at the call sites.
    //
    if ( Owner != ThreadId )
    {
#endif
        //
        // We need to increment the exclusive count
        // to prevent the lock from being shared.
        // ('TotalUsers' is not incremented: our existing
        // shared claim already counts us as a holder.)
        //
        (VOID) AtomicIncrement( & ExclusiveUsers );

        //
        // If the total number of users is one then
        // we have the lock exclusively otherwise we
        // may need to wait.
        //
        if ( TotalUsers != 1 )
        {
            //
            // We have to wait.  If we are not allowed
            // to sleep or we have timed out then exit.
            //
            // NOTE(review): assumes 'WaitForExclusiveLock'
            // backs out the 'ExclusiveUsers' increment on
            // failure — confirm in the implementation file.
            //
            if ( ! WaitForExclusiveLock( Sleep ) )
                { return False; }
        }
#ifdef ENABLE_RECURSIVE_LOCKS
        //
        // Register the new exclusive owner
        // of the lock.
        //
        NewExclusiveOwner( ThreadId );
    }
#endif
#ifdef ENABLE_LOCK_STATISTICS
    //
    // Update the statistics.
    //
    (VOID) AtomicIncrement( & TotalExclusiveLocks );
#endif
    return True;
}
  197. /********************************************************************/
  198. /* */
  199. /* Claim an exclusive lock. */
  200. /* */
  201. /* Claim an exclusive lock if available else wait or exit. */
  202. /* */
  203. /********************************************************************/
INLINE BOOLEAN SHARELOCK::ClaimExclusiveLock( SBIT32 Sleep )
{
#ifdef ENABLE_RECURSIVE_LOCKS
    REGISTER SBIT32 ThreadId = GetThreadId();

    //
    // We may already own an exclusive lock.  If so
    // we increment the recursive count otherwise
    // we have to wait.
    //
    if ( Owner != ThreadId )
    {
#endif
        //
        // We need to increment the exclusive count
        // to prevent the lock from being shared and
        // the total number of users count.  We need
        // to update the total number of users first to
        // avoid a deadlock when real-time priorities
        // are being used.  If not then an interrupt
        // can occur after the update of the
        // 'ExclusiveUsers' count but before the
        // 'TotalUsers' count that will prevent
        // another thread from getting the lock or
        // sleeping (i.e. a live lock).
        //
        (VOID) AtomicIncrement( & TotalUsers );
        (VOID) AtomicIncrement( & ExclusiveUsers );

        //
        // If we are the only user we own the lock;
        // otherwise somebody else holds it (shared or
        // exclusive) and we must wait.
        //
        if ( TotalUsers != 1 )
        {
            //
            // We have to wait.  If we are not allowed
            // to sleep or we have timed out then exit.
            //
            // NOTE(review): assumes 'WaitForExclusiveLock'
            // backs out both increments on failure —
            // confirm in the implementation file.
            //
            if ( ! WaitForExclusiveLock( Sleep ) )
                { return False; }
        }
#ifdef ENABLE_RECURSIVE_LOCKS
        //
        // Register the new exclusive owner
        // of the lock.
        //
        NewExclusiveOwner( ThreadId );
    }
    else
        { Recursive ++; }
#endif
#ifdef ENABLE_LOCK_STATISTICS
    //
    // Update the statistics.
    //
    (VOID) AtomicIncrement( & TotalExclusiveLocks );
#endif
    return True;
}
  258. /********************************************************************/
  259. /* */
  260. /* Claim a shared lock. */
  261. /* */
  262. /* Claim a shared lock if available else wait or exit. */
  263. /* */
  264. /********************************************************************/
INLINE BOOLEAN SHARELOCK::ClaimShareLock( SBIT32 Sleep )
{
#ifdef ENABLE_RECURSIVE_LOCKS
    REGISTER SBIT32 ThreadId = GetThreadId();

    //
    // We may already own an exclusive lock.  If so
    // we increment the recursive count otherwise
    // we have to wait.
    //
    if ( Owner != ThreadId )
    {
#endif
        //
        // We need to increment the total number of
        // users count to prevent the lock from being
        // claimed for exclusive use.
        //
        (VOID) AtomicIncrement( & TotalUsers );

        //
        // Wait if an exclusive claim is pending/held, or
        // if the configured sharing limit is exceeded.
        //
        if ( (ExclusiveUsers > 0) || (TotalUsers > MaxUsers) )
        {
            //
            // We have to wait.  If we are not allowed
            // to sleep or we have timed out then exit.
            //
            // NOTE(review): assumes 'WaitForShareLock'
            // backs out the 'TotalUsers' increment on
            // failure — confirm in the implementation file.
            //
            if ( ! WaitForShareLock( Sleep ) )
                { return False; }
        }
#ifdef ENABLE_RECURSIVE_LOCKS
    }
    else
        { Recursive ++; }
#endif
#ifdef ENABLE_LOCK_STATISTICS
    //
    // Update the statistics.
    //
    (VOID) AtomicIncrement( & TotalShareLocks );
#endif
    return True;
}
  305. #ifdef ENABLE_RECURSIVE_LOCKS
  306. /********************************************************************/
  307. /* */
/* Delete exclusive owner. */
  309. /* */
  310. /* Delete the exclusive lock owner information. */
  311. /* */
  312. /********************************************************************/
  313. INLINE VOID SHARELOCK::DeleteExclusiveOwner( VOID )
  314. {
  315. #ifdef DEBUGGING
  316. if ( Owner != NULL )
  317. {
  318. #endif
  319. Owner = NULL;
  320. #ifdef DEBUGGING
  321. }
  322. else
  323. { Failure( "Sharelock has no owner in DeleteExclusiveOwner" ); }
  324. #endif
  325. }
  326. /********************************************************************/
  327. /* */
  328. /* New exclusive owner. */
  329. /* */
  330. /* Register new exclusive lock owner information. */
  331. /* */
  332. /********************************************************************/
  333. INLINE VOID SHARELOCK::NewExclusiveOwner( SBIT32 NewOwner )
  334. {
  335. #ifdef DEBUGGING
  336. if ( Owner == NULL )
  337. {
  338. #endif
  339. Owner = NewOwner;
  340. #ifdef DEBUGGING
  341. }
  342. else
  343. { Failure( "Already exclusive in NewExclusiveOwner" ); }
  344. #endif
  345. }
  346. #endif
  347. /********************************************************************/
  348. /* */
  349. /* Release an exclusive lock. */
  350. /* */
  351. /* Release an exclusive lock and if needed wakeup any sleepers. */
  352. /* */
  353. /********************************************************************/
INLINE VOID SHARELOCK::ReleaseExclusiveLock( VOID )
{
#ifdef ENABLE_RECURSIVE_LOCKS
    //
    // When we have recursive lock calls we do not
    // release the lock until we have exited to the
    // top level.
    //
    if ( Recursive <= 0 )
    {
        //
        // Delete the exclusive owner information.
        //
        DeleteExclusiveOwner();
#endif
        //
        // Release an exclusive lock.
        //
#ifdef DEBUGGING
        //
        // (Short-circuit '||': if the first decrement
        // goes negative the second never executes —
        // acceptable since we are failing anyway.)
        //
        if
            (
            (AtomicDecrement( & ExclusiveUsers ) < 0)
                ||
            (AtomicDecrement( & TotalUsers ) < 0)
            )
            { Failure( "Negative lock count in ReleaseExclusiveLock" ); }
#else
        AtomicDecrement( & ExclusiveUsers );
        AtomicDecrement( & TotalUsers );
#endif
        //
        // Wakeup anyone who is asleep waiting.  We
        // need to be very careful here as in very
        // rare situations the waiting counts can
        // be negative for very brief periods.
        //
        if ( (PriorityWaiting > 0) || (NormalWaiting > 0) )
            { WakeAllSleepers(); }
#ifdef ENABLE_RECURSIVE_LOCKS
    }
    else
        { Recursive --; }
#endif
}
  398. /********************************************************************/
  399. /* */
  400. /* Release a shared lock. */
  401. /* */
  402. /* Release a shared lock and if needed wakeup any sleepers. */
  403. /* */
  404. /********************************************************************/
INLINE VOID SHARELOCK::ReleaseShareLock( VOID )
{
#ifdef ENABLE_RECURSIVE_LOCKS
    //
    // When we have recursive lock calls we do not
    // release the lock until we have exited to the
    // top level.
    //
    if ( Recursive <= 0 )
    {
#endif
#ifdef DEBUGGING
        //
        // Release a shared lock.
        //
        if ( AtomicDecrement( & TotalUsers ) < 0 )
            { Failure( "Negative lock count in ReleaseShareLock" ); }
#else
        AtomicDecrement( & TotalUsers );
#endif
        //
        // Wakeup anyone who is asleep waiting.  We
        // need to be very careful here as in very
        // rare situations the waiting counts can
        // be negative for very brief periods.
        //
        if ( (PriorityWaiting > 0) || (NormalWaiting > 0) )
            { WakeAllSleepers(); }
#ifdef ENABLE_RECURSIVE_LOCKS
    }
    else
        { Recursive --; }
#endif
}
  439. #endif