Leaked source code of Windows Server 2003
  1. /*++
  2. Copyright (c) 1997-2002 Microsoft Corporation
  3. Module Name :
  4. Locks.h
  5. Abstract:
  6. A collection of locks for multithreaded access to data structures
  7. Author:
  8. George V. Reilly (GeorgeRe) 06-Jan-1998
  9. Environment:
  10. Win32 - User Mode
  11. Project:
  12. LKRhash
  13. Revision History:
  14. --*/
  15. #ifndef __LOCKS_H__
  16. #define __LOCKS_H__
  17. //--------------------------------------------------------------------
  18. // File: locks.h
  19. //
  20. // A collection of different implementations of read/write locks that all
  21. // share the same interface. This allows different locks to be plugged
  22. // into C++ templates as parameters.
  23. //
  24. // The implementations are:
  25. // CSmallSpinLock lightweight critical section
  26. // CSpinLock variant of CSmallSpinLock
  27. // CFakeLock do-nothing class; useful as a template parameter
  28. // CCritSec Win32 CRITICAL_SECTION
  29. // Multi-Reader/Single-Writer locks:
  30. // CReaderWriterLock MRSW lock from Neel Jain
  31. // CReaderWriterLock2 smaller implementation of CReaderWriterLock
  32. // CReaderWriterLock3 CReaderWriterLock2 with recursive WriteLock
  33. //
  34. // CAutoReadLock<Lock> and CAutoWriteLock<Lock> can be used as
  35. // exception-safe wrappers.
  36. //
  37. // TODO:
  38. // * Add per-class lock-contention statistics
  39. // * Add a timeout feature to Try{Read,Write}Lock
  40. // * Add some way of tracking all the owners of a multi-reader lock
  41. //--------------------------------------------------------------------
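// A minimal sketch of how the lock classes below plug into a template as
// a parameter. CStringMap is a hypothetical container, not part of this
// header; any class that exposes the shared interface (WriteLock/
// WriteUnlock, ReadLock/ReadUnlock, the Try* variants, ...) will do.
#if 0   // illustration only; not compiled
template <class _Lock>
class CStringMap
{
    mutable _Lock m_Lock;   // e.g. CReaderWriterLock3, CSpinLock, CFakeLock
public:
    void Insert(const char* pszKey)
    {
        m_Lock.WriteLock();
        // ... modify the underlying table ...
        m_Lock.WriteUnlock();
    }
    bool Contains(const char* pszKey) const
    {
        m_Lock.ReadLock();
        // ... search the underlying table ...
        m_Lock.ReadUnlock();
        return false;
    }
};
// A single-threaded build can instantiate CStringMap<CFakeLock> so that
// all of the locking calls compile down to nothing.
#endif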
  42. #ifndef LOCKS_KERNEL_MODE
  43. # define LOCKS_ENTER_CRIT_REGION() ((void) 0)
  44. # define LOCKS_LEAVE_CRIT_REGION() ((void) 0)
  45. #else
  46. # define LOCKS_ENTER_CRIT_REGION() KeEnterCriticalRegion()
  47. # define LOCKS_LEAVE_CRIT_REGION() KeLeaveCriticalRegion()
  48. #endif
  49. #if defined(_MSC_VER) && (_MSC_VER >= 1200)
  50. // The __forceinline keyword is new to VC6
  51. # define LOCK_FORCEINLINE __forceinline
  52. #else
  53. # define LOCK_FORCEINLINE inline
  54. #endif
  55. #ifndef __IRTLDBG_H__
  56. # include <irtldbg.h>
  57. #endif
  58. enum LOCK_LOCKTYPE {
  59. LOCK_FAKELOCK = 1,
  60. LOCK_SMALLSPINLOCK,
  61. LOCK_SPINLOCK,
  62. LOCK_CRITSEC,
  63. LOCK_READERWRITERLOCK,
  64. LOCK_READERWRITERLOCK2,
  65. LOCK_READERWRITERLOCK3,
  66. LOCK_READERWRITERLOCK4,
  67. LOCK_KSPINLOCK,
  68. LOCK_FASTMUTEX,
  69. LOCK_ERESOURCE,
  70. LOCK_RTL_MRSW_LOCK,
  71. };
  72. // Forward declarations
  73. class IRTL_DLLEXP CSmallSpinLock;
  74. class IRTL_DLLEXP CSpinLock;
  75. class IRTL_DLLEXP CFakeLock;
  76. class IRTL_DLLEXP CCritSec;
  77. class IRTL_DLLEXP CReaderWriterLock;
  78. class IRTL_DLLEXP CReaderWriterLock2;
  79. class IRTL_DLLEXP CReaderWriterLock3;
  80. //--------------------------------------------------------------------
  81. // Spin count values.
  82. enum LOCK_SPINS {
  83. LOCK_MAXIMUM_SPINS = 10000, // maximum allowable spin count
  84. LOCK_DEFAULT_SPINS = 4000, // default spin count
  85. LOCK_MINIMUM_SPINS = 100, // minimum allowable spin count
  86. LOCK_USE_DEFAULT_SPINS = 0xFFFF, // use class default spin count
  87. LOCK_DONT_SPIN = 0, // don't spin at all
  88. };
  89. #ifndef LOCKS_KERNEL_MODE
  90. // Boilerplate code for the per-class default spincount and spinfactor
  91. #define LOCK_DEFAULT_SPIN_IMPLEMENTATION() \
  92. protected: \
  93. /* per-class variables */ \
  94. static WORD sm_wDefaultSpinCount; /* global default spin count */ \
  95. static double sm_dblDfltSpinAdjFctr; /* global spin adjustment factor*/\
  96. \
  97. public: \
  98. /* Set the default spin count for all locks */ \
  99. static void SetDefaultSpinCount(WORD wSpins) \
  100. { \
  101. IRTLASSERT((wSpins == LOCK_DONT_SPIN) \
  102. || (wSpins == LOCK_USE_DEFAULT_SPINS) \
  103. || (LOCK_MINIMUM_SPINS <= wSpins \
  104. && wSpins <= LOCK_MAXIMUM_SPINS)); \
  105. \
  106. if ((LOCK_MINIMUM_SPINS <= wSpins && wSpins <= LOCK_MAXIMUM_SPINS)\
  107. || (wSpins == LOCK_DONT_SPIN)) \
  108. sm_wDefaultSpinCount = wSpins; \
  109. else if (wSpins == LOCK_USE_DEFAULT_SPINS) \
  110. sm_wDefaultSpinCount = LOCK_DEFAULT_SPINS; \
  111. } \
  112. \
  113. /* Return the default spin count for all locks */ \
  114. static WORD GetDefaultSpinCount() \
  115. { \
  116. return sm_wDefaultSpinCount; \
  117. } \
  118. \
  119. /* Set the adjustment factor for the spincount, used in each iteration */\
  120. /* of countdown-and-sleep by the backoff algorithm. */ \
  121. static void SetDefaultSpinAdjustmentFactor(double dblAdjFactor) \
  122. { \
  123. IRTLASSERT(0.1 <= dblAdjFactor && dblAdjFactor <= 10.0); \
  124. if (0.1 <= dblAdjFactor && dblAdjFactor <= 10.0) \
  125. sm_dblDfltSpinAdjFctr = dblAdjFactor; \
  126. } \
  127. \
  128. /* Return the default spin adjustment factor for all locks */ \
  129. static double GetDefaultSpinAdjustmentFactor() \
  130. { \
  131. return sm_dblDfltSpinAdjFctr; \
  132. } \
  133. #endif // !LOCKS_KERNEL_MODE
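// A sketch of how the class-wide knobs declared by the macro above might
// be tuned at process startup. The functions exist on every user-mode
// lock class that invokes LOCK_DEFAULT_SPIN_IMPLEMENTATION(); the values
// below are arbitrary examples, not recommendations.
#if 0   // illustration only; not compiled
void TuneLockSpinning()
{
    // Spin longer before each Sleep() on a large multiprocessor box...
    CSmallSpinLock::SetDefaultSpinCount(8000);
    CReaderWriterLock3::SetDefaultSpinCount(LOCK_USE_DEFAULT_SPINS);

    // ...and have the backoff multiply the spin count by 0.75 after each Sleep().
    CSmallSpinLock::SetDefaultSpinAdjustmentFactor(0.75);
}
#endif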
  134. //--------------------------------------------------------------------
  135. // Various Lock Traits
  136. // Is the lock a simple mutex or a multi-reader/single-writer lock?
  137. enum LOCK_RW_MUTEX {
  138. LOCK_MUTEX = 1, // mutexes allow only one thread to hold the lock
  139. LOCK_MRSW, // multi-reader, single-writer
  140. };
  141. // Can the lock be recursively acquired?
  142. enum LOCK_RECURSION {
  143. LOCK_RECURSIVE = 1, // Write and Read locks can be recursively acquired
  144. LOCK_READ_RECURSIVE, // Read locks can be reacquired, but not Write
  145. LOCK_NON_RECURSIVE, // Will deadlock if attempt to acquire recursively
  146. };
  147. // Does the lock Sleep in a loop or block on a kernel synch object handle?
  148. // May (or may not) spin first before sleeping/blocking.
  149. enum LOCK_WAIT_TYPE {
  150. LOCK_WAIT_SLEEP = 1, // Calls Sleep() in a loop
  151. LOCK_WAIT_HANDLE, // Blocks on a kernel mutex, semaphore, or event
  152. LOCK_WAIT_SPIN, // Spins until lock acquired. Never sleeps.
  153. };
  154. // When the lock is taken, how are the waiters dequeued?
  155. enum LOCK_QUEUE_TYPE {
  156. LOCK_QUEUE_FIFO = 1, // First in, first out. Fair.
  157. LOCK_QUEUE_LIFO, // Unfair but CPU cache friendly
  158. LOCK_QUEUE_KERNEL, // Determined by vagaries of scheduler
  159. };
  160. // Can the lock's spincount be set on a per-lock basis, or is it only
  161. // possible to modify the default spincount for all the locks in this class?
  162. enum LOCK_PERLOCK_SPIN {
  163. LOCK_NO_SPIN = 1, // The locks do not spin at all
  164. LOCK_CLASS_SPIN, // Can set class-wide spincount, not individual
  165. LOCK_INDIVIDUAL_SPIN, // Can set a spincount on an individual lock
  166. };
  167. //--------------------------------------------------------------------
  168. // CLockBase: bundle the above attributes
  169. template < LOCK_LOCKTYPE locktype,
  170. LOCK_RW_MUTEX mutextype,
  171. LOCK_RECURSION recursiontype,
  172. LOCK_WAIT_TYPE waittype,
  173. LOCK_QUEUE_TYPE queuetype,
  174. LOCK_PERLOCK_SPIN spintype
  175. >
  176. class CLockBase
  177. {
  178. public:
  179. static LOCK_LOCKTYPE LockType() {return locktype;}
  180. static LOCK_RW_MUTEX MutexType() {return mutextype;}
  181. static LOCK_RECURSION Recursion() {return recursiontype;}
  182. static LOCK_WAIT_TYPE WaitType() {return waittype;}
  183. static LOCK_QUEUE_TYPE QueueType() {return queuetype;}
  184. static LOCK_PERLOCK_SPIN PerLockSpin() {return spintype;}
  185. enum {
  186. LOCK_WRITELOCK_RECURSIVE = (LOCK_RECURSIVE == recursiontype),
  187. };
  188. };
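// The traits above let generic code adapt to whatever lock it was given.
// A sketch (hypothetical helper, branching on the recursion trait that
// CLockBase exposes as the LOCK_WRITELOCK_RECURSIVE enumerator):
#if 0   // illustration only; not compiled
template <class _Lock>
void LockTwiceIfAllowed(_Lock& rLock)
{
    rLock.WriteLock();
    if (_Lock::LOCK_WRITELOCK_RECURSIVE)  // true for, e.g., CSpinLock
    {
        rLock.WriteLock();                // safe only on recursive locks
        rLock.WriteUnlock();
    }
    rLock.WriteUnlock();
}
#endif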
  189. // Lock instrumentation causes all sorts of interesting statistics about
  190. // lock contention, etc., to be gathered, but makes locks considerably fatter
  191. // and somewhat slower. Turned off by default.
  192. // #define LOCK_INSTRUMENTATION 1
  193. #ifdef LOCK_INSTRUMENTATION
  194. //--------------------------------------------------------------------
  195. // CLockStatistics: statistics for an individual lock
  196. class IRTL_DLLEXP CLockStatistics
  197. {
  198. public:
  199. enum {
  200. L_NAMELEN = 8,
  201. };
  202. double m_nContentions; // #times this lock was already locked
  203. double m_nSleeps; // Total #Sleep()s needed
  204. double m_nContentionSpins; // Total iterations this lock spun
  205. double m_nAverageSpins; // Average spins each contention needed
  206. double m_nReadLocks; // Number of times lock acquired for reading
  207. double m_nWriteLocks; // Number of times lock acquired for writing
  208. TCHAR m_tszName[L_NAMELEN];// Name of this lock
  209. CLockStatistics()
  210. : m_nContentions(0),
  211. m_nSleeps(0),
  212. m_nContentionSpins(0),
  213. m_nAverageSpins(0),
  214. m_nReadLocks(0),
  215. m_nWriteLocks(0)
  216. {
  217. m_tszName[0] = _TEXT('\0');
  218. }
  219. };
  220. //--------------------------------------------------------------------
  221. // CGlobalLockStatistics: statistics for all the known locks
  222. class IRTL_DLLEXP CGlobalLockStatistics
  223. {
  224. public:
  225. LONG m_cTotalLocks; // Total number of locks created
  226. LONG m_cContendedLocks; // Total number of contended locks
  227. LONG m_nSleeps; // Total #Sleep()s needed by all locks
  228. LONGLONG m_cTotalSpins; // Total iterations all locks spun
  229. double m_nAverageSpins; // Average spins needed for each contended lock
  230. LONG m_nReadLocks; // Total ReadLocks
  231. LONG m_nWriteLocks; // Total WriteLocks
  232. CGlobalLockStatistics()
  233. : m_cTotalLocks(0),
  234. m_cContendedLocks(0),
  235. m_nSleeps(0),
  236. m_cTotalSpins(0),
  237. m_nAverageSpins(0),
  238. m_nReadLocks(0),
  239. m_nWriteLocks(0)
  240. {}
  241. };
  242. # define LOCK_INSTRUMENTATION_DECL() \
  243. private: \
  244. volatile LONG m_nContentionSpins; /* #iterations this lock spun */ \
  245. volatile WORD m_nContentions; /* #times lock was already locked */\
  246. volatile WORD m_nSleeps; /* #Sleep()s needed */ \
  247. volatile WORD m_nReadLocks; /* #ReadLocks */ \
  248. volatile WORD m_nWriteLocks; /* #WriteLocks */ \
  249. TCHAR m_tszName[CLockStatistics::L_NAMELEN]; /* Name of lock */\
  250. \
  251. static LONG sm_cTotalLocks; /* Total number of locks created */ \
  252. static LONG sm_cContendedLocks; /* Total number of contended locks */\
  253. static LONG sm_nSleeps; /* Total #Sleep()s by all locks */ \
  254. static LONGLONG sm_cTotalSpins; /* Total iterations all locks spun */\
  255. static LONG sm_nReadLocks; /* Total ReadLocks */ \
  256. static LONG sm_nWriteLocks; /* Total WriteLocks */ \
  257. \
  258. public: \
  259. const TCHAR* Name() const {return m_tszName;} \
  260. \
  261. CLockStatistics Statistics() const; \
  262. static CGlobalLockStatistics GlobalStatistics(); \
  263. static void ResetGlobalStatistics(); \
  264. private: \
  265. // Add this to constructors
  266. # define LOCK_INSTRUMENTATION_INIT(ptszName) \
  267. m_nContentionSpins = 0; \
  268. m_nContentions = 0; \
  269. m_nSleeps = 0; \
  270. m_nReadLocks = 0; \
  271. m_nWriteLocks = 0; \
  272. ++sm_cTotalLocks; \
  273. if (ptszName == NULL) \
  274. m_tszName[0] = _TEXT('\0'); \
  275. else \
  276. _tcsncpy(m_tszName, ptszName, sizeof(m_tszName)/sizeof(TCHAR))
  277. // Note: we are not using Interlocked operations for the shared
  278. // statistical counters. We'll lose perfect accuracy, but we'll
  279. // gain by reduced bus synchronization traffic.
  280. # define LOCK_READLOCK_INSTRUMENTATION() \
  281. { ++m_nReadLocks; \
  282. ++sm_nReadLocks; }
  283. # define LOCK_WRITELOCK_INSTRUMENTATION() \
  284. { ++m_nWriteLocks; \
  285. ++sm_nWriteLocks; }
  286. #else // !LOCK_INSTRUMENTATION
  287. # define LOCK_INSTRUMENTATION_DECL()
  288. # define LOCK_READLOCK_INSTRUMENTATION() ((void) 0)
  289. # define LOCK_WRITELOCK_INSTRUMENTATION() ((void) 0)
  290. #endif // !LOCK_INSTRUMENTATION
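// To turn the statistics on, define LOCK_INSTRUMENTATION before this
// header is included; each lock then takes a name in its constructor and
// the members declared by LOCK_INSTRUMENTATION_DECL() become available.
// A sketch of reading them back (field names as declared above):
#if 0   // illustration only; requires LOCK_INSTRUMENTATION
CReaderWriterLock3 g_Lock(_TEXT("g_Lock"));

void DumpLockStatistics()
{
    CLockStatistics ls = g_Lock.Statistics();
    // per-lock counters: ls.m_nContentions, ls.m_nSleeps,
    // ls.m_nContentionSpins, ls.m_nAverageSpins, ...

    CGlobalLockStatistics gls = CReaderWriterLock3::GlobalStatistics();
    // class-wide counters: gls.m_cTotalLocks, gls.m_cContendedLocks,
    // gls.m_cTotalSpins, gls.m_nReadLocks, gls.m_nWriteLocks, ...

    CReaderWriterLock3::ResetGlobalStatistics();
}
#endif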
  291. //--------------------------------------------------------------------
  292. // CAutoReadLock<Lock> and CAutoWriteLock<Lock> provide exception-safe
  293. // acquisition and release of the other locks defined below
  294. template <class _Lock>
  295. class IRTL_DLLEXP CAutoReadLock
  296. {
  297. private:
  298. bool m_fLocked;
  299. _Lock& m_Lock;
  300. public:
  301. CAutoReadLock(
  302. _Lock& rLock,
  303. bool fLockNow = true)
  304. : m_fLocked(false), m_Lock(rLock)
  305. {
  306. if (fLockNow)
  307. Lock();
  308. }
  309. ~CAutoReadLock()
  310. {
  311. Unlock();
  312. }
  313. void Lock()
  314. {
  315. // disallow recursive acquisition of the lock through this wrapper
  316. if (!m_fLocked)
  317. {
  318. m_fLocked = true;
  319. m_Lock.ReadLock();
  320. }
  321. }
  322. void Unlock()
  323. {
  324. if (m_fLocked)
  325. {
  326. m_Lock.ReadUnlock();
  327. m_fLocked = false;
  328. }
  329. }
  330. };
  331. template <class _Lock>
  332. class IRTL_DLLEXP CAutoWriteLock
  333. {
  334. private:
  335. bool m_fLocked;
  336. _Lock& m_Lock;
  337. public:
  338. CAutoWriteLock(
  339. _Lock& rLock,
  340. bool fLockNow = true)
  341. : m_fLocked(false), m_Lock(rLock)
  342. {
  343. if (fLockNow)
  344. Lock();
  345. }
  346. ~CAutoWriteLock()
  347. {
  348. Unlock();
  349. }
  350. void Lock()
  351. {
  352. // disallow recursive acquisition of the lock through this wrapper
  353. if (!m_fLocked)
  354. {
  355. m_fLocked = true;
  356. m_Lock.WriteLock();
  357. }
  358. }
  359. void Unlock()
  360. {
  361. if (m_fLocked)
  362. {
  363. m_fLocked = false;
  364. m_Lock.WriteUnlock();
  365. }
  366. }
  367. };
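// A short usage sketch for the wrappers above: the constructor acquires
// the lock (unless fLockNow is false) and the destructor releases it, so
// the lock is dropped even if the guarded code throws. CReaderWriterLock3
// and the Widget names are illustrative only.
#if 0   // illustration only; not compiled
void UpdateWidget(CReaderWriterLock3& rLock, Widget& rWidget)
{
    CAutoWriteLock<CReaderWriterLock3> alck(rLock);  // WriteLock() here
    rWidget.Update();        // may throw; the lock is still released
}                            // ~CAutoWriteLock -> WriteUnlock()

bool LookupWidget(CReaderWriterLock3& rLock, const Widget& rWidget)
{
    CAutoReadLock<CReaderWriterLock3> alck(rLock, false);   // defer locking
    alck.Lock();                       // explicit ReadLock()
    bool fFound = rWidget.IsValid();   // work done under the read lock
    return fFound;                     // ~CAutoReadLock -> ReadUnlock()
}
#endif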
  368. //--------------------------------------------------------------------
  369. // A dummy class, primarily useful as a template parameter
  370. class IRTL_DLLEXP CFakeLock :
  371. public CLockBase<LOCK_FAKELOCK, LOCK_MUTEX,
  372. LOCK_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_FIFO,
  373. LOCK_NO_SPIN
  374. >
  375. {
  376. private:
  377. LOCK_INSTRUMENTATION_DECL();
  378. public:
  379. CFakeLock() {}
  380. #ifdef LOCK_INSTRUMENTATION
  381. CFakeLock(const char*) {}
  382. #endif // LOCK_INSTRUMENTATION
  383. ~CFakeLock() {}
  384. void WriteLock() {}
  385. void ReadLock() {}
  386. bool ReadOrWriteLock() {return true;}
  387. bool TryWriteLock() {return true;}
  388. bool TryReadLock() {return true;}
  389. void WriteUnlock() {}
  390. void ReadUnlock() {}
  391. void ReadOrWriteUnlock(bool) {}
  392. bool IsWriteLocked() const {return true;}
  393. bool IsReadLocked() const {return IsWriteLocked();}
  394. bool IsWriteUnlocked() const {return true;}
  395. bool IsReadUnlocked() const {return true;}
  396. void ConvertSharedToExclusive() {}
  397. void ConvertExclusiveToShared() {}
  398. #ifdef LOCK_DEFAULT_SPIN_IMPLEMENTATION
  399. bool SetSpinCount(WORD) {return false;}
  400. WORD GetSpinCount() const {return LOCK_DONT_SPIN;}
  401. LOCK_DEFAULT_SPIN_IMPLEMENTATION();
  402. #endif // LOCK_DEFAULT_SPIN_IMPLEMENTATION
  403. static const TCHAR* ClassName() {return _TEXT("CFakeLock");}
  404. }; // CFakeLock
  405. //--------------------------------------------------------------------
  406. // A spinlock is a sort of lightweight critical section. Its main
  407. // advantage over a true Win32 CRITICAL_SECTION is that it occupies 4 bytes
  408. // instead of 24 (+ another 32 bytes for the RTL_CRITICAL_SECTION_DEBUG data),
  409. // which is important when we have many thousands of locks
  410. // and we're trying to be L1 cache-conscious. A CRITICAL_SECTION also
  411. // contains a HANDLE to a semaphore, although this is not initialized until
  412. // the first time that the CRITICAL_SECTION blocks.
  413. //
  414. // On a multiprocessor machine, a spinlock tries to acquire the lock. If
  415. // it fails, it sits in a tight loop, testing the lock and decrementing a
  416. // counter. If the counter reaches zero, it does a Sleep(0), yielding the
  417. // processor to another thread. When control returns to the thread, the
  418. // lock is probably free. If not, the loop starts again and it is
  419. // terminated only when the lock is acquired. The theory is that it is
  420. // less costly to spin in a busy loop for a short time rather than
  421. // immediately yielding the processor, forcing an expensive context switch
  422. // that requires the old thread's state (registers, etc) be saved, the new
  423. // thread's state be reloaded, and the L1 and L2 caches be left full of
  424. // stale data.
  425. //
  426. // You can tune the spin count (global only: per-lock spin counts are
  427. // disabled) and the backoff algorithm (the factor by which the spin
  428. // count is multiplied after each Sleep).
  429. //
  430. // On a 1P machine, the loop is pointless---this thread has control,
  431. // hence no other thread can possibly release the lock while this thread
  432. // is looping---so the processor is yielded immediately.
  433. //
  434. // The kernel uses spinlocks internally and spinlocks were also added to
  435. // CRITICAL_SECTIONs in NT 4.0 sp3. In the CRITICAL_SECTION implementation,
  436. // however, the counter counts down only once and waits on a semaphore
  437. // thereafter (i.e., the same blocking behavior that it exhibits without
  438. // the spinlock).
  439. //
  440. // A disadvantage of a user-level spinlock such as this is that if the
  441. // thread that owns the spinlock blocks for any reason (or is preempted by
  442. // the scheduler), all the other threads will continue to spin on the
  443. // spinlock, wasting CPU, until the owning thread completes its wait and
  444. // releases the lock. (The kernel spinlocks, however, are smart enough to
  445. // switch to another runnable thread instead of wasting time spinning.)
  446. // The backoff algorithm decreases the spin count on each iteration in an
  447. // attempt to minimize this effect. The best policy---and this is true for
  448. // all locks---is to hold the lock for as short a time as possible.
  449. //
  450. // Note: unlike a CRITICAL_SECTION, a CSmallSpinLock cannot be recursively
  451. // acquired; i.e., if you acquire a spinlock and then attempt to acquire it
  452. // again *on the same thread* (perhaps from a different function), the
  453. // thread will hang forever. Use CSpinLock instead, which is safe though a
  454. // little slower than a CSmallSpinLock. If you own all the code
  455. // that is bracketed by Lock() and Unlock() (e.g., no callbacks or passing
  456. // back of locked data structures to callers) and know for certain that it
  457. // will not attempt to reacquire the lock, you can use CSmallSpinLock.
  458. //
  459. // See also http://muralik/work/performance/spinlocks.htm and John Vert's
  460. // MSDN article, "Writing Scalable Applications for Windows NT".
  461. //
  462. // The original implementation is due to PALarson.
  463. class IRTL_DLLEXP CSmallSpinLock :
  464. public CLockBase<LOCK_SMALLSPINLOCK, LOCK_MUTEX,
  465. LOCK_NON_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_KERNEL,
  466. LOCK_CLASS_SPIN
  467. >
  468. {
  469. private:
  470. volatile LONG m_lTid; // The lock state variable
  471. enum {
  472. SL_UNOWNED = 0,
  473. #ifdef LOCK_SMALL_SPIN_NO_THREAD_ID
  474. SL_LOCKED = 1,
  475. #endif // LOCK_SMALL_SPIN_NO_THREAD_ID
  476. };
  477. LOCK_INSTRUMENTATION_DECL();
  478. static LONG _CurrentThreadId();
  479. private:
  480. // Does all the spinning (and instrumentation) if the lock is contended.
  481. void _LockSpin();
  482. // Attempt to acquire the lock
  483. bool _TryLock();
  484. // Release the lock
  485. void _Unlock();
  486. public:
  487. #ifndef LOCK_INSTRUMENTATION
  488. CSmallSpinLock()
  489. : m_lTid(SL_UNOWNED)
  490. {}
  491. #else // LOCK_INSTRUMENTATION
  492. CSmallSpinLock(
  493. const TCHAR* ptszName)
  494. : m_lTid(SL_UNOWNED)
  495. {
  496. LOCK_INSTRUMENTATION_INIT(ptszName);
  497. }
  498. #endif // LOCK_INSTRUMENTATION
  499. #ifdef IRTLDEBUG
  500. ~CSmallSpinLock()
  501. {
  502. IRTLASSERT(m_lTid == SL_UNOWNED);
  503. }
  504. #endif // IRTLDEBUG
  505. // Acquire an exclusive lock for writing.
  506. // Blocks (if needed) until acquired.
  507. LOCK_FORCEINLINE void
  508. WriteLock()
  509. {
  510. LOCKS_ENTER_CRIT_REGION();
  511. LOCK_WRITELOCK_INSTRUMENTATION();
  512. if (! _TryLock())
  513. _LockSpin();
  514. }
  515. // Acquire a (possibly shared) lock for reading.
  516. // Blocks (if needed) until acquired.
  517. LOCK_FORCEINLINE void
  518. ReadLock()
  519. {
  520. LOCKS_ENTER_CRIT_REGION();
  521. LOCK_READLOCK_INSTRUMENTATION();
  522. if (! _TryLock())
  523. _LockSpin();
  524. }
  525. // Try to acquire an exclusive lock for writing. Returns true
  526. // if successful. Non-blocking.
  527. LOCK_FORCEINLINE bool
  528. TryWriteLock()
  529. {
  530. LOCKS_ENTER_CRIT_REGION();
  531. bool fAcquired = _TryLock();
  532. if (fAcquired)
  533. LOCK_WRITELOCK_INSTRUMENTATION();
  534. else
  535. LOCKS_LEAVE_CRIT_REGION();
  536. return fAcquired;
  537. }
  538. // Try to acquire a (possibly shared) lock for reading. Returns true
  539. // if successful. Non-blocking.
  540. LOCK_FORCEINLINE bool
  541. TryReadLock()
  542. {
  543. LOCKS_ENTER_CRIT_REGION();
  544. bool fAcquired = _TryLock();
  545. if (fAcquired)
  546. LOCK_READLOCK_INSTRUMENTATION();
  547. else
  548. LOCKS_LEAVE_CRIT_REGION();
  549. return fAcquired;
  550. }
  551. // Unlock the lock after a successful call to {,Try}WriteLock().
  552. // Assumes caller owned the lock.
  553. LOCK_FORCEINLINE void
  554. WriteUnlock()
  555. {
  556. _Unlock();
  557. LOCKS_LEAVE_CRIT_REGION();
  558. }
  559. // Unlock the lock after a successful call to {,Try}ReadLock().
  560. // Assumes caller owned the lock.
  561. LOCK_FORCEINLINE void
  562. ReadUnlock()
  563. {
  564. _Unlock();
  565. LOCKS_LEAVE_CRIT_REGION();
  566. }
  567. // Is the lock already locked for writing by this thread?
  568. bool IsWriteLocked() const
  569. {
  570. return (m_lTid == _CurrentThreadId());
  571. }
  572. // Is the lock already locked for reading?
  573. bool IsReadLocked() const
  574. {
  575. return IsWriteLocked();
  576. }
  577. // Is the lock unlocked for writing?
  578. bool IsWriteUnlocked() const
  579. {
  580. return (m_lTid == SL_UNOWNED);
  581. }
  582. // Is the lock unlocked for reading?
  583. bool IsReadUnlocked() const
  584. {
  585. return IsWriteUnlocked();
  586. }
  587. // Convert a reader lock to a writer lock
  588. void ConvertSharedToExclusive()
  589. {
  590. // no-op
  591. }
  592. // Convert a writer lock to a reader lock
  593. void ConvertExclusiveToShared()
  594. {
  595. // no-op
  596. }
  597. #ifdef LOCK_DEFAULT_SPIN_IMPLEMENTATION
  598. // Set the spin count for this lock.
  599. // Returns true if successfully set the per-lock spincount, false otherwise
  600. bool SetSpinCount(WORD wSpins)
  601. {
  602. UNREFERENCED_PARAMETER(wSpins);
  603. IRTLASSERT((wSpins == LOCK_DONT_SPIN)
  604. || (wSpins == LOCK_USE_DEFAULT_SPINS)
  605. || (LOCK_MINIMUM_SPINS <= wSpins
  606. && wSpins <= LOCK_MAXIMUM_SPINS));
  607. return false;
  608. }
  609. // Return the spin count for this lock.
  610. WORD GetSpinCount() const
  611. {
  612. return sm_wDefaultSpinCount;
  613. }
  614. LOCK_DEFAULT_SPIN_IMPLEMENTATION();
  615. #endif // LOCK_DEFAULT_SPIN_IMPLEMENTATION
  616. static const TCHAR* ClassName() {return _TEXT("CSmallSpinLock");}
  617. }; // CSmallSpinLock
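// The acquisition path that the comment above describes, in outline. This
// is only a sketch of the spin-then-sleep technique: the real _TryLock and
// _LockSpin bodies live in the .cpp file and differ in detail (per-class
// spin count, the configurable backoff factor, instrumentation, and the
// uniprocessor case). Assumes <windows.h>.
#if 0   // illustration only; not compiled
bool SketchTryLock(volatile LONG* plTid)
{
    // Atomically claim the lock iff it is currently unowned (SL_UNOWNED).
    return InterlockedCompareExchange(plTid,
                                      (LONG) GetCurrentThreadId(),
                                      0) == 0;
}

void SketchLockSpin(volatile LONG* plTid)
{
    DWORD cSpins = LOCK_DEFAULT_SPINS;
    for (;;)
    {
        // Busy-wait: cheap reads first, interlocked attempt only when free.
        for (DWORD i = cSpins; i > 0; --i)
            if (*plTid == 0 && SketchTryLock(plTid))
                return;

        Sleep(0);   // yield the processor; the lock is probably free on return

        // Back off: the real code scales the count by the spin adjustment
        // factor (GetDefaultSpinAdjustmentFactor); halving is a stand-in.
        if (cSpins > 2 * LOCK_MINIMUM_SPINS)
            cSpins /= 2;
    }
}
#endif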
  618. //--------------------------------------------------------------------
  619. // CSpinLock is a spinlock that doesn't deadlock if recursively acquired.
  620. // This version occupies only 4 bytes. Uses 24 bits for the thread id.
  621. class IRTL_DLLEXP CSpinLock :
  622. public CLockBase<LOCK_SPINLOCK, LOCK_MUTEX,
  623. LOCK_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_KERNEL,
  624. LOCK_CLASS_SPIN
  625. >
  626. {
  627. private:
  628. // Lock state: owner thread id in the low 24 bits, recursion count in the high 8 bits
  629. volatile LONG m_lTid;
  630. enum {
  631. SL_THREAD_SHIFT = 0,
  632. SL_THREAD_BITS = 24,
  633. SL_OWNER_SHIFT = SL_THREAD_BITS,
  634. SL_OWNER_BITS = 8,
  635. SL_THREAD_MASK = ((1 << SL_THREAD_BITS) - 1) << SL_THREAD_SHIFT,
  636. SL_OWNER_INCR = 1 << SL_THREAD_BITS,
  637. SL_OWNER_MASK = ((1 << SL_OWNER_BITS) - 1) << SL_OWNER_SHIFT,
  638. SL_UNOWNED = 0,
  639. };
  640. LOCK_INSTRUMENTATION_DECL();
  641. private:
  642. // Get the current thread ID. Assumes that it can fit into 24 bits,
  643. // which is fairly safe as NT recycles thread IDs and failing to fit
  644. // into 24 bits would mean that more than 16 million threads were
  645. // currently active (actually 4 million as lowest two bits are always
  646. // zero on W2K). This is improbable in the extreme as NT runs out of
  647. // resources if there are more than a few thousand threads in
  648. // existence and the overhead of context switching becomes unbearable.
  649. inline static DWORD _GetCurrentThreadId()
  650. {
  651. #ifdef LOCKS_KERNEL_MODE
  652. return (DWORD) HandleToULong(::PsGetCurrentThreadId());
  653. #else // !LOCKS_KERNEL_MODE
  654. return ::GetCurrentThreadId();
  655. #endif // !LOCKS_KERNEL_MODE
  656. }
  657. inline static LONG _CurrentThreadId()
  658. {
  659. DWORD dwTid = _GetCurrentThreadId();
  660. // Thread ID 0 is used by the System Idle Process (Process ID 0).
  661. // We use a thread-id of zero to indicate that the lock is unowned.
  662. // NT uses +ve thread ids, Win9x uses -ve ids
  663. IRTLASSERT(dwTid != SL_UNOWNED
  664. && ((dwTid <= SL_THREAD_MASK) || (dwTid > ~SL_THREAD_MASK)));
  665. return (LONG) (dwTid & SL_THREAD_MASK);
  666. }
  667. // Attempt to acquire the lock without blocking
  668. bool _TryLock();
  669. // Acquire the lock, recursively if need be
  670. void _Lock();
  671. // Release the lock
  672. void _Unlock();
  673. // Return true if the lock is owned by this thread
  674. bool _IsLocked() const
  675. {
  676. const LONG lTid = m_lTid;
  677. if (lTid == SL_UNOWNED)
  678. return false;
  679. bool fLocked = ((lTid ^ _GetCurrentThreadId()) << SL_OWNER_BITS) == 0;
  680. IRTLASSERT(!fLocked
  681. || ((lTid & SL_OWNER_MASK) > 0
  682. && (lTid & SL_THREAD_MASK) == _CurrentThreadId()));
  683. return fLocked;
  684. }
  685. // Does all the spinning (and instrumentation) if the lock is contended.
  686. void _LockSpin();
  687. public:
  688. #ifndef LOCK_INSTRUMENTATION
  689. CSpinLock()
  690. : m_lTid(SL_UNOWNED)
  691. {}
  692. #else // LOCK_INSTRUMENTATION
  693. CSpinLock(
  694. const TCHAR* ptszName)
  695. : m_lTid(SL_UNOWNED)
  696. {
  697. LOCK_INSTRUMENTATION_INIT(ptszName);
  698. }
  699. #endif // LOCK_INSTRUMENTATION
  700. #ifdef IRTLDEBUG
  701. ~CSpinLock()
  702. {
  703. IRTLASSERT(m_lTid == SL_UNOWNED);
  704. }
  705. #endif // IRTLDEBUG
  706. // Acquire an exclusive lock for writing. Blocks until acquired.
  707. LOCK_FORCEINLINE void
  708. WriteLock()
  709. {
  710. LOCKS_ENTER_CRIT_REGION();
  711. LOCK_WRITELOCK_INSTRUMENTATION();
  712. // Is the lock unowned?
  713. if (! _TryLock())
  714. _Lock();
  715. }
  716. // Acquire a (possibly shared) lock for reading. Blocks until acquired.
  717. LOCK_FORCEINLINE void
  718. ReadLock()
  719. {
  720. LOCKS_ENTER_CRIT_REGION();
  721. LOCK_READLOCK_INSTRUMENTATION();
  722. // Is the lock unowned?
  723. if (! _TryLock())
  724. _Lock();
  725. }
  726. // See the description under CReaderWriterLock3::ReadOrWriteLock
  727. LOCK_FORCEINLINE bool
  728. ReadOrWriteLock()
  729. {
  730. ReadLock();
  731. return true;
  732. }
  733. // Try to acquire an exclusive lock for writing. Returns true
  734. // if successful. Non-blocking.
  735. LOCK_FORCEINLINE bool
  736. TryWriteLock()
  737. {
  738. LOCKS_ENTER_CRIT_REGION();
  739. bool fAcquired = _TryLock();
  740. if (fAcquired)
  741. LOCK_WRITELOCK_INSTRUMENTATION();
  742. else
  743. LOCKS_LEAVE_CRIT_REGION();
  744. return fAcquired;
  745. }
  746. // Try to acquire a (possibly shared) lock for reading. Returns true
  747. // if successful. Non-blocking.
  748. LOCK_FORCEINLINE bool
  749. TryReadLock()
  750. {
  751. LOCKS_ENTER_CRIT_REGION();
  752. bool fAcquired = _TryLock();
  753. if (fAcquired)
  754. LOCK_READLOCK_INSTRUMENTATION();
  755. else
  756. LOCKS_LEAVE_CRIT_REGION();
  757. return fAcquired;
  758. }
  759. // Unlock the lock after a successful call to {,Try}WriteLock().
  760. LOCK_FORCEINLINE void
  761. WriteUnlock()
  762. {
  763. _Unlock();
  764. LOCKS_LEAVE_CRIT_REGION();
  765. }
  766. // Unlock the lock after a successful call to {,Try}ReadLock().
  767. LOCK_FORCEINLINE void
  768. ReadUnlock()
  769. {
  770. _Unlock();
  771. LOCKS_LEAVE_CRIT_REGION();
  772. }
  773. // Unlock the lock after a call to ReadOrWriteLock().
  774. LOCK_FORCEINLINE void
  775. ReadOrWriteUnlock(bool)
  776. {
  777. ReadUnlock();
  778. }
  779. // Is the lock already locked for writing?
  780. bool IsWriteLocked() const
  781. {
  782. return _IsLocked();
  783. }
  784. // Is the lock already locked for reading?
  785. bool IsReadLocked() const
  786. {
  787. return _IsLocked();
  788. }
  789. // Is the lock unlocked for writing?
  790. bool IsWriteUnlocked() const
  791. {
  792. return !IsWriteLocked();
  793. }
  794. // Is the lock unlocked for reading?
  795. bool IsReadUnlocked() const
  796. {
  797. return !IsReadLocked();
  798. }
  799. // Convert a reader lock to a writer lock
  800. void ConvertSharedToExclusive()
  801. {
  802. // no-op
  803. }
  804. // Convert a writer lock to a reader lock
  805. void ConvertExclusiveToShared()
  806. {
  807. // no-op
  808. }
  809. #ifdef LOCK_DEFAULT_SPIN_IMPLEMENTATION
  810. // Set the spin count for this lock.
  811. bool SetSpinCount(WORD) {return false;}
  812. // Return the spin count for this lock.
  813. WORD GetSpinCount() const
  814. {
  815. return sm_wDefaultSpinCount;
  816. }
  817. LOCK_DEFAULT_SPIN_IMPLEMENTATION();
  818. #endif // LOCK_DEFAULT_SPIN_IMPLEMENTATION
  819. static const TCHAR* ClassName() {return _TEXT("CSpinLock");}
  820. }; // CSpinLock
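// Because the owner count lives in the top 8 bits of m_lTid, the same
// thread may re-enter a CSpinLock it already holds, and every Lock must
// still be balanced by an Unlock. A sketch (Helper/DoWork are
// illustrative names; the 8 owner bits bound the recursion depth):
#if 0   // illustration only; not compiled
CSpinLock g_Lock;

void Helper()
{
    g_Lock.WriteLock();      // re-entered: owner count goes from 1 to 2
    // ... do more work on the shared data ...
    g_Lock.WriteUnlock();    // owner count back to 1; lock still held
}

void DoWork()
{
    g_Lock.WriteLock();      // first acquisition by this thread
    Helper();                // safe: CSpinLock is recursive
    g_Lock.WriteUnlock();    // owner count reaches 0; lock released
}
#endif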
  821. #ifndef LOCKS_KERNEL_MODE
  822. //--------------------------------------------------------------------
  823. // A Win32 CRITICAL_SECTION
  824. class IRTL_DLLEXP CCritSec :
  825. public CLockBase<LOCK_CRITSEC, LOCK_MUTEX,
  826. LOCK_RECURSIVE, LOCK_WAIT_HANDLE, LOCK_QUEUE_KERNEL,
  827. LOCK_INDIVIDUAL_SPIN
  828. >
  829. {
  830. private:
  831. CRITICAL_SECTION m_cs;
  832. LOCK_INSTRUMENTATION_DECL();
  833. public:
  834. CCritSec()
  835. {
  836. InitializeCriticalSection(&m_cs);
  837. SetSpinCount(sm_wDefaultSpinCount);
  838. }
  839. #ifdef LOCK_INSTRUMENTATION
  840. CCritSec(const char*)
  841. {
  842. InitializeCriticalSection(&m_cs);
  843. SetSpinCount(sm_wDefaultSpinCount);
  844. }
  845. #endif // LOCK_INSTRUMENTATION
  846. ~CCritSec() { DeleteCriticalSection(&m_cs); }
  847. void WriteLock() { EnterCriticalSection(&m_cs); }
  848. void ReadLock() { WriteLock(); }
  849. bool ReadOrWriteLock() { ReadLock(); return true; }
  850. bool TryWriteLock();
  851. bool TryReadLock() { return TryWriteLock(); }
  852. void WriteUnlock() { LeaveCriticalSection(&m_cs); }
  853. void ReadUnlock() { WriteUnlock(); }
  854. void ReadOrWriteUnlock(bool) { ReadUnlock(); }
  855. bool IsWriteLocked() const {return true;} // TODO: fix this
  856. bool IsReadLocked() const {return IsWriteLocked();}
  857. bool IsWriteUnlocked() const {return true;} // TODO: fix this
  858. bool IsReadUnlocked() const {return true;} // TODO: fix this
  859. // Convert a reader lock to a writer lock
  860. void ConvertSharedToExclusive()
  861. {
  862. // no-op
  863. }
  864. // Convert a writer lock to a reader lock
  865. void ConvertExclusiveToShared()
  866. {
  867. // no-op
  868. }
  869. // Wrapper for ::SetCriticalSectionSpinCount which was introduced
  870. // in NT 4.0 sp3 and hence is not available on all platforms
  871. static DWORD SetSpinCount(LPCRITICAL_SECTION pcs,
  872. DWORD dwSpinCount=LOCK_DEFAULT_SPINS);
  873. #ifdef LOCK_DEFAULT_SPIN_IMPLEMENTATION
  874. bool SetSpinCount(WORD wSpins)
  875. {SetSpinCount(&m_cs, wSpins); return true;}
  876. WORD GetSpinCount() const { return sm_wDefaultSpinCount; } // TODO
  877. LOCK_DEFAULT_SPIN_IMPLEMENTATION();
  878. #endif // LOCK_DEFAULT_SPIN_IMPLEMENTATION
  879. static const TCHAR* ClassName() {return _TEXT("CCritSec");}
  880. }; // CCritSec
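// CCritSec::TryWriteLock and the static SetSpinCount(LPCRITICAL_SECTION,
// DWORD) wrapper are defined out of line. One plausible user-mode shape
// for TryWriteLock is shown below; the real body lives in the .cpp and
// may differ (e.g. on platforms without TryEnterCriticalSection).
#if 0   // illustration only; not compiled
bool CCritSec::TryWriteLock()
{
    return ::TryEnterCriticalSection(&m_cs) != FALSE;
}
#endif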
  881. #endif // !LOCKS_KERNEL_MODE
  882. //--------------------------------------------------------------------
  883. // CReaderWriterLock is a multi-reader, single-writer spinlock due to NJain,
  884. // which in turn is derived from an exclusive spinlock by DmitryR.
  885. // Gives priority to writers. Cannot be acquired recursively.
  886. // No error checking. Use CReaderWriterLock3.
  887. class IRTL_DLLEXP CReaderWriterLock :
  888. public CLockBase<LOCK_READERWRITERLOCK, LOCK_MRSW,
  889. LOCK_READ_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_KERNEL,
  890. LOCK_CLASS_SPIN
  891. >
  892. {
  893. private:
  894. volatile LONG m_nState; // > 0 => that many readers
  895. volatile LONG m_cWaiting; // number of would-be writers
  896. LOCK_INSTRUMENTATION_DECL();
  897. private:
  898. enum {
  899. SL_FREE = 0,
  900. SL_EXCLUSIVE = -1,
  901. };
  902. void _LockSpin(bool fWrite);
  903. void _WriteLockSpin() { _LockSpin(true); }
  904. void _ReadLockSpin() { _LockSpin(false); }
  905. bool _CmpExch(LONG lNew, LONG lCurrent);
  906. bool _TryWriteLock();
  907. bool _TryReadLock();
  908. public:
  909. CReaderWriterLock()
  910. : m_nState(SL_FREE),
  911. m_cWaiting(0)
  912. {
  913. }
  914. #ifdef LOCK_INSTRUMENTATION
  915. CReaderWriterLock(
  916. const TCHAR* ptszName)
  917. : m_nState(SL_FREE),
  918. m_cWaiting(0)
  919. {
  920. LOCK_INSTRUMENTATION_INIT(ptszName);
  921. }
  922. #endif // LOCK_INSTRUMENTATION
  923. #ifdef IRTLDEBUG
  924. ~CReaderWriterLock()
  925. {
  926. IRTLASSERT(m_nState == SL_FREE && m_cWaiting == 0);
  927. }
  928. #endif // IRTLDEBUG
  929. void WriteLock();
  930. void ReadLock();
  931. bool TryWriteLock();
  932. bool TryReadLock();
  933. void WriteUnlock();
  934. void ReadUnlock();
  935. bool IsWriteLocked() const {return m_nState == SL_EXCLUSIVE;}
  936. bool IsReadLocked() const {return m_nState > SL_FREE;}
  937. bool IsWriteUnlocked() const {return m_nState != SL_EXCLUSIVE;}
  938. bool IsReadUnlocked() const {return m_nState <= SL_FREE;}
  939. void ConvertSharedToExclusive();
  940. void ConvertExclusiveToShared();
  941. #ifdef LOCK_DEFAULT_SPIN_IMPLEMENTATION
  942. bool SetSpinCount(WORD) {return false;}
  943. WORD GetSpinCount() const {return sm_wDefaultSpinCount;}
  944. LOCK_DEFAULT_SPIN_IMPLEMENTATION();
  945. #endif // LOCK_DEFAULT_SPIN_IMPLEMENTATION
  946. static const TCHAR* ClassName() {return _TEXT("CReaderWriterLock");}
  947. }; // CReaderWriterLock
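// The primitives declared above are built around a single compare-exchange
// helper: _CmpExch(lNew, lCurrent) succeeds only if m_nState still equals
// lCurrent, replacing it with lNew. A sketch of how the pieces could fit
// together; the real bodies are in the .cpp, and reader admission there
// must also respect m_cWaiting so that waiting writers keep their priority.
#if 0   // illustration only; not compiled
bool CReaderWriterLock::_CmpExch(LONG lNew, LONG lCurrent)
{
    return lCurrent == InterlockedCompareExchange(&m_nState, lNew, lCurrent);
}

bool CReaderWriterLock::_TryWriteLock()
{
    return _CmpExch(SL_EXCLUSIVE, SL_FREE);          // free -> one writer
}

bool CReaderWriterLock::_TryReadLock()
{
    const LONG l = m_nState;
    return (l >= SL_FREE) && (m_cWaiting == 0)       // no writer holds or waits
           && _CmpExch(l + 1, l);                    // bump the reader count
}
#endif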
  948. //--------------------------------------------------------------------
  949. // CReaderWriterLock2 is a multi-reader, single-writer spinlock due to NJain,
  950. // which in turn is derived from an exclusive spinlock by DmitryR.
  951. // Gives priority to writers. Cannot be acquired recursively.
  952. // No error checking. The difference between this and CReaderWriterLock is
  953. // that all the state is packed into a single LONG, instead of two LONGs.
  954. class IRTL_DLLEXP CReaderWriterLock2 :
  955. public CLockBase<LOCK_READERWRITERLOCK2, LOCK_MRSW,
  956. LOCK_READ_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_KERNEL,
  957. LOCK_CLASS_SPIN
  958. >
  959. {
  960. private:
  961. volatile LONG m_lRW;
  962. // LoWord is state. ==0 => free; >0 => readers; ==0xFFFF => 1 writer.
  963. // HiWord is count of writers, W.
  964. // If LoWord==0xFFFF => W-1 waiters, 1 writer;
  965. // otherwise W waiters.
  966. enum {
  967. SL_FREE = 0x00000000,
  968. SL_STATE_MASK = 0x0000FFFF,
  969. SL_STATE_SHIFT = 0,
  970. SL_WAITING_MASK = 0xFFFF0000, // waiting writers
  971. SL_WAITING_SHIFT = 16,
  972. SL_READER_INCR = 0x00000001,
  973. SL_READER_MASK = 0x00007FFF,
  974. SL_EXCLUSIVE = 0x0000FFFF, // one writer
  975. SL_WRITER_INCR = 0x00010000,
  976. SL_ONE_WRITER = SL_EXCLUSIVE | SL_WRITER_INCR,
  977. SL_ONE_READER = (SL_FREE + 1),
  978. SL_WRITERS_MASK = ~SL_READER_MASK,
  979. };
  980. LOCK_INSTRUMENTATION_DECL();
  981. private:
  982. void _LockSpin(bool fWrite);
  983. void _WriteLockSpin();
  984. void _ReadLockSpin() { _LockSpin(false); }
  985. bool _CmpExch(LONG lNew, LONG lCurrent);
  986. bool _TryWriteLock(LONG nIncr);
  987. bool _TryReadLock();
  988. public:
  989. CReaderWriterLock2()
  990. : m_lRW(SL_FREE)
  991. {}
  992. #ifdef LOCK_INSTRUMENTATION
  993. CReaderWriterLock2(
  994. const TCHAR* ptszName)
  995. : m_lRW(SL_FREE)
  996. {
  997. LOCK_INSTRUMENTATION_INIT(ptszName);
  998. }
  999. #endif // LOCK_INSTRUMENTATION
  1000. #ifdef IRTLDEBUG
  1001. ~CReaderWriterLock2()
  1002. {
  1003. IRTLASSERT(m_lRW == SL_FREE);
  1004. }
  1005. #endif // IRTLDEBUG
  1006. LOCK_FORCEINLINE void
  1007. WriteLock()
  1008. {
  1009. LOCKS_ENTER_CRIT_REGION();
  1010. LOCK_WRITELOCK_INSTRUMENTATION();
  1011. // Optimize for the common case
  1012. if (_TryWriteLock(SL_WRITER_INCR))
  1013. return;
  1014. _WriteLockSpin();
  1015. }
  1016. LOCK_FORCEINLINE void
  1017. ReadLock()
  1018. {
  1019. LOCKS_ENTER_CRIT_REGION();
  1020. LOCK_READLOCK_INSTRUMENTATION();
  1021. // Optimize for the common case
  1022. if (_TryReadLock())
  1023. return;
  1024. _ReadLockSpin();
  1025. }
  1026. LOCK_FORCEINLINE bool
  1027. TryWriteLock()
  1028. {
  1029. LOCKS_ENTER_CRIT_REGION();
  1030. if (_TryWriteLock(SL_WRITER_INCR))
  1031. {
  1032. LOCK_WRITELOCK_INSTRUMENTATION();
  1033. return true;
  1034. }
  1035. else
  1036. {
  1037. LOCKS_LEAVE_CRIT_REGION();
  1038. return false;
  1039. }
  1040. }
  1041. LOCK_FORCEINLINE bool
  1042. TryReadLock()
  1043. {
  1044. LOCKS_ENTER_CRIT_REGION();
  1045. if (_TryReadLock())
  1046. {
  1047. LOCK_READLOCK_INSTRUMENTATION();
  1048. return true;
  1049. }
  1050. else
  1051. {
  1052. LOCKS_LEAVE_CRIT_REGION();
  1053. return false;
  1054. }
  1055. }
  1056. void WriteUnlock();
  1057. void ReadUnlock();
  1058. bool IsWriteLocked() const
  1059. {return (m_lRW & SL_STATE_MASK) == SL_EXCLUSIVE;}
  1060. bool IsReadLocked() const
  1061. {
  1062. LONG lRW = m_lRW;
  1063. return (((lRW & SL_STATE_MASK) != SL_EXCLUSIVE)
  1064. && (lRW & SL_READER_MASK) >= SL_READER_INCR);
  1065. }
  1066. bool IsWriteUnlocked() const
  1067. {return !IsWriteLocked();}
  1068. bool IsReadUnlocked() const
  1069. {return !IsReadLocked();}
  1070. void ConvertSharedToExclusive();
  1071. void ConvertExclusiveToShared();
  1072. #ifdef LOCK_DEFAULT_SPIN_IMPLEMENTATION
  1073. bool SetSpinCount(WORD) {return false;}
  1074. WORD GetSpinCount() const {return sm_wDefaultSpinCount;}
  1075. LOCK_DEFAULT_SPIN_IMPLEMENTATION();
  1076. #endif // LOCK_DEFAULT_SPIN_IMPLEMENTATION
  1077. static const TCHAR* ClassName() {return _TEXT("CReaderWriterLock2");}
  1078. }; // CReaderWriterLock2
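// A concrete reading of the packed word above, using the layout from the
// enum (low word: state; high word: writer count). DecodeRW2 is a
// hypothetical helper written for illustration.
#if 0   // illustration only; not compiled
void DecodeRW2(LONG lRW, LONG* pcReaders, bool* pfWriter, LONG* pcWaitingWriters)
{
    const LONG lState   = (lRW & 0x0000FFFF);            // SL_STATE_MASK
    const LONG cWriters = (LONG) (((ULONG) lRW) >> 16);  // SL_WAITING_SHIFT
    *pfWriter           = (lState == 0x0000FFFF);        // SL_EXCLUSIVE
    *pcReaders          = *pfWriter ? 0 : lState;
    *pcWaitingWriters   = *pfWriter ? cWriters - 1 : cWriters;
}
// Examples:  m_lRW == 0x00000003  =>  3 readers, no writers
//            m_lRW == 0x0001FFFF  =>  1 writer (SL_ONE_WRITER), 0 waiting
//            m_lRW == 0x00020005  =>  5 readers, 2 writers waiting
#endif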
  1079. //--------------------------------------------------------------------
  1080. // CReaderWriterLock3 is a multi-reader, single-writer spinlock due
  1081. // to NJain, which in turn is derived from an exclusive spinlock by DmitryR.
  1082. // Gives priority to writers.
  1083. // No error checking. Much like CReaderWriterLock2, except that the WriteLock
  1084. // can be acquired recursively.
  1085. class IRTL_DLLEXP CReaderWriterLock3 :
  1086. public CLockBase<LOCK_READERWRITERLOCK3, LOCK_MRSW,
  1087. LOCK_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_KERNEL,
  1088. LOCK_CLASS_SPIN
  1089. >
  1090. {
  1091. private:
  1092. volatile LONG m_lRW; // Reader-Writer state
  1093. volatile LONG m_lTid; // Owning Thread ID + recursion count
  1094. // m_lRW:
  1095. // LoWord is state. ==0 => free; >0 => readers; ==0xFFFF => 1 writer
  1096. // HiWord is count of waiters + writers, N.
  1097. // If LoWord==0xFFFF => N-1 waiters, 1 writer;
  1098. // otherwise => N waiters, 0 writers.
  1099. // m_lTid:
  1100. // If readers, then 0; if a write lock, then thread id + recursion count
  1101. enum {
  1102. // m_lRW
  1103. SL_FREE = 0x00000000,
  1104. SL_STATE_BITS = 16,
  1105. SL_STATE_SHIFT = 0,
  1106. SL_STATE_MASK = ((1 << SL_STATE_BITS) - 1) << SL_STATE_SHIFT,
  1107. SL_WAITING_BITS = 16,
  1108. SL_WAITING_SHIFT = SL_STATE_BITS,
  1109. SL_WAITING_MASK = ((1 << SL_WAITING_BITS) - 1) << SL_WAITING_SHIFT,
  1110. // waiting writers
  1111. SL_READER_INCR = 1 << SL_STATE_SHIFT,
  1112. SL_READER_MASK = ((1 << (SL_STATE_BITS - 1)) - 1) << SL_STATE_SHIFT,
  1113. SL_EXCLUSIVE = SL_STATE_MASK, // one writer
  1114. SL_WRITER_INCR = 1 << SL_WAITING_SHIFT,
  1115. SL_ONE_WRITER = SL_EXCLUSIVE | SL_WRITER_INCR,
  1116. SL_ONE_READER = (SL_FREE + SL_READER_INCR),
  1117. SL_WRITERS_MASK = ~SL_READER_MASK, // == waiter | writer
  1118. // m_lTid
  1119. SL_UNOWNED = 0,
  1120. SL_THREAD_SHIFT = 0,
  1121. SL_THREAD_BITS = 24,
  1122. SL_OWNER_SHIFT = SL_THREAD_BITS,
  1123. SL_OWNER_BITS = 8,
  1124. SL_THREAD_MASK = ((1 << SL_THREAD_BITS) - 1) << SL_THREAD_SHIFT,
  1125. SL_OWNER_INCR = 1 << SL_THREAD_BITS,
  1126. SL_OWNER_MASK = ((1 << SL_OWNER_BITS) - 1) << SL_OWNER_SHIFT,
  1127. };
  1128. LOCK_INSTRUMENTATION_DECL();
  1129. private:
  1130. enum SPIN_TYPE {
  1131. SPIN_WRITE = 1,
  1132. SPIN_READ,
  1133. SPIN_READ_RECURSIVE,
  1134. };
  1135. void _LockSpin(SPIN_TYPE st);
  1136. void _WriteLockSpin();
  1137. void _ReadLockSpin(SPIN_TYPE st) { _LockSpin(st); }
  1138. bool _CmpExchRW(LONG lNew, LONG lCurrent);
  1139. LONG _SetTid(LONG lNewTid);
  1140. bool _TryWriteLock(LONG nWriterIncr);
  1141. bool _TryReadLock();
  1142. bool _TryReadLockRecursive();
  1143. static LONG _GetCurrentThreadId();
  1144. static LONG _CurrentThreadId();
  1145. public:
  1146. CReaderWriterLock3()
  1147. : m_lRW(SL_FREE),
  1148. m_lTid(SL_UNOWNED)
  1149. {}
  1150. #ifdef LOCK_INSTRUMENTATION
  1151. CReaderWriterLock3(
  1152. const TCHAR* ptszName)
  1153. : m_lRW(SL_FREE),
  1154. m_lTid(SL_UNOWNED)
  1155. {
  1156. LOCK_INSTRUMENTATION_INIT(ptszName);
  1157. }
  1158. #endif // LOCK_INSTRUMENTATION
  1159. #ifdef IRTLDEBUG
  1160. ~CReaderWriterLock3()
  1161. {
  1162. IRTLASSERT(m_lRW == SL_FREE && m_lTid == SL_UNOWNED);
  1163. }
  1164. #endif // IRTLDEBUG
  1165. LOCK_FORCEINLINE void
  1166. WriteLock()
  1167. {
  1168. LOCKS_ENTER_CRIT_REGION();
  1169. LOCK_WRITELOCK_INSTRUMENTATION();
  1170. if (! _TryWriteLock(SL_WRITER_INCR))
  1171. _WriteLockSpin();
  1172. IRTLASSERT(IsWriteLocked());
  1173. }
  1174. LOCK_FORCEINLINE void
  1175. ReadLock()
  1176. {
  1177. LOCKS_ENTER_CRIT_REGION();
  1178. LOCK_READLOCK_INSTRUMENTATION();
  1179. if (! _TryReadLock())
  1180. _ReadLockSpin(SPIN_READ);
  1181. IRTLASSERT(IsReadLocked());
  1182. }
  1183. // ReadOrWriteLock: If already locked, recursively acquires another lock
  1184. // of the same kind (read or write). Otherwise, just acquires a read lock.
  1185. // Needed for cases like this.
  1186. // pTable->WriteLock();
  1187. // if (!pTable->FindKey(&SomeKey))
  1188. // InsertRecord(&Whatever);
  1189. // pTable->WriteUnlock();
  1190. // where FindKey looks like
  1191. // Table::FindKey(pKey) {
  1192. // ReadOrWriteLock();
  1193. // // find pKey if present in table
  1194. // ReadOrWriteUnlock();
  1195. // }
  1196. // and InsertRecord looks like
  1197. // Table::InsertRecord(pRecord) {
  1198. // WriteLock();
  1199. // // insert pRecord into table
  1200. // WriteUnlock();
  1201. // }
  1202. // If FindKey called ReadLock while the thread already had done a
  1203. // WriteLock, the thread would deadlock.
  1204. bool ReadOrWriteLock();
  1205. LOCK_FORCEINLINE bool
  1206. TryWriteLock()
  1207. {
  1208. LOCKS_ENTER_CRIT_REGION();
  1209. if (_TryWriteLock(SL_WRITER_INCR))
  1210. {
  1211. LOCK_WRITELOCK_INSTRUMENTATION();
  1212. IRTLASSERT(IsWriteLocked());
  1213. return true;
  1214. }
  1215. else
  1216. {
  1217. LOCKS_LEAVE_CRIT_REGION();
  1218. IRTLASSERT(!IsWriteLocked());
  1219. return false;
  1220. }
  1221. }
  1222. LOCK_FORCEINLINE bool
  1223. TryReadLock()
  1224. {
  1225. LOCKS_ENTER_CRIT_REGION();
  1226. if (_TryReadLock())
  1227. {
  1228. LOCK_READLOCK_INSTRUMENTATION();
  1229. IRTLASSERT(IsReadLocked());
  1230. return true;
  1231. }
  1232. else
  1233. {
  1234. LOCKS_LEAVE_CRIT_REGION();
  1235. // Can't IRTLASSERT(!IsReadLocked()) because other threads
  1236. // may have acquired a read lock by now
  1237. return false;
  1238. }
  1239. }
  1240. void WriteUnlock();
  1241. void ReadUnlock();
  1242. void ReadOrWriteUnlock(bool fIsReadLocked);
  1243. // Does current thread hold a write lock?
  1244. LOCK_FORCEINLINE bool
  1245. IsWriteLocked() const
  1246. {
  1247. const LONG lTid = m_lTid;
  1248. if (lTid == SL_UNOWNED)
  1249. return false;
  1250. bool fLocked = ((lTid ^ _GetCurrentThreadId()) << SL_OWNER_BITS) == 0;
  1251. IRTLASSERT(!fLocked
  1252. || ((m_lRW & SL_STATE_MASK) == SL_EXCLUSIVE
  1253. && (lTid & SL_THREAD_MASK) == _CurrentThreadId()
  1254. && (lTid & SL_OWNER_MASK) > 0));
  1255. return fLocked;
  1256. }
  1257. LOCK_FORCEINLINE bool
  1258. IsReadLocked() const
  1259. {
  1260. LONG lRW = m_lRW;
  1261. return (((lRW & SL_STATE_MASK) != SL_EXCLUSIVE)
  1262. && (lRW & SL_READER_MASK) >= SL_READER_INCR);
  1263. }
  1264. bool
  1265. IsWriteUnlocked() const
  1266. { return !IsWriteLocked(); }
  1267. bool
  1268. IsReadUnlocked() const
  1269. { return !IsReadLocked(); }
  1270. // Note: if there's more than one reader, then there's a window where
  1271. // another thread can acquire and release a writelock before this routine
  1272. // returns.
  1273. void
  1274. ConvertSharedToExclusive();
  1275. // There is no such window when converting from a writelock to a readlock
  1276. void
  1277. ConvertExclusiveToShared();
  1278. #ifdef LOCK_DEFAULT_SPIN_IMPLEMENTATION
  1279. bool
  1280. SetSpinCount(WORD)
  1281. {return false;}
  1282. WORD
  1283. GetSpinCount() const
  1284. {return sm_wDefaultSpinCount;}
  1285. LOCK_DEFAULT_SPIN_IMPLEMENTATION();
  1286. #endif // LOCK_DEFAULT_SPIN_IMPLEMENTATION
  1287. static const TCHAR*
  1288. ClassName()
  1289. {return _TEXT("CReaderWriterLock3");}
  1290. }; // CReaderWriterLock3
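// A sketch of the ReadOrWriteLock pattern described above. The bool that
// ReadOrWriteLock returns records which kind of lock was actually taken
// and must be handed back to ReadOrWriteUnlock as fIsReadLocked. Table,
// Key and m_Lock are the hypothetical names from the comment above.
#if 0   // illustration only; not compiled
bool Table::FindKey(const Key* pKey)
{
    const bool fIsReadLocked = m_Lock.ReadOrWriteLock();
    // ... search the table: safe whether the calling thread already holds
    //     a write lock, already holds a read lock, or holds no lock ...
    m_Lock.ReadOrWriteUnlock(fIsReadLocked);
    return false;
}
#endif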
  1291. //--------------------------------------------------------------------
  1292. // CReaderWriterLock4 is a multi-reader, single-writer spinlock due
  1293. // to NJain, which in turn is derived from an exclusive spinlock by DmitryR.
  1294. // Gives priority to writers.
  1295. // No error checking. Much like CReaderWriterLock2, except that the WriteLock
  1296. // can be acquired recursively.
  1297. class IRTL_DLLEXP CReaderWriterLock4 :
  1298. public CLockBase<LOCK_READERWRITERLOCK4, LOCK_MRSW,
  1299. LOCK_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_KERNEL,
  1300. LOCK_CLASS_SPIN
  1301. >
  1302. {
  1303. private:
  1304. volatile LONG m_lRW; // Reader-Writer state
  1305. volatile LONG m_lTid; // Owning Thread ID + recursion count
  1306. // m_lRW:
  1307. // LoWord is state. ==0 => free;
  1308. // > 0 => # readers;
  1309. // < 0 => 1 writer + recursion count
  1310. // HiWord is count of waiters + writers, N.
  1311. // If LoWord < 0 => N-1 waiters, 1 writer;
  1312. // otherwise => N waiters, 0 writers.
  1313. // m_lTid:
  1314. // If readers, then 0; if a write lock, then thread id
  1315. enum {
  1316. // m_lRW
  1317. SL_FREE = 0x00000000,
  1318. SL_STATE_BITS = 16,
  1319. SL_STATE_SHIFT = 0,
  1320. SL_STATE_MASK = ((1 << SL_STATE_BITS) - 1) << SL_STATE_SHIFT,
  1321. SL_WAITING_BITS = 16,
  1322. SL_WAITING_SHIFT = SL_STATE_BITS,
  1323. SL_WAITING_MASK = ((1 << SL_WAITING_BITS) - 1) << SL_WAITING_SHIFT,
  1324. // waiting writers
  1325. SL_READER_INCR = 1 << SL_STATE_SHIFT,
  1326. SL_WRITER_INCR = - SL_READER_INCR,
  1327. SL_READER_MASK = ((1 << (SL_STATE_BITS - 1)) - 1) << SL_STATE_SHIFT,
  1328. SL_EXCLUSIVE = SL_STATE_MASK, // one writer, recursion == 1
  1329. SL_WRITER_MIN = SL_READER_MASK + SL_READER_INCR,
  1330. SL_WAIT_WRITER_INCR = 1 << SL_WAITING_SHIFT,
  1331. SL_ONE_WRITER = SL_EXCLUSIVE | SL_WAIT_WRITER_INCR,
  1332. SL_ONE_READER = (SL_FREE + SL_READER_INCR),
  1333. SL_WRITERS_MASK = ~SL_READER_MASK, // == waiter | writer
  1334. // m_lTid
  1335. SL_UNOWNED = 0,
  1336. SL_THREAD_SHIFT = 0,
  1337. SL_THREAD_BITS = 32,
  1338. SL_THREAD_MASK = ((1 << SL_THREAD_BITS) - 1) << SL_THREAD_SHIFT,
  1339. };
  1340. LOCK_INSTRUMENTATION_DECL();
  1341. private:
  1342. enum SPIN_TYPE {
  1343. SPIN_WRITE = 1,
  1344. SPIN_READ,
  1345. SPIN_READ_RECURSIVE,
  1346. };
  1347. void _LockSpin(SPIN_TYPE st);
  1348. void _WriteLockSpin();
  1349. void _ReadLockSpin(SPIN_TYPE st) { _LockSpin(st); }
  1350. bool _CmpExchRW(LONG lNew, LONG lCurrent);
  1351. LONG _SetTid(LONG lNewTid);
  1352. bool _TryWriteLock();
  1353. bool _TryWriteLock2();
  1354. bool _TryReadLock();
  1355. bool _TryReadLockRecursive();
  1356. static LONG _GetCurrentThreadId();
  1357. static LONG _CurrentThreadId();
  1358. public:
  1359. CReaderWriterLock4()
  1360. : m_lRW(SL_FREE),
  1361. m_lTid(SL_UNOWNED)
  1362. {}
  1363. #ifdef LOCK_INSTRUMENTATION
  1364. CReaderWriterLock4(
  1365. const TCHAR* ptszName)
  1366. : m_lRW(SL_FREE),
  1367. m_lTid(SL_UNOWNED)
  1368. {
  1369. LOCK_INSTRUMENTATION_INIT(ptszName);
  1370. }
  1371. #endif // LOCK_INSTRUMENTATION
  1372. #ifdef IRTLDEBUG
  1373. ~CReaderWriterLock4()
  1374. {
  1375. IRTLASSERT(m_lRW == SL_FREE && m_lTid == SL_UNOWNED);
  1376. }
  1377. #endif // IRTLDEBUG
  1378. LOCK_FORCEINLINE void
  1379. WriteLock()
  1380. {
  1381. LOCKS_ENTER_CRIT_REGION();
  1382. LOCK_WRITELOCK_INSTRUMENTATION();
  1383. if (! _TryWriteLock())
  1384. _WriteLockSpin();
  1385. IRTLASSERT(IsWriteLocked());
  1386. }
  1387. LOCK_FORCEINLINE void
  1388. ReadLock()
  1389. {
  1390. LOCKS_ENTER_CRIT_REGION();
  1391. LOCK_READLOCK_INSTRUMENTATION();
  1392. if (! _TryReadLock())
  1393. _ReadLockSpin(SPIN_READ);
  1394. IRTLASSERT(IsReadLocked());
  1395. }
  1396. // ReadOrWriteLock: If already locked, recursively acquires another lock
  1397. // of the same kind (read or write). Otherwise, just acquires a read lock.
  1398. // Needed for cases like this.
  1399. // pTable->WriteLock();
  1400. // if (!pTable->FindKey(&SomeKey))
  1401. // InsertRecord(&Whatever);
  1402. // pTable->WriteUnlock();
  1403. // where FindKey looks like
  1404. // Table::FindKey(pKey) {
  1405. // ReadOrWriteLock();
  1406. // // find pKey if present in table
  1407. // ReadOrWriteUnlock();
  1408. // }
  1409. // and InsertRecord looks like
  1410. // Table::InsertRecord(pRecord) {
  1411. // WriteLock();
  1412. // // insert pRecord into table
  1413. // WriteUnlock();
  1414. // }
  1415. // If FindKey called ReadLock while the thread already had done a
  1416. // WriteLock, the thread would deadlock.
  1417. bool ReadOrWriteLock();
  1418. LOCK_FORCEINLINE bool
  1419. TryWriteLock()
  1420. {
  1421. LOCKS_ENTER_CRIT_REGION();
  1422. if (_TryWriteLock())
  1423. {
  1424. LOCK_WRITELOCK_INSTRUMENTATION();
  1425. IRTLASSERT(IsWriteLocked());
  1426. return true;
  1427. }
  1428. else
  1429. {
  1430. LOCKS_LEAVE_CRIT_REGION();
  1431. IRTLASSERT(!IsWriteLocked());
  1432. return false;
  1433. }
  1434. }
  1435. LOCK_FORCEINLINE bool
  1436. TryReadLock()
  1437. {
  1438. LOCKS_ENTER_CRIT_REGION();
  1439. if (_TryReadLock())
  1440. {
  1441. LOCK_READLOCK_INSTRUMENTATION();
  1442. IRTLASSERT(IsReadLocked());
  1443. return true;
  1444. }
  1445. else
  1446. {
  1447. LOCKS_LEAVE_CRIT_REGION();
  1448. // Can't IRTLASSERT(!IsReadLocked()) because other threads
  1449. // may have acquired a read lock by now
  1450. return false;
  1451. }
  1452. }
  1453. void WriteUnlock();
  1454. void ReadUnlock();
  1455. void ReadOrWriteUnlock(bool fIsReadLocked);
  1456. // Does current thread hold a write lock?
  1457. LOCK_FORCEINLINE bool
  1458. IsWriteLocked() const
  1459. {
  1460. const LONG lTid = m_lTid;
  1461. if (lTid == SL_UNOWNED)
  1462. return false;
  1463. bool fLocked = (lTid == _GetCurrentThreadId());
  1464. IRTLASSERT(!fLocked
  1465. || ((SL_WRITER_MIN <= (m_lRW & SL_STATE_MASK))
  1466. && ((m_lRW & SL_STATE_MASK) <= SL_EXCLUSIVE)));
  1467. return fLocked;
  1468. }
  1469. LOCK_FORCEINLINE bool
  1470. IsReadLocked() const
  1471. {
  1472. LONG lRW = m_lRW;
  1473. return ((SL_READER_INCR <= (lRW & SL_STATE_MASK))
  1474. && ((lRW & SL_STATE_MASK) <= SL_READER_MASK));
  1475. }
  1476. bool
  1477. IsWriteUnlocked() const
  1478. { return !IsWriteLocked(); }
  1479. bool
  1480. IsReadUnlocked() const
  1481. { return !IsReadLocked(); }
  1482. // Note: if there's more than one reader, then there's a window where
  1483. // another thread can acquire and release a writelock before this routine
  1484. // returns.
  1485. void
  1486. ConvertSharedToExclusive();
  1487. // There is no such window when converting from a writelock to a readlock
  1488. void
  1489. ConvertExclusiveToShared();
  1490. #ifdef LOCK_DEFAULT_SPIN_IMPLEMENTATION
  1491. bool
  1492. SetSpinCount(WORD)
  1493. {return false;}
  1494. WORD
  1495. GetSpinCount() const
  1496. {return sm_wDefaultSpinCount;}
  1497. LOCK_DEFAULT_SPIN_IMPLEMENTATION();
  1498. #endif // LOCK_DEFAULT_SPIN_IMPLEMENTATION
  1499. static const TCHAR*
  1500. ClassName()
  1501. {return _TEXT("CReaderWriterLock4");}
  1502. }; // CReaderWriterLock4
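// In this variant the write-recursion count lives in the low word of
// m_lRW itself: read as a signed 16-bit value, the low word counts
// readers while positive and counts writer re-entries while negative
// (SL_EXCLUSIVE, 0xFFFF, is -1: held once; 0xFFFE would be held twice).
// A sketch of the decode (DecodeRW4LowWord is a hypothetical helper):
#if 0   // illustration only; not compiled
void DecodeRW4LowWord(LONG lRW, LONG* pcReaders, LONG* pcWriteRecursion)
{
    const SHORT sState = (SHORT) (lRW & 0xFFFF);     // SL_STATE_MASK
    *pcReaders         = (sState > 0) ?  sState : 0;
    *pcWriteRecursion  = (sState < 0) ? -sState : 0; // -1 => 1, -2 => 2, ...
}
#endif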
  1503. #endif // __LOCKS_H__