Leaked source code of Windows Server 2003: locks.h
/*++

Copyright (c) 1998-2000 Microsoft Corporation

Module Name :

    locks.h

Abstract:

    A collection of locks for multithreaded access to data structures

Author:

    George V. Reilly (GeorgeRe) 06-Jan-1998

Environment:

    Win32 - User Mode

Project:

    Internet Information Server RunTime Library

Revision History:

--*/

#ifndef __LOCKS_H__
#define __LOCKS_H__

//--------------------------------------------------------------------
// File: locks.h
//
// A collection of different implementations of read/write locks that all
// share the same interface. This allows different locks to be plugged
// into C++ templates as parameters.
//
// The implementations are:
//   CSmallSpinLock      lightweight critical section
//   CSpinLock           variant of CSmallSpinLock
//   CFakeLock           do-nothing class; useful as a template parameter
//   CCritSec            Win32 CRITICAL_SECTION
// Multi-Reader/Single-Writer locks:
//   CRtlResource        NT's RTL_RESOURCE
//   CShareLock          Michael Parkes's CSharelock
//   CReaderWriterLock   MRSW lock from Neel Jain
//   CReaderWriterLock2  smaller implementation of CReaderWriterLock
//   CReaderWriterLock3  CReaderWriterLock2 with recursive WriteLock
//
// CAutoReadLock<Lock> and CAutoWriteLock<Lock> can be used as
// exception-safe wrappers.
//
// TODO:
// * Add per-class lock-contention statistics
// * Add a timeout feature to Try{Read,Write}Lock
// * Add some way of tracking all the owners of a multi-reader lock
//--------------------------------------------------------------------
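
// Editor's note: a minimal sketch, not part of the original source, of how
// a lock class that follows this shared interface plugs into a template as
// a parameter. CCache and its members are hypothetical names.
#if 0
template <class _Lock>
class CCache
{
private:
    mutable _Lock m_Lock;   // any lock class from this header
    int           m_nValue; // some shared state

public:
    int Get() const
    {
        m_Lock.ReadLock();   // shared access: many readers may enter at once
        int n = m_nValue;
        m_Lock.ReadUnlock();
        return n;
    }

    void Set(int n)
    {
        m_Lock.WriteLock();  // exclusive access for the update
        m_nValue = n;
        m_Lock.WriteUnlock();
    }
};

CCache<CReaderWriterLock3> g_Cache;  // or CSpinLock, CCritSec, ...
#endif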
#ifndef __IRTLDBG_H__
# include <irtldbg.h>
#endif

enum LOCK_LOCKTYPE {
    LOCK_SMALLSPINLOCK = 1,
    LOCK_SPINLOCK,
    LOCK_FAKELOCK,
    LOCK_CRITSEC,
    LOCK_RTLRESOURCE,
    LOCK_SHARELOCK,
    LOCK_READERWRITERLOCK,
    LOCK_READERWRITERLOCK2,
    LOCK_READERWRITERLOCK3,
};

// Forward declarations
class IRTL_DLLEXP CSmallSpinLock;
class IRTL_DLLEXP CSpinLock;
class IRTL_DLLEXP CFakeLock;
class IRTL_DLLEXP CCritSec;
class IRTL_DLLEXP CRtlResource;
class IRTL_DLLEXP CShareLock;
class IRTL_DLLEXP CReaderWriterLock;
class IRTL_DLLEXP CReaderWriterLock2;
class IRTL_DLLEXP CReaderWriterLock3;

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
// __forceinline keyword new to VC6
# define LOCK_FORCEINLINE __forceinline
#else
# define LOCK_FORCEINLINE inline
#endif

#ifdef _M_IX86
// The compiler will warn that the assembly language versions of the
// Lock_Atomic* functions don't return a value. Actually, they do: in EAX.
# pragma warning(disable: 4035)
#endif

// Workarounds for certain useful interlocked operations that are not
// available on Windows 95. Note: the CMPXCHG and XADD instructions were
// introduced in the 80486. If you still need to run on a 386 (unlikely in
// 2000), you'll need to use something else.

LOCK_FORCEINLINE
LONG
Lock_AtomicIncrement(
    IN OUT PLONG plAddend)
{
#ifdef _M_IX86
    __asm
    {
        mov  ecx, plAddend
        mov  eax, 1
        lock xadd [ecx], eax
        inc  eax    // xadd leaves the old value in eax; return the new value
    }
#else
    return InterlockedIncrement(plAddend);
#endif
}

LOCK_FORCEINLINE
LONG
Lock_AtomicDecrement(
    IN OUT PLONG plAddend)
{
#ifdef _M_IX86
    __asm
    {
        mov  ecx, plAddend
        mov  eax, -1
        lock xadd [ecx], eax
        dec  eax    // xadd leaves the old value in eax; return the new value
    }
#else
    return InterlockedDecrement(plAddend);
#endif
}

LOCK_FORCEINLINE
LONG
Lock_AtomicExchange(
    IN OUT PLONG plAddr,
    IN LONG lNew)
{
#ifdef _M_IX86
    __asm
    {
        mov  ecx, plAddr
        mov  edx, lNew
        mov  eax, [ecx]
LAEloop:
        lock cmpxchg [ecx], edx
        jnz  LAEloop
    }
#else
    return InterlockedExchange(plAddr, lNew);
#endif
}

LOCK_FORCEINLINE
LONG
Lock_AtomicCompareExchange(
    IN OUT PLONG plAddr,
    IN LONG lNew,
    IN LONG lCurrent)
{
#ifdef _M_IX86
    __asm
    {
        mov  ecx, plAddr
        mov  edx, lNew
        mov  eax, lCurrent
        lock cmpxchg [ecx], edx
    }
#else
    return InterlockedCompareExchange(plAddr, lNew, lCurrent);
#endif
}

LOCK_FORCEINLINE
LONG
Lock_AtomicExchangeAdd(
    IN OUT LPLONG plAddr,
    IN LONG lValue)
{
#ifdef _M_IX86
    __asm
    {
        mov  ecx, plAddr
        mov  eax, lValue
        lock xadd [ecx], eax
    }
#else
    return InterlockedExchangeAdd(plAddr, lValue);
#endif
}

#ifdef _M_IX86
# pragma warning(default: 4035)

// Makes tight loops a little more cache friendly and reduces power
// consumption. Needed on Willamette (early Pentium 4) processors.
# define Lock_Yield() _asm { rep nop }
#else
# define Lock_Yield() ((void) 0)
#endif
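
// Editor's note: an illustrative sketch, not part of the original source,
// of the Lock_Atomic* semantics. As with the Interlocked* APIs they wrap,
// Lock_AtomicExchange, Lock_AtomicExchangeAdd, and Lock_AtomicCompareExchange
// return the *old* value at the address; Lock_AtomicIncrement/Decrement
// return the *new* value.
#if 0
LONG l = 5;
LONG lOld;

Lock_AtomicIncrement(&l);               // l == 6; returns 6 (new value)
lOld = Lock_AtomicExchangeAdd(&l, 10);  // l == 16; lOld == 6 (old value)
lOld = Lock_AtomicExchange(&l, 42);     // l == 42; lOld == 16

// Store 99 only if l is still 42. The return value is what was actually
// observed, so success <=> (return value == the expected value).
if (Lock_AtomicCompareExchange(&l, 99, 42) == 42)
{
    // the swap happened; l == 99
}
#endif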
//--------------------------------------------------------------------
// Spin count values.

enum LOCK_SPINS {
    LOCK_MAXIMUM_SPINS     = 10000,  // maximum allowable spin count
    LOCK_DEFAULT_SPINS     = 4000,   // default spin count
    LOCK_MINIMUM_SPINS     = 100,    // minimum allowable spin count
    LOCK_USE_DEFAULT_SPINS = 0xFFFF, // use class default spin count
    LOCK_DONT_SPIN         = 0,      // don't spin at all
};

// Boilerplate code for the per-class default spincount and spinfactor
#define LOCK_DEFAULT_SPIN_IMPLEMENTATION()                                 \
protected:                                                                 \
    /* per-class variables */                                              \
    static WORD   sm_wDefaultSpinCount;  /* global default spin count */   \
    static double sm_dblDfltSpinAdjFctr; /* global spin adjustment factor*/\
                                                                           \
public:                                                                    \
    /* Set the default spin count for all locks */                         \
    static void SetDefaultSpinCount(WORD wSpins)                           \
    {                                                                      \
        IRTLASSERT((wSpins == LOCK_DONT_SPIN)                              \
                   || (wSpins == LOCK_USE_DEFAULT_SPINS)                   \
                   || (LOCK_MINIMUM_SPINS <= wSpins                        \
                       && wSpins <= LOCK_MAXIMUM_SPINS));                  \
                                                                           \
        if ((LOCK_MINIMUM_SPINS <= wSpins && wSpins <= LOCK_MAXIMUM_SPINS) \
                || (wSpins == LOCK_DONT_SPIN))                             \
            sm_wDefaultSpinCount = wSpins;                                 \
        else if (wSpins == LOCK_USE_DEFAULT_SPINS)                         \
            sm_wDefaultSpinCount = LOCK_DEFAULT_SPINS;                     \
    }                                                                      \
                                                                           \
    /* Return the default spin count for all locks */                      \
    static WORD GetDefaultSpinCount()                                      \
    {                                                                      \
        return sm_wDefaultSpinCount;                                       \
    }                                                                      \
                                                                           \
    /* Set the adjustment factor for the spincount, used in each */        \
    /* iteration of countdown-and-sleep by the backoff algorithm. */       \
    static void SetDefaultSpinAdjustmentFactor(double dblAdjFactor)        \
    {                                                                      \
        IRTLASSERT(0.1 <= dblAdjFactor && dblAdjFactor <= 10.0);           \
        if (0.1 <= dblAdjFactor && dblAdjFactor <= 10.0)                   \
            sm_dblDfltSpinAdjFctr = dblAdjFactor;                          \
    }                                                                      \
                                                                           \
    /* Return the default spin adjustment factor for all locks */         \
    static double GetDefaultSpinAdjustmentFactor()                         \
    {                                                                      \
        return sm_dblDfltSpinAdjFctr;                                      \
    }
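
// Editor's note: a sketch, not part of the original source. The macro above
// only declares sm_wDefaultSpinCount and sm_dblDfltSpinAdjFctr; each lock
// class still needs definitions in exactly one .cpp file. The values shown
// are assumptions for illustration, not the library's actual defaults.
#if 0
// locks.cpp (illustrative)
WORD   CSmallSpinLock::sm_wDefaultSpinCount  = LOCK_DEFAULT_SPINS;
double CSmallSpinLock::sm_dblDfltSpinAdjFctr = 0.5;

// Process-wide tuning at startup:
//     CSmallSpinLock::SetDefaultSpinCount(2 * LOCK_MINIMUM_SPINS);
#endif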
//--------------------------------------------------------------------
// Various Lock Traits

// Is the lock a simple mutex or a multi-reader/single-writer lock?
enum LOCK_RW_MUTEX {
    LOCK_MUTEX = 1, // mutexes allow only one thread to hold the lock
    LOCK_MRSW,      // multi-reader, single-writer
};

// Can the lock be recursively acquired?
enum LOCK_RECURSION {
    LOCK_RECURSIVE = 1,  // Write and Read locks can be recursively acquired
    LOCK_READ_RECURSIVE, // Read locks can be reacquired, but not Write
    LOCK_NON_RECURSIVE,  // Will deadlock on an attempt to acquire recursively
};

// Does the lock Sleep in a loop or block on a kernel synch object handle?
// May (or may not) spin first before sleeping/blocking.
enum LOCK_WAIT_TYPE {
    LOCK_WAIT_SLEEP = 1, // Calls Sleep() in a loop
    LOCK_WAIT_HANDLE,    // Blocks on a kernel mutex, semaphore, or event
};

// When the lock is taken, how are the waiters dequeued?
enum LOCK_QUEUE_TYPE {
    LOCK_QUEUE_FIFO = 1, // First in, first out. Fair.
    LOCK_QUEUE_LIFO,     // Unfair but CPU cache friendly
    LOCK_QUEUE_KERNEL,   // Determined by vagaries of scheduler
};

// Can the lock's spincount be set on a per-lock basis, or is it only
// possible to modify the default spincount for all the locks in this class?
enum LOCK_PERLOCK_SPIN {
    LOCK_NO_SPIN = 1,     // The locks do not spin at all
    LOCK_CLASS_SPIN,      // Can set class-wide spincount, not individual
    LOCK_INDIVIDUAL_SPIN, // Can set a spincount on an individual lock
};

//--------------------------------------------------------------------
// CLockBase: bundle the above attributes

template < LOCK_LOCKTYPE     locktype,
           LOCK_RW_MUTEX     mutextype,
           LOCK_RECURSION    recursiontype,
           LOCK_WAIT_TYPE    waittype,
           LOCK_QUEUE_TYPE   queuetype,
           LOCK_PERLOCK_SPIN spintype
         >
class CLockBase
{
public:
    static LOCK_LOCKTYPE     LockType()    {return locktype;}
    static LOCK_RW_MUTEX     MutexType()   {return mutextype;}
    static LOCK_RECURSION    Recursion()   {return recursiontype;}
    static LOCK_WAIT_TYPE    WaitType()    {return waittype;}
    static LOCK_QUEUE_TYPE   QueueType()   {return queuetype;}
    static LOCK_PERLOCK_SPIN PerLockSpin() {return spintype;}
};
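
// Editor's note: a minimal sketch, not part of the original source, of how
// template code can branch on these traits at compile time. SafeRelock is a
// hypothetical helper that only reacquires a read lock when the lock class
// says that is legal.
#if 0
template <class _Lock>
void SafeRelock(_Lock& lock)
{
    if (_Lock::Recursion() == LOCK_NON_RECURSIVE)
    {
        // e.g., CSmallSpinLock: a second ReadLock() on this thread
        // would deadlock, so don't try.
    }
    else
    {
        lock.ReadLock();     // LOCK_RECURSIVE or LOCK_READ_RECURSIVE
        lock.ReadUnlock();
    }
}
#endif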
// Lock instrumentation causes all sorts of interesting statistics about
// lock contention, etc., to be gathered, but makes locks considerably fatter
// and somewhat slower. Turned off by default.
// #define LOCK_INSTRUMENTATION 1

#ifdef LOCK_INSTRUMENTATION

// We generally don't want to instrument CSmallSpinLock in addition
// to CSpinLock1, as it makes a CSpinLock1 huge.
// #define LOCK_SMALL_SPIN_INSTRUMENTATION 1

//--------------------------------------------------------------------
// CLockStatistics: statistics for an individual lock

class IRTL_DLLEXP CLockStatistics
{
public:
    enum {
        L_NAMELEN = 8,
    };

    double m_nContentions;      // #times this lock was already locked
    double m_nSleeps;           // Total #Sleep()s needed
    double m_nContentionSpins;  // Total iterations this lock spun
    double m_nAverageSpins;     // Average spins each contention needed
    double m_nReadLocks;        // Number of times lock acquired for reading
    double m_nWriteLocks;       // Number of times lock acquired for writing
    char   m_szName[L_NAMELEN]; // Name of this lock

    CLockStatistics()
        : m_nContentions(0),
          m_nSleeps(0),
          m_nContentionSpins(0),
          m_nAverageSpins(0),
          m_nReadLocks(0),
          m_nWriteLocks(0)
    {
        m_szName[0] = '\0';
    }
};

//--------------------------------------------------------------------
// CGlobalLockStatistics: statistics for all the known locks

class IRTL_DLLEXP CGlobalLockStatistics
{
public:
    LONG     m_cTotalLocks;     // Total number of locks created
    LONG     m_cContendedLocks; // Total number of contended locks
    LONG     m_nSleeps;         // Total #Sleep()s needed by all locks
    LONGLONG m_cTotalSpins;     // Total iterations all locks spun
    double   m_nAverageSpins;   // Average spins needed for each contended lock
    LONG     m_nReadLocks;      // Total ReadLocks
    LONG     m_nWriteLocks;     // Total WriteLocks

    CGlobalLockStatistics()
        : m_cTotalLocks(0),
          m_cContendedLocks(0),
          m_nSleeps(0),
          m_cTotalSpins(0),
          m_nAverageSpins(0),
          m_nReadLocks(0),
          m_nWriteLocks(0)
    {}
};

# define LOCK_INSTRUMENTATION_DECL()                                       \
private:                                                                   \
    volatile LONG m_nContentionSpins; /* #iterations this lock spun */     \
    volatile WORD m_nContentions;     /* #times lock was already locked */ \
    volatile WORD m_nSleeps;          /* #Sleep()s needed */               \
    volatile WORD m_nReadLocks;       /* #ReadLocks */                     \
    volatile WORD m_nWriteLocks;      /* #WriteLocks */                    \
    char m_szName[CLockStatistics::L_NAMELEN]; /* Name of lock */          \
                                                                           \
    static LONG sm_cTotalLocks;     /* Total number of locks created */    \
    static LONG sm_cContendedLocks; /* Total number of contended locks */  \
    static LONG sm_nSleeps;         /* Total #Sleep()s by all locks */     \
    static LONGLONG sm_cTotalSpins; /* Total iterations all locks spun */  \
    static LONG sm_nReadLocks;      /* Total ReadLocks */                  \
    static LONG sm_nWriteLocks;     /* Total WriteLocks */                 \
                                                                           \
public:                                                                    \
    const char* Name() const {return m_szName;}                            \
                                                                           \
    CLockStatistics Statistics() const;                                    \
    static CGlobalLockStatistics GlobalStatistics();                       \
    static void ResetGlobalStatistics();                                   \
private:

// Add this to constructors
# define LOCK_INSTRUMENTATION_INIT(pszName)                                \
    m_nContentionSpins = 0;                                                \
    m_nContentions = 0;                                                    \
    m_nSleeps = 0;                                                         \
    m_nReadLocks = 0;                                                      \
    m_nWriteLocks = 0;                                                     \
    ++sm_cTotalLocks;                                                      \
    if (pszName == NULL)                                                   \
        m_szName[0] = '\0';                                                \
    else                                                                   \
    {                                                                      \
        /* Bug fix: strncpy does not guarantee termination */              \
        strncpy(m_szName, pszName, sizeof(m_szName) - 1);                  \
        m_szName[sizeof(m_szName) - 1] = '\0';                             \
    }

// Note: we are not using Interlocked operations for the shared
// statistical counters. We'll lose perfect accuracy, but we'll
// gain by reduced bus synchronization traffic.
# define LOCK_READLOCK_INSTRUMENTATION()  \
    { ++m_nReadLocks;                     \
      ++sm_nReadLocks; }

# define LOCK_WRITELOCK_INSTRUMENTATION() \
    { ++m_nWriteLocks;                    \
      ++sm_nWriteLocks; }

#else // !LOCK_INSTRUMENTATION

# define LOCK_INSTRUMENTATION_DECL()
# define LOCK_READLOCK_INSTRUMENTATION()  ((void) 0)
# define LOCK_WRITELOCK_INSTRUMENTATION() ((void) 0)

#endif // !LOCK_INSTRUMENTATION
//--------------------------------------------------------------------
// CAutoReadLock<Lock> and CAutoWriteLock<Lock> provide exception-safe
// acquisition and release of the other locks defined below

template <class _Lock>
class CAutoReadLock
{
private:
    bool   m_fLocked;
    _Lock& m_Lock;

public:
    CAutoReadLock(
        _Lock& rLock,
        bool   fLockNow = true)
        : m_fLocked(false), m_Lock(rLock)
    {
        if (fLockNow)
            Lock();
    }

    ~CAutoReadLock()
    {
        Unlock();
    }

    void Lock()
    {
        // disallow recursive acquisition of the lock through this wrapper
        if (!m_fLocked)
        {
            m_fLocked = true;
            m_Lock.ReadLock();
        }
    }

    void Unlock()
    {
        if (m_fLocked)
        {
            m_Lock.ReadUnlock();
            m_fLocked = false;
        }
    }
};

template <class _Lock>
class CAutoWriteLock
{
private:
    bool   m_fLocked;
    _Lock& m_Lock;

public:
    CAutoWriteLock(
        _Lock& rLock,
        bool   fLockNow = true)
        : m_fLocked(false), m_Lock(rLock)
    {
        if (fLockNow)
            Lock();
    }

    ~CAutoWriteLock()
    {
        Unlock();
    }

    void Lock()
    {
        // disallow recursive acquisition of the lock through this wrapper
        if (!m_fLocked)
        {
            m_fLocked = true;
            m_Lock.WriteLock();
        }
    }

    void Unlock()
    {
        if (m_fLocked)
        {
            m_fLocked = false;
            m_Lock.WriteUnlock();
        }
    }
};
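
// Editor's note: a minimal sketch, not part of the original source, of the
// auto-lock wrappers. The lock is released when the wrapper goes out of
// scope, even if an exception is thrown. g_Lock and UpdateTable are
// hypothetical names.
#if 0
CReaderWriterLock3 g_Lock;

void UpdateTable()
{
    CAutoWriteLock<CReaderWriterLock3> alock(g_Lock); // calls WriteLock()
    // ... mutate the shared data; this code may throw ...
}   // ~CAutoWriteLock calls WriteUnlock() on every exit path
#endif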
//--------------------------------------------------------------------
// A spinlock is a sort of lightweight critical section. Its main
// advantage over a true Win32 CRITICAL_SECTION is that it occupies 4 bytes
// instead of 24 (+ another 32 bytes for the RTL_CRITICAL_SECTION_DEBUG data),
// which is important when we have many thousands of locks
// and we're trying to be L1 cache-conscious. A CRITICAL_SECTION also
// contains a HANDLE to a semaphore, although this is not initialized until
// the first time that the CRITICAL_SECTION blocks.
//
// On a multiprocessor machine, a spinlock tries to acquire the lock. If
// it fails, it sits in a tight loop, testing the lock and decrementing a
// counter. If the counter reaches zero, it does a Sleep(0), yielding the
// processor to another thread. When control returns to the thread, the
// lock is probably free. If not, the loop starts again; it terminates
// only when the lock is acquired. The theory is that it is
// less costly to spin in a busy loop for a short time than to
// immediately yield the processor, forcing an expensive context switch
// that requires that the old thread's state (registers, etc.) be saved,
// the new thread's state be reloaded, and the L1 and L2 caches be left
// full of stale data.
//
// You can tune the spin count (global only: per-lock spin counts are
// disabled) and the backoff algorithm (the factor by which the spin
// count is multiplied after each Sleep).
//
// On a 1P machine, the loop is pointless---this thread has control,
// hence no other thread can possibly release the lock while this thread
// is looping---so the processor is yielded immediately.
//
// The kernel uses spinlocks internally and spinlocks were also added to
// CRITICAL_SECTIONs in NT 4.0 sp3. In the CRITICAL_SECTION implementation,
// however, the counter counts down only once and waits on a semaphore
// thereafter (i.e., the same blocking behavior that it exhibits without
// the spinlock).
//
// A disadvantage of a user-level spinlock such as this is that if the
// thread that owns the spinlock blocks for any reason (or is preempted by
// the scheduler), all the other threads will continue to spin on the
// spinlock, wasting CPU, until the owning thread completes its wait and
// releases the lock. (The kernel spinlocks, however, are smart enough to
// switch to another runnable thread instead of wasting time spinning.)
// The backoff algorithm decreases the spin count on each iteration in an
// attempt to minimize this effect. The best policy---and this is true for
// all locks---is to hold the lock for as short a time as possible.
//
// Note: unlike a CRITICAL_SECTION, a CSmallSpinLock cannot be recursively
// acquired; i.e., if you acquire a spinlock and then attempt to acquire it
// again *on the same thread* (perhaps from a different function), the
// thread will hang forever. Use CSpinLock instead, which is safe though a
// little slower than a CSmallSpinLock. If you own all the code
// that is bracketed by Lock() and Unlock() (e.g., no callbacks or passing
// back of locked data structures to callers) and know for certain that it
// will not attempt to reacquire the lock, you can use CSmallSpinLock.
//
// See also http://muralik/work/performance/spinlocks.htm and John Vert's
// MSDN article, "Writing Scalable Applications for Windows NT".
//
// The original implementation is due to PALarson.
class IRTL_DLLEXP CSmallSpinLock :
    public CLockBase<LOCK_SMALLSPINLOCK, LOCK_MUTEX,
                     LOCK_NON_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_KERNEL,
                     LOCK_CLASS_SPIN
                    >
{
private:
    volatile LONG m_lTid; // The lock state variable

#ifdef LOCK_SMALL_SPIN_INSTRUMENTATION
    LOCK_INSTRUMENTATION_DECL();
#endif // LOCK_SMALL_SPIN_INSTRUMENTATION

    LOCK_FORCEINLINE static LONG _CurrentThreadId()
    {
        DWORD dwTid = ::GetCurrentThreadId();
        return (LONG) (dwTid);
    }

private:
    // Does all the spinning (and instrumentation) if the lock is contended.
    void _LockSpin();

    LOCK_FORCEINLINE bool _TryLock()
    {
        if (m_lTid == 0)
        {
            LONG l = _CurrentThreadId();
            return (Lock_AtomicCompareExchange(const_cast<LONG*>(&m_lTid), l, 0)
                    == 0);
        }
        else
            return false;
    }

public:
#ifndef LOCK_SMALL_SPIN_INSTRUMENTATION
    CSmallSpinLock()
        : m_lTid(0)
    {}
#else // LOCK_SMALL_SPIN_INSTRUMENTATION
    CSmallSpinLock(
        const char* pszName)
        : m_lTid(0)
    {
        LOCK_INSTRUMENTATION_INIT(pszName);
    }
#endif // LOCK_SMALL_SPIN_INSTRUMENTATION

#ifdef IRTLDEBUG
    ~CSmallSpinLock()
    {
        IRTLASSERT(m_lTid == 0);
    }
#endif // IRTLDEBUG

    // Acquire an exclusive lock for writing. Blocks until acquired.
    inline void WriteLock()
    {
#ifdef LOCK_SMALL_SPIN_INSTRUMENTATION
        LOCK_WRITELOCK_INSTRUMENTATION();
#endif // LOCK_SMALL_SPIN_INSTRUMENTATION
        // Optimize for the common case by helping the processor's branch
        // prediction algorithm.
        if (_TryLock())
            return;
        _LockSpin();
    }

    // Acquire a (possibly shared) lock for reading. Blocks until acquired.
    inline void ReadLock()
    {
#ifdef LOCK_SMALL_SPIN_INSTRUMENTATION
        LOCK_READLOCK_INSTRUMENTATION();
#endif // LOCK_SMALL_SPIN_INSTRUMENTATION
        if (_TryLock())
            return;
        _LockSpin();
    }

    // Try to acquire an exclusive lock for writing. Returns true
    // if successful. Non-blocking.
    inline bool TryWriteLock()
    {
        bool fAcquired = _TryLock();
#ifdef LOCK_SMALL_SPIN_INSTRUMENTATION
        if (fAcquired)
            LOCK_WRITELOCK_INSTRUMENTATION();
#endif // LOCK_SMALL_SPIN_INSTRUMENTATION
        return fAcquired;
    }

    // Try to acquire a (possibly shared) lock for reading. Returns true
    // if successful. Non-blocking.
    inline bool TryReadLock()
    {
        bool fAcquired = _TryLock();
#ifdef LOCK_SMALL_SPIN_INSTRUMENTATION
        if (fAcquired)
            LOCK_READLOCK_INSTRUMENTATION();
#endif // LOCK_SMALL_SPIN_INSTRUMENTATION
        return fAcquired;
    }

    // Unlock the lock after a successful call to {,Try}WriteLock().
    // Assumes caller owned the lock.
    inline void WriteUnlock()
    {
        Lock_AtomicExchange(const_cast<LONG*>(&m_lTid), 0);
    }

    // Unlock the lock after a successful call to {,Try}ReadLock().
    // Assumes caller owned the lock.
    inline void ReadUnlock()
    {
        WriteUnlock();
    }

    // Is the lock already locked for writing by this thread?
    bool IsWriteLocked() const
    {
        return (m_lTid == _CurrentThreadId());
    }

    // Is the lock already locked for reading?
    bool IsReadLocked() const
    {
        return IsWriteLocked();
    }

    // Is the lock unlocked for writing?
    bool IsWriteUnlocked() const
    {
        return (m_lTid == 0);
    }

    // Is the lock unlocked for reading?
    bool IsReadUnlocked() const
    {
        return IsWriteUnlocked();
    }

    // Convert a reader lock to a writer lock
    void ConvertSharedToExclusive()
    {
        // no-op
    }

    // Convert a writer lock to a reader lock
    void ConvertExclusiveToShared()
    {
        // no-op
    }

    // Set the spin count for this lock.
    // Returns true if successfully set the per-lock spincount, false otherwise
    bool SetSpinCount(WORD wSpins)
    {
        UNREFERENCED_PARAMETER(wSpins);
        IRTLASSERT((wSpins == LOCK_DONT_SPIN)
                   || (wSpins == LOCK_USE_DEFAULT_SPINS)
                   || (LOCK_MINIMUM_SPINS <= wSpins
                       && wSpins <= LOCK_MAXIMUM_SPINS));
        return false;
    }

    // Return the spin count for this lock.
    WORD GetSpinCount() const
    {
        return sm_wDefaultSpinCount;
    }

    LOCK_DEFAULT_SPIN_IMPLEMENTATION();

    static const TCHAR* ClassName() {return _TEXT("CSmallSpinLock");}
}; // CSmallSpinLock
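
// Editor's note: _LockSpin() is only declared above; its definition lives
// in the corresponding .cpp file, which is not part of this header. What
// follows is a purely illustrative sketch of the spin-then-Sleep backoff
// shape described in the comment block before the class, not the original
// implementation (it ignores, e.g., the uniprocessor case).
#if 0
void CSmallSpinLock::_LockSpin()
{
    WORD wSpins = sm_wDefaultSpinCount;

    for (;;)
    {
        // Spin, retesting the lock, until the countdown expires
        for (WORD w = wSpins; w > 0; --w)
        {
            if (_TryLock())
                return;
            Lock_Yield();   // 'rep nop': be kind to the pipeline
        }

        Sleep(0);           // yield the processor to another thread

        // Backoff: scale the next countdown by the adjustment factor,
        // but keep it within the allowable range
        wSpins = (WORD) (wSpins * sm_dblDfltSpinAdjFctr);
        if (wSpins < LOCK_MINIMUM_SPINS)
            wSpins = LOCK_MINIMUM_SPINS;
    }
}
#endif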
//--------------------------------------------------------------------
// CSpinLock is a spinlock that doesn't deadlock if recursively acquired.
// This version occupies only 4 bytes. Uses 28 bits for the thread id.

class IRTL_DLLEXP CSpinLock :
    public CLockBase<LOCK_SPINLOCK, LOCK_MUTEX,
                     LOCK_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_KERNEL,
                     LOCK_CLASS_SPIN
                    >
{
private:
    // Thread id of the owner (low 28 bits) and recursion count (high 4
    // bits), packed into a single LONG
    volatile LONG m_lTid;

    enum {
        THREAD_SHIFT = 0,
        THREAD_BITS  = 28,
        OWNER_SHIFT  = THREAD_BITS,
        OWNER_BITS   = 4,
        THREAD_MASK  = ((1 << THREAD_BITS) - 1) << THREAD_SHIFT,
        OWNER_INCR   = 1 << THREAD_BITS,
        OWNER_MASK   = ((1 << OWNER_BITS) - 1) << OWNER_SHIFT,
    };

    LOCK_INSTRUMENTATION_DECL();

private:
    // Get the current thread ID. Assumes that it can fit into 28 bits,
    // which is fairly safe as NT recycles thread IDs and failing to fit into
    // 28 bits would mean that more than 268,435,456 threads were currently
    // active. This is improbable in the extreme as NT runs out of
    // resources if there are more than a few thousand threads in
    // existence and the overhead of context swapping becomes unbearable.
    LOCK_FORCEINLINE static LONG _CurrentThreadId()
    {
        DWORD dwTid = ::GetCurrentThreadId();
        // Thread ID 0 is used by the System Process (Process ID 0).
        // We use a thread-id of zero to indicate that the lock is unowned.
        // NT uses +ve thread ids, Win9x uses -ve ids
        IRTLASSERT(dwTid != 0
                   && ((dwTid <= THREAD_MASK) || (dwTid > ~THREAD_MASK)));
        return (LONG) (dwTid & THREAD_MASK);
    }

    // Attempt to acquire the lock without blocking
    LOCK_FORCEINLINE bool _TryLock()
    {
        if (m_lTid == 0)
        {
            LONG l = _CurrentThreadId() | OWNER_INCR;
            return (Lock_AtomicCompareExchange(const_cast<LONG*>(&m_lTid), l, 0)
                    == 0);
        }
        else
            return false;
    }

    // Acquire the lock, recursively if need be
    void _Lock()
    {
        // Do we own the lock already? Just bump the count.
        if ((m_lTid & THREAD_MASK) == _CurrentThreadId())
        {
            // owner count isn't maxed out?
            IRTLASSERT((m_lTid & OWNER_MASK) != OWNER_MASK);
            Lock_AtomicExchangeAdd(const_cast<LONG*>(&m_lTid), OWNER_INCR);
        }
        // Some other thread owns the lock. We'll have to spin :-(.
        else
            _LockSpin();

        IRTLASSERT((m_lTid & OWNER_MASK) > 0
                   && (m_lTid & THREAD_MASK) == _CurrentThreadId());
    }

    // Release the lock
    LOCK_FORCEINLINE void _Unlock()
    {
        IRTLASSERT((m_lTid & OWNER_MASK) > 0
                   && (m_lTid & THREAD_MASK) == _CurrentThreadId());

        LONG l = m_lTid - OWNER_INCR;

        // Last owner? Release completely, if so
        if ((l & OWNER_MASK) == 0)
            l = 0;

        Lock_AtomicExchange(const_cast<LONG*>(&m_lTid), l);
    }

    // Return true if the lock is owned by this thread
    bool _IsLocked() const
    {
        bool fLocked = ((m_lTid & THREAD_MASK) == _CurrentThreadId());
        IRTLASSERT(!fLocked || ((m_lTid & OWNER_MASK) > 0
                                && (m_lTid & THREAD_MASK) == _CurrentThreadId()));
        return fLocked;
    }

    // Does all the spinning (and instrumentation) if the lock is contended.
    void _LockSpin();

public:
#ifndef LOCK_INSTRUMENTATION
    CSpinLock()
        : m_lTid(0)
    {}
#else // LOCK_INSTRUMENTATION
    CSpinLock(
        const char* pszName)
        : m_lTid(0)
    {
        LOCK_INSTRUMENTATION_INIT(pszName);
    }
#endif // LOCK_INSTRUMENTATION

#ifdef IRTLDEBUG
    ~CSpinLock()
    {
        IRTLASSERT(m_lTid == 0);
    }
#endif // IRTLDEBUG

    // Acquire an exclusive lock for writing. Blocks until acquired.
    inline void WriteLock()
    {
        LOCK_WRITELOCK_INSTRUMENTATION();
        // Is the lock unowned?
        if (_TryLock())
            return; // got the lock
        _Lock();
    }

    // Acquire a (possibly shared) lock for reading. Blocks until acquired.
    inline void ReadLock()
    {
        LOCK_READLOCK_INSTRUMENTATION();
        // Is the lock unowned?
        if (_TryLock())
            return; // got the lock
        _Lock();
    }

    // See the description under CReaderWriterLock3::ReadOrWriteLock
    inline bool ReadOrWriteLock()
    {
        ReadLock();
        return true;
    }

    // Try to acquire an exclusive lock for writing. Returns true
    // if successful. Non-blocking.
    inline bool TryWriteLock()
    {
        bool fAcquired = _TryLock();
        if (fAcquired)
            LOCK_WRITELOCK_INSTRUMENTATION();
        return fAcquired;
    }

    // Try to acquire a (possibly shared) lock for reading. Returns true
    // if successful. Non-blocking.
    inline bool TryReadLock()
    {
        bool fAcquired = _TryLock();
        if (fAcquired)
            LOCK_READLOCK_INSTRUMENTATION();
        return fAcquired;
    }

    // Unlock the lock after a successful call to {,Try}WriteLock().
    inline void WriteUnlock()
    {
        _Unlock();
    }

    // Unlock the lock after a successful call to {,Try}ReadLock().
    inline void ReadUnlock()
    {
        _Unlock();
    }

    // Unlock the lock after a call to ReadOrWriteLock().
    inline void ReadOrWriteUnlock(bool)
    {
        ReadUnlock();
    }

    // Is the lock already locked for writing?
    bool IsWriteLocked() const
    {
        return _IsLocked();
    }

    // Is the lock already locked for reading?
    bool IsReadLocked() const
    {
        return _IsLocked();
    }

    // Is the lock unlocked for writing?
    bool IsWriteUnlocked() const
    {
        return !IsWriteLocked();
    }

    // Is the lock unlocked for reading?
    bool IsReadUnlocked() const
    {
        return !IsReadLocked();
    }

    // Convert a reader lock to a writer lock
    void ConvertSharedToExclusive()
    {
        // no-op
    }

    // Convert a writer lock to a reader lock
    void ConvertExclusiveToShared()
    {
        // no-op
    }

    // Set the spin count for this lock.
    bool SetSpinCount(WORD) {return false;}

    // Return the spin count for this lock.
    WORD GetSpinCount() const
    {
        return sm_wDefaultSpinCount;
    }

    LOCK_DEFAULT_SPIN_IMPLEMENTATION();

    static const TCHAR* ClassName() {return _TEXT("CSpinLock");}
}; // CSpinLock
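
// Editor's note: a minimal sketch, not part of the original source, of
// CSpinLock's recursive acquisition. The owner count lives in the top 4
// bits of m_lTid, so the same thread can hold the lock up to 15 deep.
// Outer and Inner are hypothetical functions.
#if 0
CSpinLock g_sl;

void Inner()
{
    g_sl.WriteLock();   // same thread: just bumps the owner count
    // ... m_lTid == tid | (2 << 28) ...
    g_sl.WriteUnlock(); // count back to 1; Outer still holds the lock
}

void Outer()
{
    g_sl.WriteLock();   // m_lTid == tid | (1 << 28)
    Inner();
    g_sl.WriteUnlock(); // count reaches 0 => lock fully released
}
#endif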
//--------------------------------------------------------------------
// A dummy class, primarily useful as a template parameter

class IRTL_DLLEXP CFakeLock :
    public CLockBase<LOCK_FAKELOCK, LOCK_MUTEX,
                     LOCK_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_FIFO,
                     LOCK_NO_SPIN
                    >
{
private:
    LOCK_INSTRUMENTATION_DECL();

public:
    CFakeLock() {}
#ifdef LOCK_INSTRUMENTATION
    CFakeLock(const char*) {}
#endif // LOCK_INSTRUMENTATION
    ~CFakeLock() {}

    void WriteLock() {}
    void ReadLock() {}
    bool ReadOrWriteLock() {return true;}
    bool TryWriteLock() {return true;}
    bool TryReadLock() {return true;}
    void WriteUnlock() {}
    void ReadUnlock() {}
    void ReadOrWriteUnlock(bool) {}

    bool IsWriteLocked() const {return true;}
    bool IsReadLocked() const {return IsWriteLocked();}
    bool IsWriteUnlocked() const {return true;}
    bool IsReadUnlocked() const {return true;}

    void ConvertSharedToExclusive() {}
    void ConvertExclusiveToShared() {}

    bool SetSpinCount(WORD) {return false;}
    WORD GetSpinCount() const {return LOCK_DONT_SPIN;}

    LOCK_DEFAULT_SPIN_IMPLEMENTATION();

    static const TCHAR* ClassName() {return _TEXT("CFakeLock");}
}; // CFakeLock
//--------------------------------------------------------------------
// A Win32 CRITICAL_SECTION

class IRTL_DLLEXP CCritSec :
    public CLockBase<LOCK_CRITSEC, LOCK_MUTEX,
                     LOCK_RECURSIVE, LOCK_WAIT_HANDLE, LOCK_QUEUE_KERNEL,
                     LOCK_INDIVIDUAL_SPIN
                    >
{
private:
    CRITICAL_SECTION m_cs;

    LOCK_INSTRUMENTATION_DECL();

public:
    CCritSec()
    {
        InitializeCriticalSection(&m_cs);
        SetSpinCount(sm_wDefaultSpinCount);
    }
#ifdef LOCK_INSTRUMENTATION
    CCritSec(const char*)
    {
        InitializeCriticalSection(&m_cs);
        SetSpinCount(sm_wDefaultSpinCount);
    }
#endif // LOCK_INSTRUMENTATION
    ~CCritSec() { DeleteCriticalSection(&m_cs); }

    void WriteLock() { EnterCriticalSection(&m_cs); }
    void ReadLock() { WriteLock(); }
    bool ReadOrWriteLock() { ReadLock(); return true; }
    bool TryWriteLock();
    bool TryReadLock() { return TryWriteLock(); }
    void WriteUnlock() { LeaveCriticalSection(&m_cs); }
    void ReadUnlock() { WriteUnlock(); }
    void ReadOrWriteUnlock(bool) { ReadUnlock(); }

    bool IsWriteLocked() const {return true;} // TODO: fix this
    bool IsReadLocked() const {return IsWriteLocked();}
    bool IsWriteUnlocked() const {return true;} // TODO: fix this
    bool IsReadUnlocked() const {return true;} // TODO: fix this

    // Convert a reader lock to a writer lock
    void ConvertSharedToExclusive()
    {
        // no-op
    }

    // Convert a writer lock to a reader lock
    void ConvertExclusiveToShared()
    {
        // no-op
    }

    // Wrapper for ::SetCriticalSectionSpinCount, which was introduced
    // in NT 4.0 sp3 and hence is not available on all platforms
    static DWORD SetSpinCount(LPCRITICAL_SECTION pcs,
                              DWORD dwSpinCount = LOCK_DEFAULT_SPINS);

    bool SetSpinCount(WORD wSpins)
    {SetSpinCount(&m_cs, wSpins); return true;}

    WORD GetSpinCount() const { return sm_wDefaultSpinCount; } // TODO

    LOCK_DEFAULT_SPIN_IMPLEMENTATION();

    static const TCHAR* ClassName() {return _TEXT("CCritSec");}
}; // CCritSec
//--------------------------------------------------------------------
// RTL_RESOURCE is a multi-reader, single-writer lock provided on NT, but
// not published as part of the Win32 API. IIS exposes it in <tsres.hxx>.

#include <tsres.hxx>

class IRTL_DLLEXP CRtlResource :
    public CLockBase<LOCK_RTLRESOURCE, LOCK_MRSW,
                     LOCK_RECURSIVE /*??*/, LOCK_WAIT_HANDLE, LOCK_QUEUE_KERNEL,
                     LOCK_INDIVIDUAL_SPIN
                    >
{
private:
    RTL_RESOURCE m_res;

    LOCK_INSTRUMENTATION_DECL();

public:
    CRtlResource()
    {
        InetInitializeResource(&m_res);
        CCritSec::SetSpinCount(&m_res.CriticalSection, sm_wDefaultSpinCount);
    }
#ifdef LOCK_INSTRUMENTATION
    CRtlResource(const char*)
    {
        InetInitializeResource(&m_res);
        CCritSec::SetSpinCount(&m_res.CriticalSection, sm_wDefaultSpinCount);
    }
#endif // LOCK_INSTRUMENTATION
    ~CRtlResource() { InetDeleteResource(&m_res); }

    void WriteLock() { InetAcquireResourceExclusive(&m_res, TRUE); }
    void ReadLock() { InetAcquireResourceShared(&m_res, TRUE); }
    bool TryWriteLock() {return !!InetAcquireResourceExclusive(&m_res, FALSE);}
    bool TryReadLock() { return !!InetAcquireResourceShared(&m_res, FALSE); }
    void WriteUnlock() { InetReleaseResource(&m_res); }
    void ReadUnlock() { WriteUnlock(); }

    bool IsWriteLocked() const {return true;} // TODO: fix this
    bool IsReadLocked() const {return IsWriteLocked();}
    bool IsWriteUnlocked() const {return true;} // TODO: fix this
    bool IsReadUnlocked() const {return true;} // TODO: fix this

    // Convert a reader lock to a writer lock
    void ConvertSharedToExclusive()
    {
        InetConvertSharedToExclusive(&m_res);
    }

    // Convert a writer lock to a reader lock
    void ConvertExclusiveToShared()
    {
        InetConvertExclusiveToShared(&m_res);
    }

    bool SetSpinCount(WORD wSpins)
    {CCritSec::SetSpinCount(&m_res.CriticalSection, wSpins); return true;}

    WORD GetSpinCount() const { return sm_wDefaultSpinCount; } // TODO

    LOCK_DEFAULT_SPIN_IMPLEMENTATION();

    static const char* ClassName() {return "CRtlResource";}
}; // CRtlResource
//--------------------------------------------------------------------
// CSharelock is a multi-reader, single-writer lock due to MParkes.

#include <sharelok.h>

class IRTL_DLLEXP CShareLock :
    public CLockBase<LOCK_SHARELOCK, LOCK_MRSW,
                     LOCK_RECURSIVE /* ?? */, LOCK_WAIT_HANDLE, LOCK_QUEUE_KERNEL,
                     LOCK_INDIVIDUAL_SPIN
                    >
{
private:
    CSharelock m_sl;

    LOCK_INSTRUMENTATION_DECL();

public:
    CShareLock()
        : m_sl()
    {
    }
#ifdef LOCK_INSTRUMENTATION
    CShareLock(const char*)
        : m_sl()
    {
    }
#endif // LOCK_INSTRUMENTATION

    void WriteLock() { m_sl.ClaimExclusiveLock(INFINITE); }
    void ReadLock() { m_sl.ClaimShareLock(INFINITE); }
    bool TryWriteLock() { return !!m_sl.ClaimExclusiveLock(0); }
    bool TryReadLock() { return !!m_sl.ClaimShareLock(0); }
    void WriteUnlock() { m_sl.ReleaseExclusiveLock(); }
    void ReadUnlock() { m_sl.ReleaseShareLock(); }

    bool IsWriteLocked() const {return true;} // TODO: fix this
    bool IsReadLocked() const {return IsWriteLocked();}
    bool IsWriteUnlocked() const {return true;} // TODO: fix this
    bool IsReadUnlocked() const {return IsWriteUnlocked();}

    // Convert a reader lock to a writer lock
    void ConvertSharedToExclusive()
    {
        m_sl.ChangeSharedLockToExclusiveLock(INFINITE);
    }

    // Convert a writer lock to a reader lock
    void ConvertExclusiveToShared()
    {
        m_sl.ChangeExclusiveLockToSharedLock();
    }

    bool SetSpinCount(WORD wSpins)
    { m_sl.UpdateMaxSpins(wSpins); return true;}

    WORD GetSpinCount() const { return sm_wDefaultSpinCount; } // TODO

    LOCK_DEFAULT_SPIN_IMPLEMENTATION();

    static const char* ClassName() {return "CShareLock";}
}; // CShareLock
//--------------------------------------------------------------------
// CReaderWriterLock is a multi-reader, single-writer spinlock due to NJain,
// which in turn is derived from an exclusive spinlock by DmitryR.
// Gives priority to writers. Cannot be acquired recursively.
// No error checking. Use CReaderWriterLock3.

class IRTL_DLLEXP CReaderWriterLock :
    public CLockBase<LOCK_READERWRITERLOCK, LOCK_MRSW,
                     LOCK_READ_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_KERNEL,
                     LOCK_CLASS_SPIN
                    >
{
private:
    volatile LONG m_nState;   // > 0 => that many readers
    volatile LONG m_cWaiting; // number of would-be writers

    LOCK_INSTRUMENTATION_DECL();

private:
    enum {
        SL_FREE = 0,
        SL_EXCLUSIVE = -1,
    };

    void _LockSpin(bool fWrite);
    void _WriteLockSpin() { _LockSpin(true); }
    void _ReadLockSpin() { _LockSpin(false); }

    // _CmpExch is equivalent to
    //     LONG lTemp = m_nState;
    //     if (lTemp == lCurrent) m_nState = lNew;
    //     return lCurrent == lTemp;
    // except it's one atomic instruction. Using this gives us the basis of
    // a protocol because the update only succeeds when we knew exactly what
    // used to be in m_nState. If some other thread slips in and modifies
    // m_nState before we do, the update will fail. In other words, it's
    // transactional.
    LOCK_FORCEINLINE bool _CmpExch(LONG lNew, LONG lCurrent)
    {
        return lCurrent == Lock_AtomicCompareExchange(
                               const_cast<LONG*>(&m_nState), lNew, lCurrent);
    }

    LOCK_FORCEINLINE bool _TryWriteLock()
    {
        return (m_nState == SL_FREE && _CmpExch(SL_EXCLUSIVE, SL_FREE));
    }

    LOCK_FORCEINLINE bool _TryReadLock()
    {
        LONG nCurrState = m_nState;
        // Give writers priority
        return (nCurrState != SL_EXCLUSIVE && m_cWaiting == 0
                && _CmpExch(nCurrState + 1, nCurrState));
    }

public:
    CReaderWriterLock()
        : m_nState(SL_FREE),
          m_cWaiting(0)
    {
    }
#ifdef LOCK_INSTRUMENTATION
    CReaderWriterLock(
        const char* pszName)
        : m_nState(SL_FREE),
          m_cWaiting(0)
    {
        LOCK_INSTRUMENTATION_INIT(pszName);
    }
#endif // LOCK_INSTRUMENTATION

#ifdef IRTLDEBUG
    ~CReaderWriterLock()
    {
        IRTLASSERT(m_nState == SL_FREE && m_cWaiting == 0);
    }
#endif // IRTLDEBUG

    inline void WriteLock()
    {
        LOCK_WRITELOCK_INSTRUMENTATION();
        // Add ourselves to the queue of waiting writers
        Lock_AtomicIncrement(const_cast<LONG*>(&m_cWaiting));
        if (_TryWriteLock())
            return;
        _WriteLockSpin();
    }

    inline void ReadLock()
    {
        LOCK_READLOCK_INSTRUMENTATION();
        if (_TryReadLock())
            return;
        _ReadLockSpin();
    }

    inline bool TryWriteLock()
    {
        // Add ourselves to the queue of waiting writers
        Lock_AtomicIncrement(const_cast<LONG*>(&m_cWaiting));
        if (_TryWriteLock())
        {
            LOCK_WRITELOCK_INSTRUMENTATION();
            return true;
        }
        Lock_AtomicDecrement(const_cast<LONG*>(&m_cWaiting));
        return false;
    }

    inline bool TryReadLock()
    {
        if (_TryReadLock())
        {
            LOCK_READLOCK_INSTRUMENTATION();
            return true;
        }
        return false;
    }

    inline void WriteUnlock()
    {
        Lock_AtomicExchange(const_cast<LONG*>(&m_nState), SL_FREE);
        Lock_AtomicDecrement(const_cast<LONG*>(&m_cWaiting));
    }

    inline void ReadUnlock()
    {
        Lock_AtomicDecrement(const_cast<LONG*>(&m_nState));
    }

    bool IsWriteLocked() const {return m_nState == SL_EXCLUSIVE;}
    bool IsReadLocked() const {return m_nState > SL_FREE;}
    bool IsWriteUnlocked() const {return m_nState != SL_EXCLUSIVE;}
    bool IsReadUnlocked() const {return m_nState <= SL_FREE;}

    void ConvertSharedToExclusive()
    {
        IRTLASSERT(IsReadLocked());
        Lock_AtomicIncrement(const_cast<LONG*>(&m_cWaiting));
        // single reader?
        if (m_nState == SL_FREE + 1 && _CmpExch(SL_EXCLUSIVE, SL_FREE + 1))
            return;
        // release the reader lock and spin
        Lock_AtomicDecrement(const_cast<LONG*>(&m_nState));
        _WriteLockSpin();
        IRTLASSERT(IsWriteLocked());
    }

    void ConvertExclusiveToShared()
    {
        IRTLASSERT(IsWriteLocked());
        Lock_AtomicExchange(const_cast<LONG*>(&m_nState), SL_FREE + 1);
        Lock_AtomicDecrement(const_cast<LONG*>(&m_cWaiting));
        IRTLASSERT(IsReadLocked());
    }

    bool SetSpinCount(WORD) {return false;}
    WORD GetSpinCount() const {return sm_wDefaultSpinCount;}

    LOCK_DEFAULT_SPIN_IMPLEMENTATION();

    static const TCHAR* ClassName() {return _TEXT("CReaderWriterLock");}
}; // CReaderWriterLock
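
// Editor's note: a minimal sketch, not part of the original source, of the
// multi-reader/single-writer pattern these classes implement. g_rwl and
// g_nCounter are hypothetical names.
#if 0
CReaderWriterLock g_rwl;
int               g_nCounter;

int ReadCounter()
{
    g_rwl.ReadLock();   // many readers may hold the lock concurrently
    int n = g_nCounter;
    g_rwl.ReadUnlock();
    return n;
}

void BumpCounter()
{
    g_rwl.WriteLock();  // exclusive; waiting writers beat out new readers
    ++g_nCounter;
    g_rwl.WriteUnlock();
}
#endif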
//--------------------------------------------------------------------
// CReaderWriterLock2 is a multi-reader, single-writer spinlock due to NJain,
// which in turn is derived from an exclusive spinlock by DmitryR.
// Gives priority to writers. Cannot be acquired recursively.
// No error checking. The difference between this and CReaderWriterLock is
// that all the state is packed into a single LONG, instead of two LONGs.

class IRTL_DLLEXP CReaderWriterLock2 :
    public CLockBase<LOCK_READERWRITERLOCK2, LOCK_MRSW,
                     LOCK_READ_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_KERNEL,
                     LOCK_CLASS_SPIN
                    >
{
private:
    volatile LONG m_lRW;
    // LoWord is state. ==0 => free; >0 => readers; ==0xFFFF => 1 writer.
    // HiWord is count of writers, W.
    // If LoWord==0xFFFF => W-1 waiters, 1 writer;
    // otherwise W waiters.

    enum {
        SL_FREE          = 0x00000000,
        SL_STATE_MASK    = 0x0000FFFF,
        SL_STATE_SHIFT   = 0,
        SL_WAITING_MASK  = 0xFFFF0000, // waiting writers
        SL_WAITING_SHIFT = 16,
        SL_READER_INCR   = 0x00000001,
        SL_READER_MASK   = 0x00007FFF,
        SL_EXCLUSIVE     = 0x0000FFFF, // one writer
        SL_WRITER_INCR   = 0x00010000,
        SL_ONE_WRITER    = SL_EXCLUSIVE | SL_WRITER_INCR,
        SL_ONE_READER    = (SL_FREE + 1),
        SL_WRITERS_MASK  = ~SL_READER_MASK,
    };

    LOCK_INSTRUMENTATION_DECL();

private:
    void _LockSpin(bool fWrite);
    void _WriteLockSpin();
    void _ReadLockSpin() { _LockSpin(false); }

    // _CmpExch is equivalent to
    //     LONG lTemp = m_lRW;
    //     if (lTemp == lCurrent) m_lRW = lNew;
    //     return lCurrent == lTemp;
    // except it's one atomic instruction. Using this gives us the basis of
    // a protocol because the update only succeeds when we knew exactly what
    // used to be in m_lRW. If some other thread slips in and modifies m_lRW
    // before we do, the update will fail. In other words, it's transactional.
    LOCK_FORCEINLINE bool _CmpExch(LONG lNew, LONG lCurrent)
    {
        return lCurrent == Lock_AtomicCompareExchange(const_cast<LONG*>(&m_lRW),
                                                      lNew, lCurrent);
    }

    LOCK_FORCEINLINE bool _TryWriteLock(
        LONG nIncr)
    {
        LONG l = m_lRW;
        // Grab exclusive access to the lock if it's free. Works even
        // if there are other writers queued up.
        return ((l & SL_STATE_MASK) == SL_FREE
                && _CmpExch((l + nIncr) | SL_EXCLUSIVE, l));
    }

    LOCK_FORCEINLINE bool _TryReadLock()
    {
        LONG l = m_lRW;
        // Give writers priority
        return ((l & SL_WRITERS_MASK) == 0
                && _CmpExch(l + SL_READER_INCR, l));
    }

public:
    CReaderWriterLock2()
        : m_lRW(SL_FREE)
    {}
#ifdef LOCK_INSTRUMENTATION
    CReaderWriterLock2(
        const char* pszName)
        : m_lRW(SL_FREE)
    {
        LOCK_INSTRUMENTATION_INIT(pszName);
    }
#endif // LOCK_INSTRUMENTATION

#ifdef IRTLDEBUG
    ~CReaderWriterLock2()
    {
        IRTLASSERT(m_lRW == SL_FREE);
    }
#endif // IRTLDEBUG

    inline void WriteLock()
    {
        LOCK_WRITELOCK_INSTRUMENTATION();
        // Optimize for the common case
        if (_TryWriteLock(SL_WRITER_INCR))
            return;
        _WriteLockSpin();
    }

    inline void ReadLock()
    {
        LOCK_READLOCK_INSTRUMENTATION();
        // Optimize for the common case
        if (_TryReadLock())
            return;
        _ReadLockSpin();
    }

    inline bool TryWriteLock()
    {
        if (_TryWriteLock(SL_WRITER_INCR))
        {
            LOCK_WRITELOCK_INSTRUMENTATION();
            return true;
        }
        return false;
    }

    inline bool TryReadLock()
    {
        if (_TryReadLock())
        {
            LOCK_READLOCK_INSTRUMENTATION();
            return true;
        }
        return false;
    }

    inline void WriteUnlock()
    {
        IRTLASSERT(IsWriteLocked());
        for (LONG l = m_lRW;
             // decrement waiter count, clear loword to SL_FREE
             !_CmpExch((l - SL_WRITER_INCR) & ~SL_STATE_MASK, l);
             l = m_lRW)
        {
            IRTLASSERT(IsWriteLocked());
            Lock_Yield();
        }
    }

    inline void ReadUnlock()
    {
        IRTLASSERT(IsReadLocked());
        for (LONG l = m_lRW; !_CmpExch(l - SL_READER_INCR, l); l = m_lRW)
        {
            IRTLASSERT(IsReadLocked());
            Lock_Yield();
        }
    }

    bool IsWriteLocked() const
    {return (m_lRW & SL_STATE_MASK) == SL_EXCLUSIVE;}

    bool IsReadLocked() const
    {return (m_lRW & SL_READER_MASK) >= SL_READER_INCR;}

    bool IsWriteUnlocked() const
    {return !IsWriteLocked();}

    bool IsReadUnlocked() const
    {return !IsReadLocked();}

    void ConvertSharedToExclusive()
    {
        IRTLASSERT(IsReadLocked());
        // single reader?
        if (m_lRW != SL_ONE_READER || !_CmpExch(SL_ONE_WRITER, SL_ONE_READER))
        {
            // no, multiple readers
            ReadUnlock();
            _WriteLockSpin();
        }
        IRTLASSERT(IsWriteLocked());
    }

    void ConvertExclusiveToShared()
    {
        IRTLASSERT(IsWriteLocked());
        for (LONG l = m_lRW;
             !_CmpExch(((l - SL_WRITER_INCR) & SL_WAITING_MASK) | SL_READER_INCR,
                       l);
             l = m_lRW)
        {
            IRTLASSERT(IsWriteLocked());
            Lock_Yield();
        }
        IRTLASSERT(IsReadLocked());
    }

    bool SetSpinCount(WORD) {return false;}
    WORD GetSpinCount() const {return sm_wDefaultSpinCount;}

    LOCK_DEFAULT_SPIN_IMPLEMENTATION();

    static const TCHAR* ClassName() {return _TEXT("CReaderWriterLock2");}
}; // CReaderWriterLock2
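
// Editor's note: an illustrative decoding, not part of the original source,
// of the single-LONG state word used by CReaderWriterLock2. Standalone
// copies of the private SL_* constants are used so the snippet is
// self-contained.
#if 0
const LONG STATE_MASK   = 0x0000FFFF;  // SL_STATE_MASK
const LONG WAITING_MASK = 0xFFFF0000;  // SL_WAITING_MASK: count of writers
const LONG READER_MASK  = 0x00007FFF;  // SL_READER_MASK
const LONG EXCLUSIVE    = 0x0000FFFF;  // SL_EXCLUSIVE

// 0x00000000: free
// 0x00000003: 3 readers, no writers
// 0x0001FFFF: 1 writer holds the lock, none waiting
// 0x0003FFFF: 1 writer holds the lock, 2 more waiting
// 0x00020005: 5 readers still in, 2 writers queued behind them
LONG lRW      = 0x00020005;
LONG cReaders = (lRW & READER_MASK);                // == 5
LONG cWriters = (lRW & WAITING_MASK) >> 16;         // == 2
bool fWriter  = ((lRW & STATE_MASK) == EXCLUSIVE);  // == false
#endif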
//--------------------------------------------------------------------
// CReaderWriterLock3 is a multi-reader, single-writer spinlock due
// to NJain, which in turn is derived from an exclusive spinlock by DmitryR.
// Gives priority to writers. No error checking. Much like
// CReaderWriterLock2, except that the WriteLock can be acquired recursively.

class IRTL_DLLEXP CReaderWriterLock3 :
    public CLockBase<LOCK_READERWRITERLOCK3, LOCK_MRSW,
                     LOCK_RECURSIVE, LOCK_WAIT_SLEEP, LOCK_QUEUE_KERNEL,
                     LOCK_CLASS_SPIN
                    >
{
private:
    volatile LONG m_lRW;  // Reader-Writer state
    volatile LONG m_lTid; // Owning Thread ID + recursion count

    // m_lRW:
    //   LoWord is state. ==0 => free; >0 => readers; ==0xFFFF => 1 writer.
    //   HiWord is count of writers. If LoWord==0xFFFF => N-1 waiters,
    //   1 writer; otherwise N waiters.
    // m_lTid:
    //   If readers, then 0; if a write lock, then thread id + recursion count

    enum {
        // m_lRW
        SL_FREE          = 0x00000000,
        SL_STATE_MASK    = 0x0000FFFF,
        SL_STATE_SHIFT   = 0,
        SL_WAITING_MASK  = 0xFFFF0000, // waiting writers
        SL_WAITING_SHIFT = 16,
        SL_READER_INCR   = 0x00000001,
        SL_READER_MASK   = 0x00007FFF,
        SL_EXCLUSIVE     = 0x0000FFFF, // one writer
        SL_WRITER_INCR   = 0x00010000,
        SL_ONE_WRITER    = SL_EXCLUSIVE | SL_WRITER_INCR,
        SL_ONE_READER    = (SL_FREE + 1),
        SL_WRITERS_MASK  = ~SL_READER_MASK,

        // m_lTid
        SL_THREAD_SHIFT = 0,
        SL_THREAD_BITS  = 28,
        SL_OWNER_SHIFT  = SL_THREAD_BITS,
        SL_OWNER_BITS   = 4,
        SL_THREAD_MASK  = ((1 << SL_THREAD_BITS) - 1) << SL_THREAD_SHIFT,
        SL_OWNER_INCR   = 1 << SL_THREAD_BITS,
        SL_OWNER_MASK   = ((1 << SL_OWNER_BITS) - 1) << SL_OWNER_SHIFT,
    };

    LOCK_INSTRUMENTATION_DECL();

private:
    enum SPIN_TYPE {
        SPIN_WRITE = 1,
        SPIN_READ,
        SPIN_READ_RECURSIVE,
    };

    void _LockSpin(SPIN_TYPE st);
    void _WriteLockSpin();
    void _ReadLockSpin(SPIN_TYPE st) { _LockSpin(st); }

    // _CmpExch is equivalent to
    //     LONG lTemp = m_lRW;
    //     if (lTemp == lCurrent) m_lRW = lNew;
    //     return lCurrent == lTemp;
    // except it's one atomic instruction. Using this gives us the basis of
    // a protocol because the update only succeeds when we knew exactly what
    // used to be in m_lRW. If some other thread slips in and modifies m_lRW
    // before we do, the update will fail. In other words, it's transactional.
    LOCK_FORCEINLINE bool _CmpExch(LONG lNew, LONG lCurrent)
    {
        return lCurrent == Lock_AtomicCompareExchange(const_cast<LONG*>(&m_lRW),
                                                      lNew, lCurrent);
    }

    // Get the current thread ID. Assumes that it can fit into 28 bits,
    // which is fairly safe as NT recycles thread IDs and failing to fit into
    // 28 bits would mean that more than 268,435,456 threads were currently
    // active. This is improbable in the extreme as NT runs out of
    // resources if there are more than a few thousand threads in
    // existence and the overhead of context swapping becomes unbearable.
    inline static LONG _CurrentThreadId()
    {
        DWORD dwTid = ::GetCurrentThreadId();
        // Thread ID 0 is used by the System Process (Process ID 0).
        // We use a thread-id of zero to indicate lock is unowned.
        // NT uses +ve thread ids, Win9x uses -ve ids
        IRTLASSERT(dwTid != 0
                   && ((dwTid <= SL_THREAD_MASK) || (dwTid > ~SL_THREAD_MASK)));
        return (LONG) (dwTid & SL_THREAD_MASK);
    }

    LOCK_FORCEINLINE bool _TryWriteLock(
        LONG nIncr)
    {
        // The common case: the writelock has no owner
        if (m_lTid == 0)
        {
            // IRTLASSERT((m_lRW & SL_STATE_MASK) != SL_EXCLUSIVE);
            LONG l = m_lRW;
            // Grab exclusive access to the lock if it's free. Works even
            // if there are other writers queued up.
            if ((l & SL_STATE_MASK) == SL_FREE
                && _CmpExch((l + nIncr) | SL_EXCLUSIVE, l))
            {
                l = Lock_AtomicExchange(const_cast<LONG*>(&m_lTid),
                                        _CurrentThreadId() | SL_OWNER_INCR);
                IRTLASSERT(l == 0);
                return true;
            }
        }

        return _TryWriteLock2();
    }

    // split into a separate function to make _TryWriteLock more inlineable
    bool _TryWriteLock2()
    {
        if ((m_lTid & SL_THREAD_MASK) == _CurrentThreadId())
        {
            IRTLASSERT((m_lRW & SL_STATE_MASK) == SL_EXCLUSIVE);
            IRTLASSERT((m_lTid & SL_OWNER_MASK) != SL_OWNER_MASK);
            Lock_AtomicExchangeAdd(const_cast<LONG*>(&m_lTid), SL_OWNER_INCR);
            return true;
        }
        return false;
    }

    LOCK_FORCEINLINE bool _TryReadLock()
    {
        // Give writers priority
        LONG l = m_lRW;
        bool fLocked = (((l & SL_WRITERS_MASK) == 0)
                        && _CmpExch(l + SL_READER_INCR, l));
        IRTLASSERT(!fLocked || m_lTid == 0);
        return fLocked;
    }

    LOCK_FORCEINLINE bool _TryReadLockRecursive()
    {
        // Do *not* give writers priority. If the inner call attempts
        // to reacquire the read lock while another thread is waiting on
        // the write lock, we would deadlock if we waited for the queue
        // of writers to empty: the writer(s) can't acquire the lock
        // exclusively, as this thread holds a readlock. The inner call
        // typically releases the lock very quickly, so there is no
        // danger of writer starvation.
        LONG l = m_lRW;
        bool fLocked = (((l & SL_STATE_MASK) != SL_EXCLUSIVE)
                        && _CmpExch(l + SL_READER_INCR, l));
        IRTLASSERT(!fLocked || m_lTid == 0);
        return fLocked;
    }

public:
    CReaderWriterLock3()
        : m_lRW(SL_FREE),
          m_lTid(0)
    {}
#ifdef LOCK_INSTRUMENTATION
    CReaderWriterLock3(
        const char* pszName)
        : m_lRW(SL_FREE),
          m_lTid(0)
    {
        LOCK_INSTRUMENTATION_INIT(pszName);
    }
#endif // LOCK_INSTRUMENTATION

#ifdef IRTLDEBUG
    ~CReaderWriterLock3()
    {
        IRTLASSERT(m_lRW == SL_FREE && m_lTid == 0);
    }
#endif // IRTLDEBUG

    inline void WriteLock()
    {
        LOCK_WRITELOCK_INSTRUMENTATION();
        // Optimize for the common case
        if (_TryWriteLock(SL_WRITER_INCR))
            return;
        _WriteLockSpin();
    }

    inline void ReadLock()
    {
        LOCK_READLOCK_INSTRUMENTATION();
        // Optimize for the common case
        if (_TryReadLock())
            return;
        _ReadLockSpin(SPIN_READ);
    }

    // If already locked, recursively acquires another lock of the same
    // kind (read or write). Otherwise, just acquires a read lock.
    // Needed for cases like this:
    //     pTable->WriteLock();
    //     if (!pTable->FindKey(&SomeKey))
    //         InsertRecord(&Whatever);
    //     pTable->WriteUnlock();
    // where FindKey looks like
    //     Table::FindKey(pKey) {
    //         ReadOrWriteLock();
    //         // find pKey if present in table
    //         ReadOrWriteUnlock();
    //     }
    // and InsertRecord looks like
    //     Table::InsertRecord(pRecord) {
    //         WriteLock();
    //         // insert pRecord into table
    //         WriteUnlock();
    //     }
    // If FindKey called ReadLock while the thread already had done a
    // WriteLock, the thread would deadlock.
    inline bool ReadOrWriteLock()
    {
        if (IsWriteLocked())
        {
            WriteLock();
            return false; // => not read locked
        }
        else
        {
            LOCK_READLOCK_INSTRUMENTATION();
            if (!_TryReadLockRecursive())
                _ReadLockSpin(SPIN_READ_RECURSIVE);
            return true; // => is read locked
        }
    }

    inline bool TryWriteLock()
    {
        if (_TryWriteLock(SL_WRITER_INCR))
        {
            LOCK_WRITELOCK_INSTRUMENTATION();
            return true;
        }
        return false;
    }

    inline bool TryReadLock()
    {
        if (_TryReadLock())
        {
            LOCK_READLOCK_INSTRUMENTATION();
            return true;
        }
        return false;
    }

    inline void WriteUnlock()
    {
        IRTLASSERT(IsWriteLocked());

        LONG lNew = m_lTid - SL_OWNER_INCR;

        // Last owner? Release completely, if so
        if ((lNew & SL_OWNER_MASK) == 0)
        {
            Lock_AtomicExchange(const_cast<LONG*>(&m_lTid), 0);

            for (LONG l = m_lRW;
                 // decrement waiter count, clear loword to SL_FREE
                 !_CmpExch((l - SL_WRITER_INCR) & ~SL_STATE_MASK, l);
                 l = m_lRW)
            {
                Lock_Yield();
            }
        }
        else
            Lock_AtomicExchange(const_cast<LONG*>(&m_lTid), lNew);
    }

    inline void ReadUnlock()
    {
        IRTLASSERT(IsReadLocked());
        for (LONG l = m_lRW; !_CmpExch(l - SL_READER_INCR, l); l = m_lRW)
        {
            IRTLASSERT(IsReadLocked());
            Lock_Yield();
        }
    }

    inline void ReadOrWriteUnlock(bool fIsReadLocked)
    {
        if (fIsReadLocked)
            ReadUnlock();
        else
            WriteUnlock();
    }

    // Does current thread hold a write lock?
    bool IsWriteLocked() const
    {
        // bool fLocked = ((m_lTid & SL_THREAD_MASK) == _CurrentThreadId());
        bool fLocked = !((m_lTid ^ GetCurrentThreadId()) & SL_THREAD_MASK);
        IRTLASSERT(!fLocked || (((m_lRW & SL_STATE_MASK) == SL_EXCLUSIVE)
                                && ((m_lTid & SL_OWNER_MASK) > 0)));
        return fLocked;
    }

    bool IsReadLocked() const
    {return (m_lRW & SL_READER_MASK) >= SL_READER_INCR;}

    bool IsWriteUnlocked() const
    {return !IsWriteLocked();}

    bool IsReadUnlocked() const
    {return !IsReadLocked();}

    // Note: if there's more than one reader, then there's a window where
    // another thread can acquire and release a writelock before this routine
    // returns.
    void ConvertSharedToExclusive()
    {
        IRTLASSERT(IsReadLocked());

        // single reader?
        if (m_lRW == SL_ONE_READER && _CmpExch(SL_ONE_WRITER, SL_ONE_READER))
        {
            Lock_AtomicExchange(const_cast<LONG*>(&m_lTid),
                                _CurrentThreadId() | SL_OWNER_INCR);
        }
        else
        {
            // no, multiple readers
            ReadUnlock();
            _WriteLockSpin();
        }

        IRTLASSERT(IsWriteLocked());
    }

    bool TryConvertSharedToExclusive()
    {
        IRTLASSERT(IsReadLocked());

        // single reader?
        if (m_lRW == SL_ONE_READER && _CmpExch(SL_ONE_WRITER, SL_ONE_READER))
        {
            InterlockedExchange(const_cast<LONG*>(&m_lTid),
                                _CurrentThreadId() | SL_OWNER_INCR);
            IRTLASSERT(IsWriteLocked());
            return true;
        }

        IRTLASSERT(!IsWriteLocked());
        IRTLASSERT(IsReadLocked());
        return false;
    }

    // There is no such window when converting from a writelock to a readlock
    void ConvertExclusiveToShared()
    {
        IRTLASSERT(IsWriteLocked());

        // assume writelock is not held recursively
        IRTLASSERT((m_lTid & SL_OWNER_MASK) == SL_OWNER_INCR);
        Lock_AtomicExchange(const_cast<LONG*>(&m_lTid), 0);

        for (LONG l = m_lRW;
             !_CmpExch(((l - SL_WRITER_INCR) & SL_WAITING_MASK) | SL_READER_INCR,
                       l);
             l = m_lRW)
        {
            Lock_Yield();
        }

        IRTLASSERT(IsReadLocked());
    }

    bool SetSpinCount(WORD) {return false;}
    WORD GetSpinCount() const {return sm_wDefaultSpinCount;}

    LOCK_DEFAULT_SPIN_IMPLEMENTATION();

    static const TCHAR* ClassName() {return _TEXT("CReaderWriterLock3");}
}; // CReaderWriterLock3
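
// Editor's note: a minimal sketch, not part of the original source, of the
// ReadOrWriteLock pattern documented above. FindKey is a hypothetical
// function in the spirit of the Table::FindKey example.
#if 0
CReaderWriterLock3 g_Lock;

bool FindKey(const void* pKey)
{
    // Takes a read lock normally; but if this thread already holds the
    // write lock, recursively takes the write lock instead of deadlocking.
    bool fIsReadLocked = g_Lock.ReadOrWriteLock();

    bool fFound = false;
    // ... search the shared table for pKey ...

    g_Lock.ReadOrWriteUnlock(fIsReadLocked);
    return fFound;
}
#endif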
// Global initialization and termination
//
// These calls are not needed when the locks functionality is used via
// iisutil or iisrtl. When the static locks library is used, calling
// Locks_Initialize() and Locks_Cleanup() is required.

extern "C" {

BOOL
Locks_Initialize();

BOOL
Locks_Cleanup();

};
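
// Editor's note: a minimal sketch, not part of the original source, of the
// static-library case described above.
#if 0
int main()
{
    if (!Locks_Initialize())
        return 1;
    // ... use the lock classes ...
    Locks_Cleanup();
    return 0;
}
#endif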
#endif // __LOCKS_H__