Leaked source code of Windows Server 2003.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

790 lines
25 KiB

  1. /*++
  2. Copyright (c) 1998-2002 Microsoft Corporation
  3. Module Name :
  4. locks.cpp
  5. Abstract:
  6. A collection of locks for multithreaded access to data structures
  7. Author:
  8. George V. Reilly (GeorgeRe) 06-Jan-1998
  9. Environment:
  10. Win32 - User Mode
  11. Project:
  12. LKRhash
  13. Revision History:
  14. --*/
  15. #include "precomp.hxx"
  16. #define DLL_IMPLEMENTATION
  17. #define IMPLEMENTATION_EXPORT
  18. #include <locks.h>
  19. #include "_locks.h"
//------------------------------------------------------------------------
// Not all Win32 platforms support all the functions we want. Set up dummy
// thunks and use GetProcAddress to find their addresses at runtime.

// Signature of kernel32!SwitchToThread, resolved dynamically in
// Locks_Initialize().
typedef
BOOL
(WINAPI * PFN_SWITCH_TO_THREAD)(
    VOID
    );

// Fallback used when SwitchToThread is not exported by this platform's
// kernel32. Always returns FALSE ("no yield happened"), which makes
// SwitchOrSleep() fall back to Sleep() instead.
static BOOL WINAPI
FakeSwitchToThread(
    VOID)
{
    return FALSE;
}

// Points at the real SwitchToThread when available, else the fake thunk.
// Set exactly once by Locks_Initialize().
PFN_SWITCH_TO_THREAD g_pfnSwitchToThread = NULL;
// Signature of kernel32!TryEnterCriticalSection, resolved dynamically in
// Locks_Initialize().
typedef
BOOL
(WINAPI * PFN_TRY_ENTER_CRITICAL_SECTION)(
    IN OUT LPCRITICAL_SECTION lpCriticalSection
    );

// Fallback used when TryEnterCriticalSection is unavailable: always
// reports failure to acquire, so callers never mistakenly believe they
// own the critical section.
static BOOL WINAPI
FakeTryEnterCriticalSection(
    LPCRITICAL_SECTION /*lpCriticalSection*/)
{
    return FALSE;
}

// Real or fake TryEnterCriticalSection; set once by Locks_Initialize().
PFN_TRY_ENTER_CRITICAL_SECTION g_pfnTryEnterCritSec = NULL;
// Signature of kernel32!SetCriticalSectionSpinCount, resolved dynamically
// in Locks_Initialize().
typedef
DWORD
(WINAPI * PFN_SET_CRITICAL_SECTION_SPIN_COUNT)(
    LPCRITICAL_SECTION lpCriticalSection,
    DWORD dwSpinCount
    );

// Fallback used when SetCriticalSectionSpinCount is unavailable: the
// request is ignored.
static DWORD WINAPI
FakeSetCriticalSectionSpinCount(
    LPCRITICAL_SECTION /*lpCriticalSection*/,
    DWORD /*dwSpinCount*/)
{
    // For faked critical sections, the previous spin count is just ZERO!
    return 0;
}

// Real or fake SetCriticalSectionSpinCount; set once by
// Locks_Initialize().
PFN_SET_CRITICAL_SECTION_SPIN_COUNT g_pfnSetCSSpinCount = NULL;
// Number of processors, as reported by NumProcessors(). The lock classes
// below only busy-wait when this is > 1.
DWORD g_cProcessors = 0;
// Set (atomically) once the one-time setup below has completed.
BOOL g_fLocksInitialized = FALSE;
// Guards the one-time initialization in Locks_Initialize.
CSimpleLock g_lckInit;

//------------------------------------------------------------------------
// Function: Locks_Initialize
// Synopsis: One-time module initialization: resolves the optional
//           kernel32 entry points (or installs the fake thunks) and
//           caches the processor count. Safe to call from multiple
//           threads; always returns TRUE.
//------------------------------------------------------------------------
BOOL
Locks_Initialize()
{
    // Double-checked initialization: the first, unsynchronized test avoids
    // taking g_lckInit on every call; the second test (under the lock)
    // ensures only one thread performs the setup.
    // NOTE(review): the unlocked read relies on strong (x86-style) memory
    // ordering; on a weakly-ordered CPU a reader could in principle see
    // g_fLocksInitialized == TRUE before the pointer stores below are
    // visible -- confirm against the project's target platforms.
    if (!g_fLocksInitialized)
    {
        g_lckInit.Enter();

        if (!g_fLocksInitialized)
        {
            // load kernel32 and get NT-specific entry points
            HMODULE hKernel32 = GetModuleHandle(TEXT("kernel32.dll"));

            if (hKernel32 != NULL)
            {
                g_pfnSwitchToThread = (PFN_SWITCH_TO_THREAD)
                    GetProcAddress(hKernel32, "SwitchToThread");
                g_pfnTryEnterCritSec = (PFN_TRY_ENTER_CRITICAL_SECTION)
                    GetProcAddress(hKernel32, "TryEnterCriticalSection");
                g_pfnSetCSSpinCount = (PFN_SET_CRITICAL_SECTION_SPIN_COUNT)
                    GetProcAddress(hKernel32, "SetCriticalSectionSpinCount");
            }

            // For any entry point this platform's kernel32 does not
            // export, fall back to the do-nothing thunks above.
            if (g_pfnSwitchToThread == NULL)
                g_pfnSwitchToThread = FakeSwitchToThread;
            if (g_pfnTryEnterCritSec == NULL)
                g_pfnTryEnterCritSec = FakeTryEnterCriticalSection;
            if (g_pfnSetCSSpinCount == NULL)
                g_pfnSetCSSpinCount = FakeSetCriticalSectionSpinCount;

            g_cProcessors = NumProcessors();

            // Publish the "initialized" flag with an atomic exchange, so
            // the flag flips in a single interlocked store after all the
            // setup above.
            Lock_AtomicExchange((LONG*) &g_fLocksInitialized, TRUE);
        }

        g_lckInit.Leave();
    }

    return TRUE;    // always succeeds
}
  97. BOOL
  98. Locks_Cleanup()
  99. {
  100. return TRUE;
  101. }
#ifdef __LOCKS_NAMESPACE__
namespace Locks {
#endif // __LOCKS_NAMESPACE__

// Instantiates the per-class static spin-tuning members:
// sm_wDefaultSpinCount (initial spin-iteration count) and
// sm_dblDfltSpinAdjFctr (multiplier applied on each backoff step).
#define LOCK_DEFAULT_SPIN_DATA(CLASS) \
WORD CLASS::sm_wDefaultSpinCount = LOCK_DEFAULT_SPINS; \
double CLASS::sm_dblDfltSpinAdjFctr = 0.5

#ifdef LOCK_INSTRUMENTATION

// Instantiates the per-class static statistics counters.
# define LOCK_STATISTICS_DATA(CLASS) \
LONG CLASS::sm_cTotalLocks = 0; \
LONG CLASS::sm_cContendedLocks = 0; \
LONG CLASS::sm_nSleeps = 0; \
LONGLONG CLASS::sm_cTotalSpins = 0; \
LONG CLASS::sm_nReadLocks = 0; \
LONG CLASS::sm_nWriteLocks = 0

// No-op statistics accessors for lock classes that don't gather
// statistics (used below for CCritSec and CFakeLock).
# define LOCK_STATISTICS_DUMMY_IMPLEMENTATION(CLASS) \
CLockStatistics CLASS::Statistics() const \
{return CLockStatistics();} \
CGlobalLockStatistics CLASS::GlobalStatistics() \
{return CGlobalLockStatistics();} \
void CLASS::ResetGlobalStatistics() \
{}

// Real statistics accessors: Statistics() snapshots this lock's own
// counters (computing an average spin count per contention),
// GlobalStatistics() snapshots the class-wide static counters, and
// ResetGlobalStatistics() zeroes the class-wide counters.
# define LOCK_STATISTICS_REAL_IMPLEMENTATION(CLASS) \
\
/* Per-lock statistics */ \
CLockStatistics \
CLASS::Statistics() const \
{ \
    CLockStatistics ls; \
\
    ls.m_nContentions = m_nContentions; \
    ls.m_nSleeps = m_nSleeps; \
    ls.m_nContentionSpins = m_nContentionSpins; \
    if (m_nContentions > 0) \
        ls.m_nAverageSpins = m_nContentionSpins / m_nContentions;\
    else \
        ls.m_nAverageSpins = 0; \
    ls.m_nReadLocks = m_nReadLocks; \
    ls.m_nWriteLocks = m_nWriteLocks; \
    _tcscpy(ls.m_tszName, m_tszName); \
\
    return ls; \
} \
\
\
/* Global statistics for CLASS */ \
CGlobalLockStatistics \
CLASS::GlobalStatistics() \
{ \
    CGlobalLockStatistics gls; \
\
    gls.m_cTotalLocks = sm_cTotalLocks; \
    gls.m_cContendedLocks = sm_cContendedLocks; \
    gls.m_nSleeps = sm_nSleeps; \
    gls.m_cTotalSpins = sm_cTotalSpins; \
    if (sm_cContendedLocks > 0) \
        gls.m_nAverageSpins = static_cast<LONG>(sm_cTotalSpins / \
                                                sm_cContendedLocks);\
    else \
        gls.m_nAverageSpins = 0; \
    gls.m_nReadLocks = sm_nReadLocks; \
    gls.m_nWriteLocks = sm_nWriteLocks; \
\
    return gls; \
} \
\
\
/* Reset global statistics for CLASS */ \
void \
CLASS::ResetGlobalStatistics() \
{ \
    sm_cTotalLocks = 0; \
    sm_cContendedLocks = 0; \
    sm_nSleeps = 0; \
    sm_cTotalSpins = 0; \
    sm_nReadLocks = 0; \
    sm_nWriteLocks = 0; \
}

// Note: we are not using Interlocked operations for the shared
// statistical counters. We'll lose perfect accuracy, but we'll
// gain by reduced bus synchronization traffic.

// Declares the locals (cTotalSpins, cSleeps) that the contended-path
// code accumulates into, and counts this as a contended acquisition.
# define LOCK_INSTRUMENTATION_PROLOG() \
++sm_cContendedLocks; \
LONG cTotalSpins = 0; \
WORD cSleeps = 0

// Don't need InterlockedIncrement or InterlockedExchangeAdd for
// member variables, as the lock is now locked by this thread.
# define LOCK_INSTRUMENTATION_EPILOG() \
++m_nContentions; \
m_nSleeps += cSleeps; \
m_nContentionSpins += cTotalSpins; \
sm_nSleeps += cSleeps; \
sm_cTotalSpins += cTotalSpins

#else // !LOCK_INSTRUMENTATION

// Instrumentation disabled: all of these expand to nothing.
# define LOCK_STATISTICS_DATA(CLASS)
# define LOCK_STATISTICS_DUMMY_IMPLEMENTATION(CLASS)
# define LOCK_STATISTICS_REAL_IMPLEMENTATION(CLASS)
# define LOCK_INSTRUMENTATION_PROLOG()
# define LOCK_INSTRUMENTATION_EPILOG()

#endif // !LOCK_INSTRUMENTATION
  201. //------------------------------------------------------------------------
  202. // Function: RandomBackoffFactor
  203. // Synopsis: A fudge factor to help avoid synchronization problems
  204. //------------------------------------------------------------------------
  205. double
  206. RandomBackoffFactor()
  207. {
  208. static const double s_aFactors[] = {
  209. 1.020, 0.965, 0.890, 1.065,
  210. 1.025, 1.115, 0.940, 0.995,
  211. 1.050, 1.080, 0.915, 0.980,
  212. 1.010,
  213. };
  214. const int nFactors = sizeof(s_aFactors) / sizeof(s_aFactors[0]);
  215. // Alternatives for nRand include a static counter
  216. // or the low DWORD of QueryPerformanceCounter().
  217. DWORD nRand = ::GetCurrentThreadId();
  218. return s_aFactors[nRand % nFactors];
  219. }
  220. //------------------------------------------------------------------------
  221. // Function: SwitchOrSleep
  222. // Synopsis: If possible, yields the thread with SwitchToThread. If that
  223. // doesn't work, calls Sleep.
  224. //------------------------------------------------------------------------
  225. void
  226. SwitchOrSleep(
  227. DWORD dwSleepMSec)
  228. {
  229. #ifdef LOCKS_SWITCH_TO_THREAD
  230. if (!g_pfnSwitchToThread())
  231. #endif
  232. Sleep(dwSleepMSec);
  233. }
// CSmallSpinLock static member variables
LOCK_DEFAULT_SPIN_DATA(CSmallSpinLock);

#ifdef LOCK_SMALL_SPIN_INSTRUMENTATION
LOCK_STATISTICS_DATA(CSmallSpinLock);
LOCK_STATISTICS_REAL_IMPLEMENTATION(CSmallSpinLock);
#endif // LOCK_SMALL_SPIN_INSTRUMENTATION

//------------------------------------------------------------------------
// Function: CSmallSpinLock::_LockSpin
// Synopsis: Acquire an exclusive lock. Blocks until acquired.
//           This is the contended path: on MP machines it spins with a
//           per-thread randomized, adaptively adjusted spin count,
//           yielding/sleeping between spin bursts; on UP machines it
//           only yields/sleeps.
//------------------------------------------------------------------------
void
CSmallSpinLock::_LockSpin()
{
#ifdef LOCK_SMALL_SPIN_INSTRUMENTATION
    LOCK_INSTRUMENTATION_PROLOG();
#endif // LOCK_SMALL_SPIN_INSTRUMENTATION

    // Sleep duration alternates 0, 1, 0, 1, ... (see the priority-
    // inversion discussion below).
    DWORD dwSleepTime = 0;
    LONG cBaseSpins = sm_wDefaultSpinCount;
    // Stagger this thread's spin count so that competing threads don't
    // retry in lockstep (see RandomBackoffFactor).
    LONG cBaseSpins2 = static_cast<LONG>(cBaseSpins * RandomBackoffFactor());

    // This lock cannot be acquired recursively. Attempting to do so will
    // deadlock this thread forever. Use CSpinLock instead if you need that
    // kind of lock.
    if (m_lTid == _CurrentThreadId())
    {
        IRTLASSERT(
            !"CSmallSpinLock: Illegally attempted to acquire lock recursively");
        DebugBreak();
    }

    while (!_TryLock())
    {
        // Only spin on a multiprocessor machine and then only if
        // spinning is enabled
        if (g_cProcessors > 1 && cBaseSpins != LOCK_DONT_SPIN)
        {
            LONG cSpins = cBaseSpins2;

            // Check no more than cBaseSpins2 times then yield.
            // It is important not to use the InterlockedExchange in the
            // inner loop in order to minimize system memory bus traffic.
            while (m_lTid != 0)
            {
                if (--cSpins < 0)
                {
#ifdef LOCK_SMALL_SPIN_INSTRUMENTATION
                    cTotalSpins += cBaseSpins2;
                    ++cSleeps;
#endif // LOCK_SMALL_SPIN_INSTRUMENTATION
                    SwitchOrSleep(dwSleepTime);

                    // Backoff algorithm: reduce (or increase) busy wait time
                    cBaseSpins2 = (int) (cBaseSpins2 * sm_dblDfltSpinAdjFctr);
                    // LOCK_MINIMUM_SPINS <= cBaseSpins2 <= LOCK_MAXIMUM_SPINS
                    cBaseSpins2 = min(LOCK_MAXIMUM_SPINS, cBaseSpins2);
                    cBaseSpins2 = max(cBaseSpins2, LOCK_MINIMUM_SPINS);
                    cSpins = cBaseSpins2;

                    // Using Sleep(0) leads to the possibility of priority
                    // inversion. Sleep(0) only yields the processor if
                    // there's another thread of the same priority that's
                    // ready to run. If a high-priority thread is trying to
                    // acquire the lock, which is held by a low-priority
                    // thread, then the low-priority thread may never get
                    // scheduled and hence never free the lock. NT attempts
                    // to avoid priority inversions by temporarily boosting
                    // the priority of low-priority runnable threads, but the
                    // problem can still occur if there's a medium-priority
                    // thread that's always runnable. If Sleep(1) is used,
                    // then the thread unconditionally yields the CPU. We
                    // only do this for the second and subsequent even
                    // iterations, since a millisecond is a long time to wait
                    // if the thread can be scheduled in again sooner
                    // (~100,000 instructions).
                    // Avoid priority inversion: 0, 1, 0, 1,...
                    dwSleepTime = !dwSleepTime;
                }
                else
                {
                    Lock_Yield();
                }
            }

            // Lock is now available, but we still need to do the
            // InterlockedExchange to atomically grab it for ourselves
            // (the outer while retries _TryLock).
#ifdef LOCK_SMALL_SPIN_INSTRUMENTATION
            cTotalSpins += cBaseSpins2 - cSpins;
#endif // LOCK_SMALL_SPIN_INSTRUMENTATION
        }

        // On a 1P machine, busy waiting is a waste of time
        else
        {
#ifdef LOCK_SMALL_SPIN_INSTRUMENTATION
            ++cSleeps;
#endif // LOCK_SMALL_SPIN_INSTRUMENTATION
            SwitchOrSleep(dwSleepTime);

            // Avoid priority inversion: 0, 1, 0, 1,...
            dwSleepTime = !dwSleepTime;
        }
    }

#ifdef LOCK_SMALL_SPIN_INSTRUMENTATION
    LOCK_INSTRUMENTATION_EPILOG();
#endif // LOCK_SMALL_SPIN_INSTRUMENTATION
}
// CSpinLock static member variables
LOCK_DEFAULT_SPIN_DATA(CSpinLock);
LOCK_STATISTICS_DATA(CSpinLock);
LOCK_STATISTICS_REAL_IMPLEMENTATION(CSpinLock);

//------------------------------------------------------------------------
// Function: CSpinLock::_LockSpin
// Synopsis: Acquire an exclusive lock. Blocks until acquired.
//           Contended path: same adaptive spin/backoff scheme as
//           CSmallSpinLock::_LockSpin, but acquisition is retried via
//           _TryLock at the bottom of each iteration (this lock supports
//           recursive acquisition, so there is no recursion check here).
//------------------------------------------------------------------------
void
CSpinLock::_LockSpin()
{
    LOCK_INSTRUMENTATION_PROLOG();

    DWORD dwSleepTime = 0;
    bool fAcquiredLock = false;
    LONG cBaseSpins = sm_wDefaultSpinCount;
    // Stagger this thread's spin count so that competing threads don't
    // retry in lockstep (see RandomBackoffFactor).
    cBaseSpins = static_cast<LONG>(cBaseSpins * RandomBackoffFactor());

    while (!fAcquiredLock)
    {
        // Only spin on a multiprocessor machine and then only if
        // spinning is enabled
        if (g_cProcessors > 1 && sm_wDefaultSpinCount != LOCK_DONT_SPIN)
        {
            LONG cSpins = cBaseSpins;

            // Check no more than cBaseSpins times then yield
            while (m_lTid != 0)
            {
                if (--cSpins < 0)
                {
#ifdef LOCK_INSTRUMENTATION
                    cTotalSpins += cBaseSpins;
                    ++cSleeps;
#endif // LOCK_INSTRUMENTATION
                    SwitchOrSleep(dwSleepTime);

                    // Backoff algorithm: reduce (or increase) busy wait time
                    cBaseSpins = (int) (cBaseSpins * sm_dblDfltSpinAdjFctr);
                    // LOCK_MINIMUM_SPINS <= cBaseSpins <= LOCK_MAXIMUM_SPINS
                    cBaseSpins = min(LOCK_MAXIMUM_SPINS, cBaseSpins);
                    cBaseSpins = max(cBaseSpins, LOCK_MINIMUM_SPINS);
                    cSpins = cBaseSpins;

                    // Avoid priority inversion: 0, 1, 0, 1,...
                    dwSleepTime = !dwSleepTime;
                }
                else
                {
                    Lock_Yield();
                }
            }

            // Lock is now available, but we still need to atomically
            // update m_cOwners and m_nThreadId to grab it for ourselves.
#ifdef LOCK_INSTRUMENTATION
            cTotalSpins += cBaseSpins - cSpins;
#endif // LOCK_INSTRUMENTATION
        }

        // on a 1P machine, busy waiting is a waste of time
        else
        {
#ifdef LOCK_INSTRUMENTATION
            ++cSleeps;
#endif // LOCK_INSTRUMENTATION
            SwitchOrSleep(dwSleepTime);

            // Avoid priority inversion: 0, 1, 0, 1,...
            dwSleepTime = !dwSleepTime;
        }

        // Is the lock unowned?
        if (_TryLock())
            fAcquiredLock = true; // got the lock
    }

    // On exit we must own the lock: non-zero owner count and the owning
    // thread id equal to ours.
    IRTLASSERT((m_lTid & OWNER_MASK) > 0
               && (m_lTid & THREAD_MASK) == _CurrentThreadId());

    LOCK_INSTRUMENTATION_EPILOG();
}
  403. // CCritSec static member variables
  404. LOCK_DEFAULT_SPIN_DATA(CCritSec);
  405. LOCK_STATISTICS_DATA(CCritSec);
  406. LOCK_STATISTICS_DUMMY_IMPLEMENTATION(CCritSec);
  407. bool
  408. CCritSec::TryWriteLock()
  409. {
  410. IRTLASSERT(g_pfnTryEnterCritSec != NULL);
  411. return g_pfnTryEnterCritSec(&m_cs) ? true : false;
  412. }
  413. //------------------------------------------------------------------------
  414. // Function: CCritSec::SetSpinCount
  415. // Synopsis: This function is used to call the appropriate underlying
  416. // functions to set the spin count for the supplied critical
  417. // section. The original function is supposed to be exported out
  418. // of kernel32.dll from NT 4.0 SP3. If the func is not available
  419. // from the dll, we will use a fake function.
  420. //
  421. // Arguments:
  422. // lpCriticalSection
  423. // Points to the critical section object.
  424. //
  425. // dwSpinCount
  426. // Supplies the spin count for the critical section object. For UP
  427. // systems, the spin count is ignored and the critical section spin
  428. // count is set to 0. For MP systems, if contention occurs, instead of
  429. // waiting on a semaphore associated with the critical section, the
  430. // calling thread will spin for spin count iterations before doing the
  431. // hard wait. If the critical section becomes free during the spin, a
  432. // wait is avoided.
  433. //
  434. // Returns:
  435. // The previous spin count for the critical section is returned.
  436. //------------------------------------------------------------------------
  437. DWORD
  438. CCritSec::SetSpinCount(
  439. LPCRITICAL_SECTION pcs,
  440. DWORD dwSpinCount)
  441. {
  442. IRTLASSERT(g_pfnSetCSSpinCount != NULL);
  443. return g_pfnSetCSSpinCount(pcs, dwSpinCount);
  444. }
// CFakeLock static member variables
LOCK_DEFAULT_SPIN_DATA(CFakeLock);
LOCK_STATISTICS_DATA(CFakeLock);
LOCK_STATISTICS_DUMMY_IMPLEMENTATION(CFakeLock);

// CReaderWriterLock static member variables
LOCK_DEFAULT_SPIN_DATA(CReaderWriterLock);
LOCK_STATISTICS_DATA(CReaderWriterLock);
LOCK_STATISTICS_REAL_IMPLEMENTATION(CReaderWriterLock);

//------------------------------------------------------------------------
// Function: CReaderWriterLock::_LockSpin
// Synopsis: Contended-path acquire for either a read or a write lock
//           (selected by fWrite). Spins with adaptive, randomized
//           backoff until _TryWriteLock/_TryReadLock succeeds.
//------------------------------------------------------------------------
void
CReaderWriterLock::_LockSpin(
    bool fWrite)
{
    LOCK_INSTRUMENTATION_PROLOG();

    DWORD dwSleepTime = 0;
    // Randomize the base spin count so competing threads back off
    // differently (see RandomBackoffFactor).
    LONG cBaseSpins = static_cast<LONG>(sm_wDefaultSpinCount
                                        * RandomBackoffFactor());
    LONG cSpins = cBaseSpins;

    for (;;)
    {
        // On a uniprocessor (or when spinning is disabled), don't
        // busy-wait, but still iterate once so _Try*Lock gets called.
        if (g_cProcessors < 2 || sm_wDefaultSpinCount == LOCK_DONT_SPIN)
            cSpins = 1; // must loop once to call _TryRWLock

        for (int i = cSpins; --i >= 0; )
        {
            bool fLock = fWrite ? _TryWriteLock() : _TryReadLock();

            if (fLock)
            {
#ifdef LOCK_INSTRUMENTATION
                cTotalSpins += (cSpins - i - 1);
#endif // LOCK_INSTRUMENTATION
                goto locked;
            }
            Lock_Yield();
        }

#ifdef LOCK_INSTRUMENTATION
        cTotalSpins += cBaseSpins;
        ++cSleeps;
#endif // LOCK_INSTRUMENTATION
        // Spin burst exhausted: give up the CPU before the next round.
        SwitchOrSleep(dwSleepTime);
        dwSleepTime = !dwSleepTime; // Avoid priority inversion: 0, 1, 0, 1,...

        // Backoff algorithm: reduce (or increase) busy wait time
        cBaseSpins = (int) (cBaseSpins * sm_dblDfltSpinAdjFctr);
        // LOCK_MINIMUM_SPINS <= cBaseSpins <= LOCK_MAXIMUM_SPINS
        cBaseSpins = min(LOCK_MAXIMUM_SPINS, cBaseSpins);
        cBaseSpins = max(cBaseSpins, LOCK_MINIMUM_SPINS);
        cSpins = cBaseSpins;
    }

  locked:
    IRTLASSERT(fWrite ? IsWriteLocked() : IsReadLocked());
    LOCK_INSTRUMENTATION_EPILOG();
}
  495. // CReaderWriterLock2 static member variables
  496. LOCK_DEFAULT_SPIN_DATA(CReaderWriterLock2);
  497. LOCK_STATISTICS_DATA(CReaderWriterLock2);
  498. LOCK_STATISTICS_REAL_IMPLEMENTATION(CReaderWriterLock2);
  499. void
  500. CReaderWriterLock2::_WriteLockSpin()
  501. {
  502. // Add ourselves to the queue of waiting writers
  503. for (LONG l = m_lRW; !_CmpExch(l + SL_WRITER_INCR, l); l = m_lRW)
  504. {
  505. Lock_Yield();
  506. }
  507. _LockSpin(true);
  508. }
//------------------------------------------------------------------------
// Function: CReaderWriterLock2::_LockSpin
// Synopsis: Contended-path acquire for either a read or a write lock
//           (selected by fWrite). Spins with adaptive, randomized
//           backoff until the corresponding _Try*Lock succeeds.
//------------------------------------------------------------------------
void
CReaderWriterLock2::_LockSpin(
    bool fWrite)
{
    LOCK_INSTRUMENTATION_PROLOG();

    DWORD dwSleepTime = 0;
    // Randomize the base spin count so competing threads back off
    // differently (see RandomBackoffFactor).
    LONG cBaseSpins = static_cast<LONG>(sm_wDefaultSpinCount
                                        * RandomBackoffFactor());
    LONG cSpins = cBaseSpins;

    for (;;)
    {
        // On a uniprocessor (or when spinning is disabled), don't
        // busy-wait, but still iterate once so _Try*Lock gets called.
        if (g_cProcessors < 2 || sm_wDefaultSpinCount == LOCK_DONT_SPIN)
            cSpins = 1; // must loop once to call _TryRWLock

        for (int i = cSpins; --i >= 0; )
        {
            // Write path passes 0, presumably because _WriteLockSpin has
            // already added SL_WRITER_INCR -- confirm against _TryWriteLock
            // in the header.
            bool fLock = fWrite ? _TryWriteLock(0) : _TryReadLock();

            if (fLock)
            {
#ifdef LOCK_INSTRUMENTATION
                cTotalSpins += (cSpins - i - 1);
#endif // LOCK_INSTRUMENTATION
                goto locked;
            }
            Lock_Yield();
        }

#ifdef LOCK_INSTRUMENTATION
        cTotalSpins += cBaseSpins;
        ++cSleeps;
#endif // LOCK_INSTRUMENTATION
        // Spin burst exhausted: give up the CPU before the next round.
        SwitchOrSleep(dwSleepTime);
        dwSleepTime = !dwSleepTime; // Avoid priority inversion: 0, 1, 0, 1,...

        // Backoff algorithm: reduce (or increase) busy wait time
        cBaseSpins = (int) (cBaseSpins * sm_dblDfltSpinAdjFctr);
        // LOCK_MINIMUM_SPINS <= cBaseSpins <= LOCK_MAXIMUM_SPINS
        cBaseSpins = min(LOCK_MAXIMUM_SPINS, cBaseSpins);
        cBaseSpins = max(cBaseSpins, LOCK_MINIMUM_SPINS);
        cSpins = cBaseSpins;
    }

  locked:
    IRTLASSERT(fWrite ? IsWriteLocked() : IsReadLocked());
    LOCK_INSTRUMENTATION_EPILOG();
}
// CReaderWriterLock3 static member variables
LOCK_DEFAULT_SPIN_DATA(CReaderWriterLock3);
LOCK_STATISTICS_DATA(CReaderWriterLock3);
LOCK_STATISTICS_REAL_IMPLEMENTATION(CReaderWriterLock3);

//------------------------------------------------------------------------
// Function: CReaderWriterLock3::_WriteLockSpin
// Synopsis: Registers this thread as a waiting writer (interlocked
//           compare-exchange bumping the lock word by SL_WRITER_INCR,
//           retried with yields until it sticks), then spins until the
//           write lock is actually acquired.
//------------------------------------------------------------------------
void
CReaderWriterLock3::_WriteLockSpin()
{
    // Add ourselves to the queue of waiting writers
    for (LONG l = m_lRW; !_CmpExch(l + SL_WRITER_INCR, l); l = m_lRW)
    {
        Lock_Yield();
    }

    _LockSpin(SPIN_WRITE);
}
//------------------------------------------------------------------------
// Function: CReaderWriterLock3::_LockSpin
// Synopsis: Contended-path acquire. st selects the kind of acquisition:
//           SPIN_WRITE, SPIN_READ, or SPIN_READ_RECURSIVE. Spins with
//           adaptive, randomized backoff until the matching _Try* call
//           succeeds.
//------------------------------------------------------------------------
void
CReaderWriterLock3::_LockSpin(
    SPIN_TYPE st)
{
    LOCK_INSTRUMENTATION_PROLOG();

    DWORD dwSleepTime = 0;
    // Randomize the base spin count so competing threads back off
    // differently (see RandomBackoffFactor).
    LONG cBaseSpins = static_cast<LONG>(sm_wDefaultSpinCount
                                        * RandomBackoffFactor());
    LONG cSpins = cBaseSpins;

    for (;;)
    {
        // On a uniprocessor (or when spinning is disabled), don't
        // busy-wait, but still iterate once so the _Try* call is made.
        if (g_cProcessors < 2 || sm_wDefaultSpinCount == LOCK_DONT_SPIN)
            cSpins = 1; // must loop once to call _TryRWLock

        for (int i = cSpins; --i >= 0; )
        {
            bool fLock;

            // Dispatch on the kind of acquisition requested.
            if (st == SPIN_WRITE)
                fLock = _TryWriteLock(0);
            else if (st == SPIN_READ)
                fLock = _TryReadLock();
            else
            {
                IRTLASSERT(st == SPIN_READ_RECURSIVE);
                fLock = _TryReadLockRecursive();
            }

            if (fLock)
            {
#ifdef LOCK_INSTRUMENTATION
                cTotalSpins += (cSpins - i - 1);
#endif // LOCK_INSTRUMENTATION
                goto locked;
            }
            Lock_Yield();
        }

#ifdef LOCK_INSTRUMENTATION
        cTotalSpins += cBaseSpins;
        ++cSleeps;
#endif // LOCK_INSTRUMENTATION
        // Spin burst exhausted: give up the CPU before the next round.
        SwitchOrSleep(dwSleepTime);
        dwSleepTime = !dwSleepTime; // Avoid priority inversion: 0, 1, 0, 1,...

        // Backoff algorithm: reduce (or increase) busy wait time
        cBaseSpins = (int) (cBaseSpins * sm_dblDfltSpinAdjFctr);
        // LOCK_MINIMUM_SPINS <= cBaseSpins <= LOCK_MAXIMUM_SPINS
        cBaseSpins = min(LOCK_MAXIMUM_SPINS, cBaseSpins);
        cBaseSpins = max(cBaseSpins, LOCK_MINIMUM_SPINS);
        cSpins = cBaseSpins;
    }

  locked:
    // Note: both read flavors are verified with IsReadLocked().
    IRTLASSERT((st == SPIN_WRITE) ? IsWriteLocked() : IsReadLocked());
    LOCK_INSTRUMENTATION_EPILOG();
}
  616. #ifdef __LOCKS_NAMESPACE__
  617. }
  618. #endif // __LOCKS_NAMESPACE__