Leaked source code of Windows Server 2003

/*++

Copyright (c) 1998  Microsoft Corporation

Module Name:

    isqspin.c

Abstract:

    This module provides an (optionally) instrumented, platform independent
    implementation of the Kernel Import Queued Spinlock routines.  Where
    optimal performance is required, platform dependent versions are
    used.  The code in this file can be used to bootstrap a system or
    on UP systems where the MP version is only used during installation.

    ref: ACM Transactions on Computer Systems, Vol. 9, No. 1, Feb 1991.
         Algorithms for Scalable Synchronization on Shared Memory
         Multiprocessors.

    The basic algorithm is as follows:

    When attempting to acquire the spinlock, the contents of the spinlock
    are atomically exchanged with the address of the context of the
    acquirer.  If the previous value was zero, the acquisition attempt is
    successful.  If non-zero, it is a pointer to the context of the most
    recent attempt to acquire the lock (which may have been successful or
    may be waiting).  The next pointer in this most recent context is
    updated to point to the context of the new waiter (this attempt).

    When releasing the lock, a compare exchange is done with the contents
    of the lock and the address of the releasing context.  If the compare
    succeeds, zero is stored in the lock and it has been released.  If
    not equal, another thread is waiting and that thread is granted the
    lock.

    Benefits:

    .   Each processor spins on a local variable.  Standard spinlocks
        have each processor spinning on the same variable, which is
        possibly in a dirty cache line, causing this cache line to be
        passed from processor to processor repeatedly.

    .   The lock is granted to the requestors in the order the requests
        for the lock were made (i.e. fair).

    .   Atomic operations are reduced to one for each acquire and one
        for each release.

    In this implementation, the context structure for the commonly
    used (high frequency) system locks is in a table in the PRCB,
    and references to a lock are made by the lock's index.

    (An illustrative, stand-alone sketch of the basic algorithm follows
    the definitions below.)

Author:

    Peter L Johnston (peterj) 20-August-1998

Environment:

    Kernel Mode only.

Revision History:

--*/
#include "halp.h"

#if defined(_X86_)
#pragma intrinsic(_enable)
#pragma intrinsic(_disable)
#endif

//
// Define the YIELD instruction.
//

#if defined(_X86_) && !defined(NT_UP)
#define YIELD() _asm { rep nop }
#else
#define YIELD()
#endif

#define INIT_DEBUG_BREAKER 0x10000000
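
//
// Illustrative sketch only: a minimal, plain-C model of the queued
// (MCS-style) lock described in the abstract above, using hypothetical
// SKETCH_* names.  It shows the single atomic exchange on acquire and the
// single compare exchange on release; the real routines below additionally
// encode owner/wait state in the low bits of the per-processor
// KSPIN_LOCK_QUEUE entries.  Excluded from compilation.
//

#if 0

typedef struct _SKETCH_QUEUE_ENTRY {
    struct _SKETCH_QUEUE_ENTRY * volatile Next;     // next waiter in line
    volatile LONG Granted;                          // local flag spun on
} SKETCH_QUEUE_ENTRY, *PSKETCH_QUEUE_ENTRY;

VOID
SketchAcquire (
    PSKETCH_QUEUE_ENTRY volatile *Lock,
    PSKETCH_QUEUE_ENTRY Self
    )
{
    PSKETCH_QUEUE_ENTRY Previous;

    Self->Next = NULL;
    Self->Granted = 0;

    //
    // Atomically make ourselves the tail of the queue.
    //

    Previous = InterlockedExchangePointer((PVOID volatile *)Lock, Self);
    if (Previous != NULL) {

        //
        // Lock is held.  Link in behind the previous tail and spin on
        // our own flag until the previous owner hands the lock over.
        //

        Previous->Next = Self;
        while (Self->Granted == 0) {
            YieldProcessor();
        }
    }
}

VOID
SketchRelease (
    PSKETCH_QUEUE_ENTRY volatile *Lock,
    PSKETCH_QUEUE_ENTRY Self
    )
{
    //
    // If we are still the tail, one compare exchange releases the lock.
    //

    if (InterlockedCompareExchangePointer((PVOID volatile *)Lock,
                                          NULL,
                                          Self) == Self) {
        return;
    }

    //
    // Someone queued behind us but may not have set our Next link yet.
    // Wait for the link, then grant the lock to that waiter.
    //

    while (Self->Next == NULL) {
        YieldProcessor();
    }
    Self->Next->Granted = 1;
}

#endif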

#if !defined(NT_UP)

VOID
FASTCALL
HalpAcquireQueuedSpinLock (
    IN PKSPIN_LOCK_QUEUE Current
    )

/*++

Routine Description:

    This function acquires the specified queued spinlock.  IRQL must be
    high enough on entry to guarantee a processor switch cannot occur.

Arguments:

    Current - Address of Queued Spinlock structure.

Return Value:

    None.

--*/

{
    PKSPIN_LOCK_QUEUE Previous;
    PULONG Lock;

#if DBG
    ULONG DebugBreaker;
#endif

    //
    // Attempt to acquire the lock.
    //

    Lock = (PULONG)&Current->Lock;

    ASSERT((*Lock & 3) == 0);

    Previous = InterlockedExchangePointer(Current->Lock, Current);

    if (Previous == NULL) {
        *Lock |= LOCK_QUEUE_OWNER;
    } else {

        //
        // Lock is already held.  Update the next pointer in the previous
        // context to point to this new waiter and wait until the
        // lock is granted.
        //

        volatile ULONG * LockBusy = (ULONG *)&Current->Lock;

        ASSERT(Previous->Next == NULL);
        ASSERT(!(*LockBusy & LOCK_QUEUE_WAIT));

        *LockBusy |= LOCK_QUEUE_WAIT;
        Previous->Next = Current;

#if DBG
        DebugBreaker = INIT_DEBUG_BREAKER;
#endif

        while ((*LockBusy) & LOCK_QUEUE_WAIT) {
            YIELD();
#if DBG
            if (--DebugBreaker == 0) {
                DbgBreakPoint();
            }
#endif
        }

        ASSERT(*LockBusy & LOCK_QUEUE_OWNER);
    }
}

LOGICAL
FASTCALL
HalpTryToAcquireQueuedSpinLock (
    IN KSPIN_LOCK_QUEUE_NUMBER Number
    )

/*++

Routine Description:

    This function attempts to acquire the specified queued spinlock.
    Interrupts are disabled.

Arguments:

    Number - Queued Spinlock Number.

Return Value:

    TRUE  if the lock was acquired,
    FALSE if it is already held by another processor.

--*/

{
    PKSPIN_LOCK_QUEUE Current;
    PKSPIN_LOCK_QUEUE Owner;

    //
    // See if the lock is available.
    //

    Current = &(KeGetCurrentPrcb()->LockQueue[Number]);

    ASSERT(((ULONG)Current->Lock & 3) == 0);

    if (!*(Current->Lock)) {
        Owner = InterlockedCompareExchangePointer(Current->Lock, Current, NULL);
        if (Owner == NULL) {

            //
            // Lock has been acquired.
            //

            Current->Lock = (PKSPIN_LOCK)
                (((ULONG)Current->Lock) | LOCK_QUEUE_OWNER);
            return TRUE;
        }
    }
    return FALSE;
}

VOID
FASTCALL
HalpReleaseQueuedSpinLock (
    IN PKSPIN_LOCK_QUEUE Current
    )

/*++

Routine Description:

    Release a (queued) spinlock.  If other processors are waiting
    on this lock, hand the lock to the next in line.

Arguments:

    Current - Address of Queued Spinlock structure.

Return Value:

    None.

--*/

{
    PKSPIN_LOCK_QUEUE Next;
    PULONG Lock;
    volatile VOID ** Waiting;

#if DBG
    ULONG DebugBreaker = INIT_DEBUG_BREAKER;
#endif

    Lock = (PULONG)&Current->Lock;

    ASSERT((*Lock & 3) == LOCK_QUEUE_OWNER);

    //
    // Clear lock owner in my own struct.
    //

    *Lock ^= LOCK_QUEUE_OWNER;

    Next = Current->Next;
    if (!Next) {

        //
        // No waiter, attempt to release the lock.  As there is no other
        // waiter, the current lock value should be THIS lock structure,
        // ie "Current".  We do a compare exchange of Current against the
        // lock; if it succeeds, the lock value is replaced with NULL and
        // the lock has been released.  If the compare exchange fails, it
        // is because someone else has acquired the lock but had not yet
        // updated our next field (which we checked above).
        //

        Next = InterlockedCompareExchangePointer(Current->Lock, NULL, Current);
        if (Next == Current) {

            //
            // Lock has been released.
            //

            return;
        }

        //
        // There is another waiter, but our next pointer had not been
        // updated when we checked earlier.  Wait for it to be updated.
        //

        Waiting = (volatile VOID **)&Current->Next;
        while (!*Waiting) {
            YIELD();
#if DBG
            if (--DebugBreaker == 0) {
                DbgBreakPoint();
            }
#endif
        }
        Next = (struct _KSPIN_LOCK_QUEUE *)*Waiting;
    }

    //
    // Hand the lock to the next waiter.
    //

    Lock = (PULONG)&Next->Lock;

    ASSERT((*Lock & 3) == LOCK_QUEUE_WAIT);

    Current->Next = NULL;
    *Lock ^= (LOCK_QUEUE_WAIT + LOCK_QUEUE_OWNER);
}

#endif

VOID
FASTCALL
KeReleaseQueuedSpinLock (
    IN KSPIN_LOCK_QUEUE_NUMBER Number,
    IN KIRQL OldIrql
    )

/*++

Routine Description:

    Release a (queued) spinlock.  If other processors are waiting
    on this lock, hand the lock to the next in line.

Arguments:

    Number  - Queued Spinlock Number.
    OldIrql - IRQL to lower to once the lock has been released.

Return Value:

    None.

--*/

{
#if !defined(NT_UP)
    HalpReleaseQueuedSpinLock(&KeGetCurrentPrcb()->LockQueue[Number]);
#endif

    KfLowerIrql(OldIrql);
}

KIRQL
FASTCALL
KeAcquireQueuedSpinLock(
    IN KSPIN_LOCK_QUEUE_NUMBER Number
    )

/*++

Routine Description:

    Raise to DISPATCH_LEVEL and acquire the specified queued spinlock.

Arguments:

    Number - Queued Spinlock Number.

Return Value:

    OldIrql - The IRQL prior to raising to DISPATCH_LEVEL.

--*/

{
    KIRQL OldIrql;

    OldIrql = KfRaiseIrql(DISPATCH_LEVEL);

#if !defined(NT_UP)
    HalpAcquireQueuedSpinLock(&(KeGetCurrentPrcb()->LockQueue[Number]));
#endif

    return OldIrql;
}

KIRQL
FASTCALL
KeAcquireQueuedSpinLockRaiseToSynch (
    IN KSPIN_LOCK_QUEUE_NUMBER Number
    )

/*++

Routine Description:

    Raise to SYNCH_LEVEL and acquire the specified queued spinlock.

Arguments:

    Number - Queued Spinlock Number.

Return Value:

    OldIrql - The IRQL prior to raising to SYNCH_LEVEL.

--*/

{
    KIRQL OldIrql;

    OldIrql = KfRaiseIrql(SYNCH_LEVEL);

#if !defined(NT_UP)
    HalpAcquireQueuedSpinLock(&(KeGetCurrentPrcb()->LockQueue[Number]));
#endif

    return OldIrql;
}

LOGICAL
FASTCALL
KeTryToAcquireQueuedSpinLock(
    IN KSPIN_LOCK_QUEUE_NUMBER Number,
    IN PKIRQL OldIrql
    )

/*++

Routine Description:

    Attempt to acquire the specified queued spinlock.  If successful,
    raise IRQL to DISPATCH_LEVEL.

Arguments:

    Number  - Queued Spinlock Number.
    OldIrql - Pointer to KIRQL to receive the old IRQL.

Return Value:

    TRUE if the lock was acquired,
    FALSE otherwise.

--*/

{
#if !defined(NT_UP)

    LOGICAL Success;

    _disable();
    Success = HalpTryToAcquireQueuedSpinLock(Number);
    if (Success) {
        *OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
    }
    _enable();
    return Success;

#else

    *OldIrql = KfRaiseIrql(DISPATCH_LEVEL);
    return TRUE;

#endif
}

LOGICAL
FASTCALL
KeTryToAcquireQueuedSpinLockRaiseToSynch(
    IN KSPIN_LOCK_QUEUE_NUMBER Number,
    IN PKIRQL OldIrql
    )

/*++

Routine Description:

    Attempt to acquire the specified queued spinlock.  If successful,
    raise IRQL to SYNCH_LEVEL.

Arguments:

    Number  - Queued Spinlock Number.
    OldIrql - Pointer to KIRQL to receive the old IRQL.

Return Value:

    TRUE if the lock was acquired,
    FALSE otherwise.

--*/

{
#if !defined(NT_UP)

    LOGICAL Success;

    _disable();
    Success = HalpTryToAcquireQueuedSpinLock(Number);
    if (Success) {
        *OldIrql = KfRaiseIrql(SYNCH_LEVEL);
    }
    _enable();
    return Success;

#else

    *OldIrql = KfRaiseIrql(SYNCH_LEVEL);
    return TRUE;

#endif
}
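
//
// Illustrative sketch only: typical kernel-internal use of the numbered
// queued spinlock interfaces above, assuming the LockQueueDispatcherLock
// enumerator from KSPIN_LOCK_QUEUE_NUMBER; the critical sections are
// placeholders.  Excluded from compilation.
//

#if 0

VOID
SketchNumberedLockUsage (
    VOID
    )
{
    KIRQL OldIrql;

    //
    // Unconditional acquire: raises to DISPATCH_LEVEL and spins if the
    // lock is held elsewhere.
    //

    OldIrql = KeAcquireQueuedSpinLock(LockQueueDispatcherLock);

    //
    // ... critical section protected by the dispatcher lock ...
    //

    KeReleaseQueuedSpinLock(LockQueueDispatcherLock, OldIrql);

    //
    // Conditional acquire: returns FALSE instead of spinning if the
    // lock is already held by another processor.
    //

    if (KeTryToAcquireQueuedSpinLock(LockQueueDispatcherLock, &OldIrql)) {

        //
        // ... critical section ...
        //

        KeReleaseQueuedSpinLock(LockQueueDispatcherLock, OldIrql);
    }
}

#endif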

VOID
FASTCALL
KeAcquireInStackQueuedSpinLock (
    IN PKSPIN_LOCK SpinLock,
    IN PKLOCK_QUEUE_HANDLE LockHandle
    )
{
#if !defined(NT_UP)

    //
    // The queue entry for an in-stack lock lives in the caller supplied
    // lock handle rather than in the per-processor lock queue table.
    //

    LockHandle->LockQueue.Next = NULL;
    LockHandle->LockQueue.Lock = SpinLock;
#endif

    LockHandle->OldIrql = KeRaiseIrqlToDpcLevel();

#if !defined(NT_UP)
    HalpAcquireQueuedSpinLock(&LockHandle->LockQueue);
#endif

    return;
}

VOID
FASTCALL
KeAcquireInStackQueuedSpinLockRaiseToSynch (
    IN PKSPIN_LOCK SpinLock,
    IN PKLOCK_QUEUE_HANDLE LockHandle
    )
{
#if !defined(NT_UP)
    LockHandle->LockQueue.Next = NULL;
    LockHandle->LockQueue.Lock = SpinLock;
#endif

    LockHandle->OldIrql = KeRaiseIrqlToSynchLevel();

#if !defined(NT_UP)
    HalpAcquireQueuedSpinLock(&LockHandle->LockQueue);
#endif

    return;
}

VOID
FASTCALL
KeReleaseInStackQueuedSpinLock (
    IN PKLOCK_QUEUE_HANDLE LockHandle
    )
{
#if !defined(NT_UP)
    HalpReleaseQueuedSpinLock(&LockHandle->LockQueue);
#endif

    KeLowerIrql(LockHandle->OldIrql);
    return;
}
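
//
// Illustrative sketch only: typical use of the in-stack queued spinlock
// interfaces above.  The caller provides both the shared KSPIN_LOCK and a
// per-acquisition KLOCK_QUEUE_HANDLE (usually a local variable); the lock
// and critical section shown here are hypothetical.  Excluded from
// compilation.
//

#if 0

KSPIN_LOCK SketchLock;      // shared lock word, e.g. in a device extension

VOID
SketchInStackLockUsage (
    VOID
    )
{
    KLOCK_QUEUE_HANDLE LockHandle;

    KeAcquireInStackQueuedSpinLock(&SketchLock, &LockHandle);

    //
    // ... critical section protected by SketchLock at DISPATCH_LEVEL ...
    //

    KeReleaseInStackQueuedSpinLock(&LockHandle);
}

#endif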