Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1392 lines
34 KiB
  1. if NT_INST
  2. else
  ; Instrumented (NT_INST) builds get these routines elsewhere; this
  ; module is assembled only when instrumentation is off.
  3. TITLE "Spin Locks"
  4. ;++
  5. ;
  6. ; Copyright (c) 1989-1998 Microsoft Corporation
  7. ;
  8. ; Module Name:
  9. ;
  10. ; spinlock.asm
  11. ;
  12. ; Abstract:
  13. ;
  14. ; This module implements x86 spinlock functions for the PC+MP HAL.
  15. ;
  16. ; Author:
  17. ;
  18. ; Bryan Willman (bryanwi) 13 Dec 89
  19. ;
  20. ; Environment:
  21. ;
  22. ; Kernel mode only.
  23. ;
  24. ; Revision History:
  25. ;
  26. ; Ron Mosgrove (o-RonMo) Dec 93 - modified for PC+MP HAL.
  27. ;--
  28. PAGE
  29. .486p
  30. include callconv.inc ; calling convention macros
  31. include i386\kimacro.inc
  32. include hal386.inc
  33. include mac386.inc
  34. include apic.inc
  35. include ntapic.inc
  36. EXTRNP _KeBugCheckEx,5,IMPORT
  37. EXTRNP KfRaiseIrql, 1,,FASTCALL
  38. EXTRNP KfLowerIrql, 1,,FASTCALL
  39. EXTRNP _KeSetEventBoostPriority, 2, IMPORT
  40. EXTRNP _KeWaitForSingleObject,5, IMPORT
  ; Translation tables defined in the HAL's APIC support code:
  ; _HalpVectorToIRQL maps an APIC task-priority vector (TPR >> 4) to an
  ; IRQL; _HalpIRQLtoTPR maps an IRQL back to the corresponding TPR value.
  41. extrn _HalpVectorToIRQL:byte
  42. extrn _HalpIRQLtoTPR:byte
  ; Select the flavor of the read-modify-write primitives: uniprocessor
  ; (NT_UP) builds need no bus lock, multiprocessor builds must carry the
  ; LOCK prefix so the operations are atomic across processors.
  43. ifdef NT_UP
  44. LOCK_ADD equ add
  45. LOCK_DEC equ dec
  46. LOCK_CMPXCHG equ cmpxchg
  47. else
  48. LOCK_ADD equ lock add
  49. LOCK_DEC equ lock dec
  50. LOCK_CMPXCHG equ lock cmpxchg
  51. endif
  52. _TEXT SEGMENT PARA PUBLIC 'CODE'
  53. ASSUME DS:FLAT, ES:FLAT, SS:FLAT, FS:NOTHING, GS:NOTHING
  54. PAGE
  55. SUBTTL "Acquire Kernel Spin Lock"
  56. ;++
  57. ;
  58. ; KIRQL
  59. ; FASTCALL
  60. ; KfAcquireSpinLock (
  61. ; IN PKSPIN_LOCK SpinLock
  62. ; )
  63. ;
  64. ; Routine Description:
  65. ;
  66. ; This function raises to DISPATCH_LEVEL and then acquires the
  67. ; kernel spin lock.
  68. ;
  69. ; Arguments:
  70. ;
  71. ; (ecx) = SpinLock - Supplies a pointer to a kernel spin lock.
  72. ;
  73. ; Return Value:
  74. ;
  75. ; (al) = OldIrql - the IRQL in effect before raising to DISPATCH_LEVEL.
  76. ;
  77. ;--
  78. align 16
  79. cPublicFastCall KfAcquireSpinLock ,1
  80. cPublicFpo 0,0
  ; Raise to DISPATCH_LEVEL first by programming the local APIC TPR, then
  ; translate the old TPR vector back to the previous IRQL for the caller.
  81. mov edx, dword ptr APIC[LU_TPR] ; (edx) = Old Priority (Vector)
  82. mov dword ptr APIC[LU_TPR], DPC_VECTOR ; Write New Priority to the TPR
  83. shr edx, 4
  84. movzx eax, _HalpVectorToIRQL[edx] ; (al) = OldIrql
  85. ifndef NT_UP
  86. ;
  87. ; Attempt to assert the lock
  88. ;
  89. sl10: ACQUIRE_SPINLOCK ecx,<short sl20>
  90. fstRET KfAcquireSpinLock
  91. ;
  92. ; Lock is owned, spin till it looks free, then go get it again.
  93. ;
  94. align dword
  95. sl20: SPIN_ON_SPINLOCK ecx,sl10
  96. endif
  ; Uniprocessor build: raising to DISPATCH_LEVEL is sufficient; the lock
  ; word itself is never touched.
  97. fstRET KfAcquireSpinLock
  98. fstENDP KfAcquireSpinLock
  99. PAGE
  100. SUBTTL "Acquire Kernel Spin Lock"
  101. ;++
  102. ;
  103. ; KIRQL
  104. ; FASTCALL
  105. ; KeAcquireSpinLockRaiseToSynch (
  106. ; IN PKSPIN_LOCK SpinLock
  107. ; )
  108. ;
  109. ; Routine Description:
  110. ;
  111. ; This function acquires the SpinLock at SYNCH_LEVEL. The function
  112. ; is optimized for hotter locks (the lock is tested before acquiring,
  113. ; and any spin occurs at OldIrql).
  114. ;
  115. ; Arguments:
  116. ;
  117. ; (ecx) = SpinLock - Supplies a pointer to a kernel spin lock.
  118. ;
  119. ; Return Value:
  120. ;
  121. ; (al) = OldIrql - the IRQL in effect before raising to SYNCH_LEVEL.
  122. ;
  123. ;--
  124. align 16
  125. cPublicFastCall KeAcquireSpinLockRaiseToSynch,1
  126. cPublicFpo 0,0
  127. mov edx, dword ptr APIC[LU_TPR] ; (edx) = Old Priority (Vector)
  128. mov eax, edx
  129. shr eax, 4
  130. movzx eax, _HalpVectorToIRQL[eax] ; (al) = OldIrql
  131. ifdef NT_UP
  132. mov dword ptr APIC[LU_TPR], APIC_SYNCH_VECTOR ; Write New Priority to the TPR
  133. fstRET KeAcquireSpinLockRaiseToSynch
  134. else
  135. ;
  136. ; Test lock
  137. ;
  138. TEST_SPINLOCK ecx,<short sls30>
  139. ;
  140. ; Raise irql.
  141. ;
  142. sls10: mov dword ptr APIC[LU_TPR], APIC_SYNCH_VECTOR
  143. ;
  144. ; Attempt to assert the lock
  145. ;
  146. ACQUIRE_SPINLOCK ecx,<short sls20>
  147. fstRET KeAcquireSpinLockRaiseToSynch
  148. ;
  149. ; Lock is owned, spin till it looks free, then go get it
  150. ;
  ; Restore the previous priority (edx still holds the old TPR image) so
  ; the spin below happens at OldIrql rather than at SYNCH_LEVEL; sls10
  ; re-raises before the next acquisition attempt.
  151. align dword
  152. sls20: mov dword ptr APIC[LU_TPR], edx
  153. align dword
  154. sls30: SPIN_ON_SPINLOCK ecx,sls10
  155. endif
  156. fstENDP KeAcquireSpinLockRaiseToSynch
  157. ifndef NT_UP
  158. ;++
  159. ;
  160. ; KIRQL
  161. ; FASTCALL
  162. ; KeAcquireSpinLockRaiseToSynchMCE (
  163. ; IN PKSPIN_LOCK SpinLock
  164. ; )
  165. ;
  166. ; Routine Description:
  167. ;
  168. ; This function performs the same function as KeAcquireSpinLockRaiseToSynch
  169. ; but provides a work around for an IFU errata for Pentium Pro processors
  170. ; prior to stepping 619.
  171. ;
  172. ; Arguments:
  173. ;
  174. ; (ecx) = SpinLock - Supplies a pointer to a kernel spin lock.
  175. ;
  176. ; Return Value:
  177. ;
  178. ; (al) = OldIrql - the IRQL in effect before raising to SYNCH_LEVEL.
  179. ;
  180. ;--
  181. align 16
  182. cPublicFastCall KeAcquireSpinLockRaiseToSynchMCE,1
  183. cPublicFpo 0,0
  184. mov edx, dword ptr APIC[LU_TPR] ; (edx) = Old Priority (Vector)
  185. mov eax, edx
  186. shr eax, 4
  187. movzx eax, _HalpVectorToIRQL[eax] ; (al) = OldIrql
  188. ;
  189. ; Test lock
  190. ;
  191. ; TEST_SPINLOCK ecx,<short slm30> ; NOTE - Macro expanded below:
  192. test dword ptr [ecx], 1
  193. nop ; On a P6 prior to stepping B1 (619), we
  194. nop ; need these 5 NOPs to ensure that we
  195. nop ; do not take a machine check exception.
  196. nop ; The cost is just 1.5 clocks as the P6
  197. nop ; just tosses the NOPs.
  198. jnz short slm30
  199. ;
  200. ; Raise irql.
  201. ;
  202. slm10: mov dword ptr APIC[LU_TPR], APIC_SYNCH_VECTOR
  203. ;
  204. ; Attempt to assert the lock
  205. ;
  206. ACQUIRE_SPINLOCK ecx,<short slm20>
  207. fstRET KeAcquireSpinLockRaiseToSynchMCE
  208. ;
  209. ; Lock is owned, spin till it looks free, then go get it
  210. ;
  ; Drop back to the previous priority (old TPR image in edx) while
  ; spinning, exactly as in KeAcquireSpinLockRaiseToSynch.
  211. align dword
  212. slm20: mov dword ptr APIC[LU_TPR], edx
  213. align dword
  214. slm30: SPIN_ON_SPINLOCK ecx,slm10
  215. fstENDP KeAcquireSpinLockRaiseToSynchMCE
  216. endif
  217. PAGE
  218. SUBTTL "Release Kernel Spin Lock"
  219. ;++
  220. ;
  221. ; VOID
  222. ; FASTCALL
  223. ; KfReleaseSpinLock (
  224. ; IN PKSPIN_LOCK SpinLock,
  225. ; IN KIRQL NewIrql
  226. ; )
  227. ;
  228. ; Routine Description:
  229. ;
  230. ; This function releases a kernel spin lock and lowers to the new irql.
  231. ;
  232. ; Arguments:
  233. ;
  234. ; (ecx) = SpinLock - Supplies a pointer to a kernel spin lock.
  235. ; (dl) = NewIrql - New irql value to set.
  236. ;
  237. ; Return Value:
  238. ;
  239. ; None.
  240. ;
  241. ;--
  242. align 16
  243. cPublicFastCall KfReleaseSpinLock ,2
  244. cPublicFpo 0,0
  245. xor eax, eax
  246. mov al, dl ; (eax) = new irql value
  ; Release the lock word first (MP builds only), then lower priority by
  ; programming the TPR with the vector corresponding to NewIrql.
  247. ifndef NT_UP
  248. RELEASE_SPINLOCK ecx ; release spinlock
  249. endif
  250. xor ecx, ecx
  251. mov cl, _HalpIRQLtoTPR[eax] ; get TPR value corresponding to IRQL
  252. mov dword ptr APIC[LU_TPR], ecx
  253. ;
  254. ; We have to ensure that the requested priority is set before
  255. ; we return. The caller is counting on it.
  256. ;
  257. mov eax, dword ptr APIC[LU_TPR]
  258. if DBG
  259. cmp ecx, eax ; Verify IRQL read back is same as
  260. je short @f ; set value
  261. int 3
  262. @@:
  263. endif
  264. fstRET KfReleaseSpinLock
  265. fstENDP KfReleaseSpinLock
  266. ;++
  267. ;
  268. ; KIRQL
  269. ; FASTCALL
  270. ; HalpAcquireHighLevelLock (
  271. ; IN PKSPIN_LOCK SpinLock
  272. ; )
  273. ;
  274. ; Routine Description:
  275. ;
  276. ; Acquires a spinlock with interrupts disabled.
  277. ;
  278. ; Arguments:
  279. ;
  280. ; (ecx) = SpinLock - Supplies a pointer to a kernel spin lock.
  281. ;
  282. ; Return Value:
  283. ;
  284. ; (eax) = the caller's EFLAGS image, saved before interrupts were
  ;         disabled. Pass this value to HalpReleaseHighLevelLock so it
  ;         can restore the original interrupt state.
  285. ;
  286. ;--
  287. cPublicFastCall HalpAcquireHighLevelLock ,1
  288. pushfd
  289. pop eax ; (eax) = caller's EFLAGS
  290. ahll10: cli ; disable interrupts before taking the lock
  291. ACQUIRE_SPINLOCK ecx, ahll20
  292. fstRET HalpAcquireHighLevelLock
  293. ahll20:
  ; Lock is busy: restore the original interrupt state while spinning,
  ; then retry with interrupts disabled again.
  294. push eax
  295. popfd
  296. SPIN_ON_SPINLOCK ecx, <ahll10>
  297. fstENDP HalpAcquireHighLevelLock
  298. ;++
  299. ;
  300. ; VOID
  301. ; FASTCALL
  302. ; HalpReleaseHighLevelLock (
  303. ; IN PKSPIN_LOCK SpinLock,
  304. ; IN KIRQL NewIrql
  305. ; )
  306. ;
  307. ; Routine Description:
  308. ;
  309. ; This function releases a spin lock acquired by HalpAcquireHighLevelLock
  ; and restores the interrupt state that was saved at acquire time.
  310. ;
  311. ; Arguments:
  312. ;
  313. ; (ecx) = SpinLock - Supplies a pointer to a kernel spin lock.
  314. ; (edx) = NewIrql - the EFLAGS image returned by HalpAcquireHighLevelLock.
  315. ;
  316. ; Return Value:
  317. ;
  318. ; None.
  319. ;
  320. ;--
  321. cPublicFastCall HalpReleaseHighLevelLock ,2
  322. RELEASE_SPINLOCK ecx ; drop the lock first...
  323. push edx ; ...then restore the caller's saved
  324. popfd ; EFLAGS (interrupt state)
  325. fstRET HalpReleaseHighLevelLock
  326. fstENDP HalpReleaseHighLevelLock
  327. ;++
  328. ;
  329. ; VOID
  330. ; FASTCALL
  331. ; ExAcquireFastMutex (
  332. ; IN PFAST_MUTEX FastMutex
  333. ; )
  334. ;
  335. ; Routine description:
  336. ;
  337. ; This function acquires ownership of the specified FastMutex.
  338. ;
  339. ; Arguments:
  340. ;
  341. ; (ecx) = FastMutex - Supplies a pointer to the fast mutex.
  342. ;
  343. ; Return Value:
  344. ;
  345. ; None.
  346. ;
  347. ;--
  348. cPublicFastCall ExAcquireFastMutex,1
  349. cPublicFpo 0,0
  350. mov eax, dword ptr APIC[LU_TPR] ; (eax) = Old Priority (Vector)
  351. if DBG
  352. ;
  353. ; Caller must already be at or below APC_LEVEL.
  354. ;
  355. cmp eax, APC_VECTOR
  356. jg short afm11 ; irql too high ==> fatal.
  357. endif
  ; Raise to APC_LEVEL so APCs stay blocked while the mutex is held.
  358. mov dword ptr APIC[LU_TPR], APC_VECTOR ; Write New Priority to the TPR
  ; Decrement the count: 1 -> 0 (ZF set) is an uncontended acquisition;
  ; any other result means the mutex is owned and we must block on its
  ; contention event.
  359. LOCK_DEC dword ptr [ecx].FmCount ; Get count
  360. jz short afm_ret ; The owner? Yes, Done
  361. inc dword ptr [ecx].FmContention
  362. cPublicFpo 0,2
  363. push ecx
  364. push eax
  365. add ecx, FmEvent ; Wait on Event
  366. stdCall _KeWaitForSingleObject,<ecx,WrExecutive,0,0,0>
  367. pop eax ; (al) = OldTpr
  368. pop ecx ; (ecx) = FAST_MUTEX
  369. cPublicFpo 0,0
  370. afm_ret:
  371. mov byte ptr [ecx].FmOldIrql, al
  372. ;
  373. ; Use esp to track the owning thread for debugging purposes.
  374. ; !thread from kd will find the owning thread. Note that the
  375. ; owner isn't cleared on release, check if the mutex is owned
  376. ; first.
  377. ;
  378. mov dword ptr [ecx].FmOwner, esp
  379. fstRet ExAcquireFastMutex
  380. if DBG
  381. cPublicFpo 0,1
  382. afm11: stdCall _KeBugCheckEx,<IRQL_NOT_GREATER_OR_EQUAL,ecx,eax,033h,0>
  383. endif
  384. fstENDP ExAcquireFastMutex
  385. ;++
  386. ;
  387. ; VOID
  388. ; FASTCALL
  389. ; ExReleaseFastMutex (
  390. ; IN PFAST_MUTEX FastMutex
  391. ; )
  392. ;
  393. ; Routine description:
  394. ;
  395. ; This function releases ownership of the FastMutex.
  396. ;
  397. ; Arguments:
  398. ;
  399. ; (ecx) = FastMutex - Supplies a pointer to the fast mutex.
  400. ;
  401. ; Return Value:
  402. ;
  403. ; None.
  404. ;
  405. ;--
  406. cPublicFastCall ExReleaseFastMutex,1
  407. cPublicFpo 0,0
  408. if DBG
  409. ;
  410. ; Caller must already be at APC_LEVEL or have APCs blocked.
  411. ;
  412. mov eax, dword ptr APIC[LU_TPR] ; (eax) = Old Priority (Vector)
  413. cmp eax, APC_VECTOR
  414. je short rfm04 ; irql is ok.
  415. if 0
  416. mov eax, PCR[PcPrcb]
  417. mov eax, [eax].PbCurrentThread ; (eax) = Current Thread
  418. cmp dword ptr [eax]+ThKernelApcDisable, 0
  419. jne short rfm04 ; APCs disabled, this is ok
  420. cmp dword ptr [eax]+ThTeb, 0
  421. je short rfm04 ; No TEB ==> system thread, this is ok
  422. test dword ptr [eax]+ThTeb, 080000000h
  423. jnz short rfm04 ; TEB in system space, this is ok
  424. endif
  425. jmp short rfm20
  426. rfm04:
  427. endif
  428. xor eax, eax
  429. mov al, byte ptr [ecx].FmOldIrql ; (eax) = OldTpr
  ; Give back our count. After the add: result > 0 means no waiters
  ; (skip the event); result <= 0 means at least one waiter exists, so
  ; wake one via the contention event (rfm05).
  430. LOCK_ADD dword ptr [ecx].FmCount, 1 ; Remove our count
  431. js short rfm05 ; if < 0, set event
  432. jnz short rfm10 ; if != 0, don't set event
  433. cPublicFpo 0,1
  434. rfm05: add ecx, FmEvent
  435. push eax ; save new tpr
  436. stdCall _KeSetEventBoostPriority, <ecx, 0>
  437. pop eax ; restore tpr
  438. cPublicFpo 0,0
  ; Restore the saved TPR, then read it back to ensure the priority
  ; change has taken effect before returning.
  439. rfm10: mov dword ptr APIC[LU_TPR], eax
  440. mov ecx, dword ptr APIC[LU_TPR]
  441. if DBG
  442. cmp eax, ecx ; Verify TPR is what was
  443. je short @f ; written
  444. int 3
  445. @@:
  446. endif
  447. fstRet ExReleaseFastMutex
  448. if DBG
  449. cPublicFpo 0,1
  450. rfm20: stdCall _KeBugCheckEx,<IRQL_NOT_GREATER_OR_EQUAL,ecx,eax,034h,0>
  451. endif
  452. fstENDP ExReleaseFastMutex
  453. ;++
  454. ;
  455. ; BOOLEAN
  456. ; FASTCALL
  457. ; ExTryToAcquireFastMutex (
  458. ; IN PFAST_MUTEX FastMutex
  459. ; )
  460. ;
  461. ; Routine description:
  462. ;
  463. ; This function acquires ownership of the FastMutex.
  464. ;
  465. ; Arguments:
  466. ;
  467. ; (ecx) = FastMutex - Supplies a pointer to the fast mutex.
  468. ;
  469. ; Return Value:
  470. ;
  471. ; Returns TRUE if the FAST_MUTEX was acquired; otherwise FALSE.
  472. ;
  473. ;--
  474. cPublicFastCall ExTryToAcquireFastMutex,1
  475. cPublicFpo 0,0
  476. if DBG
  477. ;
  478. ; Caller must already be at or below APC_LEVEL.
  479. ;
  480. mov eax, dword ptr APIC[LU_TPR] ; (eax) = Old Priority (Vector)
  481. cmp eax, APC_VECTOR
  482. jg short tam11 ; irql too high ==> fatal.
  483. endif
  484. ;
  485. ; Try to acquire.
  486. ;
  487. cmp dword ptr [ecx].FmCount, 1 ; Busy?
  488. jne short tam25 ; Yes, abort
  489. mov eax, dword ptr APIC[LU_TPR] ; (eax) = Old Priority (Vector)
  490. mov dword ptr APIC[LU_TPR], APC_VECTOR ; Write New Priority to the TPR
  491. cPublicFpo 0,1
  492. push eax ; Save Old TPR
  ; cmpxchg: if FmCount still equals 1 (free, comparand in eax) replace
  ; it with 0 (edx). ZF is set on success, clear if another thread won.
  493. mov edx, 0 ; Value to set
  494. mov eax, 1 ; Value to compare against
  495. LOCK_CMPXCHG dword ptr [ecx].FmCount, edx ; Attempt to acquire
  496. jnz short tam20 ; ZF clear ==> lost the race, back out
  497. cPublicFpo 0,0
  498. pop edx ; (edx) = Old TPR
  499. mov eax, 1 ; return TRUE
  500. mov byte ptr [ecx].FmOldIrql, dl ; Store Old TPR
  501. fstRet ExTryToAcquireFastMutex
  ; Acquisition failed after the TPR was raised: restore the old TPR and
  ; verify the write took effect before reporting failure.
  502. tam20: pop ecx ; (ecx) = Old TPR
  503. mov dword ptr APIC[LU_TPR], ecx
  504. mov eax, dword ptr APIC[LU_TPR]
  505. if DBG
  506. cmp ecx, eax ; Verify TPR is what was
  507. je short @f ; written
  508. int 3
  509. @@:
  510. endif
  511. tam25: xor eax, eax ; return FALSE
  512. YIELD
  513. fstRet ExTryToAcquireFastMutex ; all done
  514. if DBG
  515. cPublicFpo 0,1
  516. tam11: stdCall _KeBugCheckEx,<IRQL_NOT_GREATER_OR_EQUAL,ecx,eax,033h,0>
  517. endif
  518. fstENDP ExTryToAcquireFastMutex
  519. page ,132
  520. subttl "Acquire Queued SpinLock Raise to Synch"
  521. ; compile time assert sizeof(KSPIN_LOCK_QUEUE) == 8
  522. .errnz (LOCK_QUEUE_HEADER_SIZE - 8)
  523. align 16
  524. ;++
  525. ;
  526. ; VOID
  527. ; KeAcquireInStackQueuedSpinLock (
  528. ; IN PKSPIN_LOCK SpinLock,
  529. ; IN PKLOCK_QUEUE_HANDLE LockHandle
  530. ; )
  531. ;
  532. ; VOID
  533. ; KeAcquireInStackQueuedSpinLockRaiseToSynch (
  534. ; IN PKSPIN_LOCK SpinLock,
  535. ; IN PKLOCK_QUEUE_HANDLE LockHandle
  536. ; )
  537. ;
  538. ; Routine Description:
  539. ;
  540. ; KeAcquireInStackQueuedSpinLock...
  541. ;
  542. ; The Kx versions use a LOCK_QUEUE_HANDLE structure rather than
  543. ; LOCK_QUEUE structures in the PRCB. Old IRQL is stored in the
  544. ; LOCK_QUEUE_HANDLE.
  545. ;
  546. ; Arguments:
  547. ;
  548. ; SpinLock (ecx) Address of Actual Lock.
  549. ; LockHandle (edx) Address of lock context.
  550. ;
  551. ; Return Value:
  552. ;
  553. ; None. Actually returns OldIrql because common code is used
  554. ; for all implementations.
  555. ;
  556. ;--
  557. cPublicFastCall KeAcquireInStackQueuedSpinLockRaiseToSynch,2
  558. cPublicFpo 0,0
  559. ifdef NT_UP
  560. ; In the Uniprocessor case, just raise IRQL to SYNCH
  561. mov eax, dword ptr APIC[LU_TPR] ; (eax) = Old Priority
  562. shr eax, 4
  563. mov al, byte ptr _HalpVectorToIRQL[eax] ; (al) = OldIrql
  564. mov dword ptr APIC[LU_TPR], APIC_SYNCH_VECTOR
  565. mov [edx].LqhOldIrql, al ; save old IRQL in lock handle
  566. fstRET KeAcquireInStackQueuedSpinLockRaiseToSynch
  567. else
  568. ; MP case, use KeAcquireInStackQueuedSpinLock to get the lock and raise
  569. ; to SYNCH asap afterwards.
  ; (The callee raised to DISPATCH; bump the TPR to SYNCH immediately on
  ; return, before handing the lock back to our caller.)
  570. call @KeAcquireInStackQueuedSpinLock@8
  571. mov dword ptr APIC[LU_TPR], APIC_SYNCH_VECTOR
  572. fstRET KeAcquireInStackQueuedSpinLockRaiseToSynch
  573. endif
  574. fstENDP KeAcquireInStackQueuedSpinLockRaiseToSynch
  575. cPublicFastCall KeAcquireInStackQueuedSpinLock,2
  576. cPublicFpo 0,0
  577. ; Raise IRQL to DISPATCH_LEVEL
  578. mov eax, dword ptr APIC[LU_TPR] ; (eax) = Old Priority
  579. shr eax, 4
  580. mov al, byte ptr _HalpVectorToIRQL[eax] ; (al) = OldIrql
  581. mov dword ptr APIC[LU_TPR], DPC_VECTOR ; raise to DISPATCH level
  582. mov [edx].LqhOldIrql, al ; save old IRQL in lock handle
  583. ifdef NT_UP
  584. fstRET KeAcquireInStackQueuedSpinLock
  585. else
  586. ; Save actual lock address in lock handle.
  587. mov [edx].LqhLock, ecx
  588. mov dword ptr [edx].LqhNext, 0
  589. ; ecx contains the address of the actual lock
  590. ; and edx the address of a queued lock structure.
  591. cPublicFpo 0,1
  592. mov eax, edx ; save Lock Queue entry address
  593. if DBG
  ; NOTE(review): here ecx is the caller-supplied lock address, whose low
  ; two bits double as the OWNER/WAIT flags; a nonzero result indicates a
  ; misaligned (or already flag-tagged) lock pointer — confirm intent.
  594. test ecx, LOCK_QUEUE_OWNER+LOCK_QUEUE_WAIT
  595. jnz short iqsl98 ; jiff lock already held (or
  596. ; this proc already waiting).
  597. endif
  598. ; Exchange the value of the lock with the address of this
  599. ; Lock Queue entry.
  ; (xchg with a memory operand carries an implicit LOCK prefix, so this
  ; atomically enqueues our entry and returns the previous queue tail.)
  600. xchg [ecx], edx
  601. cmp edx, 0 ; check if lock is held
  602. jnz short iqsl40 ; jiff held
  603. ; lock has been acquired.
  604. cPublicFpo 0,0
  605. ; note: the actual lock address is word aligned, we use
  606. ; the bottom two bits as indicators, bit 0 is LOCK_QUEUE_WAIT,
  607. ; bit 1 is LOCK_QUEUE_OWNER.
  608. or ecx, LOCK_QUEUE_OWNER ; mark self as lock owner
  609. mov [eax].LqLock, ecx
  610. iqsl20:
  611. fstRET KeAcquireInStackQueuedSpinLock
  612. cPublicFpo 0,1
  613. iqsl40:
  614. ; The lock is already held by another processor. Set the wait
  615. ; bit in this processor's Lock Queue entry, then set the next
  616. ; field in the Lock Queue entry of the last processor to attempt
  617. ; to acquire the lock (this is the address returned by the xchg
  618. ; above) to point to THIS processor's lock queue entry.
  619. or ecx, LOCK_QUEUE_WAIT ; set waiting bit
  620. mov [eax].LqLock, ecx
  621. mov [edx].LqNext, eax ; set previous acquirer's
  622. ; next field.
  623. ; Wait.
  ; Spin on our own queue entry's WAIT bit; the releasing processor
  ; clears it (and sets OWNER) when handing the lock to us.
  624. @@:
  625. test [eax].LqLock, LOCK_QUEUE_WAIT ; check if still waiting
  626. jz short iqsl20 ; jif lock acquired
  627. YIELD ; fire avoidance.
  628. jmp short @b ; else, continue waiting
  629. ; Wait.
  630. if DBG
  631. cPublicFpo 0,1
  632. iqsl98: stdCall _KeBugCheckEx,<SPIN_LOCK_ALREADY_OWNED,ecx,edx,0,0>
  633. int 3 ; so stacktrace works
  634. endif
  635. endif
  636. fstENDP KeAcquireInStackQueuedSpinLock
  637. ;++
  638. ;
  639. ; KIRQL
  640. ; KeAcquireQueuedSpinLockRaiseToSynch (
  641. ; IN KSPIN_LOCK_QUEUE_NUMBER Number
  642. ; )
  643. ;
  644. ; Routine Description:
  645. ;
  646. ; This function raises the current IRQL to DISPATCH/SYNCH level
  647. ; and acquires the specified queued spinlock.
  648. ;
  649. ; Arguments:
  650. ;
  651. ; Number (ecx) - Supplies the queued spinlock number.
  652. ;
  653. ; Return Value:
  654. ;
  655. ; The previous IRQL is returned as the function value.
  656. ;
  657. ;--
  658. cPublicFastCall KeAcquireQueuedSpinLockRaiseToSynch,1
  659. cPublicFpo 0,0
  660. ifdef NT_UP
  661. ; In the Uniprocessor case, just raise IRQL to SYNCH
  662. mov eax, dword ptr APIC[LU_TPR] ; (eax) = Old Priority
  663. shr eax, 4
  664. movzx eax, byte ptr _HalpVectorToIRQL[eax] ; (al) = OldIrql
  665. mov dword ptr APIC[LU_TPR], APIC_SYNCH_VECTOR
  666. fstRET KeAcquireQueuedSpinLockRaiseToSynch
  667. else
  668. ; MP case, use KeAcquireQueuedSpinLock to get the lock and raise
  669. ; to SYNCH asap afterwards.
  ; (The callee raised to DISPATCH and left OldIrql in al; we only bump
  ; the TPR to SYNCH and pass that return value through.)
  670. call @KeAcquireQueuedSpinLock@4
  671. mov dword ptr APIC[LU_TPR], APIC_SYNCH_VECTOR
  672. fstRET KeAcquireQueuedSpinLockRaiseToSynch
  673. endif
  674. fstENDP KeAcquireQueuedSpinLockRaiseToSynch
  675. page ,132
  676. subttl "Acquire Queued SpinLock"
  677. ;++
  678. ;
  679. ; KIRQL
  680. ; KeAcquireQueuedSpinLock (
  681. ; IN KSPIN_LOCK_QUEUE_NUMBER Number
  682. ; )
  683. ;
  684. ; Routine Description:
  685. ;
  686. ; This function raises the current IRQL to DISPATCH level
  687. ; and acquires the specified queued spinlock.
  688. ;
  689. ; Arguments:
  690. ;
  691. ; Number (ecx) - Supplies the queued spinlock number.
  692. ;
  693. ; Return Value:
  694. ;
  695. ; The previous IRQL is returned as the function value.
  696. ;
  697. ;--
  698. cPublicFastCall KeAcquireQueuedSpinLock,1
  699. cPublicFpo 0,0
  700. ; Get old priority (vector) from Local APIC's Task Priority
  701. ; Register and set the new priority.
  702. mov eax, dword ptr APIC[LU_TPR] ; (eax) = Old Priority
  703. shr eax, 4
  704. movzx eax, byte ptr _HalpVectorToIRQL[eax] ; (al) = OldIrql
  705. mov dword ptr APIC[LU_TPR], DPC_VECTOR ; raise to DISPATCH level
  706. ifdef NT_UP
  707. ; in the Uniprocessor version all we do is raise IRQL.
  708. fstRET KeAcquireQueuedSpinLock
  709. else
  710. ; Get address of Lock Queue entry
  711. mov edx, PCR[PcPrcb] ; get address of PRCB
  712. lea edx, [edx+ecx*8].PbLockQueue ; get &PRCB->LockQueue[Number]
  713. ; Get address of the actual lock.
  714. mov ecx, [edx].LqLock
  715. aqsl10:
  716. push eax ; save old IRQL
  717. cPublicFpo 0,1
  718. mov eax, edx ; save Lock Queue entry address
  719. if DBG
  ; A set OWNER or WAIT flag in our own entry's lock field means this
  ; processor already owns or is already queued on this lock.
  720. test ecx, LOCK_QUEUE_OWNER+LOCK_QUEUE_WAIT
  721. jnz short aqsl98 ; jiff lock already held (or
  722. ; this proc already waiting).
  723. endif
  724. ; Exchange the value of the lock with the address of this
  725. ; Lock Queue entry.
  ; (xchg with a memory operand carries an implicit LOCK prefix, so this
  ; atomically enqueues our entry and returns the previous queue tail.)
  726. xchg [ecx], edx
  727. cmp edx, 0 ; check if lock is held
  728. jnz short aqsl40 ; jiff held
  729. ; lock has been acquired.
  730. cPublicFpo 0,0
  731. ; note: the actual lock address is word aligned, we use
  732. ; the bottom two bits as indicators, bit 0 is LOCK_QUEUE_WAIT,
  733. ; bit 1 is LOCK_QUEUE_OWNER.
  734. or ecx, LOCK_QUEUE_OWNER ; mark self as lock owner
  735. mov [eax].LqLock, ecx
  736. aqsl20:
  737. pop eax ; return old IRQL
  738. fstRET KeAcquireQueuedSpinLock
  739. cPublicFpo 0,1
  740. aqsl40:
  741. ; The lock is already held by another processor. Set the wait
  742. ; bit in this processor's Lock Queue entry, then set the next
  743. ; field in the Lock Queue entry of the last processor to attempt
  744. ; to acquire the lock (this is the address returned by the xchg
  745. ; above) to point to THIS processor's lock queue entry.
  746. or ecx, LOCK_QUEUE_WAIT ; set waiting bit
  747. mov [eax].LqLock, ecx
  748. mov [edx].LqNext, eax ; set previous acquirer's
  749. ; next field.
  750. cPublicFpo 0,0
  751. ; Wait.
  ; Spin on our own queue entry's WAIT bit; the releasing processor
  ; clears it (and sets OWNER) when handing the lock to us.
  752. @@:
  753. test [eax].LqLock, LOCK_QUEUE_WAIT ; check if still waiting
  754. jz short aqsl20 ; jif lock acquired
  755. YIELD ; fire avoidance.
  756. jmp short @b ; else, continue waiting
  757. if DBG
  758. cPublicFpo 0,1
  759. aqsl98: stdCall _KeBugCheckEx,<SPIN_LOCK_ALREADY_OWNED,ecx,edx,0,0>
  760. int 3 ; so stacktrace works
  761. endif
  762. endif
  763. fstENDP KeAcquireQueuedSpinLock
  764. page ,132
  765. subttl "Release Queued SpinLock"
  766. ;++
  767. ;
  768. ; VOID
  769. ; KeReleaseInStackQueuedSpinLock (
  770. ; IN PKLOCK_QUEUE_HANDLE LockHandle
  771. ; )
  772. ;
  773. ; Routine Description:
  774. ;
  775. ; This function releases a queued spinlock and lowers the IRQL to
  776. ; its previous value.
  777. ;
  778. ; This differs from KeReleaseQueuedSpinLock in that this version
  779. ; uses a caller supplied lock context where that one uses a
  780. ; predefined lock context in the processor's PRCB.
  781. ;
  782. ; This version sets up a compatible register context and uses
  783. ; KeReleaseQueuedSpinLock to do the actual work.
  784. ;
  785. ; Arguments:
  786. ;
  787. ; LockHandle (ecx) - Address of Lock Queue Handle structure.
  788. ;
  789. ; Return Value:
  790. ;
  791. ; None.
  792. ;
  793. ;--
  794. cPublicFastCall KeReleaseInStackQueuedSpinLock,1
  795. cPublicFpo 0,0
  796. movzx edx, byte ptr [ecx].LqhOldIrql ; get old irql
  797. ifndef NT_UP
  ; MP: hand off to the common release path in KeReleaseQueuedSpinLock
  ; with (eax) = queue entry address and (edx) = old IRQL.
  798. lea eax, [ecx].LqhNext ; get address of lock struct
  799. jmp short rqsl10 ; continue in common code
  800. else
  801. ; Set the local APIC's Task Priority Register to the value
  802. ; corresponding to the new IRQL.
  803. movzx ecx, byte ptr _HalpIRQLtoTPR[edx]
  804. mov dword ptr APIC[LU_TPR], ecx
  805. ; Ensure that the requested priority is set before returning,
  806. ; the caller is counting on it.
  807. mov eax, dword ptr APIC[LU_TPR]
  808. if DBG
  809. cmp ecx, eax ; Verify IRQL read back is same as
  810. je short @f ; set value
  811. int 3
  812. @@:
  813. endif
  814. fstRET KeReleaseInStackQueuedSpinLock
  815. endif
  816. fstENDP KeReleaseInStackQueuedSpinLock
  817. ;++
  818. ;
  819. ; VOID
  820. ; KeReleaseQueuedSpinLock (
  821. ; IN KSPIN_LOCK_QUEUE_NUMBER Number,
  822. ; IN KIRQL OldIrql
  823. ; )
  824. ;
  825. ; Routine Description:
  826. ;
  827. ; This function releases a queued spinlock and lowers the IRQL to
  828. ; its previous value.
  829. ;
  830. ; Arguments:
  831. ;
  832. ; Number (ecx) - Supplies the queued spinlock number.
  833. ; OldIrql (dl) - Supplies the IRQL value to lower to.
  834. ;
  835. ; Return Value:
  836. ;
  837. ; None.
  838. ;
  839. ;--
  840. cPublicFastCall KeReleaseQueuedSpinLock,2
  841. cPublicFpo 0,0
  842. .errnz (LOCK_QUEUE_OWNER - 2) ; error if not bit 1 for btr
  843. ifndef NT_UP
  844. ; Get address of Lock Queue entry
  845. mov eax, PCR[PcPrcb] ; get address of PRCB
  846. endif
  847. movzx edx, dl ; Irql = 8 bits from edx
  848. ifndef NT_UP
  849. lea eax, [eax+ecx*8].PbLockQueue ; get &PRCB->LockQueue[Number]
  ; rqsl10 is also the entry point used by KeReleaseInStackQueuedSpinLock
  ; with (eax) = queue entry address, (edx) = old IRQL already set up.
  850. rqsl10:
  851. push ebx ; need another register
  852. cPublicFpo 0,1
  853. ; Clear the lock field in the Lock Queue entry.
  854. mov ebx, [eax].LqNext
  855. mov ecx, [eax].LqLock
  856. ; Quick check: If Lock Queue entry's Next field is not NULL,
  857. ; there is another waiter. Don't bother with ANY atomic ops
  858. ; in this case.
  859. ;
  860. ; Note: test clears CF and sets ZF appropriately, the following
  861. ; btr sets CF appropriately for the owner check.
  862. test ebx, ebx
  863. ; clear the "I am owner" bit in the Lock entry.
  864. btr ecx, 1 ; clear owner bit.
  865. if DBG
  866. jnc short rqsl98 ; bugcheck if was not set
  867. ; tests CF
  868. endif
  ; After btr, ecx is the bare (flag-free) address of the actual lock.
  869. mov [eax].LqLock, ecx ; clear lock bit in queue entry
  870. jnz short rqsl40 ; jif another processor waits
  871. ; tests ZF
  872. ; ebx contains zero here which will be used to set the new owner NULL
  873. push eax ; save &PRCB->LockQueue[Number]
  874. cPublicFpo 0,2
  875. ; Use compare exchange to attempt to clear the actual lock.
  876. ; If there are still no processors waiting for the lock when
  877. ; the compare exchange happens, the old contents of the lock
  878. ; should be the address of this lock entry (eax).
  879. lock cmpxchg [ecx], ebx ; store 0 if no waiters
  880. pop eax ; restore lock queue address
  881. cPublicFpo 0,1
  882. jnz short rqsl60 ; jif store failed
  883. ; The lock has been released. Lower IRQL and return to caller.
  884. endif
  885. rqsl20:
  886. ; Set the local APIC's Task Priority Register to the value
  887. ; corresponding to the new IRQL.
  888. movzx ecx, byte ptr _HalpIRQLtoTPR[edx]
  889. ifndef NT_UP
  890. pop ebx ; restore ebx
  891. cPublicFpo 0,0
  892. endif
  893. mov dword ptr APIC[LU_TPR], ecx
  894. ; Ensure that the requested priority is set before returning,
  895. ; the caller is counting on it.
  896. mov eax, dword ptr APIC[LU_TPR]
  897. if DBG
  898. cmp ecx, eax ; Verify IRQL read back is same as
  899. je short @f ; set value
  900. int 3
  901. @@:
  902. endif
  903. fstRET KeReleaseQueuedSpinLock
  904. ifndef NT_UP
  905. cPublicFpo 0,1
  906. ; Another processor is waiting on this lock. Hand the lock
  907. ; to that processor by getting the address of its LockQueue
  908. ; entry, turning ON its owner bit and OFF its wait bit.
  909. rqsl40: xor [ebx].LqLock, (LOCK_QUEUE_OWNER+LOCK_QUEUE_WAIT)
  910. ; Done, the other processor now owns the lock, clear the next
  911. ; field in my LockQueue entry (to preserve the order for entering
  912. ; the queue again) and proceed to lower IRQL and return.
  913. mov [eax].LqNext, 0
  914. jmp short rqsl20
  915. ; We get here if another processor is attempting to acquire
  916. ; the lock but had not yet updated the next field in this
  917. ; processor's Queued Lock Next field. Wait for the next
  918. ; field to be updated.
  919. rqsl60: mov ebx, [eax].LqNext
  920. test ebx, ebx ; check if still 0
  921. jnz short rqsl40 ; jif Next field now set.
  922. YIELD ; wait a bit
  923. jmp short rqsl60 ; continue waiting
  924. if DBG
  925. cPublicFpo 0,1
  926. rqsl98: stdCall _KeBugCheckEx,<SPIN_LOCK_NOT_OWNED,ecx,eax,0,1>
  927. int 3 ; so stacktrace works
  928. endif
  929. endif
  930. fstENDP KeReleaseQueuedSpinLock
  931. page ,132
  932. subttl "Try to Acquire Queued SpinLock"
  933. ;++
  934. ;
  935. ; LOGICAL
  936. ; KeTryToAcquireQueuedSpinLock (
  937. ; IN KSPIN_LOCK_QUEUE_NUMBER Number,
  938. ; OUT PKIRQL OldIrql
  939. ; )
  940. ;
  941. ; LOGICAL
  942. ; KeTryToAcquireQueuedSpinLockRaiseToSynch (
  943. ; IN KSPIN_LOCK_QUEUE_NUMBER Number,
  944. ; OUT PKIRQL OldIrql
  945. ; )
  946. ;
  947. ; Routine Description:
  948. ;
  949. ; This function raises the current IRQL to DISPATCH/SYNCH level
  950. ; and attempts to acquire the specified queued spinlock. If the
  951. ; spinlock is already owned by another thread, IRQL is restored
  952. ; to its previous value and FALSE is returned.
  953. ;
  954. ; Arguments:
  955. ;
  956. ; Number (ecx) - Supplies the queued spinlock number.
  957. ; OldIrql (edx) - A pointer to the variable to receive the old
  958. ; IRQL.
  959. ;
  960. ; Return Value:
  961. ;
  962. ; TRUE if the lock was acquired, FALSE otherwise.
  963. ; N.B. ZF is set if FALSE returned, clear otherwise.
  964. ;
  965. ;--
  966. align 16
  967. cPublicFastCall KeTryToAcquireQueuedSpinLockRaiseToSynch,2
  968. cPublicFpo 0,0
  ; Push the target TPR vector and fall into the common try-acquire code
  ; in KeTryToAcquireQueuedSpinLock (label taqsl10).
  969. push APIC_SYNCH_VECTOR ; raise to SYNCH
  970. jmp short taqsl10 ; continue in common code
  971. fstENDP KeTryToAcquireQueuedSpinLockRaiseToSynch
  972. cPublicFastCall KeTryToAcquireQueuedSpinLock,2
  973. cPublicFpo 0,0
  974. push DPC_VECTOR ; raise to DPC level
  975. ; Attempt to get the lock with interrupts disabled, raising
  976. ; the priority in the interrupt controller only if acquisition
  977. ; is successful.
  ; Common entry (also jumped to by the RaiseToSynch variant): the new
  ; TPR vector has been pushed by whichever public entry was called.
  ; MP stack layout below: [esp] = EFLAGS, [esp+4] = OldIrql pointer,
  ; [esp+8] = new TPR vector.
  978. taqsl10:
  979. ifndef NT_UP
  980. push edx ; save address of OldIrql
  981. pushfd ; save interrupt state
  982. cPublicFpo 0,3
  983. ; Get address of Lock Queue entry
  984. cli
  985. mov edx, PCR[PcPrcb] ; get address of PRCB
  986. lea edx, [edx+ecx*8].PbLockQueue ; get &PRCB->LockQueue[Number]
  987. ; Get address of the actual lock.
  988. mov ecx, [edx].LqLock
  989. if DBG
  990. test ecx, LOCK_QUEUE_OWNER+LOCK_QUEUE_WAIT
  991. jnz short taqsl98 ; jiff lock already held (or
  992. ; this proc already waiting).
  993. endif
  994. ; quick test, get out if already taken
  995. cmp dword ptr [ecx], 0 ; check if already taken
  996. jnz short taqsl60 ; jif already taken
  997. xor eax, eax ; comparison value (not locked)
  998. ; Store the Lock Queue entry address in the lock ONLY if the
  999. ; current lock value is 0.
  1000. lock cmpxchg [ecx], edx
  1001. jnz short taqsl60
  1002. ; Lock has been acquired.
  1003. ; note: the actual lock address will be word aligned, we use
  1004. ; the bottom two bits as indicators, bit 0 is LOCK_QUEUE_WAIT,
  1005. ; bit 1 is LOCK_QUEUE_OWNER.
  1006. or ecx, LOCK_QUEUE_OWNER ; mark self as lock owner
  1007. mov [edx].LqLock, ecx
  1008. mov eax, [esp+8] ; get new IRQL
  1009. mov edx, [esp+4] ; get addr to save OldIrql
  1010. else
  1011. mov eax, [esp] ; get new IRQL
  1012. endif
  1013. ; Raise IRQL and return success.
  1014. ; Get old priority (vector) from Local APIC's Task Priority
  1015. ; Register and set the new priority.
  1016. mov ecx, dword ptr APIC[LU_TPR] ; (ecx) = Old Priority
  1017. mov dword ptr APIC[LU_TPR], eax ; Set New Priority
  1018. ifndef NT_UP
  1019. popfd ; restore interrupt state
  1020. add esp, 8 ; free locals
  1021. else
  1022. add esp, 4 ; free local
  1023. endif
  1024. cPublicFpo 0,0
  1025. shr ecx, 4
  1026. movzx eax, _HalpVectorToIRQL[ecx] ; (al) = OldIrql
  1027. mov [edx], al ; save OldIrql
  ; "or eax, 1" leaves ZF clear, matching the routine's contract that
  ; ZF is clear when TRUE is returned.
  1028. xor eax, eax ; return TRUE
  1029. or eax, 1
  1030. fstRET KeTryToAcquireQueuedSpinLock
  1031. ifndef NT_UP
  1032. taqsl60:
  1033. ; The lock is already held by another processor. Indicate
  1034. ; failure to the caller.
  ; (xor below also sets ZF, the FALSE indication per the contract.)
  1035. popfd ; restore interrupt state
  1036. add esp, 8 ; free locals
  1037. xor eax, eax ; return FALSE
  1038. fstRET KeTryToAcquireQueuedSpinLock
  1039. if DBG
  1040. cPublicFpo 0,2
  1041. taqsl98: stdCall _KeBugCheckEx,<SPIN_LOCK_ALREADY_OWNED,ecx,edx,0,0>
  1042. int 3 ; so stacktrace works
  1043. endif
  1044. endif
  1045. fstENDP KeTryToAcquireQueuedSpinLock
  1046. _TEXT ends
  1047. ENDIF ; NT_INST
  1048. end