Leaked source code of Windows Server 2003

        TITLE "Spin Locks"
;++
;
; Copyright (c) 1989-1998 Microsoft Corporation
;
; Module Name:
;
;     spinlock.asm
;
; Abstract:
;
;     This module implements x86 spinlock functions for the PC+MP HAL.
;
; Author:
;
;     Bryan Willman (bryanwi) 13 Dec 89
;
; Environment:
;
;     Kernel mode only.
;
; Revision History:
;
;     Ron Mosgrove (o-RonMo) Dec 93 - modified for PC+MP HAL.
;--
        .486p
include callconv.inc ; calling convention macros
include i386\kimacro.inc
include hal386.inc
include mac386.inc
include apic.inc
include ntapic.inc
        EXTRNP _KeBugCheckEx,5,IMPORT
        EXTRNP KfRaiseIrql, 1,,FASTCALL
        EXTRNP KfLowerIrql, 1,,FASTCALL
        EXTRNP _KeSetEventBoostPriority, 2, IMPORT
        EXTRNP _KeWaitForSingleObject,5, IMPORT
        extrn _HalpVectorToIRQL:byte
        extrn _HalpIRQLtoTPR:byte
ifdef NT_UP
LOCK_ADD equ add
LOCK_DEC equ dec
LOCK_CMPXCHG equ cmpxchg
else
LOCK_ADD equ lock add
LOCK_DEC equ lock dec
LOCK_CMPXCHG equ lock cmpxchg
endif
_TEXT SEGMENT PARA PUBLIC 'CODE'
        ASSUME DS:FLAT, ES:FLAT, SS:FLAT, FS:NOTHING, GS:NOTHING
        SUBTTL "Acquire Kernel Spin Lock"
;++
;
; KIRQL
; FASTCALL
; KfAcquireSpinLock (
;     IN PKSPIN_LOCK SpinLock
;     )
;
; Routine Description:
;
;     This function raises to DISPATCH_LEVEL and acquires the specified
;     spin lock.
;
; Arguments:
;
;     SpinLock (ecx) - Supplies a pointer to a kernel spin lock.
;
; Return Value:
;
;     The old IRQL is returned as the function value.
;
;--
        align 16
cPublicFastCall KfAcquireSpinLock ,1
        cPublicFpo 0,0
        mov edx, dword ptr APIC[LU_TPR] ; get old IRQL vector
        mov dword ptr APIC[LU_TPR], DPC_VECTOR ; raise IRQL
        jmp short sls10 ; finish in common code
fstENDP KfAcquireSpinLock
        SUBTTL "Acquire Kernel Spin Lock"
;++
;
; KIRQL
; FASTCALL
; KeAcquireSpinLockRaiseToSynch (
;     IN PKSPIN_LOCK SpinLock
;     )
;
; Routine Description:
;
;     This function raises to SYNCH_LEVEL and acquires the specified
;     spin lock.
;
; Arguments:
;
;     SpinLock (ecx) - Supplies a pointer to a kernel spin lock.
;
; Return Value:
;
;     The old IRQL is returned as the function value.
;
;--
        align 16
cPublicFastCall KeAcquireSpinLockRaiseToSynch,1
        cPublicFpo 0,0
        mov edx, dword ptr APIC[LU_TPR] ; get old vector
        mov dword ptr APIC[LU_TPR], APIC_SYNCH_VECTOR ; raise IRQL
sls10:  shr edx, 4 ; extract high 4 bits of vector
        movzx eax, _HalpVectorToIRQL[edx] ; translate TPR to old IRQL
ifndef NT_UP
;
; Attempt to acquire the specified spin lock.
;
sls20:  ACQUIRE_SPINLOCK ecx, <short sls30> ;
        fstRET KeAcquireSpinLockRaiseToSynch
;
; Lock is owned - spin until it is free, then try again.
;
sls30:  SPIN_ON_SPINLOCK ecx, sls20 ;
else
        fstRET KeAcquireSpinLockRaiseToSynch
endif
fstENDP KeAcquireSpinLockRaiseToSynch
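;
; The two entry points above share one pattern: write the new priority vector
; into the local APIC TPR, translate the old TPR value to an IRQL through
; _HalpVectorToIRQL, and then spin on the lock word. A rough C analogue of the
; MP path is sketched below for illustration only; it uses C11 atomics in
; place of the ACQUIRE_SPINLOCK/SPIN_ON_SPINLOCK macros, and hypothetical
; read_tpr()/write_tpr() helpers and a vector_to_irql[] table standing in for
; the APIC[LU_TPR] accesses and _HalpVectorToIRQL:
;
;   #include <stdatomic.h>
;   #include <stdint.h>
;
;   typedef atomic_uintptr_t SPIN_LOCK;            /* stands in for KSPIN_LOCK    */
;
;   unsigned char acquire_spin_lock(SPIN_LOCK *lock, uint32_t new_vector)
;   {
;       uint32_t old_vector = read_tpr();           /* get old IRQL vector         */
;       write_tpr(new_vector);                      /* raise IRQL                  */
;
;       for (;;) {
;           /* try to take the lock with a single interlocked operation       */
;           if (!atomic_exchange_explicit(lock, 1, memory_order_acquire))
;               return vector_to_irql[old_vector >> 4];   /* return old IRQL  */
;
;           /* owned - spin with plain reads until it looks free, then retry  */
;           while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
;               ;                                   /* YIELD would go here     */
;       }
;   }
;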
        SUBTTL "KeAcquireSpinLockRaiseToSynchMCE"
;++
;
; KIRQL
; FASTCALL
; KeAcquireSpinLockRaiseToSynchMCE (
;     IN PKSPIN_LOCK SpinLock
;     )
;
; Routine Description:
;
;     This function performs the same function as KeAcquireSpinLockRaiseToSynch
;     but provides a workaround for an IFU erratum in Pentium Pro processors
;     prior to stepping 619.
;
; Arguments:
;
;     (ecx) = SpinLock - Supplies a pointer to a kernel spin lock.
;
; Return Value:
;
;     The old IRQL is returned as the function value.
;
;--
ifndef NT_UP
        align 16
cPublicFastCall KeAcquireSpinLockRaiseToSynchMCE,1
        cPublicFpo 0,0
        mov edx, dword ptr APIC[LU_TPR] ; (edx) = Old Priority (Vector)
        mov eax, edx
        shr eax, 4
        movzx eax, _HalpVectorToIRQL[eax] ; (al) = OldIrql
;
; Test lock
;
; TEST_SPINLOCK ecx,<short slm30> ; NOTE - Macro expanded below:
        test dword ptr [ecx], 1
        nop ; On a P6 prior to stepping B1 (619), we
        nop ; need these 5 NOPs to ensure that we
        nop ; do not take a machine check exception.
        nop ; The cost is just 1.5 clocks as the P6
        nop ; just tosses the NOPs.
        jnz short slm30
;
; Raise irql.
;
slm10:  mov dword ptr APIC[LU_TPR], APIC_SYNCH_VECTOR
;
; Attempt to assert the lock
;
        ACQUIRE_SPINLOCK ecx,<short slm20>
        fstRET KeAcquireSpinLockRaiseToSynchMCE
;
; Lock is owned, spin till it looks free, then go get it
;
        align dword
slm20:  mov dword ptr APIC[LU_TPR], edx
        align dword
slm30:  SPIN_ON_SPINLOCK ecx,slm10
fstENDP KeAcquireSpinLockRaiseToSynchMCE
endif
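;
; The MCE variant differs mainly in ordering: it reads the lock word first
; (padded with the NOPs the erratum requires), only raises the TPR once the
; lock looks free, and drops back to the old priority if the interlocked
; attempt then loses the race. A hedged C-style sketch of that ordering,
; reusing the hypothetical helpers from the sketch above:
;
;   void acquire_spin_lock_mce(SPIN_LOCK *lock, uint32_t new_vector,
;                              uint32_t old_vector)
;   {
;       for (;;) {
;           while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
;               ;                              /* spin at the old priority      */
;           write_tpr(new_vector);             /* raise only when it looks free */
;           if (!atomic_exchange_explicit(lock, 1, memory_order_acquire))
;               return;                        /* acquired                      */
;           write_tpr(old_vector);             /* lost the race - back off      */
;       }
;   }
;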
        SUBTTL "Release Kernel Spin Lock"
;++
;
; VOID
; FASTCALL
; KfReleaseSpinLock (
;     IN PKSPIN_LOCK SpinLock,
;     IN KIRQL OldIrql
;     )
;
; Routine Description:
;
;     This function releases a spin lock and lowers to the old IRQL.
;
; Arguments:
;
;     SpinLock (ecx) - Supplies a pointer to a spin lock.
;     OldIrql (dl) - Supplies the old IRQL value.
;
; Return Value:
;
;     None.
;
;--
        align 16
cPublicFastCall KfReleaseSpinLock ,2
        cPublicFpo 0,0
        movzx eax, dl ; zero extend old IRQL
ifndef NT_UP
        RELEASE_SPINLOCK ecx ; release spin lock
endif
;
; Lower IRQL to its previous level.
;
; N.B. Ensure that the requested priority is set before returning.
;
        movzx ecx, _HalpIRQLtoTPR[eax] ; translate IRQL to TPR value
        mov dword ptr APIC[LU_TPR], ecx ; lower to old IRQL
        mov eax, dword ptr APIC[LU_TPR] ; synchronize
        fstRET KfReleaseSpinLock
fstENDP KfReleaseSpinLock
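;
; Release is the mirror image: clear the lock word, then lower the TPR and
; read it back so the priority change is visible before returning. As an
; illustrative sketch only, with irql_to_tpr[] standing in for _HalpIRQLtoTPR
; and the same hypothetical read_tpr()/write_tpr() pair:
;
;   void release_spin_lock(SPIN_LOCK *lock, unsigned char old_irql)
;   {
;       atomic_store_explicit(lock, 0, memory_order_release);  /* free the lock  */
;       write_tpr(irql_to_tpr[old_irql]);                      /* lower priority */
;       (void)read_tpr();                                      /* synchronize    */
;   }
;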
        SUBTTL "Acquire Lock With Interrupts Disabled"
;++
;
; ULONG
; FASTCALL
; HalpAcquireHighLevelLock (
;     IN PKSPIN_LOCK SpinLock
;     )
;
; Routine Description:
;
;     This function disables interrupts and acquires a spinlock.
;
; Arguments:
;
;     SpinLock (ecx) - Supplies a pointer to a spin lock.
;
; Return Value:
;
;     The old EFLAGS are returned as the function value.
;
;--
        align 16
cPublicFastCall HalpAcquireHighLevelLock, 1
        pushfd ; save EFLAGS
        pop eax ;
ahll10: cli ; disable interrupts
        ACQUIRE_SPINLOCK ecx, ahll20 ; attempt to acquire spin lock
        fstRET HalpAcquireHighLevelLock
ahll20: push eax ; restore EFLAGS
        popfd ;
        SPIN_ON_SPINLOCK ecx, <ahll10> ; wait for lock to be free
fstENDP HalpAcquireHighLevelLock
        SUBTTL "Release Lock And Enable Interrupts"
;++
;
; VOID
; FASTCALL
; HalpReleaseHighLevelLock (
;     IN PKSPIN_LOCK SpinLock,
;     IN ULONG Eflags
;     )
;
; Routine Description:
;
;     This function releases a kernel spin lock and restores the old EFLAGS.
;
; Arguments:
;
;     SpinLock (ecx) - Supplies a pointer to a spin lock.
;     Eflags (edx) - supplies the old EFLAGS value.
;
; Return Value:
;
;     None.
;
;--
        align 16
cPublicFastCall HalpReleaseHighLevelLock, 2
        RELEASE_SPINLOCK ecx ; release spin lock
        push edx ; restore old EFLAGS
        popfd ;
        fstRET HalpReleaseHighLevelLock
fstENDP HalpReleaseHighLevelLock
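;
; The high-level lock pair simply brackets the spin lock with an interrupt
; disable/restore instead of a TPR change. Roughly, and assuming MSVC-style
; _disable() and __readeflags()/__writeeflags() intrinsics in place of the
; pushfd/cli/popfd sequences used above:
;
;   unsigned long acquire_high_level_lock(SPIN_LOCK *lock)
;   {
;       unsigned long flags = (unsigned long)__readeflags();    /* save EFLAGS    */
;
;       for (;;) {
;           _disable();                                          /* cli            */
;           if (!atomic_exchange_explicit(lock, 1, memory_order_acquire))
;               return flags;                                    /* lock acquired  */
;           __writeeflags(flags);                                /* re-enable and  */
;           while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
;               ;                                                /* ... spin       */
;       }
;   }
;
;   void release_high_level_lock(SPIN_LOCK *lock, unsigned long flags)
;   {
;       atomic_store_explicit(lock, 0, memory_order_release);
;       __writeeflags(flags);                                    /* restore EFLAGS */
;   }
;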
        SUBTTL "Acquire Fast Mutex"
;++
;
; VOID
; FASTCALL
; ExAcquireFastMutex (
;     IN PFAST_MUTEX FastMutex
;     )
;
; Routine description:
;
;     This function acquires ownership of the specified FastMutex.
;
; Arguments:
;
;     (ecx) = FastMutex - Supplies a pointer to the fast mutex.
;
; Return Value:
;
;     None.
;
;--
        align 16
cPublicFastCall ExAcquireFastMutex,1
        cPublicFpo 0,0
        mov eax, dword ptr APIC[LU_TPR] ; (eax) = Old Priority (Vector)
if DBG
;
; Caller must already be at or below APC_LEVEL.
;
        cmp eax, APC_VECTOR
        jg short afm11 ; irql too high ==> fatal.
endif
        mov dword ptr APIC[LU_TPR], APC_VECTOR ; Write New Priority to the TPR
        LOCK_DEC dword ptr [ecx].FmCount ; Get count
        jnz short afm10 ; Not the owner so go wait.
        mov dword ptr [ecx].FmOldIrql, eax
;
; Use esp to track the owning thread for debugging purposes.
; !thread from kd will find the owning thread. Note that the
; owner isn't cleared on release, check if the mutex is owned
; first.
;
        mov dword ptr [ecx].FmOwner, esp
        fstRet ExAcquireFastMutex
        cPublicFpo 0,0
afm10:
        inc dword ptr [ecx].FmContention
        cPublicFpo 0,2
        push ecx
        push eax
        add ecx, FmEvent ; Wait on Event
        stdCall _KeWaitForSingleObject,<ecx,WrExecutive,0,0,0>
        pop eax ; (al) = OldTpr
        pop ecx ; (ecx) = FAST_MUTEX
        mov dword ptr [ecx].FmOldIrql, eax
;
; Use esp to track the owning thread for debugging purposes.
; !thread from kd will find the owning thread. Note that the
; owner isn't cleared on release, check if the mutex is owned
; first.
;
        mov dword ptr [ecx].FmOwner, esp
        fstRet ExAcquireFastMutex
if DBG
        cPublicFpo 0,1
afm11:  stdCall _KeBugCheckEx,<IRQL_NOT_GREATER_OR_EQUAL,ecx,eax,033h,0>
endif
fstENDP ExAcquireFastMutex
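;
; The fast mutex keeps its state in FmCount: 1 means free, 0 means owned, and
; a negative value means owned with waiters recorded. Acquire raises the TPR
; to APC_VECTOR, decrements the count, and waits on the embedded event if the
; mutex was not free. A hedged C analogue, with the kernel calls replaced by
; hypothetical helpers and the structure fields renamed for the sketch:
;
;   typedef struct _FAST_MUTEX_SKETCH {
;       atomic_int Count;              /* FmCount: 1 free, 0 owned, <0 waiters */
;       void      *Owner;              /* FmOwner                              */
;       unsigned   Contention;         /* FmContention                         */
;       void      *Event;              /* FmEvent                              */
;       unsigned   OldIrql;            /* FmOldIrql (holds the old TPR value)  */
;   } FAST_MUTEX_SKETCH;
;
;   void acquire_fast_mutex(FAST_MUTEX_SKETCH *m, uint32_t apc_vector)
;   {
;       uint32_t old_tpr = read_tpr();                 /* save old priority      */
;       write_tpr(apc_vector);                         /* raise to APC level     */
;
;       if (atomic_fetch_sub(&m->Count, 1) != 1) {     /* mutex was not free     */
;           m->Contention += 1;
;           wait_for_event(m->Event);                  /* KeWaitForSingleObject  */
;       }
;       m->OldIrql = old_tpr;                          /* remember old priority  */
;       m->Owner   = current_stack_pointer();          /* esp, for debugging     */
;   }
;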
        SUBTTL "Release Fast Mutex"
;++
;
; VOID
; FASTCALL
; ExReleaseFastMutex (
;     IN PFAST_MUTEX FastMutex
;     )
;
; Routine description:
;
;     This function releases ownership of the FastMutex.
;
; Arguments:
;
;     (ecx) = FastMutex - Supplies a pointer to the fast mutex.
;
; Return Value:
;
;     None.
;
;--
        align 16
cPublicFastCall ExReleaseFastMutex,1
        cPublicFpo 0,0
if DBG
;
; Caller must already be at APC_LEVEL or have APCs blocked.
;
        mov eax, dword ptr APIC[LU_TPR] ; (eax) = Old Priority (Vector)
        cmp eax, APC_VECTOR
        je short rfm04 ; irql is ok.
        cPublicFpo 0,1
        stdCall _KeBugCheckEx,<IRQL_NOT_GREATER_OR_EQUAL,ecx,eax,034h,0>
rfm04:
endif
        mov eax, dword ptr [ecx].FmOldIrql ; (eax) = OldTpr
        LOCK_ADD dword ptr [ecx].FmCount, 1 ; Remove our count
        jle short rfm05 ; if <= 0, set event
        cPublicFpo 0,0
        mov dword ptr APIC[LU_TPR], eax
        mov ecx, dword ptr APIC[LU_TPR]
if DBG
        cmp eax, ecx ; Verify TPR is what was
        je short @f ; written
        int 3
@@:
endif
        fstRet ExReleaseFastMutex
        cPublicFpo 0,1
rfm05:  add ecx, FmEvent
        push eax ; save new tpr
        stdCall _KeSetEventBoostPriority, <ecx, 0>
        pop eax ; restore tpr
        cPublicFpo 0,0
        mov dword ptr APIC[LU_TPR], eax
        mov ecx, dword ptr APIC[LU_TPR]
if DBG
        cmp eax, ecx ; Verify TPR is what was
        je short @f ; written
        int 3
@@:
endif
        fstRet ExReleaseFastMutex
if DBG
endif
fstENDP ExReleaseFastMutex
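;
; Release adds the count back; if the result is still zero or below, at least
; one waiter has decremented it in the meantime, so the embedded event is
; signalled before the TPR is restored. Continuing the sketch above
; (illustrative only):
;
;   void release_fast_mutex(FAST_MUTEX_SKETCH *m)
;   {
;       uint32_t old_tpr = m->OldIrql;                 /* captured at acquire      */
;
;       if (atomic_fetch_add(&m->Count, 1) + 1 <= 0)   /* waiters present?         */
;           set_event_boost_priority(m->Event);        /* KeSetEventBoostPriority  */
;
;       write_tpr(old_tpr);                            /* lower to previous level  */
;       (void)read_tpr();                              /* read back to synchronize */
;   }
;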
        SUBTTL "Try To Acquire Fast Mutex"
;++
;
; BOOLEAN
; FASTCALL
; ExTryToAcquireFastMutex (
;     IN PFAST_MUTEX FastMutex
;     )
;
; Routine description:
;
;     This function acquires ownership of the FastMutex.
;
; Arguments:
;
;     (ecx) = FastMutex - Supplies a pointer to the fast mutex.
;
; Return Value:
;
;     Returns TRUE if the FAST_MUTEX was acquired; otherwise FALSE.
;
;--
        align 16
cPublicFastCall ExTryToAcquireFastMutex,1
        cPublicFpo 0,0
if DBG
;
; Caller must already be at or below APC_LEVEL.
;
        mov eax, dword ptr APIC[LU_TPR] ; (eax) = Old Priority (Vector)
        cmp eax, APC_VECTOR
        jg short tam11 ; irql too high ==> fatal.
endif
;
; Try to acquire.
;
        push dword ptr APIC[LU_TPR] ; Save Old Priority (Vector)
        mov dword ptr APIC[LU_TPR], APC_VECTOR ; Write New Priority to the TPR
        mov edx, 0 ; Value to set
        mov eax, 1 ; Value to compare against
        LOCK_CMPXCHG dword ptr [ecx].FmCount, edx ; Attempt to acquire
        jnz short tam20 ; got it?
        cPublicFpo 0,0
        mov eax, 1 ; return TRUE
        pop dword ptr [ecx].FmOldIrql ; Store Old TPR
;
; Use esp to track the owning thread for debugging purposes.
; !thread from kd will find the owning thread. Note that the
; owner isn't cleared on release, check if the mutex is owned
; first.
;
        mov dword ptr [ecx].FmOwner, esp
        fstRet ExTryToAcquireFastMutex
tam20:  pop ecx ; (ecx) = Old TPR
        mov dword ptr APIC[LU_TPR], ecx
        mov eax, dword ptr APIC[LU_TPR]
if DBG
        cmp ecx, eax ; Verify TPR is what was
        je short @f ; written
        int 3
@@:
endif
        xor eax, eax ; return FALSE
        YIELD
        fstRet ExTryToAcquireFastMutex ; all done
if DBG
        cPublicFpo 0,1
tam11:  stdCall _KeBugCheckEx,<IRQL_NOT_GREATER_OR_EQUAL,ecx,eax,033h,0>
endif
fstENDP ExTryToAcquireFastMutex
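;
; The try variant raises the TPR first, makes a single compare-exchange of
; FmCount from 1 (free) to 0 (owned), and backs the TPR change out again on
; failure. Sketch, hedged, with the same hypothetical helpers as above:
;
;   int try_acquire_fast_mutex(FAST_MUTEX_SKETCH *m, uint32_t apc_vector)
;   {
;       uint32_t old_tpr  = read_tpr();                /* save old priority     */
;       int      expected = 1;                         /* 1 == mutex is free    */
;
;       write_tpr(apc_vector);                         /* raise to APC level    */
;       if (atomic_compare_exchange_strong(&m->Count, &expected, 0)) {
;           m->OldIrql = old_tpr;                      /* acquired              */
;           m->Owner   = current_stack_pointer();
;           return 1;                                  /* TRUE                  */
;       }
;       write_tpr(old_tpr);                            /* back out the raise    */
;       (void)read_tpr();
;       return 0;                                      /* FALSE                 */
;   }
;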
        SUBTTL "Acquire In Stack Queued SpinLock"
;++
;
; VOID
; FASTCALL
; KeAcquireInStackQueuedSpinLock (
;     IN PKSPIN_LOCK SpinLock,
;     IN PKLOCK_QUEUE_HANDLE LockHandle
;     )
;
; VOID
; FASTCALL
; KeAcquireInStackQueuedSpinLockRaiseToSynch (
;     IN PKSPIN_LOCK SpinLock,
;     IN PKLOCK_QUEUE_HANDLE LockHandle
;     )
;
; Routine Description:
;
;     These functions raise IRQL and acquire an in-stack queued spin lock.
;
; Arguments:
;
;     SpinLock (ecx) - Supplies a pointer to a spin lock.
;     LockHandle (edx) - supplies a pointer to a lock context.
;
; Return Value:
;
;     None.
;
;--
        align 16
cPublicFastCall KeAcquireInStackQueuedSpinLock, 2
        cPublicFpo 0,0
        mov eax, dword ptr APIC[LU_TPR] ; get old IRQL vector
        mov dword ptr APIC[LU_TPR], DPC_VECTOR ; raise IRQL
        jmp short iqsl10 ; finish in common code
fstENDP KeAcquireInStackQueuedSpinLock
        align 16
cPublicFastCall KeAcquireInStackQueuedSpinLockRaiseToSynch, 2
        cPublicFpo 0,0
        mov eax, dword ptr APIC[LU_TPR] ; get old IRQL vector
        mov dword ptr APIC[LU_TPR], APIC_SYNCH_VECTOR ; raise IRQL
iqsl10: shr eax, 4 ; extract high 4 bits of vector
        mov al, _HalpVectorToIRQL[eax] ; translate to old IRQL
        mov [edx].LqhOldIrql, al ; save old IRQL in lock context
;
; Set spin lock address in lock context and clear next queue link.
;
ifndef NT_UP
        mov [edx].LqhLock, ecx ; set spin lock address
        and dword ptr [edx].LqhNext, 0 ; clear next link
ifdef CAPKERN_SYNCH_POINTS
        push ecx ; lock address
        push 000010101h ; 1 Dword, Timestamp, Subcode = 1
        call _CAP_Log_NInt ;
        add esp, 8 ;
endif
;
; Exchange the value of the lock with the address of the lock context.
;
        mov eax, edx ; save lock context address
        xchg [ecx], edx ; exchange lock context address
        cmp edx, 0 ; check if lock is already held
        jnz short iqsl30 ; if nz, lock already held
;
; N.B. The spin lock address is dword aligned and the bottom two bits are
; used as indicators.
;
; Bit 0 is LOCK_QUEUE_WAIT.
; Bit 1 is LOCK_QUEUE_OWNER.
;
        or [eax].LqLock, LOCK_QUEUE_OWNER ; set lock owner
endif
iqsl20: fstRET KeAcquireInStackQueuedSpinLockRaiseToSynch
;
; The lock is already held by another processor. Set the wait bit in the
; lock context, then set the next field in the lock context of the last
; waiter in the lock queue.
;
ifndef NT_UP
iqsl30: or [eax].LqLock, LOCK_QUEUE_WAIT ; set lock wait
        mov [edx].LqNext, eax ; set next entry in previous last
ifdef CAPKERN_SYNCH_POINTS
        xor edx, edx ; clear wait counter
iqsl40: inc edx ; count wait time
        test [eax].LqLock, LOCK_QUEUE_WAIT ; check if lock ownership granted
        jz short iqsl50 ; if z, lock ownership granted
        YIELD ; yield to other SMT processors
        jmp short iqsl40 ;
iqsl50: push ecx ; lock address
        push edx ; wait counter
        push 000020104h ; 2 Dwords, Timestamp, Subcode = 4
        call _CAP_Log_NInt ;
        add esp, 12 ;
        fstRET KeAcquireInStackQueuedSpinLockRaiseToSynch
else
iqsl40: test [eax].LqLock, LOCK_QUEUE_WAIT ; check if lock ownership granted
        jz short iqsl20 ; if z, lock ownership granted
        YIELD ; yield to other SMT processors
        jmp short iqsl40 ;
endif
endif
fstENDP KeAcquireInStackQueuedSpinLockRaiseToSynch
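;
; The queued forms implement an MCS-style lock: the lock word holds the tail
; of a queue of per-waiter entries, the WAIT/OWNER flags live in the low two
; bits of each entry's lock field, and ownership is handed from one entry to
; the next on release. A compact C sketch of the acquire path above, written
; with C11 atomics and simplified field names (illustrative only):
;
;   typedef struct _LOCK_QUEUE_SKETCH {
;       struct _LOCK_QUEUE_SKETCH *_Atomic Next;    /* LqhNext / LqNext          */
;       atomic_uintptr_t Lock;                      /* LqhLock / LqLock + flags  */
;   } LOCK_QUEUE_SKETCH;
;
;   #define LQ_WAIT   1u                            /* LOCK_QUEUE_WAIT           */
;   #define LQ_OWNER  2u                            /* LOCK_QUEUE_OWNER          */
;
;   void acquire_queued_lock(atomic_uintptr_t *lock, LOCK_QUEUE_SKETCH *entry)
;   {
;       LOCK_QUEUE_SKETCH *prev;
;
;       atomic_store(&entry->Lock, (uintptr_t)lock);      /* remember the lock   */
;       atomic_store(&entry->Next, NULL);                 /* we are the new tail */
;
;       prev = (LOCK_QUEUE_SKETCH *)atomic_exchange(lock, (uintptr_t)entry);
;       if (prev == NULL) {                               /* queue was empty     */
;           atomic_fetch_or(&entry->Lock, LQ_OWNER);      /* we own the lock     */
;           return;
;       }
;
;       atomic_fetch_or(&entry->Lock, LQ_WAIT);           /* mark self waiting   */
;       atomic_store(&prev->Next, entry);                 /* link behind prev    */
;       while (atomic_load(&entry->Lock) & LQ_WAIT)
;           ;                                             /* spin until handed   */
;   }
;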
        SUBTTL "Acquire Queued Spin Lock"
;++
;
; KIRQL
; FASTCALL
; KeAcquireQueuedSpinLock (
;     IN KSPIN_LOCK_QUEUE_NUMBER Number
;     )
;
; KIRQL
; FASTCALL
; KeAcquireQueuedSpinLockRaiseToSynch (
;     IN KSPIN_LOCK_QUEUE_NUMBER Number
;     )
;
; Routine Description:
;
;     These functions raise IRQL and acquire a processor specific queued spin
;     lock.
;
; Arguments:
;
;     Number (ecx) - Supplies the queued spinlock number.
;
; Return Value:
;
;     The old IRQL is returned as the function value.
;
;--
        .errnz (LOCK_QUEUE_HEADER_SIZE - 8)
        align 16
cPublicFastCall KeAcquireQueuedSpinLock, 1
        cPublicFpo 0,0
        mov eax, dword ptr APIC[LU_TPR] ; get old IRQL vector
        mov dword ptr APIC[LU_TPR], DPC_VECTOR ; raise IRQL
        jmp short aqsl10 ; finish in common code
fstENDP KeAcquireQueuedSpinLock
        align 16
cPublicFastCall KeAcquireQueuedSpinLockRaiseToSynch, 1
        cPublicFpo 0,0
        mov eax, dword ptr APIC[LU_TPR] ; get old IRQL vector
        mov dword ptr APIC[LU_TPR], APIC_SYNCH_VECTOR ; raise IRQL
aqsl10: shr eax, 4 ; extract high 4 bits of vector
        movzx eax, byte ptr _HalpVectorToIRQL[eax] ; translate to old IRQL
;
; Get address of per processor lock queue entry.
;
ifndef NT_UP
        mov edx, PCR[PcPrcb] ; get address of PRCB
        lea edx, [edx+ecx*8].PbLockQueue ; get lock queue address
        mov ecx, [edx].LqLock ; get spin lock address
ifdef CAPKERN_SYNCH_POINTS
        push ecx ; lock address
        push 000010101h ; 1 Dword, Timestamp, Subcode = 1
        call _CAP_Log_NInt ;
        add esp, 8 ;
endif
        push eax ; save old IRQL
        cPublicFpo 0,1
if DBG
        test ecx, LOCK_QUEUE_OWNER + LOCK_QUEUE_WAIT ; inconsistent state?
        jnz short aqsl60 ; if nz, inconsistent state
endif
;
; Exchange the value of the lock with the address of the lock context.
;
        mov eax, edx ; save lock queue entry address
        xchg [ecx], edx ; exchange lock queue address
        cmp edx, 0 ; check if lock is already held
        jnz short aqsl30 ; if nz, lock already held
;
; N.B. The spin lock address is dword aligned and the bottom two bits are
; used as indicators.
;
; Bit 0 is LOCK_QUEUE_WAIT.
; Bit 1 is LOCK_QUEUE_OWNER.
;
        or [eax].LqLock, LOCK_QUEUE_OWNER ; set lock owner
aqsl20: pop eax ; restore old IRQL
        cPublicFpo 0,0
endif
        fstRET KeAcquireQueuedSpinLockRaiseToSynch
;
; The lock is already held by another processor. Set the wait bit in the
; lock context, then set the next field in the lock context of the last
; waiter in the lock queue.
;
ifndef NT_UP
        cPublicFpo 0,1
aqsl30: or [eax].LqLock, LOCK_QUEUE_WAIT ; set lock wait
        mov [edx].LqNext, eax ; set next entry in previous last
ifdef CAPKERN_SYNCH_POINTS
        xor edx, edx ; clear wait counter
aqsl40: inc edx ; count wait time
        test [eax].LqLock, LOCK_QUEUE_WAIT ; check if lock ownership granted
        jz short aqsl50 ; if z, lock ownership granted
        YIELD ; yield to other SMT processors
        jmp short aqsl40 ;
aqsl50: push ecx ; lock address
        push edx ; wait counter
        push 000020104h ; 2 Dwords, Timestamp, Subcode = 4
        call _CAP_Log_NInt ;
        add esp, 12 ;
        jmp short aqsl20 ;
else
aqsl40: test [eax].LqLock, LOCK_QUEUE_WAIT ; check if lock ownership granted
        jz short aqsl20 ; if z, lock ownership granted
        YIELD ; yield to other SMT processors
        jmp short aqsl40 ;
endif
;
; Inconsistent state in lock queue entry.
;
if DBG
        cPublicFpo 0,1
aqsl60: stdCall _KeBugCheckEx,<SPIN_LOCK_ALREADY_OWNED, ecx, edx, 0, 0>
        int 3 ; so stacktrace works
endif
endif
fstENDP KeAcquireQueuedSpinLockRaiseToSynch
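;
; KeAcquireQueuedSpinLock and its SYNCH variant differ from the in-stack forms
; only in where the queue entry lives: it is the per-processor entry
; PRCB->LockQueue[Number], whose Lock field already points at the spin lock.
; In terms of the sketch above, and assuming a hypothetical current_prcb()
; accessor standing in for PCR[PcPrcb], the numbered form is roughly:
;
;   unsigned char acquire_numbered_queued_lock(unsigned number, uint32_t new_vector)
;   {
;       uint32_t           old_vector = read_tpr();          /* old IRQL vector  */
;       LOCK_QUEUE_SKETCH *entry;
;       atomic_uintptr_t  *lock;
;
;       write_tpr(new_vector);                               /* raise IRQL       */
;       entry = &current_prcb()->LockQueue[number];          /* PbLockQueue      */
;       lock  = (atomic_uintptr_t *)atomic_load(&entry->Lock);
;       acquire_queued_lock(lock, entry);                    /* same protocol    */
;       return vector_to_irql[old_vector >> 4];
;   }
;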
        SUBTTL "Release Queued SpinLock"
;++
;
; VOID
; FASTCALL
; KeReleaseQueuedSpinLock (
;     IN KSPIN_LOCK_QUEUE_NUMBER Number,
;     IN KIRQL OldIrql
;     )
;
; Arguments:
;
;     Number (ecx) - Supplies the queued spinlock number.
;
;     OldIrql (dl) - Supplies the old IRQL.
;
; VOID
; KeReleaseInStackQueuedSpinLock (
;     IN PKLOCK_QUEUE_HANDLE LockHandle
;     )
;
; Arguments:
;
;     LockHandle (ecx) - Address of Lock Queue Handle structure.
;
; Routine Description:
;
;     These functions release a queued spinlock and lower IRQL to the old
;     value.
;
; Return Value:
;
;     None.
;
;--
        .errnz (LqhNext)
        .errnz (LOCK_QUEUE_OWNER - 2)
        align 16
cPublicFastCall KeReleaseInStackQueuedSpinLock, 1
        cPublicFpo 0,0
        mov dl, byte ptr [ecx].LqhOldIrql ; set old IRQL
        mov eax, ecx ; set lock queue entry address
        jmp short rqsl10 ; finish in common code
fstENDP KeReleaseInStackQueuedSpinLock
        align 16
cPublicFastCall KeReleaseQueuedSpinLock, 2
        cPublicFpo 0,0
ifndef NT_UP
        mov eax, PCR[PcPrcb] ; get address of PRCB
        lea eax, [eax+ecx*8].PbLockQueue ; set lock queue entry address
endif
rqsl10: movzx edx, dl ; zero extend old IRQL
ifndef NT_UP
        push ebx ; save nonvolatile register
        cPublicFpo 0,1
        mov ebx, [eax].LqNext ; get next entry address
        mov ecx, [eax].LqLock ; get spin lock home address
ifdef CAPKERN_SYNCH_POINTS
        push ecx ; lock address
        push 000010107h ; 1 Dword, Timestamp, Subcode = 7
        call _CAP_Log_NInt ;
        add esp, 8 ;
endif
;
; Make sure we own the lock and clear the bit
;
if DBG
        btr ecx, 1 ; clear lock owner bit
        jnc short rqsl80 ; if nc, owner not set
        cmp dword ptr [ecx], 0 ; lock must be owned for a release
        jz short rqsl80
else
        and ecx, NOT LOCK_QUEUE_OWNER ; Clear out the owner bit
endif
;
; Test if lock waiter present.
;
        test ebx, ebx ; test if lock waiter present
        mov [eax].LqLock, ecx ; clear lock owner bit
        jnz short rqsl40 ; if nz, lock waiter present
;
; Attempt to release queued spin lock.
;
        push eax ; save lock queue entry address
        lock cmpxchg [ecx], ebx ; release spin lock if no waiter
        pop eax ; restore lock queue entry address
        jnz short rqsl50 ; if nz, lock waiter present
rqs120: pop ebx ; restore nonvolatile register
        cPublicFpo 0,0
endif
;
; Lower IRQL to its previous level.
;
; N.B. Ensure that the requested priority is set before returning.
;
rqsl30: movzx ecx, byte ptr _HalpIRQLtoTPR[edx] ; translate IRQL to TPR value
        mov dword ptr APIC[LU_TPR], ecx ; lower to old IRQL
        mov eax, dword ptr APIC[LU_TPR] ; synchronize
        fstRET KeReleaseQueuedSpinLock
;
; Lock waiter is present.
;
; Clear wait bit and set owner bit in next owner lock queue entry.
;
ifndef NT_UP
        cPublicFpo 0,1
rqsl40: xor [ebx].LqLock, (LOCK_QUEUE_OWNER+LOCK_QUEUE_WAIT) ; set bits
        and [eax].LqNext, 0 ; clear next waiter address
        jmp short rqs120 ;
;
; Another processor is attempting to acquire the spin lock.
;
ifdef CAPKERN_SYNCH_POINTS
rqsl50: push ecx ; lock address (for CAP_Log)
        xor ecx, ecx ; clear wait counter
rqsl60: inc ecx ; increment wait counter
        mov ebx, [eax].LqNext ; get address of next entry
        test ebx, ebx ; check if waiter present
        jnz short rqsl70 ; if nz, waiter is present
        YIELD ; yield to other SMT processors
        jmp short rqsl60 ;
rqsl70: push ecx ; wait counter
        push 000020104h ; 2 Dwords, Timestamp, Subcode = 4
        call _CAP_Log_NInt ;
        add esp, 12 ;
        jmp short rqsl40 ;
else
rqsl50: mov ebx, [eax].LqNext ; get address of next entry
        test ebx, ebx ; check if waiter present
        jnz short rqsl40 ; if nz, waiter is present
        YIELD ; yield to other SMT processors
        jmp short rqsl50 ;
endif
;
; Inconsistent state in lock queue entry.
;
if DBG
        cPublicFpo 0,1
rqsl80: stdCall _KeBugCheckEx, <SPIN_LOCK_NOT_OWNED, ecx, eax, 0, 1>
        int 3 ; so stacktrace works
endif
endif
fstENDP KeReleaseQueuedSpinLock
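;
; Release hands the lock to the next queued entry if one is linked in;
; otherwise it tries to swing the tail pointer back to zero, spinning briefly
; if another processor is mid-way through linking itself into the queue.
; Continuing the sketch (illustrative only):
;
;   void release_queued_lock(atomic_uintptr_t *lock, LOCK_QUEUE_SKETCH *entry)
;   {
;       LOCK_QUEUE_SKETCH *next = atomic_load(&entry->Next);
;       uintptr_t          self = (uintptr_t)entry;
;
;       atomic_fetch_and(&entry->Lock, ~(uintptr_t)LQ_OWNER);   /* drop owner bit */
;
;       if (next == NULL) {
;           /* no visible waiter: release the lock if we are still the tail  */
;           if (atomic_compare_exchange_strong(lock, &self, 0))
;               return;
;           /* a new waiter is joining: wait for its link to become visible  */
;           while ((next = atomic_load(&entry->Next)) == NULL)
;               ;                                               /* YIELD here     */
;       }
;
;       atomic_fetch_xor(&next->Lock, LQ_OWNER | LQ_WAIT);      /* hand ownership */
;       atomic_store(&entry->Next, NULL);                       /* reset our link */
;   }
;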
        SUBTTL "Try to Acquire Queued SpinLock"
;++
;
; LOGICAL
; KeTryToAcquireQueuedSpinLock (
;     IN KSPIN_LOCK_QUEUE_NUMBER Number,
;     OUT PKIRQL OldIrql
;     )
;
; LOGICAL
; KeTryToAcquireQueuedSpinLockRaiseToSynch (
;     IN KSPIN_LOCK_QUEUE_NUMBER Number,
;     OUT PKIRQL OldIrql
;     )
;
; Routine Description:
;
;     This function raises the current IRQL to DISPATCH/SYNCH level
;     and attempts to acquire the specified queued spinlock. If the
;     spinlock is already owned by another thread, IRQL is restored
;     to its previous value and FALSE is returned.
;
; Arguments:
;
;     Number (ecx) - Supplies the queued spinlock number.
;     OldIrql (edx) - A pointer to the variable to receive the old
;     IRQL.
;
; Return Value:
;
;     TRUE if the lock was acquired, FALSE otherwise.
;     N.B. ZF is set if FALSE returned, clear otherwise.
;
;--
        align 16
cPublicFastCall KeTryToAcquireQueuedSpinLockRaiseToSynch,2
        cPublicFpo 0,0
        push APIC_SYNCH_VECTOR ; raise to SYNCH
        jmp short taqsl10 ; continue in common code
fstENDP KeTryToAcquireQueuedSpinLockRaiseToSynch
cPublicFastCall KeTryToAcquireQueuedSpinLock,2
        cPublicFpo 0,0
        push DPC_VECTOR ; raise to DPC level
; Attempt to get the lock with interrupts disabled, raising
; the priority in the interrupt controller only if acquisition
; is successful.
taqsl10:
ifndef NT_UP
        push edx ; save address of OldIrql
        pushfd ; save interrupt state
        cPublicFpo 0,3
; Get address of Lock Queue entry
        cli
        mov edx, PCR[PcPrcb] ; get address of PRCB
        lea edx, [edx+ecx*8].PbLockQueue ; get &PRCB->LockQueue[Number]
; Get address of the actual lock.
        mov ecx, [edx].LqLock
ifdef CAPKERN_SYNCH_POINTS
        push ecx ; lock address
        push 000010108h ; 1 Dword, Timestamp, Subcode = 8
        call _CAP_Log_NInt
        add esp, 8
endif
if DBG
        test ecx, LOCK_QUEUE_OWNER+LOCK_QUEUE_WAIT
        jnz short taqsl98 ; jif lock already held (or
; this proc already waiting).
endif
; quick test, get out if already taken
        cmp dword ptr [ecx], 0 ; check if already taken
        jnz short taqsl60 ; jif already taken
        xor eax, eax ; comparison value (not locked)
; Store the Lock Queue entry address in the lock ONLY if the
; current lock value is 0.
        lock cmpxchg [ecx], edx
        jnz short taqsl60
; Lock has been acquired.
; note: the actual lock address will be dword aligned, we use
; the bottom two bits as indicators, bit 0 is LOCK_QUEUE_WAIT,
; bit 1 is LOCK_QUEUE_OWNER.
        or ecx, LOCK_QUEUE_OWNER ; mark self as lock owner
        mov [edx].LqLock, ecx
        mov eax, [esp+8] ; get new IRQL
        mov edx, [esp+4] ; get addr to save OldIrql
else
        mov eax, [esp] ; get new IRQL
endif
; Raise IRQL and return success.
; Get old priority (vector) from Local APIC's Task Priority
; Register and set the new priority.
        mov ecx, dword ptr APIC[LU_TPR] ; (ecx) = Old Priority
        mov dword ptr APIC[LU_TPR], eax ; Set New Priority
ifndef NT_UP
        popfd ; restore interrupt state
        add esp, 8 ; free locals
else
        add esp, 4 ; free local
endif
        cPublicFpo 0,0
        shr ecx, 4
        movzx eax, _HalpVectorToIRQL[ecx] ; (al) = OldIrql
        mov [edx], al ; save OldIrql
        xor eax, eax ; return TRUE
        or eax, 1
        fstRET KeTryToAcquireQueuedSpinLock
ifndef NT_UP
taqsl60:
; The lock is already held by another processor. Indicate
; failure to the caller.
        popfd ; restore interrupt state
        add esp, 8 ; free locals
        xor eax, eax ; return FALSE
        fstRET KeTryToAcquireQueuedSpinLock
if DBG
        cPublicFpo 0,2
taqsl98: stdCall _KeBugCheckEx,<SPIN_LOCK_ALREADY_OWNED,ecx,edx,0,0>
        int 3 ; so stacktrace works
endif
endif
fstENDP KeTryToAcquireQueuedSpinLock
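;
; The try form does the queue insertion with interrupts disabled and touches
; the TPR only once the lock has actually been acquired, so a failed attempt
; never changes IRQL. In outline, hedged, with the same hypothetical helpers
; as the sketches above (the real routine also brackets this window with
; pushfd/cli/popfd):
;
;   int try_acquire_numbered_queued_lock(unsigned number, uint32_t new_vector,
;                                        unsigned char *old_irql)
;   {
;       LOCK_QUEUE_SKETCH *entry = &current_prcb()->LockQueue[number];
;       atomic_uintptr_t  *lock  = (atomic_uintptr_t *)atomic_load(&entry->Lock);
;       uintptr_t          free_ = 0;
;       uint32_t           old_vector;
;
;       if (atomic_load(lock) != 0 ||                   /* quick test, then CAS  */
;           !atomic_compare_exchange_strong(lock, &free_, (uintptr_t)entry))
;           return 0;                                   /* already taken: FALSE  */
;
;       atomic_fetch_or(&entry->Lock, LQ_OWNER);        /* mark ourselves owner  */
;       old_vector = read_tpr();                        /* raise only on success */
;       write_tpr(new_vector);
;       *old_irql  = vector_to_irql[old_vector >> 4];
;       return 1;                                       /* TRUE                  */
;   }
;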
_TEXT ends
        end