Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1230 lines
33 KiB

  1. // TITLE("Spin Locks")
  2. //++
  3. //
  4. // Copyright (c) 1990 Microsoft Corporation
  5. // Copyright (c) 1992 Digital Equipment Corporation
  6. //
  7. // Module Name:
  8. //
  9. // spinlock.s
  10. //
  11. // Abstract:
  12. //
  13. // This module implements the routines for acquiring and releasing
  14. // spin locks.
  15. //
  16. // Author:
  17. //
  18. // David N. Cutler (davec) 23-Mar-1990
  19. // Joe Notarangelo 06-Apr-1992
  20. //
  21. // Environment:
  22. //
  23. // Kernel mode only.
  24. //
  25. // Revision History:
  26. //
  27. //--
  28. #include "ksalpha.h"
  29. //++
  30. //
  31. // VOID
  32. // KeInitializeSpinLock (
  33. // IN PKSPIN_LOCK SpinLock
  34. // )
  35. //
  36. // Routine Description:
  37. //
  38. // This function initializes an executive spin lock.
  39. //
  40. // Arguments:
  41. //
  42. // SpinLock (a0) - Supplies a pointer to the executive spin lock.
  43. //
  44. // Return Value:
  45. //
  46. // None.
  47. //
  48. //--
  49. LEAF_ENTRY( KeInitializeSpinLock )
  50. STP zero, 0(a0) // set spin lock not owned (zero = free)
  51. ret zero, (ra) // return
  52. .end KeInitializeSpinLock
  53. //++
  54. //
  55. // VOID
  56. // KeAcquireSpinLock (
  57. // IN PKSPIN_LOCK SpinLock
  58. // OUT PKIRQL OldIrql
  59. // )
  60. //
  61. // Routine Description:
  62. //
  63. // This function raises the current IRQL to DISPATCH_LEVEL and acquires
  64. // the specified executive spinlock.
  65. //
  66. // Arguments:
  67. //
  68. // SpinLock (a0) - Supplies a pointer to an executive spinlock.
  69. //
  70. // OldIrql (a1) - Supplies a pointer to a variable that receives the
  71. // previous IRQL value.
  72. //
  73. // Return Value:
  74. //
  75. // None.
  76. //
  77. //--
  78. LEAF_ENTRY(KeAcquireSpinLock)
  79. //
  80. // Raise IRQL to DISPATCH_LEVEL and acquire the specified spinlock.
  81. //
  82. // N.B. The raise IRQL code is duplicated here to avoid any extra overhead
  83. // since this is such a common operation.
  84. //
  85. // N.B. The previous IRQL must not be stored until the lock is owned.
  86. //
  87. // N.B. The longword surrounding the previous IRQL must not be read
  88. // until the lock is owned.
  89. //
  90. bis a0, zero, t5 // t5 = address of spin lock
  91. ldil a0, DISPATCH_LEVEL // set new IRQL
  92. bis a1, zero, t0 // t0 = a1, a1 may be destroyed
  93. SWAP_IRQL // swap irql, on return v0 = old irql
  94. //
  95. // Acquire the specified spinlock.
  96. //
  97. // N.B. code below branches forward if spinlock fails intentionally
  98. // because branch forwards are predicted to miss
  99. //
  100. #if !defined(NT_UP)
  101. 10: LDP_L t3, 0(t5) // get current lock value
  102. bis t5, zero, t4 // set ownership value
  103. bne t3, 15f // if ne => lock owned
  104. STP_C t4, 0(t5) // set lock owned
  105. beq t4, 15f // if eq => stx_c failed
  106. mb // synchronize memory access
  107. #endif
  108. //
  109. // Save the old Irql at the address saved by the caller.
  110. // Insure that the old Irql is updated with longword granularity.
  111. //
  112. ldq_u t1, 0(t0) // read quadword surrounding KIRQL
  113. bic t0, 3, t2 // get address of containing longword
  114. mskbl t1, t0, t1 // clear KIRQL byte in quadword
  115. insbl v0, t0, v0 // get new KIRQL to correct byte
  116. bis t1, v0, t1 // merge KIRQL into quadword
  117. extll t1, t2, t1 // get longword containing KIRQL
  118. stl t1, 0(t2) // store containing longword
  119. ret zero, (ra) // return
  120. //
  121. // Attempt to acquire spinlock failed.
  122. //
  123. #if !defined(NT_UP)
  124. 15: LDP t3, 0(t5) // get current lock value
  125. beq t3, 10b // retry acquire lock if unowned
  126. br zero, 15b // loop in cache until lock free
  127. #endif
  128. .end KeAcquireSpinLock
  129. SBTTL("Acquire SpinLock and Raise to Synch")
  130. //++
  131. //
  132. // KIRQL
  133. // KeAcquireSpinLockRaiseToSynch (
  134. // IN PKSPIN_LOCK SpinLock
  135. // )
  136. //
  137. // Routine Description:
  138. //
  139. // This function raises the current IRQL to synchronization level and
  140. // acquires the specified spinlock.
  141. //
  142. // Arguments:
  143. //
  144. // SpinLock (a0) - Supplies a pointer to the spinlock that is to be
  145. // acquired.
  146. //
  147. // Return Value:
  148. //
  149. // The previous IRQL is returned as the function value.
  150. //
  151. //--
  152. LEAF_ENTRY(KeAcquireSpinLockRaiseToSynch)
  153. #if !defined(NT_UP)
  154. bis a0, zero, t5 // save spinlock address
  155. ldl a0, KiSynchIrql // get synch level IRQL
  156. //
  157. // Raise IRQL and attempt to acquire the specified spinlock.
  158. //
  159. 10: SWAP_IRQL // raise IRQL to synch level
  160. LDP_L t3, 0(t5) // get current lock value
  161. bis t5, zero, t4 // set ownership value
  162. bne t3, 25f // if ne, lock owned
  163. STP_C t4, 0(t5) // set lock owned
  164. beq t4, 25f // if eq, conditional store failed
  165. mb // synchronize subsequent reads
  166. ret zero, (ra)
  167. //
  168. // Spinlock is owned, lower IRQL and spin in cache until it looks free.
  169. //
  170. 25: bis v0, zero, a0 // get previous IRQL value
  171. SWAP_IRQL // lower IRQL
  172. bis v0, zero, a0 // save synch level IRQL
  173. 26: LDP t3, 0(t5) // get current lock value
  174. beq t3, 10b // retry acquire if unowned
  175. br zero, 26b // loop in cache until free
  176. #else
  177. ldl a0, KiSynchIrql // get synch level IRQL
  178. SWAP_IRQL // raise IRQL to synch level
  179. ret zero, (ra) // return
  180. .end KeAcquireSpinLockRaiseToSynch
  181. #endif
  182. //++
  183. //
  184. // KIRQL
  185. // KeAcquireSpinLockRaiseToDpc (
  186. // IN PKSPIN_LOCK SpinLock
  187. // )
  188. //
  189. // Routine Description:
  190. //
  191. // This function raises the current IRQL to dispatcher level and acquires
  192. // the specified spinlock.
  193. //
  194. // Arguments:
  195. //
  196. // SpinLock (a0) - Supplies a pointer to the spinlock that is to be
  197. // acquired.
  198. //
  199. // Return Value:
  200. //
  201. // The previous IRQL is returned as the function value.
  202. //
  203. //--
  204. #if !defined(NT_UP)
  205. ALTERNATE_ENTRY(KeAcquireSpinLockRaiseToDpc)
  206. bis a0, zero, t5 // save spinlock address
  207. ldil a0, DISPATCH_LEVEL // set IRQL level
  208. br 10b // finish in common code at label 10 above
  209. .end KeAcquireSpinLockRaiseToSynch
  210. #else
  211. LEAF_ENTRY(KeAcquireSpinLockRaiseToDpc)
  212. ldil a0, DISPATCH_LEVEL // set new IRQL
  213. SWAP_IRQL // old irql in v0
  214. ret zero, (ra) // return
  215. .end KeAcquireSpinLockRaiseToDpc
  216. #endif
  217. //++
  218. //
  219. // VOID
  220. // KeReleaseSpinLock (
  221. // IN PKSPIN_LOCK SpinLock
  222. // IN KIRQL OldIrql
  223. // )
  224. //
  225. // Routine Description:
  226. //
  227. // This function releases an executive spin lock and lowers the IRQL
  228. // to its previous value.
  229. //
  230. // Arguments:
  231. //
  232. // SpinLock (a0) - Supplies a pointer to an executive spin lock.
  233. //
  234. // OldIrql (a1) - Supplies the previous IRQL value.
  235. //
  236. // Return Value:
  237. //
  238. // None.
  239. //
  240. //--
  241. LEAF_ENTRY(KeReleaseSpinLock)
  242. //
  243. // Release the specified spinlock.
  244. //
  245. #if !defined(NT_UP)
  246. mb // synchronize memory access
  247. STP zero, 0(a0) // set spin lock not owned
  248. #endif
  249. //
  250. // Lower the IRQL to the specified level.
  251. //
  252. // N.B. The lower IRQL code is duplicated here to avoid any extra overhead
  253. // since this is such a common operation.
  254. //
  255. 10: bis a1, zero, a0 // a0 = new irql
  256. SWAP_IRQL // change to new irql
  257. ret zero, (ra) // return
  258. .end KeReleaseSpinLock
  259. //++
  260. //
  261. // BOOLEAN
  262. // KeTryToAcquireSpinLock (
  263. // IN PKSPIN_LOCK SpinLock
  264. // OUT PKIRQL OldIrql
  265. // )
  266. //
  267. // Routine Description:
  268. //
  269. // This function raises the current IRQL to DISPATCH_LEVEL and attempts
  270. // to acquire the specified executive spinlock. If the spinlock can be
  271. // acquired, then TRUE is returned. Otherwise, the IRQL is restored to
  272. // its previous value and FALSE is returned.
  273. //
  274. // Arguments:
  275. //
  276. // SpinLock (a0) - Supplies a pointer to an executive spinlock.
  277. //
  278. // OldIrql (a1) - Supplies a pointer to a variable that receives the
  279. // previous IRQL value.
  280. //
  281. // Return Value:
  282. //
  283. // If the spin lock is acquired, then a value of TRUE is returned.
  284. // Otherwise, a value of FALSE is returned.
  285. //
  286. //--
  287. LEAF_ENTRY(KeTryToAcquireSpinLock)
  288. //
  289. // Raise IRQL to DISPATCH_LEVEL and try to acquire the specified spinlock.
  290. //
  291. // N.B. The raise IRQL code is duplicated here to avoid any extra overhead
  292. // since this is such a common operation.
  293. //
  294. bis a0, zero, t5 // t5 = address of spin lock
  295. ldil a0, DISPATCH_LEVEL // new irql
  296. bis a1, zero, t11 // t11 = a1, a1 may be clobbered
  297. SWAP_IRQL // a0 = new, on return v0 = old irql
  298. //
  299. // Try to acquire the specified spinlock.
  300. //
  301. // N.B. A noninterlocked test is done before the interlocked attempt. This
  302. // allows spinning without interlocked cycles.
  303. //
  304. #if !defined(NT_UP)
  305. LDP t0, 0(t5) // get current lock value
  306. bne t0, 20f // if ne, lock owned
  307. 10: LDP_L t0, 0(t5) // get current lock value
  308. bis t5, zero, t3 // t3 = ownership value
  309. bne t0, 20f // if ne, spin lock owned
  310. STP_C t3, 0(t5) // set lock owned
  311. beq t3, 15f // if eq, store conditional failure
  312. mb // synchronize memory access
  313. #endif
  314. //
  315. // The attempt to acquire the specified spin lock succeeded.
  316. //
  317. // Save the old Irql at the address saved by the caller.
  318. // Insure that the old Irql is updated with longword granularity.
  319. //
  320. ldq_u t1, 0(t11) // read quadword containing KIRQL
  321. bic t11, 3, t2 // get address of containing longword
  322. mskbl t1, t11, t1 // clear byte position of KIRQL
  323. bis v0, zero, a0 // save old irql
  324. insbl v0, t11, v0 // get KIRQL to correct byte
  325. bis t1, v0, t1 // merge KIRQL into quadword
  326. extll t1, t2, t1 // extract containing longword
  327. stl t1, 0(t2) // store containing longword
  328. ldil v0, TRUE // set return value
  329. ret zero, (ra) // return
  330. //
  331. // The attempt to acquire the specified spin lock failed. Lower IRQL to its
  332. // previous value and return FALSE.
  333. //
  334. // N.B. The lower IRQL code is duplicated here to avoid any extra overhead
  335. // since this is such a common operation.
  336. //
  337. #if !defined(NT_UP)
  338. 20: bis v0, zero, a0 // set old IRQL value
  339. SWAP_IRQL // change back to old irql(a0)
  340. ldil v0, FALSE // set return to failed
  341. ret zero, (ra) // return
  342. //
  343. // Attempt to acquire spinlock failed.
  344. //
  345. 15: br zero, 10b // retry spinlock
  346. #endif
  347. .end KeTryToAcquireSpinLock
  348. //++
  349. //
  350. // VOID
  351. // KiAcquireSpinLock (
  352. // IN PKSPIN_LOCK SpinLock
  353. // )
  354. //
  355. // Routine Description:
  356. //
  357. // This function acquires a kernel spin lock.
  358. //
  359. // N.B. This function assumes that the current IRQL is set properly.
  360. //
  361. // Arguments:
  362. //
  363. // SpinLock (a0) - Supplies a pointer to a kernel spin lock.
  364. //
  365. // Return Value:
  366. //
  367. // None.
  368. //
  369. //--
  370. LEAF_ENTRY(KiAcquireSpinLock)
  371. ALTERNATE_ENTRY(KeAcquireSpinLockAtDpcLevel)
  372. #if !defined(NT_UP)
  373. GET_CURRENT_THREAD // v0 = current thread address
  374. 10: LDP_L t2, 0(a0) // get current lock value
  375. bis v0, zero, t3 // set ownership value
  376. bne t2, 15f // if ne, spin lock owned
  377. STP_C t3, 0(a0) // set spin lock owned
  378. beq t3, 15f // if eq, store conditional failure
  379. mb // synchronize memory access
  380. ret zero, (ra) // return
  381. //
  382. // Attempt to acquire spinlock failed.
  383. //
  384. 15: LDP t2, 0(a0) // get current lock value
  385. beq t2, 10b // retry acquire lock if unowned
  386. br zero, 15b // loop in cache until lock free
  387. #else
  388. ret zero, (ra) // return
  389. #endif
  390. .end KiAcquireSpinLock
  391. //++
  392. //
  393. // VOID
  394. // KiReleaseSpinLock (
  395. // IN PKSPIN_LOCK SpinLock
  396. // )
  397. //
  398. // Routine Description:
  399. //
  400. // This function releases a kernel spin lock.
  401. //
  402. // N.B. This function assumes that the current IRQL is set properly.
  403. //
  404. // Arguments:
  405. //
  406. // SpinLock (a0) - Supplies a pointer to an executive spin lock.
  407. //
  408. // Return Value:
  409. //
  410. // None.
  411. //
  412. //--
  413. LEAF_ENTRY(KiReleaseSpinLock)
  414. ALTERNATE_ENTRY(KeReleaseSpinLockFromDpcLevel)
  415. #if !defined(NT_UP)
  416. mb // synchronize memory access
  417. STP zero, 0(a0) // set spin lock not owned
  418. #endif
  419. ret zero, (ra) // return
  420. .end KiReleaseSpinLock
  421. //++
  422. //
  423. // BOOLEAN
  424. // KiTryToAcquireSpinLock (
  425. // IN PKSPIN_LOCK SpinLock
  426. // )
  427. //
  428. // Routine Description:
  429. //
  430. // This function attempts to acquire the specified kernel spinlock. If
  431. // the spinlock can be acquired, then TRUE is returned. Otherwise, FALSE
  432. // is returned.
  433. //
  434. // N.B. This function assumes that the current IRQL is set properly.
  435. //
  436. // Arguments:
  437. //
  438. // SpinLock (a0) - Supplies a pointer to a kernel spin lock.
  439. //
  440. // Return Value:
  441. //
  442. // If the spin lock is acquired, then a value of TRUE is returned.
  443. // Otherwise, a value of FALSE is returned.
  444. //
  445. //--
  446. LEAF_ENTRY(KiTryToAcquireSpinLock)
  447. #if !defined(NT_UP)
  448. GET_CURRENT_THREAD // v0 = current thread address
  449. 10: LDP_L t2, 0(a0) // get current lock value
  450. bis v0, zero, t3 // set ownership value
  451. bne t2, 20f // if ne, spin lock owned
  452. STP_C t3, 0(a0) // set spin lock owned
  453. beq t3, 15f // if eq, conditional store failed
  454. mb // synchronize memory access
  455. ldil v0, TRUE // set success return value
  456. ret zero, (ra) // return
  457. 20: ldil v0, FALSE // set failure return value
  458. ret zero, (ra) // return
  459. //
  460. // Attempt to acquire spinlock failed.
  461. //
  462. 15: br zero, 10b // retry
  463. #else
  464. ldil v0, TRUE // set success return value
  465. ret zero, (ra) // return
  466. #endif
  467. .end KiTryToAcquireSpinLock
  468. //++
  469. //
  470. // BOOLEAN
  471. // KeTestSpinLock (
  472. // IN PKSPIN_LOCK SpinLock
  473. // )
  474. //
  475. // Routine Description:
  476. //
  477. // This function tests a kernel spin lock. If the spinlock is
  478. // busy, FALSE is returned. If not, TRUE is returned. The spinlock
  479. // is never acquired. This is provided to allow code to spin at low
  480. // IRQL, only raising the IRQL when there is a reasonable hope of
  481. // acquiring the lock.
  482. //
  483. // Arguments:
  484. //
  485. // SpinLock (a0) - Supplies a pointer to a kernel spin lock.
  486. //
  487. // Return Value:
  488. //
  489. // TRUE - Spinlock appears available
  490. // FALSE - SpinLock is busy
  491. //--
  492. #if !defined(NT_UP)
  493. LEAF_ENTRY(KeTestSpinLock)
  494. LDP t0, (a0) // get current spinlock value
  495. ldil v0, 1 // assume TRUE (lock appears free)
  496. cmovne t0, zero, v0 // if t0 != 0, return FALSE
  497. ret zero, (ra) // return
  498. .end KeTestSpinLock
  499. #endif
  500. SBTTL("Acquire Queued SpinLock and Raise IRQL")
  501. //++
  502. //
  503. // VOID
  504. // KeAcquireInStackQueuedSpinLock (
  505. // IN PKSPIN_LOCK SpinLock,
  506. // IN PKLOCK_QUEUE_HANDLE LockHandle
  507. // )
  508. //
  509. // VOID
  510. // KeAcquireInStackQueuedSpinLockRaiseToSynch (
  511. // IN PKSPIN_LOCK SpinLock,
  512. // IN PKLOCK_QUEUE_HANDLE LockHandle
  513. // )
  514. //
  515. // Routine Description:
  516. //
  517. // This function raises the current IRQL to either synchronization or
  518. // dispatch level and acquires the specified queued spinlock.
  519. //
  520. // Arguments:
  521. //
  522. // SpinLock (a0) - Supplies a pointer to a spin lock.
  523. //
  524. // LockHandle (a1) - Supplies a pointer to a lock handle.
  525. //
  526. // Return Value:
  527. //
  528. // None.
  529. //
  530. //--
  531. LEAF_ENTRY(KeAcquireInStackQueuedSpinLock)
  532. ldil v0, DISPATCH_LEVEL // get dispatch level IRQL
  533. br zero, 5f // join common code below
  534. ALTERNATE_ENTRY(KeAcquireInStackQueuedSpinLockRaiseToSynch)
  535. ldl v0, KiSynchIrql // get synch level IRQL
  536. 5: mov a1, t5 // save address of lock handle
  537. #if !defined(NT_UP)
  538. STP zero, LqhNext(t5) // set next link to NULL
  539. STP a0, LqhLock(t5) // set spin lock address
  540. mb // synchronize memory access
  541. #endif
  542. mov v0, a0 // set new Irql value
  543. SWAP_IRQL // raise Irql to specified level
  544. stl v0, LqhOldIrql(t5) // save old IRQL in lock handle
  545. #if !defined(NT_UP)
  546. ADDP t5, LqhNext, t5 // set address of lock queue
  547. br zero, KxqAcquireQueuedSpinLock // finish in common code
  548. #else
  549. ret zero, (ra) // return
  550. #endif
  551. .end KeAcquireInStackQueuedSpinLock
  552. SBTTL("Acquire Queued SpinLock and Raise IRQL")
  553. //++
  554. //
  555. // KIRQL
  556. // KeAcquireQueuedSpinLock (
  557. // IN KSPIN_LOCK_QUEUE_NUMBER Number
  558. // )
  559. //
  560. // KIRQL
  561. // KeAcquireQueuedSpinLockRaiseToSynch (
  562. // IN KSPIN_LOCK_QUEUE_NUMBER Number
  563. // )
  564. //
  565. // Routine Description:
  566. //
  567. // This function raises the current IRQL to synchronization level and
  568. // acquires the specified queued spinlock.
  569. //
  570. // Arguments:
  571. //
  572. // Number (a0) - Supplies the queued spinlock number.
  573. //
  574. // Return Value:
  575. //
  576. // The previous IRQL is returned as the function value.
  577. //
  578. //--
  579. LEAF_ENTRY(KeAcquireQueuedSpinLock)
  580. addq a0, a0, t5 // account for two addresses
  581. ldil a0, DISPATCH_LEVEL // get dispatch level IRQL
  582. br zero, 5f // join common code below
  583. ALTERNATE_ENTRY(KeAcquireQueuedSpinLockRaiseToSynch)
  584. addq a0, a0, t5 // account for two addresses
  585. ldl a0, KiSynchIrql // get synch level IRQL
  586. 5: SWAP_IRQL // raise Irql to specified level
  587. #if !defined(NT_UP)
  588. bis v0, zero, t0 // save previous Irql
  589. GET_PROCESSOR_CONTROL_BLOCK_BASE // get current prcb address
  590. SPADDP t5, v0, t5 // compute per processor lock queue
  591. lda t5, PbLockQueue(t5) // entry address
  592. ALTERNATE_ENTRY(KxqAcquireQueuedSpinLock)
  593. LDP t4, LqLock(t5) // get associated spinlock address
  594. 10: LDP_L t3, 0(t4) // get current lock value
  595. bis t5, zero, t2 // set lock ownership value
  596. STP_C t2, 0(t4) // store lock queue entry address
  597. beq t2, 50f // if eq, conditional store failed
  598. bne t3, 30f // if ne, lock already owned
  599. bis t4, LOCK_QUEUE_OWNER, t4 // set lock owner bit in lock entry
  600. STP t4, LqLock(t5) // store lock address with owner bit
  601. mb // synchronize subsequent reads
  602. 20: bis t0, zero, v0 // set old IRQL value
  603. #endif
  604. ret zero, (ra) // return
  605. //
  606. // The lock is owned by another processor. Set the lock bit in the current
  607. // processor lock queue entry, set the next link in the previous lock queue
  608. // entry, and spin on the current processor's lock bit.
  609. //
  610. #if !defined(NT_UP)
  611. 30: bis t4, LOCK_QUEUE_WAIT, t4 // set lock wait bit in lock entry
  612. STP t4, LqLock(t5) // store lock address with wait bit
  613. mb // synchronize memory access
  614. STP t5, LqNext(t3) // set address of lock queue entry
  615. 40: LDP t4, LqLock(t5) // get lock address and lock wait bit
  616. blbc t4, 20b // if lbc (lock wait), ownership granted
  617. br zero, 40b // try again
  618. //
  619. // Conditional store failed.
  620. //
  621. 50: br zero, 10b // try again
  622. #endif
  623. .end KeAcquireQueuedSpinLock
  624. SBTTL("Release Queued SpinLock and Lower IRQL")
  625. //++
  626. //
  627. // VOID
  628. // KeReleaseInStackQueuedSpinLock (
  629. // IN PKLOCK_QUEUE_HANDLE LockHandle
  630. // )
  631. //
  632. // Routine Description:
  633. //
  634. // This function releases a queued spinlock and lowers the IRQL to its
  635. // previous value.
  636. //
  637. // Arguments:
  638. //
  639. // LockHandle (a0) - Supplies a pointer to a lock handle.
  640. //
  641. // Return Value:
  642. //
  643. // None.
  644. //
  645. //--
  646. LEAF_ENTRY(KeReleaseInStackQueuedSpinLock)
  647. ldl t0, LqhOldIrql(a0) // get old IRQL from lock handle
  648. ADDP a0, LqhNext, t5 // set address of lock queue
  649. br zero, KxqReleaseQueuedSpinLock // finish in common code
  650. .end KeReleaseInStackQueuedSpinLock
  651. SBTTL("Release Queued SpinLock and Lower IRQL")
  652. //++
  653. //
  654. // VOID
  655. // KeReleaseQueuedSpinLock (
  656. // IN KSPIN_LOCK_QUEUE_NUMBER Number,
  657. // IN KIRQL OldIrql
  658. // )
  659. //
  660. // Routine Description:
  661. //
  662. // This function releases a queued spinlock and lowers the IRQL to its
  663. // previous value.
  664. //
  665. // Arguments:
  666. //
  667. // Number (a0) - Supplies the queued spinlock number.
  668. //
  669. // OldIrql (a1) - Supplies the previous IRQL value.
  670. //
  671. // Return Value:
  672. //
  673. // None.
  674. //
  675. //--
  676. LEAF_ENTRY(KeReleaseQueuedSpinLock)
  677. bis a1, zero, t0 // save old IRQL
  678. #if !defined(NT_UP)
  679. addq a0, a0, t5 // account for two addresses
  680. GET_PROCESSOR_CONTROL_BLOCK_BASE // get current prcb address
  681. SPADDP t5, v0, t5 // compute per processor lock queue
  682. lda t5, PbLockQueue(t5) // entry address
  683. #endif
  684. ALTERNATE_ENTRY(KxqReleaseQueuedSpinLock)
  685. #if !defined(NT_UP)
  686. mb // synchronize memory access
  687. LDP t4, LqLock(t5) // get associated spin lock address
  688. bic t4, LOCK_QUEUE_OWNER, t4 // clear lock owner bit in lock entry
  689. 10: LDP_L t3, 0(t4) // get current lock ownership value
  690. xor t3, t5, t2 // set lock ownership value
  691. bne t2, 20f // if ne, another processor waiting
  692. STP_C t2, 0(t4) // set new ownership value
  693. beq t2, 10b // if eq, conditional store failed
  694. STP t4, LqLock(t5) // store associated spin lock address
  695. mb // synchronize memory access
  696. #endif
  697. bis t0, zero, a0 // set old IRQL value
  698. SWAP_IRQL // lower IRQL to previous level
  699. ret zero, (ra) // return
  700. //
  701. // Another processor has inserted its lock queue entry in the lock queue,
  702. // but has not yet written its lock queue entry address in the current
  703. // processor's next link. Spin until the lock queue address is written.
  704. //
  705. #if !defined(NT_UP)
  706. 20: LDP t3, LqNext(t5) // get next lock queue entry address
  707. beq t3, 20b // if eq, address not written yet
  708. //
  709. // Grant the next processor in the lock queue ownership of the spinlock.
  710. //
  711. LDP t2, LqLock(t3) // get spinlock address and wait bit
  712. STP zero, LqNext(t5) // clear next lock queue entry address
  713. STP t4, LqLock(t5) // store associated spin lock address
  714. bic t2, LOCK_QUEUE_WAIT, t2 // clear lock wait bit in lock entry
  715. bis t2, LOCK_QUEUE_OWNER, t2 // set lock owner bit in lock entry
  716. STP t2, LqLock(t3) // grant ownership to next waiter
  717. mb // synchronize memory access
  718. bis t0, zero, a0 // set old IRQL value
  719. SWAP_IRQL // lower IRQL to previous level
  720. ret zero, (ra) // return
  721. #endif
  722. .end KeReleaseQueuedSpinLock
  723. SBTTL("Try to Acquire Queued SpinLock and Raise IRQL")
  724. //++
  725. //
  726. // LOGICAL
  727. // KeTryToAcquireQueuedSpinLock (
  728. // IN KSPIN_LOCK_QUEUE_NUMBER Number
  729. // OUT PKIRQL OldIrql
  730. // )
  731. //
  732. // LOGICAL
  733. // KeTryToAcquireQueuedSpinLockRaiseToSynch (
  734. // IN KSPIN_LOCK_QUEUE_NUMBER Number
  735. // OUT PKIRQL OldIrql
  736. // )
  737. //
  738. // Routine Description:
  739. //
  740. // This function raises the current IRQL to synchronization level and
  741. // attempts to acquire the specified queued spinlock. If the spinlock
  742. // cannot be acquired, then IRQL is restored and FALSE is returned as
  743. // the function value. Otherwise, TRUE is returned as the function
  744. // value.
  745. //
  746. // Arguments:
  747. //
  748. // Number (a0) - Supplies the queued spinlock number.
  749. //
  750. // OldIrql (a1) - Supplies a pointer to a variable that receives the
  751. // previous IRQL value.
  752. //
  753. // Return Value:
  754. //
  755. // If the spin lock is acquired, then a value of TRUE is returned.
  756. // Otherwise, a value of FALSE is returned.
  757. //
  758. //--
  759. LEAF_ENTRY(KeTryToAcquireQueuedSpinLock)
  760. addq a0, a0, t5 // account for two addresses
  761. ldil a0, DISPATCH_LEVEL // get dispatch level irql
  762. br zero, 5f // join common code below
  763. ALTERNATE_ENTRY(KeTryToAcquireQueuedSpinLockRaiseToSynch)
  764. addq a0, a0, t5 // account for two addresses
  765. ldl a0, KiSynchIrql // get synch level irql
  766. 5: bis a1, zero, t0 // save previous irql address
  767. SWAP_IRQL // raise irql to specified level
  768. bis v0, zero, t1 // save previous irql
  769. #if !defined(NT_UP)
  770. GET_PROCESSOR_CONTROL_BLOCK_BASE // get current prcb address
  771. SPADDP t5, v0, t5 // compute per processor lock queue
  772. lda t5, PbLockQueue(t5) // entry address
  773. LDP t4, LqLock(t5) // get associated spinlock address
  774. //
  775. // Try to acquire the specified spinlock.
  776. //
  777. // N.B. A noninterlocked test is done before the interlocked attempt. This
  778. // allows spinning without interlocked cycles.
  779. //
  780. LDP t3, 0(t4) // get current lock value
  781. bne t3, 20f // if ne, lock owned
  782. 10: LDP_L t3, 0(t4) // get current lock value
  783. bis t5, zero, t2 // set lock ownership value
  784. bne t3, 20f // if ne, spin lock owned
  785. STP_C t2, 0(t4) // set lock owned
  786. beq t2, 30f // if eq, store conditional failure
  787. //
  788. // The attempt to acquire the specified spin lock succeeded. Set the spin
  789. // lock owner bit and save the old irql at the address specified by the
  790. // caller. Insure that the old Irql is updated with longword granularity.
  791. //
  792. bis t4, LOCK_QUEUE_OWNER, t4 // set lock owner bit in lock entry
  793. STP t4, LqLock(t5) // store lock address with owner bit
  794. mb // synchronize memory access
  795. #endif
  796. ldq_u t2, 0(t0) // get quadword containing irql
  797. bic t0, 3, t3 // get containing longword address
  798. mskbl t2, t0, t2 // clear byte position of Irql
  799. insbl t1, t0, t1 // shift irql to correct byte
  800. bis t2, t1, t2 // merge irql into quadword
  801. extll t2, t3, t2 // extract containing longword
  802. stl t2, 0(t3) // store containing longword
  803. ldil v0, TRUE // set return value
  804. ret zero, (ra) // return
  805. //
  806. // The attempt to acquire the specified spin lock failed. Lower IRQL to its
  807. // previous value and return FALSE.
  808. //
  809. #if !defined(NT_UP)
  810. 20: bis t1, zero, a0 // set old irql value
  811. SWAP_IRQL // lower irql to previous level
  812. ldil v0, FALSE // set return value
  813. ret zero, (ra) // return
  814. //
  815. // Attempt to acquire spinlock failed.
  816. //
  817. 30: br zero, 10b // retry spinlock
  818. #endif
  819. .end KeTryToAcquireQueuedSpinLock
  820. SBTTL("Acquire Queued SpinLock at Current IRQL")
  821. //++
  822. // VOID
  823. // KeAcquireInStackQueuedSpinLockAtDpcLevel (
  824. // IN PKSPIN_LOCK SpinLock,
  825. // IN PKLOCK_QUEUE_HANDLE LockHandle
  826. // )
  827. //
  828. // Routine Description:
  829. //
  830. // This function acquires the specified queued spinlock at the current
  831. // IRQL.
  832. //
  833. // Arguments:
  834. //
  835. // SpinLock (a0) - Supplies the address of a spin lock.
  836. //
  837. // LockHandle (a1) - Supplies the address of an in stack lock handle.
  838. //
  839. // Return Value:
  840. //
  841. // None.
  842. //
  843. //--
  844. LEAF_ENTRY(KeAcquireInStackQueuedSpinLockAtDpcLevel)
  845. #if !defined(NT_UP)
  846. STP zero, LqhNext(a1) // set next link to NULL
  847. STP a0, LqhLock(a1) // set spin lock address
  848. #endif
  849. ADDP a1, LqhNext, a0 // compute address of lock queue
  850. //++
  851. //
  852. // VOID
  853. // KeAcquireQueuedSpinLockAtDpcLevel (
  854. // IN PKSPIN_LOCK_QUEUE LockQueue
  855. // )
  856. //
  857. // Routine Description:
  858. //
  859. // This function acquires the specified queued spinlock at the current
  860. // IRQL.
  861. //
  862. // Arguments:
  863. //
  864. // LockQueue (a0) - Supplies the address of the lock queue entry.
  865. //
  866. // Return Value:
  867. //
  868. // None.
  869. //
  870. //--
  871. ALTERNATE_ENTRY(KeAcquireQueuedSpinLockAtDpcLevel)
  872. #if !defined(NT_UP)
  873. mb // synchronize memory access
  874. LDP t4, LqLock(a0) // get associated spinlock address
  875. 10: LDP_L t3, 0(t4) // get current lock value
  876. bis a0, zero, t2 // set lock ownership value
  877. STP_C t2, 0(t4) // store lock queue entry address
  878. beq t2, 50f // if eq, conditional store failed
  879. bne t3, 30f // if ne, lock already owned
  880. bis t4, LOCK_QUEUE_OWNER, t4 // set lock owner bit in lock entry
  881. STP t4, LqLock(a0) // store lock address with owner bit
  882. mb // synchronize subsequent reads
  883. #endif
  884. 20: ret zero, (ra) // return
  885. //
  886. // The lock is owned by another processor. Set the lock bit in the current
  887. // processor lock queue entry, set the next link in the previous lock queue
  888. // entry, and spin on the current processor's lock bit.
  889. //
  890. #if !defined(NT_UP)
  891. 30: bis t4, LOCK_QUEUE_WAIT, t4 // set lock wait bit in lock entry
  892. STP t4, LqLock(a0) // store lock address with wait bit
  893. mb // synchronize memory access
  894. STP a0, LqNext(t3) // set address of lock queue entry
  895. 40: LDP t4, LqLock(a0) // get lock address and lock wait bit
  896. blbc t4, 20b // if lbc (lock wait), ownership granted
  897. br zero, 40b // try again
  898. //
  899. // Conditional store failed.
  900. //
  901. 50: br zero, 10b // try again
  902. #endif
  903. .end KeAcquireInStackQueuedSpinLockAtDpcLevel
  904. SBTTL("Release Queued SpinLock at Current IRQL")
  905. //++
  906. //
  907. // VOID
  908. // KeReleaseInStackQueuedSpinLockFromDpcLevel (
  909. // IN PKLOCK_QUEUE_HANDLE LockHandle
  910. // )
  911. //
  912. // Routine Description:
  913. //
  914. // This function releases a queued spinlock and preserves the current
  915. // IRQL.
  916. //
  917. // Arguments:
  918. //
  919. // LockHandle (a0) - Supplies the address of a lock handle.
  920. //
  921. // Return Value:
  922. //
  923. // None.
  924. //
  925. //--
  926. LEAF_ENTRY(KeReleaseInStackQueuedSpinLockFromDpcLevel)
  927. ADDP a0, LqhNext, a0 // compute address of lock queue
  928. //++
  929. //
  930. // VOID
  931. // KeReleaseQueuedSpinLockFromDpcLevel (
  932. // IN PKSPIN_LOCK_QUEUE LockQueue
  933. // )
  934. //
  935. // Routine Description:
  936. //
  937. // This function releases a queued spinlock and preserves the current
  938. // IRQL.
  939. //
  940. // Arguments:
  941. //
  942. // LockQueue (a0) - Supplies the address of the lock queue entry.
  943. //
  944. // Return Value:
  945. //
  946. // None.
  947. //
  948. //--
  949. ALTERNATE_ENTRY(KeReleaseQueuedSpinLockFromDpcLevel)
  950. #if !defined(NT_UP)
  951. mb // synchronize memory access
  952. LDP t4, LqLock(a0) // get associated spin lock address
  953. bic t4, LOCK_QUEUE_OWNER, t4 // clear lock owner bit in lock entry
  954. 10: LDP_L t3, 0(t4) // get current lock ownership value
  955. xor t3, a0, t2 // set lock ownership value
  956. bne t2, 20f // if ne, another processor waiting
  957. STP_C t2, 0(t4) // set new ownership value
  958. beq t2, 10b // if eq, conditional store failed
  959. STP t4, LqLock(a0) // store associated spin lock address
  960. mb // synchronize memory access
  961. ret zero, (ra) // return
  962. //
  963. // Another processor has inserted its lock queue entry in the lock queue,
  964. // but has not yet written its lock queue entry address in the current
  965. // processor's next link. Spin until the lock queue address is written.
  966. //
  967. 20: LDP t3, LqNext(a0) // get next lock queue entry address
  968. beq t3, 20b // if eq, address not written yet
  969. //
  970. // Grant the next processor in the lock queue ownership of the spinlock.
  971. //
  972. LDP t2, LqLock(t3) // get spinlock address and lock bit
  973. STP zero, LqNext(a0) // clear next lock queue entry address
  974. STP t4, LqLock(a0) // store associated spin lock address
  975. bic t2, LOCK_QUEUE_WAIT, t2 // clear lock wait bit in lock entry
  976. bis t2, LOCK_QUEUE_OWNER, t2 // set lock owner bit in lock entry
  977. STP t2, LqLock(t3) // grant ownership to next waiter
  978. mb // synchronize memory access
  979. #endif
  980. ret zero, (ra) // return
  981. .end KeReleaseInStackQueuedSpinLockFromDpcLevel