Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1309 lines
29 KiB

  1. //++
  2. //
  3. // Module Name:
  4. //
  5. // spinlock.s
  6. //
  7. // Abstract:
  8. //
  9. // This module implements the routines for acquiring and releasing
  10. // spin locks.
  11. //
  12. // Author:
  13. //
  14. // William K. Cheung (wcheung) 29-Sep-1995
  15. //
  16. // Environment:
  17. //
  18. // Kernel mode only.
  19. //
  20. // Revision History:
  21. //
  22. // 31-Dec-1998 wc Updated to use xchg8
  23. //
  24. // 07-Jul-1997 bl Updated to EAS2.3
  25. //
  26. // 08-Feb-1996 Updated to EAS2.1
  27. //
  28. //--
#include "ksia64.h"

        .file   "spinlock.s"

//
// Define LOG2(x) for those values whose bit numbers are needed in
// order to test a single bit with the tbit instruction.
//
// tbit takes a bit *position*, not a mask, so the LOCK_QUEUE_WAIT /
// LOCK_QUEUE_OWNER mask constants (0x1 / 0x2) are mapped to their bit
// numbers (0 / 1) via token pasting: LOG2(0x1) -> _LOG2_0x1 -> 0.
//

#define _LOG2_0x1 0
#define _LOG2_0x2 1
#define _LOG2_x(n) _LOG2_##n
#define LOG2(n) _LOG2_x(n)

//
// Globals
//
// Referenced by the LEAF_LOWER_IRQL_AND_RETURN macro path when lowering
// IRQL reveals a pending software interrupt.
//

        PublicFunction(KiLowerIrqlSoftwareInterruptPending)
//++
//
// VOID
// KiAcquireSpinLock (
//     IN PKSPIN_LOCK SpinLock
//     )
//
// Routine Description:
//
//     This function acquires a kernel spin lock.
//
//     N.B. This function assumes that the current IRQL is set properly.
//          It neither raises nor lowers IRQL; KeAcquireSpinLockAtDpcLevel
//          is an alternate entry with identical semantics.
//
// Arguments:
//
//     SpinLock (a0) - Supplies a pointer to a kernel spin lock.
//
// Return Value:
//
//     None.
//
//--

        LEAF_ENTRY(KiAcquireSpinLock)

        ALTERNATE_ENTRY(KeAcquireSpinLockAtDpcLevel)

#if !defined(NT_UP)
        // Spin (at local label Kiasl10, generated by the macro) until the
        // lock word at [a0] is claimed; on UP builds there is no other
        // processor to contend with, so the lock body compiles away.
        ACQUIRE_SPINLOCK(a0,a0,Kiasl10)
#endif // !defined(NT_UP)

        LEAF_RETURN

        LEAF_EXIT(KiAcquireSpinLock)
//++
//
// BOOLEAN
// KeTryToAcquireSpinLockAtDpcLevel (
//     IN PKSPIN_LOCK SpinLock
//     )
//
// Routine Description:
//
//     This function attempts to acquire the specified kernel spinlock. If
//     the spinlock can be acquired, then TRUE is returned. Otherwise, FALSE
//     is returned.
//
//     N.B. This function assumes that the current IRQL is set properly.
//
// Arguments:
//
//     SpinLock (a0) - Supplies a pointer to a kernel spin lock.
//
// Return Value:
//
//     If the spin lock is acquired, then a value of TRUE is returned.
//     Otherwise, a value of FALSE is returned.
//
//     N.B. The caller KeTryToAcquireSpinLock implicitly depends on the
//          contents of predicate registers pt1 & pt2.
//          NOTE(review): this routine as written sets only pt0 — confirm
//          whether the pt1/pt2 dependency claim is stale.
//
//--

        LEAF_ENTRY(KeTryToAcquireSpinLockAtDpcLevel)

#if !defined(NT_UP)
        // Unconditionally swap the lock address into the lock word; a
        // previous value of zero means the lock was free and is now ours.
        // A nonzero previous value means it was owned — in that case the
        // owner's (nonzero) value was replaced by our a0, which is an
        // equally nonzero "held" marker, so the lock state is unchanged.
        xchg8 t0 = [a0], a0
        ;;
        cmp.ne pt0 = t0, zero                   // if ne, lock acq failed
        mov v0 = TRUE                           // acquire assumed succeed
        ;;
        nop.m 0
(pt0)   mov v0 = FALSE                          // return FALSE
#else
        mov v0 = TRUE                           // UP: no contention possible
#endif

        LEAF_RETURN

        LEAF_EXIT(KeTryToAcquireSpinLockAtDpcLevel)
//++
//
// VOID
// KeInitializeSpinLock (
//     IN PKSPIN_LOCK SpinLock
//     )
//
// Routine Description:
//
//     This function initializes an executive spin lock by storing the
//     not-owned value (zero) into it.
//
// Arguments:
//
//     SpinLock (a0) - Supplies a pointer to a executive spinlock.
//
// Return Value:
//
//     None.
//
//--

        LEAF_ENTRY(KeInitializeSpinLock)

        st8 [a0] = zero                         // clear spin lock value

        LEAF_RETURN

        LEAF_EXIT(KeInitializeSpinLock)
//++
//
// VOID
// KeAcquireSpinLock (
//     IN PKSPIN_LOCK SpinLock
//     OUT PKIRQL OldIrql
//     )
//
// Routine Description:
//
//     This function raises the current IRQL to DISPATCH_LEVEL and acquires
//     the specified executive spinlock.
//
// Arguments:
//
//     SpinLock (a0) - Supplies a pointer to a executive spinlock.
//
//     OldIrql (a1) - Supplies a pointer to a variable that receives the
//        previous IRQL value.
//
//     N.B. The Old IRQL MUST be stored after the lock is acquired —
//          storing it earlier could let it be overwritten while another
//          owner still holds the lock.
//
// Return Value:
//
//     None.
//
//--

        LEAF_ENTRY(KeAcquireSpinLock)

//
// Get original IRQL, raise IRQL to DISPATCH_LEVEL
// and then acquire the specified spinlock.
//

        mov t0 = DISPATCH_LEVEL

        SWAP_IRQL(t0)                           // old IRQL returned in v0

#if !defined(NT_UP)
        ACQUIRE_SPINLOCK(a0,a0,Kasl10)          // spin until acquired
#endif // !defined(NT_UP)

        st1 [a1] = v0                           // save old IRQL
        LEAF_RETURN

        LEAF_EXIT(KeAcquireSpinLock)
//++
//
// KIRQL
// KeAcquireSpinLockRaiseToSynch (
//     IN PKSPIN_LOCK SpinLock
//     )
//
// Routine Description:
//
//     This function raises the current IRQL to synchronization level and
//     acquires the specified spinlock.
//
//     While the lock is contended, IRQL is restored to its original value
//     and the lock word is spun on with ordinary loads; IRQL is raised
//     again only when the lock is observed free. This keeps the processor
//     interruptible while it waits.
//
// Arguments:
//
//     SpinLock (a0) - Supplies a pointer to the spinlock that is to be
//         acquired.
//
// Return Value:
//
//     The previous IRQL is returned as the function value (v0).
//
//--

        LEAF_ENTRY(KeAcquireSpinLockRaiseToSynch)

//
// Register aliases
//

        pHeld = pt0                             // true => lock was held
        pFree = pt1                             // true => lock was free

        mov t1 = SYNCH_LEVEL

#if !defined(NT_UP)
        GET_IRQL (v0)                           // v0 = original IRQL
KaslrtsRetry:
        SET_IRQL (t1)                           // raise to SYNCH_LEVEL
        xchg8 t0 = [a0], a0                     // try to claim the lock
        ;;
        cmp.eq pFree, pHeld = 0, t0
        ;;
        PSET_IRQL (pHeld, v0)                   // failed: drop back to old IRQL
(pFree) LEAF_RETURN                             // acquired: return old IRQL
        ;;

        // Spin at original IRQL with plain loads until the lock looks free,
        // then go back and retry the interlocked exchange.
KaslrtsLoop:
        cmp.eq pFree, pHeld = 0, t0
        ld8.nt1 t0 = [a0]
(pFree) br.cond.dpnt KaslrtsRetry
(pHeld) br.cond.dptk KaslrtsLoop
#else
        SWAP_IRQL (t1)                          // Raise IRQL; old IRQL in v0
        LEAF_RETURN
#endif // !defined(NT_UP)

        LEAF_EXIT(KeAcquireSpinLockRaiseToSynch)
//++
//
// KIRQL
// KeAcquireSpinLockRaiseToDpc (
//     IN PKSPIN_LOCK SpinLock
//     )
//
// Routine Description:
//
//     This function raises the current IRQL to dispatcher level and acquires
//     the specified spinlock.
//
//     Unlike the SYNCH variant, IRQL stays raised while spinning; the loop
//     alternates between an interlocked exchange (only when the lock was
//     last seen free) and a plain load (while it is seen held), avoiding
//     interlocked traffic during the wait.
//
// Arguments:
//
//     SpinLock (a0) - Supplies a pointer to the spinlock that is to be
//         acquired.
//
// Return Value:
//
//     The previous IRQL is returned as the function value (v0).
//
//--

        LEAF_ENTRY(KeAcquireSpinLockRaiseToDpc)

        mov t2 = DISPATCH_LEVEL
        ;;
        SWAP_IRQL (t2)                          // raise; old IRQL in v0

#if !defined(NT_UP)
        // Initialize predicates: pt0/pt2 = TRUE ("try exchange" / "loop"),
        // pt1/pt3 = FALSE.
        cmp.eq pt0, pt1 = zero, zero
        cmp.eq pt2, pt3 = zero, zero
        ;;
Kaslrtp10:
        .pred.rel "mutex",pt0,pt1               // pt0/pt1 are complementary
(pt0)   xchg8 t0 = [a0], a0                     // attempt acquire
(pt1)   ld8.nt1 t0 = [a0]                       // else just observe the lock
        ;;
(pt0)   cmp.ne pt2, pt3 = zero, t0              // after attempt: ne => failed
        cmp.eq pt0, pt1 = zero, t0              // free now? retry exchange next
        ;;
(pt2)   br.dpnt Kaslrtp10                       // keep spinning
(pt3)   br.ret.dptk brp                         // acquired: return old IRQL
#else
        LEAF_RETURN
#endif // !defined(NT_UP)

        LEAF_EXIT(KeAcquireSpinLockRaiseToDpc)
//++
//
// VOID
// KiReleaseSpinLock (
//     IN PKSPIN_LOCK SpinLock
//     )
//
// Routine Description:
//
//     This function releases a kernel spin lock.
//
//     N.B. This function assumes that the current IRQL is set properly.
//          KeReleaseSpinLockFromDpcLevel is an alternate entry with
//          identical semantics; neither changes IRQL.
//
// Arguments:
//
//     SpinLock (a0) - Supplies a pointer to an executive spin lock.
//
// Return Value:
//
//     None.
//
//--

        LEAF_ENTRY(KiReleaseSpinLock)

        ALTERNATE_ENTRY(KeReleaseSpinLockFromDpcLevel)

#if !defined(NT_UP)
        // Release semantics ensure all writes made inside the critical
        // section are visible before the lock is seen as free.
        st8.rel [a0] = zero                     // set spin lock not owned
#endif

        LEAF_RETURN

        LEAF_EXIT(KiReleaseSpinLock)
//++
//
// VOID
// KeReleaseSpinLock (
//     IN PKSPIN_LOCK SpinLock
//     IN KIRQL OldIrql
//     )
//
// Routine Description:
//
//     This function releases an executive spin lock and lowers the IRQL
//     to its previous value. Called at DPC_LEVEL.
//
// Arguments:
//
//     SpinLock (a0) - Supplies a pointer to an executive spin lock.
//
//     OldIrql (a1) - Supplies the previous IRQL value.
//
// Return Value:
//
//     None.
//
//--

        LEAF_ENTRY(KeReleaseSpinLock)

        zxt1 a1 = a1                            // sanitize KIRQL to 8 bits

#if !defined(NT_UP)
        // st8.rel orders all critical-section writes before the unlock.
        st8.rel [a0] = zero                     // set spinlock not owned
#endif
        ;;
        LEAF_LOWER_IRQL_AND_RETURN(a1)          // Lower IRQL and return

        LEAF_EXIT(KeReleaseSpinLock)
//++
//
// BOOLEAN
// KeTryToAcquireSpinLock (
//     IN PKSPIN_LOCK SpinLock
//     OUT PKIRQL OldIrql
//     )
//
// Routine Description:
//
//     This function raises the current IRQL to DISPATCH_LEVEL and attempts
//     to acquire the specified executive spinlock. If the spinlock can be
//     acquired, then TRUE is returned. Otherwise, the IRQL is restored to
//     its previous value and FALSE is returned. Called at IRQL <= DISPATCH_LEVEL.
//
// Arguments:
//
//     SpinLock (a0) - Supplies a pointer to a executive spinlock.
//
//     OldIrql (a1) - Supplies a pointer to a variable that receives the
//        previous IRQL value. Written only on success.
//
// Return Value:
//
//     If the spin lock is acquired, then a value of TRUE is returned.
//     Otherwise, a value of FALSE is returned.
//
//     N.B. This routine assumes KeTryToAcquireSpinLockAtDpcLevel pt1 & pt2
//          will be set to reflect the result of the attempt to acquire the
//          spinlock.
//          NOTE(review): this routine performs its own xchg8 and uses only
//          pt2 — the cross-routine predicate dependency appears stale; verify.
//
//--

        LEAF_ENTRY(KeTryToAcquireSpinLock)

        rOldIrql = t2

//
// Raise IRQL to DISPATCH_LEVEL and try to acquire the specified spinlock.
// Return FALSE if failed; otherwise, return TRUE.
//

        GET_IRQL (rOldIrql)                     // get original IRQL
        mov t0 = DISPATCH_LEVEL;;
        SET_IRQL (t0)                           // raise to dispatch level

#if !defined(NT_UP)
        // One-shot interlocked attempt; no spinning in a "try" routine.
        xchg8 t0 = [a0], a0
        ;;
        cmp.ne pt2 = t0, zero                   // if ne, lock acq failed
        ;;

//
// If successfully acquired, pt1 is set to TRUE while pt2 is set to FALSE.
// Otherwise, pt2 is set to TRUE while pt1 is set to FALSE.
//

(pt2)   mov v0 = FALSE                          // return FALSE
        PSET_IRQL (pt2, rOldIrql)               // restore old IRQL on failure
(pt2)   LEAF_RETURN
        ;;
#endif // !defined(NT_UP)

        st1 [a1] = rOldIrql                     // save old IRQL (after acquire)
        mov v0 = TRUE                           // successfully acquired
        LEAF_RETURN

        LEAF_EXIT(KeTryToAcquireSpinLock)
#if !defined(NT_UP)

//++
//
// BOOLEAN
// KeTestSpinLock (
//     IN PKSPIN_LOCK SpinLock
//     )
//
// Routine Description:
//
//     This function tests a kernel spin lock. If the spinlock is
//     busy, FALSE is returned. If not, TRUE is returned. The spinlock
//     is never acquired. This is provided to allow code to spin at low
//     IRQL, only raising the IRQL when there is a reasonable hope of
//     acquiring the lock.
//
//     N.B. A plain (non-interlocked) load is used deliberately; the result
//          is only a hint and may be stale by the time the caller acts.
//
// Arguments:
//
//     SpinLock (a0) - Supplies a pointer to a kernel spin lock.
//
// Return Value:
//
//     TRUE - Spinlock appears available
//     FALSE - SpinLock is busy
//
//--

        LEAF_ENTRY(KeTestSpinLock)

        ld8.nt1 t0 = [a0]                       // non-interlocked peek
        ;;
        cmp.ne pt0 = 0, t0
        mov v0 = TRUE                           // default TRUE
        ;;
(pt0)   mov v0 = FALSE                          // if t0 != 0 return FALSE

        LEAF_RETURN

        LEAF_EXIT(KeTestSpinLock)

#endif // !defined(NT_UP)
  426. SBTTL("Acquire Queued SpinLock and Raise IRQL")
  427. //++
  428. //
  429. // VOID
  430. // KeAcquireInStackQueuedSpinLock (
  431. // IN PKSPIN_LOCK SpinLock,
  432. // IN PKLOCK_QUEUE_HANDLE LockHandle
  433. // )
  434. //
  435. // Routine Description:
  436. //
  437. // This function raises the current IRQL to dispatch level and
  438. // acquires the specified queued spinlock.
  439. //
  440. // Arguments:
  441. //
  442. // SpinLock (a0) - Supplies a pointer to a spin lock.
  443. //
  444. // LockHandle (a1) - Supplies a pointer to a lock handle.
  445. //
  446. // Return Value:
  447. //
  448. // None.
  449. //
  450. //--
  451. LEAF_ENTRY(KeAcquireInStackQueuedSpinLockRaiseToSynch)
  452. add t5 = LqhNext, a1
  453. mov t1 = SYNCH_LEVEL
  454. br.sptk Kaisqsl10
  455. ;;
  456. ALTERNATE_ENTRY(KeAcquireInStackQueuedSpinLock)
  457. add t5 = LqhNext, a1
  458. mov t1 = DISPATCH_LEVEL
  459. ;;
  460. Kaisqsl10:
  461. #if !defined(NT_UP)
  462. st8 [t5] = zero // set next link to NULL
  463. add t4 = LqhLock, a1
  464. ;;
  465. st8.rel [t4] = a0 // set spin lock address
  466. #endif // !defined(NT_UP)
  467. SWAP_IRQL (t1) // old IRQL in register v0
  468. add t0 = LqhOldIrql, a1
  469. ;;
  470. st1 [t0] = v0 // save old IRQL
  471. #if !defined(NT_UP)
  472. //
  473. // Finish in common code. The following register values
  474. // are assumed in that code.
  475. //
  476. // t4 = &LockEntry->ActualLock
  477. // t5 = LockEntry
  478. // a0 = *(&LockEntry->ActualLock)
  479. //
  480. // Note: LqhNext == 0, otherwise we would have to trim t5 here.
  481. //
  482. br KxqAcquireQueuedSpinLock // finish in common code
  483. #else
  484. br.ret.sptk brp
  485. #endif !defined(NT_UP)
  486. LEAF_EXIT(KeAcquireInStackQueuedSpinLock)
        SBTTL("Acquire Queued SpinLock and Raise IRQL")
//++
//
// KIRQL
// KeAcquireQueuedSpinLock (
//     IN KSPIN_LOCK_QUEUE_NUMBER Number
//     )
//
// KIRQL
// KeAcquireQueuedSpinLockRaiseToSynch (
//     IN KSPIN_LOCK_QUEUE_NUMBER Number
//     )
//
// Routine Description:
//
//     This function raises the current IRQL to dispatch (or synchronization)
//     level and acquires the specified numbered queued spinlock using this
//     processor's per-PRCB lock queue entry.
//
// Arguments:
//
//     Number (a0) - Supplies the queued spinlock number (index into the
//         PRCB LockQueue array).
//
// Return Value:
//
//     The previous IRQL is returned as the function value (v0).
//
//--

        LEAF_ENTRY(KeAcquireQueuedSpinLock)

        add t0 = a0, a0                         // t0 = 2 * Number
        mov t1 = DISPATCH_LEVEL
        br Kaqsl10
        ;;

        ALTERNATE_ENTRY(KeAcquireQueuedSpinLockRaiseToSynch)

        add t0 = a0, a0                         // t0 = 2 * Number
        mov t1 = SYNCH_LEVEL
        ;;
Kaqsl10:
        SWAP_IRQL (t1)                          // old IRQL in register v0

#if !defined(NT_UP)
        movl t2 = KiPcr+PcPrcb
        ;;
        ld8 t2 = [t2]                           // t2 = PRCB address
        ;;
        // Entry address = PRCB + Number*16 (2*Number shifted left 3);
        // assumes sizeof lock queue entry == 16 — consistent with the
        // shladd-by-4 in KeTryToAcquireQueuedSpinLock; confirm in ksia64.h.
        shladd t3 = t0, 3, t2                   // get associated spinlock addr
        ;;
        add t5 = PbLockQueue, t3
        ;;
        add t4 = LqLock, t5
        ;;
        ld8 t6 = [t4]
        mov t11 = 0x7
        ;;
        andcm a0 = t6, t11                      // mask the lower 3 bits
        ;;                                      // (strip WAIT/OWNER bits to
                                                // get the real spinlock addr)

        ALTERNATE_ENTRY(KxqAcquireQueuedSpinLock)

        mf                                      // Do a memory fence to ensure
                                                // the write of LqhNext occurs
                                                // before the xchg8 on lock
                                                // address.
//
// t4 = &LockEntry->ActualLock
// t5 = LockEntry
// a0 = *(&LockEntry->ActualLock)
//

        // Atomically make this entry the queue tail; t7 is the previous
        // tail (zero if the lock was free).
        xchg8 t7 = [a0], t5
        ;;
        cmp.ne pt0, pt1 = r0, t7                // if ne, lock already owned
        ;;
(pt1)   or t8 = LOCK_QUEUE_OWNER, a0            // free: we own it immediately
(pt0)   or t8 = LOCK_QUEUE_WAIT, a0             // held: mark ourselves waiting
        ;;
        st8.rel [t4] = t8
        add t9 = LqNext, t7
(pt1)   br.ret.sptk brp                         // acquired: return old IRQL
        ;;

//
// The lock is already held by another processor. Set the wait
// bit in this processor's Lock Queue entry, then set the next
// field in the Lock Queue entry of the last processor to attempt
// to acquire the lock (this is the address returned by the xchg
// above) to point to THIS processor's lock queue entry.
//

        st8.rel [t9] = t5

        // Spin on our own entry's WAIT bit; the releasing processor clears
        // it (and sets OWNER) when ownership is handed to us.
Kaqsl20:
        ld8 t10 = [t4]
        ;;
        tbit.z pt1, pt0 = t10, LOG2(LOCK_QUEUE_WAIT)
(pt0)   br.dptk.few Kaqsl20
(pt1)   br.ret.dpnt brp                         // if zero, lock acquired
#else
        br.ret.sptk brp
#endif // !defined(NT_UP)
        ;;
        LEAF_EXIT(KeAcquireQueuedSpinLock)
        SBTTL("Release Queued SpinLock and Lower IRQL")
//++
//
// VOID
// KeReleaseInStackQueuedSpinLock (
//     IN PKLOCK_QUEUE_HANDLE LockHandle
//     )
//
// Routine Description:
//
//     This function releases a queued spinlock acquired through an in-stack
//     lock handle and lowers the IRQL to the value saved in the handle.
//     The actual release is performed by the shared tail
//     KxqReleaseQueuedSpinLock, which expects t5 = lock queue entry and
//     a1 = old IRQL.
//
// Arguments:
//
//     LockHandle (a0) - Supplies a pointer to a lock handle.
//
// Return Value:
//
//     None.
//
//--

        LEAF_ENTRY(KeReleaseInStackQueuedSpinLock)

        alloc t22 = ar.pfs, 2, 2, 2, 0
        add t9 = LqhOldIrql, a0
        add t5 = LqhNext, a0                    // set address of lock queue
        ;;
        ld1.nt1 a1 = [t9]                       // get old IRQL
        br KxqReleaseQueuedSpinLock             // finish in common code

        LEAF_EXIT(KeReleaseInStackQueuedSpinLock)
        SBTTL("Release Queued SpinLock and Lower IRQL")
//++
//
// VOID
// KeReleaseQueuedSpinLock (
//     IN KSPIN_LOCK_QUEUE_NUMBER Number,
//     IN KIRQL OldIrql
//     )
//
// Routine Description:
//
//     This function releases a queued spinlock and lowers the IRQL to its
//     previous value. If another processor is queued behind this one, lock
//     ownership is handed directly to it; otherwise the lock word is reset
//     to free with a compare-exchange that guards against a racing arrival.
//
// Arguments:
//
//     Number (a0) - Supplies the queued spinlock number.
//
//     OldIrql (a1) - Supplies the previous IRQL value.
//
// Return Value:
//
//     None.
//
//--

        LEAF_ENTRY(KeReleaseQueuedSpinLock)

        PROLOGUE_BEGIN

#if !defined(NT_UP)
        movl v0 = KiPcr+PcPrcb
        ;;
        ld8 v0 = [v0]                           // v0 = PRCB address
        add t0 = a0, a0                         // t0 = 2 * Number
        ;;
        shladd t1 = t0, 3, v0                   // PRCB + Number*16
        ;;
        add t5 = PbLockQueue, t1                // t5 = this CPU's queue entry
        ;;
#endif // !defined(NT_UP)

        ALTERNATE_ENTRY(KxqReleaseQueuedSpinLock)

#if !defined(NT_UP)
        add v0 = LqNext, t5
        add t2 = LqLock, t5
        ;;
        ld8.acq t4 = [t2]                       // lock addr + status bits
        mov ar.ccv = t5                         // cmpxchg expects tail == us
        ld8 t3 = [v0]                           // known successor, if any
        ;;
        and t4 = ~LOCK_QUEUE_OWNER, t4          // clear lock owner bit
        ;;
        add t6 = LqLock, t3
        st8.rel [t2] = t4
        cmp.ne pt0, pt1 = r0, t3                // if ne, another processor waiting
(pt0)   br.sptk.few Krqsl30                     // hand off to known successor

        // No known successor: if we are still the queue tail, free the lock.
        ld8 t7 = [t4]                           // t4 now holds spinlock addr
        ;;
        cmp.ne pt2 = t5, t7
(pt2)   br.spnt.few Krqsl20                     // someone queued behind us

        cmpxchg8.rel t8 = [t4], r0, ar.ccv      // tail==us -> set lock free
        ;;
        cmp.ne pt0, pt1 = t8, t5                // if ne, another processor waiting
(pt0)   br.spnt.few Krqsl20
        ;;
Krqsl10:
#endif // !defined(NT_UP)

        LEAF_LOWER_IRQL_AND_RETURN(a1)          // lower IRQL to previous level
        ;;

#if !defined(NT_UP)

//
// Another processor has inserted its lock queue entry in the lock queue,
// but has not yet written its lock queue entry address in the current
// processor's next link. Spin until the lock queue address is written.
//

Krqsl20:
        ld8 t3 = [v0]                           // get next lock queue entry addr
        ;;
        cmp.eq pt0 = r0, t3                     // if eq, addr not written yet
        add t6 = LqLock, t3
(pt0)   br.sptk Krqsl20                         // try again
        ;;

//
// Grant the next process in the lock queue ownership of the spinlock.
// (Turn off the WAIT bit and on the OWNER bit in the next entries lock
// field).
//

Krqsl30:
        ld8.nt1 t2 = [t6]                       // get spinlock addr and lock bit
        ;;
        st8 [v0] = r0                           // clear next lock queue entry addr
        ;;
        xor t2 = (LOCK_QUEUE_OWNER|LOCK_QUEUE_WAIT), t2
        ;;
        st8.rel [t6] = t2                       // successor now owns the lock
        br Krqsl10
        ;;
#endif // !defined(NT_UP)

        LEAF_EXIT(KeReleaseQueuedSpinLock)
        SBTTL("Try to Acquire Queued SpinLock and Raise IRQL")
//++
//
// LOGICAL
// KeTryToAcquireQueuedSpinLock (
//     IN KSPIN_LOCK_QUEUE_NUMBER Number
//     OUT PKIRQL OldIrql
//     )
//
// LOGICAL
// KeTryToAcquireQueuedSpinLockRaiseToSynch (
//     IN KSPIN_LOCK_QUEUE_NUMBER Number
//     OUT PKIRQL OldIrql
//     )
//
// Routine Description:
//
//     This function raises the current IRQL to dispatch (or synchronization)
//     level and attempts to acquire the specified queued spinlock. If the
//     spinlock cannot be acquired, then IRQL is left unchanged and FALSE is
//     returned as the function value. Otherwise, TRUE is returned as the
//     function value.
//
//     Interrupts are disabled around the attempt so the acquire and the
//     IRQL raise appear atomic with respect to interrupts on this CPU.
//
// Arguments:
//
//     Number (a0) - Supplies the queued spinlock number.
//
//     OldIrql (a1) - Supplies a pointer to a variable that receives the
//        previous IRQL value. Written only on success.
//
// Return Value:
//
//     If the spin lock is acquired, then a value of TRUE is returned.
//     Otherwise, a value of FALSE is returned.
//
//--

        LEAF_ENTRY(KeTryToAcquireQueuedSpinLock)

        movl t2 = KiPcr+PcPrcb
        mov t1 = DISPATCH_LEVEL
        br Kttaqsl10
        ;;

        ALTERNATE_ENTRY(KeTryToAcquireQueuedSpinLockRaiseToSynch)

        mov t1 = SYNCH_LEVEL
        movl t2 = KiPcr+PcPrcb
        ;;
Kttaqsl10:

#if !defined(NT_UP)
        rsm 1 << PSR_I                          // disable interrupts
        ld8 t2 = [t2]                           // get PRCB address
        ;;
        shladd t3 = a0, 4, t2                   // get address of lock queue entry
        ;;                                      // (PRCB + Number*16)
        add t5 = PbLockQueue, t3
        add t6 = LqLock+PbLockQueue, t3
        ;;
        ld8 t4 = [t6]                           // get associated spinlock addr
        mov ar.ccv = r0                         // cmpxchg oldvalue must be 0
        mov t11 = 0x7
        ;;
        andcm t12 = t4, t11                     // strip status bits -> lock addr
        ;;

//
// Try to acquire the specified spinlock.
//
// N.B. A noninterlocked test is done before the interlocked attempt. This
//      allows spinning without interlocked cycles.
//

        ld8 t8 = [t12]                          // get current lock value
        ;;
        cmp.ne pt0, pt1 = r0, t8                // if ne, lock owned
(pt0)   br.spnt.few Kttaqs20
        ;;
        cmpxchg8.acq t8 = [t12], t5, ar.ccv     // try to acquire the lock
        ;;
        cmp.ne pt0, pt1 = r0, t8                // if ne, lock owned
        or t4 = LOCK_QUEUE_OWNER, t4            // set lock owner bit
(pt0)   br.spnt.few Kttaqs20
        ;;
        st8 [t6] = t4                           // record ownership in our entry
#endif

        SWAP_IRQL(t1)                           // raise IRQL; old in v0

#if !defined(NT_UP)
        ssm 1 << PSR_I                          // enable interrupts
#endif

        st1 [a1] = v0                           // save old IRQL value
        mov v0 = TRUE                           // set return value to TRUE
        LEAF_RETURN
        ;;

#if !defined(NT_UP)

//
// The attempt to acquire the specified spin lock failed. Re-enable
// interrupts and return FALSE; IRQL was never raised on this path.
//

Kttaqs20:
        ssm 1 << PSR_I                          // enable interrupts
        mov v0 = FALSE                          // set return value to FALSE
        LEAF_RETURN
#endif

        LEAF_EXIT(KeTryToAcquireQueuedSpinLock)
        SBTTL("Try to Acquire Queued SpinLock without raising IRQL")
//++
//
// LOGICAL
// KeTryToAcquireQueuedSpinLockAtRaisedIrql (
//     IN PKSPIN_LOCK_QUEUE LockQueue
//     )
//
// Routine Description:
//
//     This function attempts to acquire the specified queued spinlock at
//     the current IRQL. If the spinlock cannot be acquired, then FALSE is
//     returned as the function value. Otherwise, TRUE is returned as the
//     function value.
//
// Arguments:
//
//     LockQueue (a0) - Supplies the address of the lock queue entry
//         (not the spinlock itself).
//
// Return Value:
//
//     If the spin lock is acquired, then a value of TRUE is returned.
//     Otherwise, a value of FALSE is returned.
//
//--

        LEAF_ENTRY(KeTryToAcquireQueuedSpinLockAtRaisedIrql)

#if !defined(NT_UP)
        add t6 = LqLock, a0
        ;;
        ld8 t4 = [t6]                           // get associated spinlock addr
        mov ar.ccv = r0                         // cmpxchg oldvalue must be 0
        mov t11 = 0x7
        ;;
        andcm t12 = t4, t11                     // strip status bits -> lock addr
        ;;

//
// Try to acquire the specified spinlock.
//
// N.B. A noninterlocked test is done before the interlocked attempt. This
//      allows spinning without interlocked cycles.
//

        ld8 t8 = [t12]                          // get current lock value
        mov v0 = FALSE                          // assume failure
        ;;
        cmp.ne pt0, pt1 = r0, t8                // if ne, lock owned
(pt0)   br.ret.spnt.few.clr brp                 // if owned, return failure
        ;;
        cmpxchg8.acq t8 = [t12], a0, ar.ccv     // try to acquire the lock
        ;;
        cmp.ne pt0, pt1 = r0, t8                // if ne, lock owned
        or t4 = LOCK_QUEUE_OWNER, t4            // set lock owner bit
(pt0)   br.ret.spnt.few.clr brp                 // if owned, return failure
        ;;
        st8 [t6] = t4                           // record ownership in our entry
#endif

        mov v0 = TRUE                           // set return value to TRUE
        LEAF_RETURN
        ;;
        LEAF_EXIT(KeTryToAcquireQueuedSpinLockAtRaisedIrql)
        SBTTL("Acquire Queued SpinLock at Current IRQL")
//++
//
// VOID
// KeAcquireInStackQueuedSpinLockAtDpcLevel (
//     IN PKSPIN_LOCK SpinLock,
//     IN PKLOCK_QUEUE_HANDLE LockHandle
//     )
//
// Routine Description:
//
//     This function acquires the specified queued spinlock at the current
//     IRQL, after initializing the caller's in-stack lock handle. It falls
//     through into KeAcquireQueuedSpinLockAtDpcLevel with a0 retargeted at
//     the handle's embedded lock queue entry.
//
// Arguments:
//
//     SpinLock (a0) - Supplies the address of a spin lock.
//
//     LockHandle (a1) - Supplies the address of an in stack lock handle.
//
// Return Value:
//
//     None.
//
//--

        LEAF_ENTRY(KeAcquireInStackQueuedSpinLockAtDpcLevel)

#if !defined(NT_UP)
        add t0 = LqhNext, a1
        add t1 = LqhLock, a1
        ;;
        st8 [t0] = r0                           // clear handle's next link
        st8 [t1] = a0                           // record spinlock address
        add a0 = LqhNext, a1                    // a0 = embedded queue entry
        ;;
#endif // !defined(NT_UP)

//++
//
// VOID
// KeAcquireQueuedSpinLockAtDpcLevel (
//     IN PKSPIN_LOCK_QUEUE LockQueue
//     )
//
// Routine Description:
//
//     This function acquires the specified queued spinlock at the current
//     IRQL.
//
// Arguments:
//
//     LockQueue (a0) - Supplies the address of the lock queue entry.
//
// Return Value:
//
//     None.
//
//--

        ALTERNATE_ENTRY(KeAcquireQueuedSpinLockAtDpcLevel)

#if !defined(NT_UP)
        add t0 = LqLock, a0
        add t1 = LqNext, a0
        mov t11 = 0x7
        ;;
        ld8 t4 = [t0]
        ;;
        mf                                      // order the handle init above
                                                // before the queue insertion
        andcm t12 = t4, t11                     // mask the lower 3 bits
        ;;                                      // (status bits -> lock addr)

        // Atomically append this entry to the queue; t3 = previous tail
        // (zero if the lock was free).
        xchg8 t3 = [t12], a0
        or t5 = LOCK_QUEUE_OWNER, t4            // set lock owner bit
        ;;
        cmp.ne pt0, pt1 = r0, t3                // if ne, lock already owned
        add t2 = LqNext, t3
        ;;
(pt0)   or t5 = LOCK_QUEUE_WAIT, t4             // held: mark waiting instead
        ;;
        st8.rel [t0] = t5
(pt0)   st8.rel [t2] = a0                       // set addr of lock queue entry
(pt1)   br.ret.sptk brp                         // free: acquired immediately
        ;;

//
// The lock is owned by another processor. Set the lock bit in the current
// processor lock queue entry, set the next link in the previous lock queue
// entry, and spin on the current processor's lock bit.
//

Kiaqsl10:
        ld8 t4 = [t0]                           // get lock addr and lock wait bit
        ;;
        tbit.z pt1, pt0 = t4, LOG2(LOCK_QUEUE_WAIT)
(pt0)   br.dptk.few Kiaqsl10
(pt1)   br.ret.dpnt brp                         // if zero, lock acquired
#else
        br.ret.sptk brp
#endif // !defined(NT_UP)
        ;;
        LEAF_EXIT(KeAcquireInStackQueuedSpinLockAtDpcLevel)
        SBTTL("Release Queued SpinLock at Current IRQL")
//++
//
// VOID
// KeReleaseInStackQueuedSpinLockFromDpcLevel (
//     IN PKLOCK_QUEUE_HANDLE LockHandle
//     )
//
// Routine Description:
//
//     This function releases a queued spinlock acquired via an in-stack
//     lock handle and preserves the current IRQL. It falls through into
//     KeReleaseQueuedSpinLockFromDpcLevel with a0 retargeted at the
//     handle's embedded lock queue entry.
//
// Arguments:
//
//     LockHandle (a0) - Supplies the address of a lock handle.
//
// Return Value:
//
//     None.
//
//--

        LEAF_ENTRY(KeReleaseInStackQueuedSpinLockFromDpcLevel)

#if !defined(NT_UP)
        add a0 = LqhNext, a0                    // a0 = embedded queue entry
        ;;
#endif // !defined(NT_UP)

//++
//
// VOID
// KeReleaseQueuedSpinLockFromDpcLevel (
//     IN PKSPIN_LOCK_QUEUE LockQueue
//     )
//
// Routine Description:
//
//     This function releases a queued spinlock and preserves the current
//     IRQL. If a successor is queued, ownership is handed directly to it;
//     otherwise the lock word is freed with a compare-exchange that guards
//     against a concurrently arriving waiter.
//
// Arguments:
//
//     LockQueue (a0) - Supplies the address of the lock queue entry.
//
// Return Value:
//
//     None.
//
//--

        ALTERNATE_ENTRY(KeReleaseQueuedSpinLockFromDpcLevel)

#if !defined(NT_UP)
        mov ar.ccv = a0                         // cmpxchg expects tail == us
        add t0 = LqNext, a0
        add t1 = LqLock, a0
        ;;
        ld8 t3 = [t0]                           // get next lock queue entry addr
        ld8 t4 = [t1]                           // get associate spin lock addr
        ;;
        and t4 = ~LOCK_QUEUE_OWNER, t4          // clear lock owner bit
        ;;
        st8.rel [t1] = t4
        cmp.ne pt0, pt1 = r0, t3                // if ne, another processor waiting
(pt0)   br.spnt.few Kirqsl30                    // hand off to known successor

KiIrqsl10:
        ld8.nt1 t3 = [t4]                       // get current lock ownership
        ;;
        cmp.ne pt0, pt1 = a0, t3                // if ne, another processor waiting
(pt0)   br.spnt.few Kirqsl20
        ;;
        cmpxchg8.rel t3 = [t4], r0, ar.ccv      // tail==us -> set lock free
        ;;
        cmp.ne pt0, pt1 = a0, t3                // if ne, try again
(pt1)   br.ret.sptk brp
        ;;

//
// Another processor has inserted its lock queue entry in the lock queue,
// but has not yet written its lock queue entry address in the current
// processor's next link. Spin until the lock queue address is written.
//

Kirqsl20:
        ld8 t3 = [t0]                           // get next lock queue entry addr
        ;;
        cmp.eq pt0 = r0, t3                     // if eq, addr not written yet
(pt0)   br.sptk Kirqsl20                        // try again

Kirqsl30:
        add t6 = LqLock, t3
        ;;
        ld8.nt1 t2 = [t6]                       // get spinlock addr and lock bit
        st8 [t0] = r0                           // clear next lock queue entry addr
        ;;
        // Set owner bit, clear wait bit.
        xor t2 = (LOCK_QUEUE_OWNER|LOCK_QUEUE_WAIT), t2
        ;;
        st8.rel [t6] = t2                       // successor now owns the lock
#endif // !defined(NT_UP)

        br.ret.sptk brp
        ;;
        LEAF_EXIT(KeReleaseInStackQueuedSpinLockFromDpcLevel)