Leaked source code of windows server 2003
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1490 lines
35 KiB

  1. //++
  2. //
  3. // Module Name:
  4. //
  5. // spinlock.s
  6. //
  7. // Abstract:
  8. //
  9. // This module implements the routines for acquiring and releasing
  10. // spin locks.
  11. //
  12. // Author:
  13. //
  14. // William K. Cheung (wcheung) 29-Sep-1995
  15. //
  16. // Environment:
  17. //
  18. // Kernel mode only.
  19. //
  20. // Revision History:
  21. //
  22. // 31-Dec-1998 wc Updated to use xchg8
  23. //
  24. // 07-Jul-1997 bl Updated to EAS2.3
  25. //
  26. // 08-Feb-1996 Updated to EAS2.1
  27. //
  28. //--
  29. #include "ksia64.h"
  30. #include "icecap.h"
  31. .file "spinlock.s"
  32. //
  33. // Define LOG2(x) for those values whose bit numbers are needed in
  34. // order to test a single bit with the tbit instruction.
  35. //
  36. #define _LOG2_0x1 0
  37. #define _LOG2_0x2 1
  38. #define _LOG2_x(n) _LOG2_##n
  39. #define LOG2(n) _LOG2_x(n)
  40. //
  41. // Globals
  42. //
  43. PublicFunction(KiCheckForSoftwareInterrupt)
  44. //++
  45. //
  46. // VOID
  47. // KiAcquireSpinLock (
  48. // IN PKSPIN_LOCK SpinLock
  49. // )
  50. //
  51. // Routine Description:
  52. //
  53. // This function acquires a kernel spin lock.
  54. //
  55. // N.B. This function assumes that the current IRQL is set properly.
  56. //
  57. // Arguments:
  58. //
  59. // SpinLock (a0) - Supplies a pointer to a kernel spin lock.
  60. //
  61. // Return Value:
  62. //
  63. // None.
  64. //
  65. //--
  66. LEAF_ENTRY(KiAcquireSpinLock)
  67. ALTERNATE_ENTRY(KeAcquireSpinLockAtDpcLevel)
//
// On MP builds, spin until the lock word is acquired (the CAPKERN variant
// additionally logs contention through the icecap instrumentation, which
// needs the four scratch registers t0-t3).
//
  68. #if !defined(NT_UP)
  69. #ifndef CAPKERN_SYNCH_POINTS
  70. ACQUIRE_SPINLOCK(a0,a0,Kiasl10)
  71. #else
  72. CAP_ACQUIRE_SPINLOCK(a0,a0,Kiasl10,t0,t1,t2,t3)
  73. #endif
  74. #endif // !defined(NT_UP)
//
// On uniprocessor (NT_UP) builds there is no lock word to spin on; the
// caller's raised IRQL alone provides mutual exclusion, so this is a no-op.
//
  75. LEAF_RETURN
  76. LEAF_EXIT(KiAcquireSpinLock)
  77. //++
  78. //
  79. // BOOLEAN
  80. // KeTryToAcquireSpinLockAtDpcLevel (
  81. // IN PKSPIN_LOCK SpinLock
  82. // )
  83. //
  84. // Routine Description:
  85. //
  86. // This function attempts to acquires the specified kernel spinlock. If
  87. // the spinlock can be acquired, then TRUE is returned. Otherwise, FALSE
  88. // is returned.
  89. //
  90. // N.B. This function assumes that the current IRQL is set properly.
  91. //
  92. // Arguments:
  93. //
  94. // SpinLock (a0) - Supplies a pointer to a kernel spin lock.
  95. //
  96. // Return Value:
  97. //
  98. // If the spin lock is acquired, then a value of TRUE is returned.
  99. // Otherwise, a value of FALSE is returned.
  100. //
  101. // N.B. The caller KeTryToAcquireSpinLock implicitly depends on the
  102. // contents of predicate registers pt1 & pt2.
//
// NOTE(review): the code below only writes predicate pt0; the pt1 & pt2
// reference above appears stale (KeTryToAcquireSpinLock in this file has
// its own inline acquire path) -- verify before relying on it.
//
  103. //
  104. //--
  105. LEAF_ENTRY(KeTryToAcquireSpinLockAtDpcLevel)
  106. #if !defined(NT_UP)
  107. #ifdef CAPKERN_SYNCH_POINTS
  108. CAPSPINLOG1INT(a0, 8, t0, t1, t2, t3, pt0)
  109. #endif
// Single interlocked attempt: exchange the lock word with our own lock
// address; t0 receives the previous value (zero == lock was free).
  110. xchg8 t0 = [a0], a0
  111. ;;
  112. cmp.ne pt0 = t0, zero // if ne, lock acq failed
  113. mov v0 = TRUE // acquire assumed succeed
  114. ;;
  115. (pt0) YIELD
  116. (pt0) mov v0 = FALSE // return FALSE
  117. #ifdef CAPKERN_SYNCH_POINTS
  118. (pt0) br.cond.spnt kttasladlSkipLog
  119. CAPSPINLOG1INT(a0, 1, t0, t1, t2, t3, pt0)
  120. kttasladlSkipLog:
  121. #endif
  122. #else
// Uniprocessor build: raised IRQL is sufficient; always succeed.
  123. mov v0 = TRUE
  124. #endif
  125. LEAF_RETURN
  126. LEAF_EXIT(KeTryToAcquireSpinLockAtDpcLevel)
  127. //++
  128. //
  129. // VOID
  130. // KeAcquireSpinLock (
  131. // IN PKSPIN_LOCK SpinLock
  132. // OUT PKIRQL OldIrql
  133. // )
  134. //
  135. // Routine Description:
  136. //
  137. // This function raises the current IRQL to DISPATCH_LEVEL and acquires
  138. // the specified executive spinlock.
  139. //
  140. // Arguments:
  141. //
  142. // SpinLock (a0) - Supplies a pointer to a executive spinlock.
  143. //
  144. // OldIrql (a1) - Supplies a pointer to a variable that receives the
  145. // the previous IRQL value.
  146. //
  147. // N.B. The Old IRQL MUST be stored after the lock is acquired.
  148. //
  149. // Return Value:
  150. //
  151. // None.
  152. //
  153. //--
  154. LEAF_ENTRY(KeAcquireSpinLock)
  155. //
  156. // Get original IRQL, raise IRQL to DISPATCH_LEVEL
  157. // and then acquire the specified spinlock.
  158. //
  159. mov t0 = DISPATCH_LEVEL
// SWAP_IRQL raises IRQL to t0 and leaves the previous IRQL in v0.
  160. SWAP_IRQL(t0)
  161. #if !defined(NT_UP)
  162. #ifndef CAPKERN_SYNCH_POINTS
  163. ACQUIRE_SPINLOCK(a0,a0,Kasl10)
  164. #else
  165. CAP_ACQUIRE_SPINLOCK(a0,a0,Kasl10,t1,t2,t3,t4)
  166. #endif
  167. #endif // !defined(NT_UP)
// v0 still holds the pre-raise IRQL; per the N.B. above it is stored
// only after the lock has been acquired.
  168. st1 [a1] = v0 // save old IRQL
  169. LEAF_RETURN
  170. LEAF_EXIT(KeAcquireSpinLock)
  171. //++
  172. //
  173. // KIRQL
  174. // KeAcquireSpinLockRaiseToSynch (
  175. // IN PKSPIN_LOCK SpinLock
  176. // )
  177. //
  178. // Routine Description:
  179. //
  180. // This function raises the current IRQL to synchronization level and
  181. // acquires the specified spinlock.
  182. //
  183. // Arguments:
  184. //
  185. // SpinLock (a0) - Supplies a pointer to the spinlock that is to be
  186. // acquired.
  187. //
  188. // Return Value:
  189. //
  190. // The previous IRQL is returned as the function value.
  191. //
  192. //--
  193. LEAF_ENTRY(KeAcquireSpinLockRaiseToSynch)
  194. //
  195. // Register aliases
  196. //
  197. pHeld = pt0
  198. pFree = pt1
  199. mov t1 = SYNCH_LEVEL
  200. #if !defined(NT_UP)
  201. #ifdef CAPKERN_SYNCH_POINTS
  202. CAPSPINLOG1INT(a0,1,t3,t4,t5,t6,pt2)
// t2 counts failed acquisition attempts for the collision log.
  203. mov t2 = zero
  204. #endif
  205. GET_IRQL (v0)
//
// Acquire strategy: raise to SYNCH_LEVEL and make one interlocked attempt.
// On failure, drop back to the original IRQL (so interrupts/DPCs are not
// held off) and spin with plain loads until the lock looks free, then
// re-raise and retry.
//
  206. KaslrtsRetry:
  207. SET_IRQL (t1)
  208. xchg8 t0 = [a0], a0
  209. ;;
  210. cmp.eq pFree, pHeld = 0, t0
  211. ;;
// If the lock was held, restore the caller's IRQL while we spin.
  212. PSET_IRQL (pHeld, v0)
  213. #ifdef CAPKERN_SYNCH_POINTS
  214. (pHeld) br.cond.dpnt KaslrtsSkipCollLog
  215. cmp.eq pt2 = t2, zero
  216. (pt2) br.cond.sptk KaslrtsSkipCollLog
  217. CAPSPINLOG2INT(t2,a0,2,t3,t4,t5,t6,pt2)
  218. KaslrtsSkipCollLog:
  219. #endif //CAPKERN_SYNCH_POINTS
  220. (pFree) LEAF_RETURN
  221. ;;
  222. KaslrtsLoop:
  223. YIELD
  224. #ifdef CAPKERN_SYNCH_POINTS
  225. add t2 = 1, t2
  226. #endif
// N.B. The compare consumes the t0 loaded on the PREVIOUS iteration (or
// by the failed xchg8 on first entry); the ld8.nt1 refreshes t0 for the
// next pass. This ordering lets the compare and load issue together.
  227. cmp.eq pFree, pHeld = 0, t0
  228. ld8.nt1 t0 = [a0]
  229. (pFree) br.cond.dpnt KaslrtsRetry
  230. (pHeld) br.cond.dptk KaslrtsLoop
  231. #else
// Uniprocessor build: just raise IRQL; v0 receives the previous IRQL.
  232. SWAP_IRQL (t1) // Raise IRQL
  233. LEAF_RETURN
  234. #endif // !defined(NT_UP)
  235. LEAF_EXIT(KeAcquireSpinLockRaiseToSynch)
  236. //++
  237. //
  238. // KIRQL
  239. // KeAcquireSpinLockRaiseToDpc (
  240. // IN PKSPIN_LOCK SpinLock
  241. // )
  242. //
  243. // Routine Description:
  244. //
  245. // This function raises the current IRQL to dispatcher level and acquires
  246. // the specified spinlock.
  247. //
  248. // Arguments:
  249. //
  250. // SpinLock (a0) - Supplies a pointer to the spinlock that is to be
  251. // acquired.
  252. //
  253. // Return Value:
  254. //
  255. // The previous IRQL is returned as the function value.
  256. //
  257. //--
  258. LEAF_ENTRY(KeAcquireSpinLockRaiseToDpc)
  259. mov t2 = DISPATCH_LEVEL
  260. ;;
// Raise IRQL first; previous IRQL (the return value) is left in v0.
  261. SWAP_IRQL (t2)
  262. #if !defined(NT_UP)
//
// Predicate protocol for the loop below:
//   pt0/pt1 (mutex pair): pt0 => make an interlocked xchg8 attempt,
//                         pt1 => only read the lock word (back-off spin).
//   pt2/pt3:              pt2 => lock was owned, keep looping,
//                         pt3 => lock acquired, return.
// Both pairs start as (1,0)/(1,0) so the first pass attempts the exchange.
//
  263. cmp.eq pt0, pt1 = zero, zero
  264. cmp.eq pt2, pt3 = zero, zero
  265. #ifdef CAPKERN_SYNCH_POINTS
  266. CAPSPINLOG1INT(a0, 1, t3, t4, t5, t6, pt4)
  267. mov t1 = zero
  268. #endif
  269. ;;
  270. Kaslrtp10:
  271. .pred.rel "mutex",pt0,pt1
  272. (pt0) xchg8 t0 = [a0], a0
  273. (pt1) ld8.nt1 t0 = [a0]
  274. ;;
  275. (pt0) cmp.ne pt2, pt3 = zero, t0
// Next pass attempts the exchange only if the lock word read back as free.
  276. cmp.eq pt0, pt1 = zero, t0
  277. ;;
  278. (pt1) YIELD
  279. #ifdef CAPKERN_SYNCH_POINTS
  280. (pt2) add t1 = 1, t1
  281. #endif
  282. (pt2) br.dpnt Kaslrtp10
  283. #ifdef CAPKERN_SYNCH_POINTS
  284. cmp.eq pt1 = t1, zero
  285. (pt1) br.sptk KaslrtpSkipCollLog
  286. CAPSPINLOG2INT(t1,a0,2,t3,t4,t5,t6,pt1)
  287. KaslrtpSkipCollLog:
  288. #endif
  289. (pt3) br.ret.dptk brp
  290. #else
  291. LEAF_RETURN
  292. #endif // !defined(NT_UP)
  293. LEAF_EXIT(KeAcquireSpinLockRaiseToDpc)
  294. //++
  295. //
  296. // VOID
  297. // KiReleaseSpinLock (
  298. // IN PKSPIN_LOCK SpinLock
  299. // )
  300. //
  301. // Routine Description:
  302. //
  303. // This function releases a kernel spin lock.
  304. //
  305. // N.B. This function assumes that the current IRQL is set properly.
  306. //
  307. // Arguments:
  308. //
  309. // SpinLock (a0) - Supplies a pointer to an executive spin lock.
  310. //
  311. // Return Value:
  312. //
  313. // None.
  314. //
  315. //--
  316. LEAF_ENTRY(KiReleaseSpinLock)
  317. ALTERNATE_ENTRY(KeReleaseSpinLockFromDpcLevel)
  318. #if !defined(NT_UP)
// st8.rel is a release store: all prior memory operations become visible
// before the lock word is cleared.
  319. st8.rel [a0] = zero // set spin lock not owned
  320. #ifdef CAPKERN_SYNCH_POINTS
  321. CAPSPINLOG1INT(a0, 7, t0, t1, t2, t3, pt0)
  322. #endif
  323. #endif
  324. LEAF_RETURN
  325. LEAF_EXIT(KiReleaseSpinLock)
  326. //++
  327. //
  328. // VOID
  329. // KeReleaseSpinLock (
  330. // IN PKSPIN_LOCK SpinLock
  331. // IN KIRQL OldIrql
  332. // )
  333. //
  334. // Routine Description:
  335. //
  336. // This function releases an executive spin lock and lowers the IRQL
  337. // to its previous value. Called at DPC_LEVEL.
  338. //
  339. // Arguments:
  340. //
  341. // SpinLock (a0) - Supplies a pointer to an executive spin lock.
  342. //
  343. // OldIrql (a1) - Supplies the previous IRQL value.
  344. //
  345. // Return Value:
  346. //
  347. // None.
  348. //
  349. //--
  350. LEAF_ENTRY(KeReleaseSpinLock)
// Normalize the KIRQL argument to its low byte (callers may pass a
// full register with garbage in the upper bits).
  351. zxt1 a1 = a1
  352. #if !defined(NT_UP)
// Release store: publish all writes made under the lock, then free it.
  353. st8.rel [a0] = zero // set spinlock not owned
  354. #ifdef CAPKERN_SYNCH_POINTS
  355. CAPSPINLOG1INT(a0, 7, t0, t1, t2, t3, pt0)
  356. #endif
  357. #endif
  358. ;;
  359. LEAF_LOWER_IRQL_AND_RETURN(a1) // Lower IRQL and return
  360. LEAF_EXIT(KeReleaseSpinLock)
  361. //++
  362. //
  363. // BOOLEAN
  364. // KeTryToAcquireSpinLock (
  365. // IN PKSPIN_LOCK SpinLock
  366. // OUT PKIRQL OldIrql
  367. // )
  368. //
  369. // Routine Description:
  370. //
  371. // This function raises the current IRQL to DISPATCH_LEVEL and attempts
  372. // to acquires the specified executive spinlock. If the spinlock can be
  373. // acquired, then TRUE is returned. Otherwise, the IRQL is restored to
  374. // its previous value and FALSE is returned. Called at IRQL <= DISPATCH_LEVEL.
  375. //
  376. // Arguments:
  377. //
  378. // SpinLock (a0) - Supplies a pointer to a executive spinlock.
  379. //
  380. // OldIrql (a1) - Supplies a pointer to a variable that receives the
  381. // the previous IRQL value.
  382. //
  383. // Return Value:
  384. //
  385. // If the spin lock is acquired, then a value of TRUE is returned.
  386. // Otherwise, a value of FALSE is returned.
  387. //
  388. // N.B. This routine assumes KeTryToAcquireSpinLockAtDpcLevel pt1 & pt2 will
  389. // be set to reflect the result of the attempt to acquire the spinlock.
//
// NOTE(review): this routine no longer calls KeTryToAcquireSpinLockAtDpcLevel;
// the acquire attempt is inlined below and only pt2 is used. The N.B. above
// appears stale -- verify.
//
  390. //
  391. //--
  392. LEAF_ENTRY(KeTryToAcquireSpinLock)
  393. #ifdef CAPKERN_SYNCH_POINTS
  394. CAPSPINLOG1INT(a0, 8, t0, t1, t2, t3, pt2)
  395. #endif
  396. rOldIrql = t2
  397. //
  398. // Raise IRQL to DISPATCH_LEVEL and try to acquire the specified spinlock.
  399. // Return FALSE if failed; otherwise, return TRUE.
  400. //
  401. GET_IRQL (rOldIrql) // get original IRQL
  402. mov t0 = DISPATCH_LEVEL;;
  403. SET_IRQL (t0) // raise to dispatch level
  404. #if !defined(NT_UP)
// Single interlocked attempt: previous lock value in t0 (0 == was free).
  405. xchg8 t0 = [a0], a0
  406. ;;
  407. cmp.ne pt2 = t0, zero // if ne, lock acq failed
  408. ;;
  409. //
  410. // If successfully acquired, pt1 is set to TRUE while pt2 is set to FALSE.
  411. // Otherwise, pt2 is set to TRUE while pt1 is set to FALSE.
  412. //
  413. (pt2) YIELD
  414. (pt2) mov v0 = FALSE // return FALSE
// On failure, drop back to the caller's IRQL before returning.
  415. PSET_IRQL (pt2, rOldIrql) // restore old IRQL
  416. #ifdef CAPKERN_SYNCH_POINTS
  417. (pt2) br.cond.dpnt KttasSkipLog
  418. CAPSPINLOG1INT(a0, 1, t0, t1, t2, t3, pt0)
  419. KttasSkipLog:
  420. #endif
  421. (pt2) LEAF_RETURN
  422. ;;
  423. #endif // !defined(NT_UP)
// Success path (and the entire UP path): store OldIrql only after the
// lock is held, then return TRUE.
  424. st1 [a1] = rOldIrql // save old IRQL
  425. mov v0 = TRUE // successfully acquired
  426. LEAF_RETURN
  427. LEAF_EXIT(KeTryToAcquireSpinLock)
  428. //++
  429. //
  430. // BOOLEAN
  431. // KeTestSpinLock (
  432. // IN PKSPIN_LOCK SpinLock
  433. // )
  434. //
  435. // Routine Description:
  436. //
  437. // This function tests a kernel spin lock. If the spinlock is
  438. // busy, FALSE is returned. If not, TRUE is returned. The spinlock
  439. // is never acquired. This is provided to allow code to spin at low
  440. // IRQL, only raising the IRQL when there is a reasonable hope of
  441. // acquiring the lock.
  442. //
  443. // Arguments:
  444. //
  445. // SpinLock (a0) - Supplies a pointer to a kernel spin lock.
  446. //
  447. // Return Value:
  448. //
  449. // TRUE - Spinlock appears available
  450. // FALSE - SpinLock is busy
  451. //--
  452. LEAF_ENTRY(KeTestSpinLock)
// Plain (non-interlocked) read -- this is only a hint; the lock state may
// change immediately after the load.
  453. ld8.nt1 t0 = [a0]
  454. ;;
  455. cmp.ne pt0 = 0, t0
  456. mov v0 = TRUE // default TRUE
  457. ;;
  458. (pt0) YIELD
  459. (pt0) mov v0 = FALSE // if t0 != 0 return FALSE
  460. LEAF_RETURN
  461. LEAF_EXIT(KeTestSpinLock)
  462. SBTTL("Acquire Queued SpinLock and Raise IRQL")
  463. //++
  464. //
  465. // VOID
  466. // KeAcquireInStackQueuedSpinLock (
  467. // IN PKSPIN_LOCK SpinLock,
  468. // IN PKLOCK_QUEUE_HANDLE LockHandle
  469. // )
  470. //
  471. // Routine Description:
  472. //
  473. // This function raises the current IRQL to dispatch level and
  474. // acquires the specified queued spinlock.
  475. //
  476. // Arguments:
  477. //
  478. // SpinLock (a0) - Supplies a pointer to a spin lock.
  479. //
  480. // LockHandle (a1) - Supplies a pointer to a lock handle.
  481. //
  482. // Return Value:
  483. //
  484. // None.
  485. //
  486. //--
  487. LEAF_ENTRY(KeAcquireInStackQueuedSpinLockRaiseToSynch)
  488. add t5 = LqhNext, a1
  489. mov t1 = SYNCH_LEVEL
  490. br.sptk Kaisqsl10
  491. ;;
  492. ALTERNATE_ENTRY(KeAcquireInStackQueuedSpinLock)
  493. add t5 = LqhNext, a1
  494. mov t1 = DISPATCH_LEVEL
  495. ;;
  496. Kaisqsl10:
  497. #if !defined(NT_UP)
// Initialize the in-stack lock queue entry: no successor yet, and record
// the actual spin lock address in the handle.
  498. st8 [t5] = zero // set next link to NULL
  499. add t4 = LqhLock, a1
  500. ;;
  501. st8.rel [t4] = a0 // set spin lock address
  502. #endif // !defined(NT_UP)
  503. SWAP_IRQL (t1) // old IRQL in register v0
  504. add t0 = LqhOldIrql, a1
  505. ;;
  506. st1 [t0] = v0 // save old IRQL
  507. #if !defined(NT_UP)
  508. //
  509. // Finish in common code. The following register values
  510. // are assumed in that code.
  511. //
  512. // t4 = &LockEntry->ActualLock
  513. // t5 = LockEntry
  514. // a0 = *(&LockEntry->ActualLock)
  515. //
  516. // Note: LqhNext == 0, otherwise we would have to trim t5 here.
  517. //
  518. br KxqAcquireQueuedSpinLock // finish in common code
  519. #else
  520. br.ret.sptk brp
  521. #endif // !defined(NT_UP)
  522. LEAF_EXIT(KeAcquireInStackQueuedSpinLock)
  523. SBTTL("Acquire Queued SpinLock and Raise IRQL")
  524. //++
  525. //
  526. // KIRQL
  527. // KeAcquireQueuedSpinLock (
  528. // IN KSPIN_LOCK_QUEUE_NUMBER Number
  529. // )
  530. //
  531. // KIRQL
  532. // KeAcquireQueuedSpinLockRaiseToSynch (
  533. // IN KSPIN_LOCK_QUEUE_NUMBER Number
  534. // )
  535. //
  536. // Routine Description:
  537. //
  538. // This function raises the current IRQL to synchronization level and
  539. // acquires the specified queued spinlock.
  540. //
  541. // Arguments:
  542. //
  543. // Number (a0) - Supplies the queued spinlock number.
  544. //
  545. // Return Value:
  546. //
  547. // The previous IRQL is returned as the function value.
  548. //
  549. //--
  550. LEAF_ENTRY(KeAcquireQueuedSpinLock)
// t0 = 2 * Number; combined with the shladd-by-3 below this indexes the
// per-processor PRCB lock queue array in 16-byte entries.
  551. add t0 = a0, a0
  552. mov t1 = DISPATCH_LEVEL
  553. br Kaqsl10
  554. ;;
  555. ALTERNATE_ENTRY(KeAcquireQueuedSpinLockRaiseToSynch)
  556. add t0 = a0, a0
  557. mov t1 = SYNCH_LEVEL
  558. ;;
  559. Kaqsl10:
  560. SWAP_IRQL (t1) // old IRQL in register v0
  561. #if !defined(NT_UP)
  562. movl t2 = KiPcr+PcPrcb
  563. ;;
  564. ld8 t2 = [t2]
  565. ;;
  566. shladd t3 = t0, 3, t2 // get associated spinlock addr
  567. ;;
  568. add t5 = PbLockQueue, t3
  569. ;;
  570. add t4 = LqLock, t5
  571. ;;
  572. ld8 t6 = [t4]
  573. mov t11 = 0x7
  574. ;;
// The LqLock field carries the OWNER/WAIT flags in its low bits; mask
// them off to recover the real spin lock address.
  575. andcm a0 = t6, t11 // mask the lower 3 bits
  576. ;;
  577. ALTERNATE_ENTRY(KxqAcquireQueuedSpinLock)
  578. #ifdef CAPKERN_SYNCH_POINTS
  579. CAPSPINLOG1INT(a0,1,t6,t7,t8,t9,pt1)
  580. mov t12 = zero
  581. #endif
  582. mf // Do a memory fence to ensure the write of LqhNext
  583. // occurs before the xchg8 on lock address.
  584. //
  585. // t4 = &LockEntry->ActualLock
  586. // t5 = LockEntry
  587. // a0 = *(&LockEntry->ActualLock)
  588. //
// Atomically append our queue entry: the lock word receives &LockEntry,
// and t7 receives the previous tail (0 == lock was free).
  589. xchg8 t7 = [a0], t5
  590. ;;
  591. cmp.ne pt0, pt1 = r0, t7 // if ne, lock already owned
  592. ;;
  593. (pt1) or t8 = LOCK_QUEUE_OWNER, a0
  594. (pt0) or t8 = LOCK_QUEUE_WAIT, a0
  595. ;;
  596. st8.rel [t4] = t8
  597. add t9 = LqNext, t7
  598. (pt1) br.ret.sptk brp
  599. ;;
  600. //
  601. // The lock is already held by another processor. Set the wait
  602. // bit in this processor's Lock Queue entry, then set the next
  603. // field in the Lock Queue entry of the last processor to attempt
  604. // to acquire the lock (this is the address returned by the xchg
  605. // above) to point to THIS processor's lock queue entry.
  606. //
  607. st8.rel [t9] = t5
// Spin locally on our own entry's WAIT bit; the releasing processor
// clears it (and sets OWNER) to hand the lock to us.
  608. Kaqsl20:
  609. #ifdef CAPKERN_SYNCH_POINTS
  610. add t12 = 1, t12
  611. #endif
  612. YIELD
  613. ld8 t10 = [t4]
  614. ;;
  615. tbit.z pt1, pt0 = t10, LOG2(LOCK_QUEUE_WAIT)
  616. (pt0) br.dptk.few Kaqsl20
  617. #ifdef CAPKERN_SYNCH_POINTS
  618. CAPSPINLOG2INT(t12,a0,4,t6,t7,t8,t9,pt2)
  619. #endif
  620. (pt1) br.ret.dpnt brp // if zero, lock acquired
  621. #else
  622. br.ret.sptk brp
  623. #endif // !defined(NT_UP)
  624. ;;
  625. LEAF_EXIT(KeAcquireQueuedSpinLock)
  626. SBTTL("Release Queued SpinLock and Lower IRQL")
  627. //++
  628. //
  629. // VOID
  630. // KeReleaseInStackQueuedSpinLock (
  631. // IN PKLOCK_QUEUE_HANDLE LockHandle
  632. // )
  633. //
  634. // Routine Description:
  635. //
  636. // This function releases a queued spinlock and lowers the IRQL to its
  637. // previous value.
  638. //
  639. // Arguments:
  640. //
  641. // LockHandle (a0) - Supplies a pointer to a lock handle.
  642. //
  643. // Return Value:
  644. //
  645. // None.
  646. //
  647. //--
  648. LEAF_ENTRY(KeReleaseInStackQueuedSpinLock)
  649. alloc t22 = ar.pfs, 2, 2, 2, 0
  650. add t9 = LqhOldIrql, a0
// The common release code expects t5 = &LockHandle->LockQueue and
// a1 = the IRQL to restore.
  651. add t5 = LqhNext, a0 // set address of lock queue
  652. ;;
  653. ld1.nt1 a1 = [t9] // get old IRQL
  654. br KxqReleaseQueuedSpinLock // finish in common code
  655. LEAF_EXIT(KeReleaseInStackQueuedSpinLock)
  656. SBTTL("Release Queued SpinLock and Lower IRQL")
  657. //++
  658. //
  659. // VOID
  660. // KeReleaseQueuedSpinLock (
  661. // IN KSPIN_LOCK_QUEUE_NUMBER Number,
  662. // IN KIRQL OldIrql
  663. // )
  664. //
  665. // Routine Description:
  666. //
  667. // This function releases a queued spinlock and lowers the IRQL to its
  668. // previous value.
  669. //
  670. // Arguments:
  671. //
  672. // Number (a0) - Supplies the queued spinlock number.
  673. //
  674. // OldIrql (a1) - Supplies the previous IRQL value.
  675. //
  676. // Return Value:
  677. //
  678. // None.
  679. //
  680. //--
  681. LEAF_ENTRY(KeReleaseQueuedSpinLock)
  682. PROLOGUE_BEGIN
  683. #if !defined(NT_UP)
// Locate this processor's lock queue entry: PRCB->LockQueue[Number]
// (entries are 16 bytes: 2*Number scaled by 8 below).
  684. movl v0 = KiPcr+PcPrcb
  685. ;;
  686. ld8 v0 = [v0]
  687. add t0 = a0, a0
  688. ;;
  689. shladd t1 = t0, 3, v0
  690. ;;
  691. add t5 = PbLockQueue, t1
  692. ;;
  693. #endif // !defined(NT_UP)
  694. ALTERNATE_ENTRY(KxqReleaseQueuedSpinLock)
  695. #if !defined(NT_UP)
  696. add v0 = LqNext, t5
  697. add t2 = LqLock, t5
  698. ;;
  699. ld8.acq t4 = [t2]
  700. mov ar.ccv = t5
  701. ld8 t3 = [v0]
  702. ;;
  703. #ifdef CAPKERN_SYNCH_POINTS
  704. and t6 = ~7, t4
  705. CAPSPINLOG1INT(t6,7,t7,t8,t9,t10,pt0)
  706. mov ar.ccv = t5 /* CAPSPINLOG1INT mutates ar.ccv */
  707. mov t9 = zero
  708. #endif
  709. and t4 = ~LOCK_QUEUE_OWNER, t4 // clear lock owner bit
  710. ;;
  711. add t6 = LqLock, t3
  712. st8.rel [t2] = t4
  713. cmp.ne pt0, pt1 = r0, t3 // if ne, another processor waiting
  714. (pt0) br.sptk.few Krqsl30
// No known successor: if the lock word still points at our entry, try to
// free the lock outright with an interlocked compare-exchange.
  715. ld8 t7 = [t4]
  716. ;;
  717. cmp.ne pt2 = t5, t7
  718. (pt2) br.spnt.few Krqsl20
  719. cmpxchg8.rel t8 = [t4], r0, ar.ccv
  720. ;;
  721. cmp.ne pt0, pt1 = t8, t5 // if ne, another processor waiting
  722. (pt0) br.spnt.few Krqsl20
  723. ;;
  724. Krqsl10:
  725. #ifdef CAPKERN_SYNCH_POINTS
  726. cmp.eq pt2 = t9, zero
  727. (pt2) br.sptk KrqslSkipCollLog
  728. and t6 = ~7, t4
  729. CAPSPINLOG2INT(t9,t6,9,t7,t8,t10,t11,pt2)
  730. KrqslSkipCollLog:
  731. #endif
  732. #endif // !defined(NT_UP)
  733. LEAF_LOWER_IRQL_AND_RETURN(a1) // lower IRQL to previous level
  734. ;;
  735. #if !defined(NT_UP)
  736. //
  737. // Another processor has inserted its lock queue entry in the lock queue,
  738. // but has not yet written its lock queue entry address in the current
  739. // processor's next link. Spin until the lock queue address is written.
  740. //
  741. Krqsl20:
  742. YIELD
  743. #ifdef CAPKERN_SYNCH_POINTS
// BUGFIX: was "and t9 = 1, t9", which leaves the collision counter at
// zero forever (t9 starts at zero), so the collision log at Krqsl10
// never fired. Count iterations as the parallel loop Kirqsl20 in
// KeReleaseQueuedSpinLockFromDpcLevel does.
  744. add t9 = 1, t9
  745. #endif
  746. ld8 t3 = [v0] // get next lock queue entry addr
  747. ;;
  748. cmp.eq pt0 = r0, t3 // if eq, addr not written yet
  749. add t6 = LqLock, t3
  750. (pt0) br.sptk Krqsl20 // try again
  751. ;;
  752. //
  753. // Grant the next process in the lock queue ownership of the spinlock.
  754. // (Turn off the WAIT bit and on the OWNER bit in the next entries lock
  755. // field).
  756. //
  757. Krqsl30:
  758. ld8.nt1 t2 = [t6] // get spinlock addr and lock bit
  759. ;;
  760. st8 [v0] = r0 // clear next lock queue entry addr
  761. ;;
  762. xor t2 = (LOCK_QUEUE_OWNER|LOCK_QUEUE_WAIT), t2
  763. ;;
  764. st8.rel [t6] = t2
  765. br Krqsl10
  766. ;;
  767. #endif // !defined(NT_UP)
  768. LEAF_EXIT(KeReleaseQueuedSpinLock)
  769. SBTTL("Try to Acquire Queued SpinLock and Raise IRQL")
  770. //++
  771. //
  772. // LOGICAL
  773. // KeTryToAcquireQueuedSpinLock (
  774. // IN KSPIN_LOCK_QUEUE_NUMBER Number
  775. // OUT PKIRQL OldIrql
  776. // )
  777. //
  778. // LOGICAL
  779. // KeTryToAcquireQueuedSpinLockRaiseToSynch (
  780. // IN KSPIN_LOCK_QUEUE_NUMBER Number
  781. // OUT PKIRQL OldIrql
  782. // )
  783. //
  784. // LOGICAL
  785. // KeTryToAcquireQueuedSpinLockAtRaisedIrql (
  786. // IN PKSPIN_LOCK_QUEUE LockQueue
  787. // )
  788. //
  789. // Routine Description:
  790. //
  791. // This function raises the current IRQL to synchronization level and
  792. // attempts to acquire the specified queued spinlock. If the spinlock
  793. // cannot be acquired, then IRQL is restored and FALSE is returned as
  794. // the function value. Otherwise, TRUE is returned as the function
  795. // value.
  796. //
  797. // Arguments:
  798. //
  799. // Number (a0) - Supplies the queued spinlock number.
  800. //
  801. // OldIrql (a1) - Supplies a pointer to a variable that receives the
  802. // the previous IRQL value.
  803. //
  804. // Return Value:
  805. //
  806. // If the spin lock is acquired, then a value of TRUE is returned.
  807. // Otherwise, a value of FALSE is returned.
  808. //
  809. //--
  810. LEAF_ENTRY(KeTryToAcquireQueuedSpinLock)
  811. movl t2 = KiPcr+PcPrcb
  812. mov t1 = DISPATCH_LEVEL
  813. br Kttaqsl10
  814. ;;
  815. ALTERNATE_ENTRY(KeTryToAcquireQueuedSpinLockRaiseToSynch)
  816. mov t1 = SYNCH_LEVEL
  817. movl t2 = KiPcr+PcPrcb
  818. ;;
  819. Kttaqsl10:
  820. #if !defined(NT_UP)
// Interrupts are disabled around the attempt so the try/raise sequence
// is not torn by an interrupt; re-enabled (ssm) on both exit paths.
  821. rsm 1 << PSR_I // disable interrupts
  822. ld8 t2 = [t2] // get PRCB address
  823. ;;
  824. shladd t3 = a0, 4, t2 // get address of lock queue entry
  825. ;;
  826. add t5 = PbLockQueue, t3
  827. add t6 = LqLock+PbLockQueue, t3
  828. ;;
  829. ld8 t4 = [t6] // get associated spinlock addr
  830. mov ar.ccv = r0 // cmpxchg oldvalue must be 0
  831. mov t11 = 0x7
  832. ;;
// Strip the OWNER/WAIT flag bits to recover the real lock address.
  833. andcm t12 = t4, t11
  834. ;;
  835. #ifdef CAPKERN_SYNCH_POINTS
  836. CAPSPINLOG1INT(t12,8,t7,t8,t9,t10,pt0)
  837. mov ar.ccv = zero /* CAPSPINLOG1INT mutates ar.ccv */
  838. #endif
  839. //
  840. // Try to acquire the specified spinlock.
  841. //
  842. // N.B. A noninterlocked test is done before the interlocked attempt. This
  843. // allows spinning without interlocked cycles.
  844. //
  845. ld8 t8 = [t12] // get current lock value
  846. ;;
  847. cmp.ne pt0, pt1 = r0, t8 // if ne, lock owned
  848. (pt0) br.spnt.few Kttaqs20
  849. ;;
// Interlocked attempt: 0 -> &LockQueueEntry succeeds only if still free.
  850. cmpxchg8.acq t8 = [t12], t5, ar.ccv // try to acquire the lock
  851. ;;
  852. cmp.ne pt0, pt1 = r0, t8 // if ne, lock owned
  853. or t4 = LOCK_QUEUE_OWNER, t4// set lock owner bit
  854. (pt0) br.spnt.few Kttaqs20
  855. ;;
  856. st8 [t6] = t4
  857. #endif
// Raise IRQL only after the lock is held; previous IRQL ends up in v0.
  858. SWAP_IRQL(t1)
  859. #if !defined(NT_UP)
  860. ssm 1 << PSR_I // enable interrupts
  861. #ifdef CAPKERN_SYNCH_POINTS
  862. CAPSPINLOG1INT(t12,1,t7,t8,t9,t10,pt0)
  863. #endif
  864. #endif
  865. st1 [a1] = v0 // save old IRQL value
  866. mov v0 = TRUE // set return value to TRUE
  867. LEAF_RETURN
  868. ;;
  869. #if !defined(NT_UP)
  870. //
  871. // The attempt to acquire the specified spin lock failed. Lower IRQL to its
  872. // previous value and return FALSE.
  873. //
  874. Kttaqs20:
  875. ssm 1 << PSR_I // enable interrupts
  876. mov v0 = FALSE // set return value to FALSE
  877. YIELD
  878. LEAF_RETURN
  879. #endif
  880. LEAF_EXIT(KeTryToAcquireQueuedSpinLock)
  881. SBTTL("Try to Acquire Queued SpinLock without raising IRQL")
  882. //++
  883. //
  884. // LOGICAL
  885. // KeTryToAcquireQueuedSpinLockAtRaisedIrql (
  886. // IN PKSPIN_LOCK_QUEUE LockQueue
  887. // )
  888. //
  889. // Routine Description:
  890. //
  891. // This function attempts to acquire the specified queued spinlock.
  892. // If the spinlock cannot be acquired, then FALSE is returned as
  893. // the function value. Otherwise, TRUE is returned as the function
  894. // value.
  895. //
  896. // Arguments:
  897. //
  898. // LockQueue (a0) - Supplies the address of the queued spinlock.
  899. //
  900. // Return Value:
  901. //
  902. // If the spin lock is acquired, then a value of TRUE is returned.
  903. // Otherwise, a value of FALSE is returned.
  904. //
  905. //--
  906. LEAF_ENTRY(KeTryToAcquireQueuedSpinLockAtRaisedIrql)
  907. #if !defined(NT_UP)
  908. add t6 = LqLock, a0
  909. ;;
  910. ld8 t4 = [t6] // get associated spinlock addr
  911. mov ar.ccv = r0 // cmpxchg oldvalue must be 0
  912. mov t11 = 0x7
  913. ;;
// Strip the OWNER/WAIT flag bits to recover the real lock address.
  914. andcm t12 = t4, t11
  915. ;;
  916. #ifdef CAPKERN_SYNCH_POINTS
  917. CAPSPINLOG1INT(t12,8,t7,t8,t9,t10,pt0)
  918. mov ar.ccv = zero /*CAPSPINLOG1INT mutates ar.ccv*/
  919. #endif
  920. //
  921. // Try to acquire the specified spinlock.
  922. //
  923. // N.B. A noninterlocked test is done before the interlocked attempt. This
  924. // allows spinning without interlocked cycles.
  925. //
  926. ld8 t8 = [t12] // get current lock value
  927. mov v0 = FALSE // assume failure
  928. ;;
  929. cmp.ne pt0, pt1 = r0, t8 // if ne, lock owned
  930. (pt0) br.ret.spnt.few.clr brp // if owned, return failure
  931. ;;
// Interlocked attempt: 0 -> LockQueue entry address if still free.
  932. cmpxchg8.acq t8 = [t12], a0, ar.ccv // try to acquire the lock
  933. ;;
  934. cmp.ne pt0, pt1 = r0, t8 // if ne, lock owned
  935. or t4 = LOCK_QUEUE_OWNER, t4// set lock owner bit
  936. ;;
  937. (pt0) YIELD
  938. (pt0) br.ret.spnt.few.clr brp // if owned, return failure
  939. ;;
// Success: record ownership in our queue entry's LqLock field.
  940. st8 [t6] = t4
  941. #ifdef CAPKERN_SYNCH_POINTS
  942. CAPSPINLOG1INT(t12,1,t7,t8,t9,t10,pt0)
  943. #endif
  944. #endif
  945. mov v0 = TRUE // set return value to TRUE
  946. LEAF_RETURN
  947. ;;
  948. LEAF_EXIT(KeTryToAcquireQueuedSpinLockAtRaisedIrql)
  949. SBTTL("Acquire Queued SpinLock at Current IRQL")
  950. //++
  951. // VOID
  952. // KeAcquireInStackQueuedSpinLockAtDpcLevel (
  953. // IN PKSPIN_LOCK SpinLock,
  954. // IN PKLOCK_QUEUE_HANDLE LockHandle
  955. // )
  956. //
  957. // Routine Description:
  958. //
  959. // This function acquires the specified queued spinlock at the current
  960. // IRQL.
  961. //
  962. // Arguments:
  963. //
  964. // SpinLock (a0) - Supplies the address of a spin lock.
  965. //
  966. // LockHandle (a1) - Supplies the address of an in stack lock handle.
  967. //
  968. // Return Value:
  969. //
  970. // None.
  971. //
  972. //--
  973. LEAF_ENTRY(KeAcquireInStackQueuedSpinLockAtDpcLevel)
  974. #if !defined(NT_UP)
// Initialize the in-stack queue entry (Next = NULL, Lock = SpinLock),
// then fall through to the common path with a0 = &LockHandle->LockQueue.
  975. add t0 = LqhNext, a1
  976. add t1 = LqhLock, a1
  977. ;;
  978. st8 [t0] = r0
  979. st8 [t1] = a0
  980. add a0 = LqhNext, a1
  981. ;;
  982. #endif // !defined(NT_UP)
  983. //++
  984. //
  985. // VOID
  986. // KeAcquireQueuedSpinLockAtDpcLevel (
  987. // IN PKSPIN_LOCK_QUEUE LockQueue
  988. // )
  989. //
  990. // Routine Description:
  991. //
  992. // This function acquires the specified queued spinlock at the current
  993. // IRQL.
  994. //
  995. // Arguments:
  996. //
  997. // LockQueue (a0) - Supplies the address of the lock queue entry.
  998. //
  999. // Return Value:
  1000. //
  1001. // None.
  1002. //
  1003. //--
  1004. ALTERNATE_ENTRY(KeAcquireQueuedSpinLockAtDpcLevel)
  1005. #if !defined(NT_UP)
  1006. add t0 = LqLock, a0
  1007. add t1 = LqNext, a0
  1008. mov t11 = 0x7
  1009. ;;
  1010. ld8 t4 = [t0]
  1011. ;;
// Fence so the queue-entry initialization above is globally visible
// before we enqueue ourselves with the xchg8 below.
  1012. mf
  1013. andcm t12 = t4, t11 // mask the lower 3 bits
  1014. ;;
  1015. #ifdef CAPKERN_SYNCH_POINTS
  1016. CAPSPINLOG1INT(t12,1,t5,t6,t7,t8,pt0)
  1017. mov t6 = zero
  1018. #endif
// Atomically append this entry to the queue; t3 = previous tail
// (0 == lock was free and we now own it).
  1019. xchg8 t3 = [t12], a0
  1020. or t5 = LOCK_QUEUE_OWNER, t4 // set lock owner bit
  1021. ;;
  1022. cmp.ne pt0, pt1 = r0, t3 // if ne, lock already owned
  1023. add t2 = LqNext, t3
  1024. ;;
  1025. (pt0) or t5 = LOCK_QUEUE_WAIT, t4
  1026. ;;
  1027. st8.rel [t0] = t5
  1028. (pt0) st8.rel [t2] = a0 // set addr of lock queue entry
  1029. (pt1) br.ret.sptk brp
  1030. ;;
  1031. //
  1032. // The lock is owned by another processor. Set the lock bit in the current
  1033. // processor lock queue entry, set the next link in the previous lock queue
  1034. // entry, and spin on the current processor's lock bit.
  1035. //
  1036. Kiaqsl10:
  1037. #ifdef CAPKERN_SYNCH_POINTS
  1038. add t6 = 1, t6
  1039. #endif
  1040. YIELD
  1041. ld8 t4 = [t0] // get lock addr and lock wait bit
  1042. ;;
// The releasing processor clears our WAIT bit (and sets OWNER) to hand
// the lock over; spin until that happens.
  1043. tbit.z pt1, pt0 = t4, LOG2(LOCK_QUEUE_WAIT)
  1044. (pt0) br.dptk.few Kiaqsl10
  1045. #ifdef CAPKERN_SYNCH_POINTS
  1046. CAPSPINLOG2INT(t6,t12,4,t7,t8,t9,t10,pt2)
  1047. #endif
  1048. (pt1) br.ret.dpnt brp // if zero, lock acquired
  1049. #else
  1050. br.ret.sptk brp
  1051. #endif // !defined(NT_UP)
  1052. ;;
  1053. LEAF_EXIT(KeAcquireInStackQueuedSpinLockAtDpcLevel)
  1054. SBTTL("Release Queued SpinLock at Current IRQL")
  1055. //++
  1056. //
  1057. // VOID
  1058. // KeReleaseInStackQueuedSpinLockFromDpcLevel (
  1059. // IN PKLOCK_QUEUE_HANDLE LockHandle
  1060. // )
  1061. //
  1062. // Routine Description:
  1063. //
  1064. // This function releases a queued spinlock and preserves the current
  1065. // IRQL.
  1066. //
  1067. // Arguments:
  1068. //
  1069. // LockHandle (a0) - Supplies the address of a lock handle.
  1070. //
  1071. // Return Value:
  1072. //
  1073. // None.
  1074. //
  1075. //--
  1076. LEAF_ENTRY(KeReleaseInStackQueuedSpinLockFromDpcLevel)
  1077. #if !defined(NT_UP)
// Convert the lock handle to its embedded lock queue entry and fall
// through to the common release path.
  1078. add a0 = LqhNext, a0
  1079. ;;
  1080. #endif // !defined(NT_UP)
  1081. //++
  1082. //
  1083. // VOID
  1084. // KiReleaseQueuedSpinLock (
  1085. // IN PKSPIN_LOCK_QUEUE LockQueue
  1086. // )
  1087. //
  1088. // Routine Description:
  1089. //
  1090. // This function releases a queued spinlock and preserves the current
  1091. // IRQL.
  1092. //
  1093. // Arguments:
  1094. //
  1095. // LockQueue (a0) - Supplies the address of the lock queue entry.
  1096. //
  1097. // Return Value:
  1098. //
  1099. // None.
  1100. //
  1101. //--
  1102. ALTERNATE_ENTRY(KeReleaseQueuedSpinLockFromDpcLevel)
  1103. #if !defined(NT_UP)
  1104. mov ar.ccv = a0
  1105. add t0 = LqNext, a0
  1106. add t1 = LqLock, a0
  1107. ;;
  1108. ld8 t3 = [t0] // get next lock queue entry addr
  1109. ld8 t4 = [t1] // get associate spin lock addr
  1110. ;;
  1111. #ifdef CAPKERN_SYNCH_POINTS
  1112. and t6 = ~7, t4
  1113. CAPSPINLOG1INT(t6,7,t7,t8,t9,t10,pt0)
  1114. mov ar.ccv = a0 /* CAPSPINLOG1INT mutates ar.ccv */
  1115. mov t9 = zero
  1116. #endif
  1117. and t4 = ~LOCK_QUEUE_OWNER, t4 // clear lock owner bit
  1118. ;;
  1119. st8.rel [t1] = t4
  1120. cmp.ne pt0, pt1 = r0, t3 // if ne, another processor waiting
  1121. (pt0) br.spnt.few Kirqsl30
  1122. KiIrqsl10:
// No known successor: if the lock word still points at this entry, try
// to free the lock with an interlocked compare-exchange (entry -> 0).
  1123. ld8.nt1 t3 = [t4] // get current lock ownership
  1124. ;;
  1125. cmp.ne pt0, pt1 = a0, t3 // if ne, another processor waiting
  1126. (pt0) br.spnt.few Kirqsl20
  1127. ;;
  1128. cmpxchg8.rel t3 = [t4], r0, ar.ccv
  1129. ;;
  1130. cmp.ne pt0, pt1 = a0, t3 // if ne, try again
  1131. (pt1) br.ret.sptk brp
  1132. ;;
  1133. //
  1134. // Another processor has inserted its lock queue entry in the lock queue,
  1135. // but has not yet written its lock queue entry address in the current
  1136. // processor's next link. Spin until the lock queue address is written.
  1137. //
  1138. Kirqsl20:
  1139. YIELD
  1140. #ifdef CAPKERN_SYNCH_POINTS
  1141. add t9 = 1, t9
  1142. #endif
  1143. ld8 t3 = [t0] // get next lock queue entry addr
  1144. ;;
  1145. cmp.eq pt0 = r0, t3 // if eq, addr not written yet
  1146. (pt0) br.sptk Kirqsl20 // try again
  1147. Kirqsl30:
  1148. add t6 = LqLock, t3
  1149. ;;
  1150. ld8.nt1 t2 = [t6] // get spinlock addr and lock bit
  1151. st8 [t0] = r0 // clear next lock queue entry addr
  1152. ;;
  1153. // Set owner bit, clear wait bit.
  1154. xor t2 = (LOCK_QUEUE_OWNER|LOCK_QUEUE_WAIT), t2
  1155. ;;
// Release store hands ownership to the next waiter in the queue.
  1156. st8.rel [t6] = t2
  1157. #ifdef CAPKERN_SYNCH_POINTS
  1158. cmp.eq pt2 = t9, zero
  1159. (pt2) br.sptk KirqslSkipCollLog
  1160. and t6 = ~7, t4
  1161. CAPSPINLOG2INT(t9,t6,9,t7,t8,t10,t11,pt2)
  1162. KirqslSkipCollLog:
  1163. #endif
  1164. #endif // !defined(NT_UP)
  1165. br.ret.sptk brp
  1166. ;;
  1167. LEAF_EXIT(KeReleaseInStackQueuedSpinLockFromDpcLevel)
  1168. //++
  1169. //
  1170. // VOID
  1171. // KiLowerIrqlSoftwareInterruptPending(
  1172. // IN TEMP_REG NewIrql
  1173. // )
  1174. //
  1175. // Routine Description:
  1176. //
  1177. // This function is entered directly from a LEAF function that is
  1178. // lowering IRQL before it exits when there is a software interrupt
  1179. // pending that will fire as a result of lowering IRQL.
  1180. //
  1181. // In this special case, we need to promote to a nested entry in
  1182. // order to process the simulated interrupt.
  1183. //
  1184. // Return is directly to the caller of the leaf function.
  1185. //
  1186. // This routine is entered with interrupts disabled, this is a
  1187. // side effect of the code that branched here needing interrupts
  1188. // disabled while checking and lowering.
  1189. //
  1190. // Arguments:
  1191. //
  1192. // NewIrql - Because we are branched to from a leaf routine,
  1193. // the argument must be passed in non-windowed
  1194. // register t22 (r31).
  1195. //
  1196. // Return Value:
  1197. //
  1198. // None.
  1199. //
  1200. //--
  1201. NESTED_ENTRY(KiLowerIrqlSoftwareInterruptPending)
// Build a real frame (one output register for the call below); a leaf
// routine cannot make calls, which is why it branched here instead.
  1202. NESTED_SETUP(0,3,1,0)
  1203. PROLOGUE_END
  1204. mov out0 = t22
// Re-enable interrupts before dispatching the pending software interrupt.
  1205. ssm 1 << PSR_I
  1206. ;;
  1207. br.call.sptk brp = KiCheckForSoftwareInterrupt;;
  1208. NESTED_RETURN
  1209. NESTED_EXIT(KiLowerIrqlSoftwareInterruptPending)