Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1396 lines
42 KiB

  1. // TITLE("Interlocked Support")
  2. //++
  3. //
  4. // Copyright (c) 1990 Microsoft Corporation
  5. // Copyright (c) 1992 Digital Equipment Corporation
  6. //
  7. // Module Name:
  8. //
  9. // intrlock.s
  10. //
  11. // Abstract:
  12. //
  13. // This module implements functions to support interlocked operations.
  14. // Interlocked operations can only operate on nonpaged data and the
  15. // specified spinlock cannot be used for any other purpose.
  16. //
  17. // Author:
  18. //
  19. // David N. Cutler (davec) 26-Mar-1990
  20. //
  21. // Environment:
  22. //
  23. // Kernel mode.
  24. //
  25. // Revision History:
  26. //
  27. // Thomas Van Baak (tvb) 18-May-1992
  28. //
  29. // Adapted for Alpha AXP.
  30. //
  31. //--
  32. #include "ksalpha.h"
  33. SBTTL("Interlocked Add Large Integer")
  34. //++
  35. //
  36. // LARGE_INTEGER
  37. // ExInterlockedAddLargeInteger (
  38. // IN PLARGE_INTEGER Addend,
  39. // IN LARGE_INTEGER Increment,
  40. // IN PKSPIN_LOCK Lock
  41. // )
  42. //
  43. // Routine Description:
  44. //
  45. // This function performs an interlocked add of an increment value to an
  46. // addend variable of type large integer. The initial value of the addend
  47. // variable is returned as the function value.
  48. //
  49. // Arguments:
  50. //
  51. // Addend (a0) - Supplies a pointer to a variable whose value is to be
  52. // adjusted by the increment value.
  53. //
  54. // Increment (a1) - Supplies the increment value to be added to the
  55. // addend variable.
  56. //
  57. // Lock (a2) - Supplies a pointer to a spin lock to be used to synchronize
  58. // access to the addend variable.
  59. //
  60. // Return Value:
  61. //
  62. // The result of the interlocked large integer add (the updated sum). NOTE(review): the routine description above says the initial addend value is returned, but the code returns the sum - confirm against the documented contract.
  63. //
  64. // Implementation Note:
  65. //
  66. // The specification of this function requires that the given lock must be
  67. // used to synchronize the update even though on Alpha the operation can
  68. // actually be done atomically without using the specified lock.
  69. //
  70. //--
  71. LEAF_ENTRY(ExInterlockedAddLargeInteger)
  72. 10: DISABLE_INTERRUPTS // disable interrupts
  73. #if !defined(NT_UP)
  74. LDP_L t0, 0(a2) // get current lock value - locked
  75. bne t0, 20f // if ne, spin lock owned
  76. mov a2, t0 // set ownership value (lock address)
  77. STP_C t0, 0(a2) // set spin lock owned - conditionally
  78. beq t0, 20f // if eq, conditional store failed
  79. mb // synchronize memory access (acquire semantics after taking the lock)
  80. #endif
  81. ldq t0, 0(a0) // get addend
  82. addq t0, a1, v0 // do the add
  83. stq v0, 0(a0) // store result
  84. #if !defined(NT_UP)
  85. mb // synchronize memory access (release semantics before dropping the lock)
  86. STP zero, 0(a2) // set spin lock not owned
  87. #endif
  88. ENABLE_INTERRUPTS // enable interrupts - NOTE(review): sibling routines treat this PALcode call as possibly clobbering v0 and set the return value afterward; here v0 is already live - confirm PALcode preserves v0
  89. ret zero, (ra) // return sum in v0
  90. //
  91. // We expect the store conditional will usually succeed the first time so it
  92. // is faster to branch forward (predicted not taken) to here and then branch
  93. // backward (predicted taken) to where we wanted to go.
  94. //
  95. #if !defined(NT_UP)
  96. 20: ENABLE_INTERRUPTS // enable interrupts
  97. 22: LDP t0, 0(a2) // read current lock value
  98. beq t0, 10b // if eq, lock not owned
  99. br zero, 22b // spin in cache until available
  100. #endif
  101. .end ExInterlockedAddLargeInteger
  102. SBTTL("Interlocked Add Large Statistic")
  103. //++
  104. //
  105. // VOID
  106. // ExInterlockedAddLargeStatistic (
  107. // IN PLARGE_INTEGER Addend,
  108. // IN ULONG Increment
  109. // )
  110. //
  111. // Routine Description:
  112. //
  113. // This function performs an interlocked add of an increment value to an
  114. // addend variable of type large integer.
  115. //
  116. // Arguments:
  117. //
  118. // Addend (a0) - Supplies a pointer to a variable whose value is to be
  119. // adjusted by the increment value.
  120. //
  121. // Increment (a1) - Supplies the increment value to be added to the
  122. // addend variable.
  123. //
  124. // Return Value:
  125. //
  126. // None.
  127. //
  128. // Implementation Note:
  129. //
  130. // This routine takes no spin lock; on Alpha the 64-bit update is performed
  131. // atomically with a single load locked/store conditional sequence, so the
  132. // carry into the high part of the statistic is applied atomically as well.
  133. //
  134. //--
  135. LEAF_ENTRY(ExInterlockedAddLargeStatistic)
  136. zap a1, 0xf0, a1 // zero extend increment value (clear bytes 4..7 so the 32-bit increment carries into the high longword)
  137. 10: ldq_l t0, 0(a0) // get addend - locked (entire 64-bit statistic)
  138. addq t0, a1, t0 // do the add
  139. stq_c t0, 0(a0) // store result - conditionally
  140. beq t0, 20f // if eq, store conditional failed
  141. ret zero, (ra) // return
  142. //
  143. // We expect the store conditional will usually succeed the first time so it
  144. // is faster to branch forward (predicted not taken) to here and then branch
  145. // backward (predicted taken) to where we wanted to go.
  146. //
  147. 20: br zero, 10b // try again
  148. .end ExInterlockedAddLargeStatistic
  149. SBTTL("Interlocked Add Unsigned Long")
  150. //++
  151. //
  152. // ULONG
  153. // ExInterlockedAddUlong (
  154. // IN PULONG Addend,
  155. // IN ULONG Increment,
  156. // IN PKSPIN_LOCK Lock
  157. // )
  158. //
  159. // Routine Description:
  160. //
  161. // This function performs an interlocked add of an increment value to an
  162. // addend variable of type unsigned long. The initial value of the addend
  163. // variable is returned as the function value.
  164. //
  165. // Arguments:
  166. //
  167. // Addend (a0) - Supplies a pointer to a variable whose value is to be
  168. // adjusted by the increment value.
  169. //
  170. // Increment (a1) - Supplies the increment value to be added to the
  171. // addend variable.
  172. //
  173. // Lock (a2) - Supplies a pointer to a spin lock to be used to synchronize
  174. // access to the addend variable.
  175. //
  176. // Return Value:
  177. //
  178. // The initial value of the addend variable.
  179. //
  180. // Implementation Note:
  181. //
  182. // The specification of this function requires that the given lock must be
  183. // used to synchronize the update even though on Alpha the operation can
  184. // actually be done atomically without using the specified lock.
  185. //
  186. //--
  187. LEAF_ENTRY(ExInterlockedAddUlong)
  188. 10: DISABLE_INTERRUPTS // (PALcode) v0 is clobbered
  189. #if !defined(NT_UP)
  190. LDP_L t0, 0(a2) // get current lock value - locked
  191. bne t0, 20f // if ne, spin lock still owned
  192. mov a2, t0 // set ownership value (lock address)
  193. STP_C t0, 0(a2) // set spin lock owned - conditionally
  194. beq t0, 20f // if eq, conditional store failed
  195. mb // synchronize memory access (acquire semantics after taking the lock)
  196. #endif
  197. //
  198. // Set the return value in t0 for now since PALcode may use v0.
  199. //
  200. ldl t0, 0(a0) // get addend value (return value also)
  201. addl t0, a1, t1 // compute adjusted value
  202. stl t1, 0(a0) // store updated value
  203. #if !defined(NT_UP)
  204. mb // synchronize memory access (release semantics before dropping the lock)
  205. STP zero, 0(a2) // set spin lock not owned
  206. #endif
  207. ENABLE_INTERRUPTS // (PALcode) v0 is clobbered
  208. mov t0, v0 // set return value (initial addend value)
  209. ret zero, (ra) // return
  210. //
  211. // We expect the store conditional will usually succeed the first time so it
  212. // is faster to branch forward (predicted not taken) to here and then branch
  213. // backward (predicted taken) to where we wanted to go.
  214. //
  215. #if !defined(NT_UP)
  216. 20: ENABLE_INTERRUPTS // (PALcode) v0 is clobbered
  217. 22: LDP t0, 0(a2) // read current lock value
  218. beq t0, 10b // try spinlock again if available
  219. br zero, 22b // spin in cache until available
  220. #endif
  221. .end ExInterlockedAddUlong
  222. SBTTL("Interlocked Exchange Unsigned Long")
  223. //++
  224. //
  225. // ULONG
  226. // ExInterlockedExchangeUlong (
  227. // IN PULONG Source,
  228. // IN ULONG Value,
  229. // IN PKSPIN_LOCK Lock
  230. // )
  231. //
  232. // Routine Description:
  233. //
  234. // This function performs an interlocked exchange of a longword value with
  235. // a longword in memory and returns the memory value.
  236. //
  237. // N.B. There is an alternate entry point provided for this routine which
  238. // is ALPHA target specific and whose prototype does not include the
  239. // spinlock parameter. Since the routine never refers to the spinlock
  240. // parameter, no additional code is required.
  241. //
  242. // Arguments:
  243. //
  244. // Source (a0) - Supplies a pointer to a variable whose value is to be
  245. // exchanged.
  246. //
  247. // Value (a1) - Supplies the value to exchange with the source value.
  248. //
  249. // Lock (a2) - Supplies a pointer to a spin lock to be used to synchronize
  250. // access to the source variable.
  251. //
  252. // Return Value:
  253. //
  254. // The source value is returned as the function value.
  255. //
  256. //--
  257. LEAF_ENTRY(ExInterlockedExchangeUlong)
  258. ALTERNATE_ENTRY(ExAlphaInterlockedExchangeUlong)
  259. 10: ldl_l v0, 0(a0) // get current source value - locked
  260. bis a1, zero, t0 // copy exchange value to scratch (store conditional consumes its source register; a1 must survive a retry)
  261. stl_c t0, 0(a0) // replace source value - conditionally
  262. beq t0, 20f // if eq, conditional store failed
  263. ret zero, (ra) // return old value to caller
  264. //
  265. // We expect the store conditional will usually succeed the first time so it
  266. // is faster to branch forward (predicted not taken) to here and then branch
  267. // backward (predicted taken) to where we wanted to go.
  268. //
  269. 20: br zero,10b // go retry the locked exchange sequence (no spin lock is used here)
  270. .end ExInterlockedExchangeUlong
  271. SBTTL("Interlocked Decrement Long")
  272. //++
  273. //
  274. // INTERLOCKED_RESULT
  275. // ExInterlockedDecrementLong (
  276. // IN PLONG Addend,
  277. // IN PKSPIN_LOCK Lock
  278. // )
  279. //
  280. // Routine Description:
  281. //
  282. // This function performs an interlocked decrement on an addend variable
  283. // of type signed long. The sign and whether the result is zero is returned
  284. // as the function value.
  285. //
  286. // N.B. There is an alternate entry point provided for this routine which
  287. // is ALPHA target specific and whose prototype does not include the
  288. // spinlock parameter. Since the routine never refers to the spinlock
  289. // parameter, no additional code is required.
  290. //
  291. // Arguments:
  292. //
  293. // Addend (a0) - Supplies a pointer to a variable whose value is to be
  294. // decremented.
  295. //
  296. // Lock (a1) - Supplies a pointer to a spin lock to be used to synchronize
  297. // access to the addend variable.
  298. //
  299. // Return Value:
  300. //
  301. // RESULT_NEGATIVE is returned if the resultant addend value is negative.
  302. // RESULT_ZERO is returned if the resultant addend value is zero.
  303. // RESULT_POSITIVE is returned if the resultant addend value is positive.
  304. //
  305. // Implementation Note:
  306. //
  307. // The specification of this function does not require that the given lock
  308. // be used to synchronize the update as long as the update is synchronized
  309. // somehow. On Alpha a single load locked-store conditional does the job.
  310. //
  311. //--
  312. LEAF_ENTRY(ExInterlockedDecrementLong)
  313. ALTERNATE_ENTRY(ExAlphaInterlockedDecrementLong)
  314. 10: ldl_l v0, 0(a0) // get current addend value - locked
  315. subl v0, 1, v0 // decrement addend value
  316. mov v0, t0 // copy updated value to t0 for store
  317. stl_c t0, 0(a0) // store updated value - conditionally
  318. beq t0, 20f // if eq, conditional store failed
  319. //
  320. // Determine the INTERLOCKED_RESULT value based on the updated addend value.
  321. // N.B. RESULT_ZERO = 0, RESULT_NEGATIVE = 1, RESULT_POSITIVE = 2.
  322. //
  323. sra v0, 63, t0 // replicate the sign bit to every bit (v0 holds a canonically sign-extended longword, so t0 = 0 or -1)
  324. addl t0, 2, t0 // if t0 = 0 return 2, if -1 return 1
  325. cmovne v0, t0, v0 // if v0 = 0 return 0
  326. ret zero, (ra) // return
  327. //
  328. // We expect the store conditional will usually succeed the first time so it
  329. // is faster to branch forward (predicted not taken) to here and then branch
  330. // backward (predicted taken) to where we wanted to go.
  331. //
  332. 20: br zero, 10b // go retry the locked decrement sequence (no spin lock is used here)
  333. .end ExInterlockedDecrementLong
  334. SBTTL("Interlocked Increment Long")
  335. //++
  336. //
  337. // INTERLOCKED_RESULT
  338. // ExInterlockedIncrementLong (
  339. // IN PLONG Addend,
  340. // IN PKSPIN_LOCK Lock
  341. // )
  342. //
  343. // Routine Description:
  344. //
  345. // This function performs an interlocked increment on an addend variable
  346. // of type signed long. The sign and whether the result is zero is returned
  347. // as the function value.
  348. //
  349. // N.B. There is an alternate entry point provided for this routine which
  350. // is ALPHA target specific and whose prototype does not include the
  351. // spinlock parameter. Since the routine never refers to the spinlock
  352. // parameter, no additional code is required.
  353. //
  354. // Arguments:
  355. //
  356. // Addend (a0) - Supplies a pointer to a variable whose value is to be
  357. // incremented.
  358. //
  359. // Lock (a1) - Supplies a pointer to a spin lock to be used to synchronize
  360. // access to the addend variable.
  361. //
  362. // Return Value:
  363. //
  364. // RESULT_NEGATIVE is returned if the resultant addend value is negative.
  365. // RESULT_ZERO is returned if the resultant addend value is zero.
  366. // RESULT_POSITIVE is returned if the resultant addend value is positive.
  367. //
  368. // Implementation Note:
  369. //
  370. // The specification of this function does not require that the given lock
  371. // be used to synchronize the update as long as the update is synchronized
  372. // somehow. On Alpha a single load locked-store conditional does the job.
  373. //
  374. //--
  375. LEAF_ENTRY(ExInterlockedIncrementLong)
  376. ALTERNATE_ENTRY(ExAlphaInterlockedIncrementLong)
  377. 10: ldl_l v0, 0(a0) // get current addend value - locked
  378. addl v0, 1, v0 // increment addend value
  379. mov v0, t0 // copy updated value to t0 for store
  380. stl_c t0, 0(a0) // store updated value - conditionally
  381. beq t0, 20f // if eq, conditional store failed
  382. //
  383. // Determine the INTERLOCKED_RESULT value based on the updated addend value.
  384. // N.B. RESULT_ZERO = 0, RESULT_NEGATIVE = 1, RESULT_POSITIVE = 2.
  385. //
  386. sra v0, 63, t0 // replicate the sign bit to every bit (v0 holds a canonically sign-extended longword, so t0 = 0 or -1)
  387. addl t0, 2, t0 // if t0 = 0 return 2, if -1 return 1
  388. cmovne v0, t0, v0 // if v0 = 0 return 0
  389. ret zero, (ra) // return
  390. //
  391. // We expect the store conditional will usually succeed the first time so it
  392. // is faster to branch forward (predicted not taken) to here and then branch
  393. // backward (predicted taken) to where we wanted to go.
  394. //
  395. 20: br zero, 10b // go retry the locked increment sequence (no spin lock is used here)
  396. .end ExInterlockedIncrementLong
  397. SBTTL("Interlocked Insert Head List")
  398. //++
  399. //
  400. // PLIST_ENTRY
  401. // ExInterlockedInsertHeadList (
  402. // IN PLIST_ENTRY ListHead,
  403. // IN PLIST_ENTRY ListEntry,
  404. // IN PKSPIN_LOCK Lock
  405. // )
  406. //
  407. // Routine Description:
  408. //
  409. // This function inserts an entry at the head of a doubly linked list
  410. // so that access to the list is synchronized in a multiprocessor system.
  411. //
  412. // Arguments:
  413. //
  414. // ListHead (a0) - Supplies a pointer to the head of the doubly linked
  415. // list into which an entry is to be inserted.
  416. //
  417. // ListEntry (a1) - Supplies a pointer to the entry to be inserted at the
  418. // head of the list.
  419. //
  420. // Lock (a2) - Supplies a pointer to a spin lock to be used to synchronize
  421. // access to the list.
  422. //
  423. // Return Value:
  424. //
  425. // Pointer to entry that was at the head of the list or NULL if the list
  426. // was empty.
  427. //
  428. //--
  429. LEAF_ENTRY(ExInterlockedInsertHeadList)
  430. 10: DISABLE_INTERRUPTS // (PALcode) v0 is clobbered
  431. #if !defined(NT_UP)
  432. LDP_L t0, 0(a2) // get current lock value - locked
  433. bne t0, 20f // if ne, spin lock still owned
  434. mov a2, t0 // set ownership value (lock address)
  435. STP_C t0, 0(a2) // set spin lock owned - conditionally
  436. beq t0, 20f // if eq, conditional store failed
  437. mb // synchronize memory access (acquire semantics after taking the lock)
  438. #endif
  439. //
  440. // Set the return value in t0 for now since PALcode may use v0.
  441. //
  442. LDP t0, LsFlink(a0) // get address of first entry (return value also)
  443. STP t0, LsFlink(a1) // store next link in entry
  444. STP a0, LsBlink(a1) // store previous link in entry
  445. STP a1, LsBlink(t0) // store previous link in next
  446. STP a1, LsFlink(a0) // store next link in head
  447. #if !defined(NT_UP)
  448. mb // synchronize memory access (release semantics before dropping the lock)
  449. STP zero, 0(a2) // set spin lock not owned
  450. #endif
  451. ENABLE_INTERRUPTS // (PALcode) v0 is clobbered
  452. xor t0, a0, v0 // if t0=a0, list empty, set v0 to NULL
  453. cmovne v0, t0, v0 // else return previous entry at head
  454. ret zero, (ra) // return
  455. //
  456. // We expect the store conditional will usually succeed the first time so it
  457. // is faster to branch forward (predicted not taken) to here and then branch
  458. // backward (predicted taken) to where we wanted to go.
  459. //
  460. #if !defined(NT_UP)
  461. 20: ENABLE_INTERRUPTS // (PALcode) v0 is clobbered
  462. 22: LDP t0, 0(a2) // read current lock value
  463. beq t0, 10b // try spinlock again if available
  464. br zero, 22b // spin in cache until available
  465. #endif
  466. .end ExInterlockedInsertHeadList
  467. SBTTL("Interlocked Insert Tail List")
  468. //++
  469. //
  470. // PLIST_ENTRY
  471. // ExInterlockedInsertTailList (
  472. // IN PLIST_ENTRY ListHead,
  473. // IN PLIST_ENTRY ListEntry,
  474. // IN PKSPIN_LOCK Lock
  475. // )
  476. //
  477. // Routine Description:
  478. //
  479. // This function inserts an entry at the tail of a doubly linked list
  480. // so that access to the list is synchronized in a multiprocessor system.
  481. //
  482. // Arguments:
  483. //
  484. // ListHead (a0) - Supplies a pointer to the head of the doubly linked
  485. // list into which an entry is to be inserted.
  486. //
  487. // ListEntry (a1) - Supplies a pointer to the entry to be inserted at the
  488. // tail of the list.
  489. //
  490. // Lock (a2) - Supplies a pointer to a spin lock to be used to synchronize
  491. // access to the list.
  492. //
  493. // Return Value:
  494. //
  495. // Pointer to entry that was at the tail of the list or NULL if the list
  496. // was empty.
  497. //
  498. //--
  499. LEAF_ENTRY(ExInterlockedInsertTailList)
  500. 10: DISABLE_INTERRUPTS // (PALcode) v0 is clobbered
  501. #if !defined(NT_UP)
  502. LDP_L t0, 0(a2) // get current lock value - locked
  503. bne t0, 20f // if ne, spin lock still owned
  504. mov a2, t0 // set ownership value (lock address)
  505. STP_C t0, 0(a2) // set spin lock owned - conditionally
  506. beq t0, 20f // if eq, conditional store failed
  507. mb // synchronize memory access (acquire semantics after taking the lock)
  508. #endif
  509. //
  510. // Set the return value in t0 for now since PALcode may use v0.
  511. //
  512. LDP t0, LsBlink(a0) // get address of old tail entry (return value also)
  513. STP a0, LsFlink(a1) // store next link in entry (head follows the new tail)
  514. STP t0, LsBlink(a1) // store previous link in entry (old tail precedes it)
  515. STP a1, LsBlink(a0) // store new entry as previous link in head
  516. STP a1, LsFlink(t0) // store new entry as next link in old tail
  517. #if !defined(NT_UP)
  518. mb // synchronize memory access (release semantics before dropping the lock)
  519. STP zero, 0(a2) // set spin lock not owned
  520. #endif
  521. ENABLE_INTERRUPTS // (PALcode) v0 is clobbered
  522. xor t0, a0, v0 // if t0=a0, list empty, set v0 to NULL
  523. cmovne v0, t0, v0 // else return previous entry at tail
  524. ret zero, (ra) // return
  525. //
  526. // We expect the store conditional will usually succeed the first time so it
  527. // is faster to branch forward (predicted not taken) to here and then branch
  528. // backward (predicted taken) to where we wanted to go.
  529. //
  530. #if !defined(NT_UP)
  531. 20: ENABLE_INTERRUPTS // (PALcode) v0 is clobbered
  532. 22: LDP t0, 0(a2) // read current lock value
  533. beq t0, 10b // try spinlock again if available
  534. br zero, 22b // spin in cache until available
  535. #endif
  536. .end ExInterlockedInsertTailList
  537. SBTTL("Interlocked Remove Head List")
  538. //++
  539. //
  540. // PLIST_ENTRY
  541. // ExInterlockedRemoveHeadList (
  542. // IN PLIST_ENTRY ListHead,
  543. // IN PKSPIN_LOCK Lock
  544. // )
  545. //
  546. // Routine Description:
  547. //
  548. // This function removes an entry from the head of a doubly linked list
  549. // so that access to the list is synchronized in a multiprocessor system.
  550. // If there are no entries in the list, then a value of NULL is returned.
  551. // Otherwise, the address of the entry that is removed is returned as the
  552. // function value.
  553. //
  554. // Arguments:
  555. //
  556. // ListHead (a0) - Supplies a pointer to the head of the doubly linked
  557. // list from which an entry is to be removed.
  558. //
  559. // Lock (a1) - Supplies a pointer to a spin lock to be used to synchronize
  560. // access to the list.
  561. //
  562. // Return Value:
  563. //
  564. // The address of the entry removed from the list, or NULL if the list is
  565. // empty.
  566. //
  567. //--
  568. LEAF_ENTRY(ExInterlockedRemoveHeadList)
  569. 10: DISABLE_INTERRUPTS // (PALcode) v0 is clobbered
  570. #if !defined(NT_UP)
  571. LDP_L t0, 0(a1) // get current lock value - locked
  572. bne t0, 30f // if ne, spin lock still owned
  573. mov a1, t0 // set ownership value (lock address)
  574. STP_C t0, 0(a1) // set spin lock owned - conditionally
  575. beq t0, 30f // if eq, conditional store failed
  576. mb // synchronize memory access (acquire semantics after taking the lock)
  577. #endif
  578. //
  579. // Set the return value in t0 for now since PALcode may use v0.
  580. //
  581. LDP t2, LsFlink(a0) // get address of first entry
  582. xor t2, a0, t0 // if t2=a0, list empty, set t0 to NULL
  583. beq t0, 20f // if eq, list is empty
  584. LDP t1, LsFlink(t2) // get address of second entry
  585. STP t1, LsFlink(a0) // store address of next in head
  586. STP a0, LsBlink(t1) // store address of previous in next
  587. mov t2, t0 // return the address of entry removed
  588. 20: //
  589. #if !defined(NT_UP)
  590. mb // synchronize memory access (release semantics before dropping the lock)
  591. STP zero, 0(a1) // set spin lock not owned
  592. #endif
  593. ENABLE_INTERRUPTS // (PALcode) v0 is clobbered
  594. mov t0, v0 // set return value
  595. ret zero, (ra) // return
  596. //
  597. // We expect the store conditional will usually succeed the first time so it
  598. // is faster to branch forward (predicted not taken) to here and then branch
  599. // backward (predicted taken) to where we wanted to go.
  600. //
  601. #if !defined(NT_UP)
  602. 30: ENABLE_INTERRUPTS // (PALcode) v0 is clobbered
  603. 32: LDP t0, 0(a1) // read current lock value
  604. beq t0, 10b // try spinlock again if available
  605. br zero, 32b // spin in cache until available
  606. #endif
  607. .end ExInterlockedRemoveHeadList
  608. SBTTL("Interlocked Pop Entry List")
  609. //++
  610. //
  611. // PSINGLE_LIST_ENTRY
  612. // ExInterlockedPopEntryList (
  613. // IN PSINGLE_LIST_ENTRY ListHead,
  614. // IN PKSPIN_LOCK Lock
  615. // )
  616. //
  617. // Routine Description:
  618. //
  619. // This function removes an entry from the head of a singly linked list
  620. // so that access to the list is synchronized in a multiprocessor system.
  621. // If there are no entries in the list, then a value of NULL is returned.
  622. // Otherwise, the address of the entry that is removed is returned as the
  623. // function value.
  624. //
  625. // Arguments:
  626. //
  627. // ListHead (a0) - Supplies a pointer to the head of the singly linked
  628. // list from which an entry is to be removed.
  629. //
  630. // Lock (a1) - Supplies a pointer to a spin lock to be used to synchronize
  631. // access to the list.
  632. //
  633. // Return Value:
  634. //
  635. // The address of the entry removed from the list, or NULL if the list is
  636. // empty.
  637. //
  638. //--
  639. LEAF_ENTRY(ExInterlockedPopEntryList)
  640. 10: DISABLE_INTERRUPTS // (PALcode) v0 is clobbered
  641. #if !defined(NT_UP)
  642. LDP_L t0, 0(a1) // get current lock value - locked
  643. bne t0, 30f // if ne, spin lock still owned
  644. mov a1, t0 // set ownership value (lock address)
  645. STP_C t0, 0(a1) // set spin lock owned - conditionally
  646. beq t0, 30f // if eq, conditional store failed
  647. mb // synchronize memory access (acquire semantics after taking the lock)
  648. #endif
  649. //
  650. // Set the return value in t0 for now since PALcode may use v0.
  651. //
  652. LDP t0, 0(a0) // get address of first entry (return value also)
  653. beq t0, 20f // if eq [NULL], list is empty
  654. LDP t1, 0(t0) // get address of next entry
  655. STP t1, 0(a0) // store address of next in head (unlink first entry)
  656. 20: //
  657. #if !defined(NT_UP)
  658. mb // synchronize memory access (release semantics before dropping the lock)
  659. STP zero, 0(a1) // set spin lock not owned
  660. #endif
  661. ENABLE_INTERRUPTS // (PALcode) v0 is clobbered
  662. mov t0, v0 // set return value
  663. ret zero, (ra) // return
  664. //
  665. // We expect the store conditional will usually succeed the first time so it
  666. // is faster to branch forward (predicted not taken) to here and then branch
  667. // backward (predicted taken) to where we wanted to go.
  668. //
  669. #if !defined(NT_UP)
  670. 30: ENABLE_INTERRUPTS // (PALcode) v0 is clobbered
  671. 32: LDP t0, 0(a1) // read current lock value
  672. beq t0, 10b // try spinlock again if available
  673. br zero, 32b // spin in cache until available
  674. #endif
  675. .end ExInterlockedPopEntryList
  676. SBTTL("Interlocked Push Entry List")
  677. //++
  678. //
  679. // PSINGLE_LIST_ENTRY
  680. // ExInterlockedPushEntryList (
  681. // IN PSINGLE_LIST_ENTRY ListHead,
  682. // IN PSINGLE_LIST_ENTRY ListEntry,
  683. // IN PKSPIN_LOCK Lock
  684. // )
  685. //
  686. // Routine Description:
  687. //
  688. // This function inserts an entry at the head of a singly linked list
  689. // so that access to the list is synchronized in a multiprocessor system.
  690. //
  691. // Arguments:
  692. //
  693. // ListHead (a0) - Supplies a pointer to the head of the singly linked
  694. // list into which an entry is to be inserted.
  695. //
  696. // ListEntry (a1) - Supplies a pointer to the entry to be inserted at the
  697. // head of the list.
  698. //
  699. // Lock (a2) - Supplies a pointer to a spin lock to be used to synchronize
  700. // access to the list.
  701. //
  702. // Return Value:
  703. //
  704. // Previous contents of ListHead. NULL implies list went from empty
  705. // to not empty.
  706. //
  707. //--
  708. LEAF_ENTRY(ExInterlockedPushEntryList)
  709. 10: DISABLE_INTERRUPTS // (PALcode) v0 is clobbered
  710. #if !defined(NT_UP)
  711. LDP_L t0, 0(a2) // get current lock value - locked
  712. bne t0, 20f // if ne, spin lock still owned
  713. mov a2, t0 // set ownership value (lock address)
  714. STP_C t0, 0(a2) // set spin lock owned - conditionally
  715. beq t0, 20f // if eq, conditional store failed
  716. mb // synchronize memory access (acquire semantics after taking the lock)
  717. #endif
  718. //
  719. // Set the return value in t0 for now since PALcode may use v0.
  720. //
  721. LDP t0, 0(a0) // get address of first entry (return value also)
  722. STP t0, 0(a1) // set address of next in new entry (new entry points at old first)
  723. STP a1, 0(a0) // set address of first entry (new entry becomes the head)
  724. #if !defined(NT_UP)
  725. mb // synchronize memory access (release semantics before dropping the lock)
  726. STP zero, 0(a2) // set spin lock not owned
  727. #endif
  728. ENABLE_INTERRUPTS // (PALcode) v0 is clobbered
  729. mov t0, v0 // set return value
  730. ret zero, (ra) // return
  731. //
  732. // We expect the store conditional will usually succeed the first time so it
  733. // is faster to branch forward (predicted not taken) to here and then branch
  734. // backward (predicted taken) to where we wanted to go.
  735. //
  736. #if !defined(NT_UP)
  737. 20: ENABLE_INTERRUPTS // (PALcode) v0 is clobbered
  738. 22: LDP t0, 0(a2) // read current lock value
  739. beq t0, 10b // try spinlock again if available
  740. br zero, 22b // spin in cache until available
  741. #endif
  742. .end ExInterlockedPushEntryList
  743. SBTTL("Interlocked Compare Exchange")
  744. //++
  745. //
  746. // PVOID
  747. // InterlockedCompareExchange (
  748. // IN OUT PVOID *Destination,
  749. // IN PVOID Exchange,
  750. // IN PVOID Comperand
  751. // )
  752. //
  753. // Routine Description:
  754. //
  755. // This function performs an interlocked compare of the destination
  756. // value with the comperand value. If the destination value is equal
  757. // to the comperand value, then the exchange value is stored in the
  758. // destination. Otherwise, no operation is performed.
  759. //
  760. // Arguments:
  761. //
  762. // Destination (a0) - Supplies a pointer to the destination value.
  763. //
  764. // Exchange (a1) - Supplies the exchange.
  765. //
  766. // Comperand (a2) - Supplies the comperand value.
  767. //
  768. // Return Value:
  769. //
  770. // The initial destination value is returned as the function value.
  771. //
  772. //--
  773. LEAF_ENTRY(InterlockedCompareExchange)
  774. 10: //
  775. #if !defined(NT_UP)
  776. mb // synchronize memory access (barrier before the update)
  777. #endif
  778. ldl_l v0, 0(a0) // get current value - locked
  779. bis a1, zero, t0 // copy exchange value for store (store conditional consumes its source register)
  780. cmpeq v0, a2, t1 // check if operands match
  781. beq t1, 20f // if eq, operands mismatch - return current value without storing
  782. stl_c t0, 0(a0) // store exchange value - conditionally
  783. beq t0,25f // if eq, store conditional failed
  784. #if !defined(NT_UP)
  785. mb // synchronize memory access (barrier after a successful exchange)
  786. #endif
  787. 20: ret zero, (ra) // return initial destination value in v0
  788. //
  789. // We expect the store conditional will usually succeed the first time so it
  790. // is faster to branch forward (predicted not taken) to here and then branch
  791. // backward (predicted taken) to where we wanted to go.
  792. //
  793. 25: br zero, 10b // go retry the compare exchange sequence (no spin lock is used here)
  794. .end InterlockedCompareExchange
  795. SBTTL("Interlocked Exchange Add")
  796. //++
  797. //
  798. // LONG
  799. // InterlockedExchangeAdd (
  800. // IN PLONG Addend,
  801. // IN LONG Increment
  802. // )
  803. //
  804. // Routine Description:
  805. //
  806. // This function performs an interlocked add of an increment value to an
  807. // addend variable of type unsigned long. The initial value of the addend
  808. // variable is returned as the function value.
  809. //
  810. // Arguments:
  811. //
  812. // Addend (a0) - Supplies a pointer to a variable whose value is to be
  813. // adjusted by the increment value.
  814. //
  815. // Increment (a1) - Supplies the increment value to be added to the
  816. // addend variable.
  817. //
  818. // Return Value:
  819. //
  820. // The initial value of the addend variable.
  821. //
  822. //--
  823. LEAF_ENTRY(InterlockedExchangeAdd)
  824. 10: //
  825. #if !defined(NT_UP)
  826. mb // synchronize memory access (barrier before the update)
  827. #endif
  828. ldl_l v0, 0(a0) // get current addend value - locked (this is the return value)
  829. addl v0, a1, t0 // increment addend value
  830. stl_c t0, 0(a0) // store updated value - conditionally
  831. beq t0, 20f // if eq, conditional store failed
  832. #if !defined(NT_UP)
  833. mb // synchronize memory access (barrier after a successful update)
  834. #endif
  835. ret zero, (ra) // return initial addend value in v0
  836. //
  837. // We expect the store conditional will usually succeed the first time so it
  838. // is faster to branch forward (predicted not taken) to here and then branch
  839. // backward (predicted taken) to where we wanted to go.
  840. //
  841. 20: br zero, 10b // go retry the locked add sequence (no spin lock is used here)
  842. .end InterlockedExchangeAdd
  843. SBTTL("Interlocked Pop Entry Sequenced List")
  844. //++
  845. //
  846. // PSINGLE_LIST_ENTRY
  847. // ExpInterlockedPopEntrySList (
  848. // IN PSLIST_HEADER ListHead
  849. // )
  850. //
  851. // Routine Description:
  852. //
  853. // This function removes an entry from the front of a sequenced singly
  854. // linked list so that access to the list is synchronized in a MP system.
  855. // If there are no entries in the list, then a value of NULL is returned.
  856. // Otherwise, the address of the entry that is removed is returned as the
  857. // function value.
  858. //
  859. // Arguments:
  860. //
  861. // ListHead (a0) - Supplies a pointer to the sequenced listhead from which
  862. // an entry is to be removed.
  863. //
  864. // Return Value:
  865. //
  866. // The address of the entry removed from the list, or NULL if the list is
  867. // empty.
  868. //
  869. //--
  870. LEAF_ENTRY(ExpInterlockedPopEntrySList)
  871. //
  872. // N.B. The following code is the continuation address should a fault
  873. // occur in the rare case described below.
  874. //
  875. ALTERNATE_ENTRY(ExpInterlockedPopEntrySListResume)
  876. 10: ldq t0, 0(a0) // get 64-bit listhead: entry address + depth/sequence
  877. #if defined(_AXP64_)
  878. sra t0, 63 - 42, v0 // extract next entry address
  879. bic v0, 7, v0 // clear low packing bits of address
  880. beq v0, 30f // if eq, list is empty
  881. bis t0, zero, t1 // copy depth and sequence
  882. #else
  883. addl t0, zero, v0 // sign extend next entry address
  884. beq v0, 30f // if eq, list is empty
  885. srl t0, 32, t1 // shift depth/sequence to low 32-bits
  886. #endif
  887. #if !defined(NT_UP)
  888. mb // MP only - order listhead read before entry read
  889. #endif
  890. //
  891. // N.B. It is possible for the following instruction to fault in the rare
  892. // case where the first entry in the list is allocated on another
  893. // processor and freed between the time the free pointer is read above
  894. // and the following instruction. When this happens, the access fault
  895. // code continues execution above at the resumption address and the
  896. // entire operation is retried.
  897. //
  898. ALTERNATE_ENTRY(ExpInterlockedPopEntrySListFault)
  899. LDP t5, 0(v0) // get address of successor entry
  900. #if defined(_AXP64_)
  901. sll t5, 63 - 42, t2 // shift address into position
  902. #else
  903. zapnot t5, 0xf ,t2 // clear high 32-bits for merge
  904. #endif
  905. ldq_l t3, 0(a0) // reload next entry address and sequence
  906. ldil t5, 0xffff // 0xffff = 0x10000 - 1: decrement list depth and
  907. addl t1, t5, t1 // increment sequence number in one add
  908. #if defined(_AXP64_)
  909. zapnot t1, 0x7, t1 // clear upper five bytes
  910. #else
  911. sll t1, 32, t1 // shift depth and sequence into position
  912. #endif
  913. cmpeq t0, t3, t4 // check if listhead has changed
  914. beq t4, 15f // if eq, listhead changed
  915. bis t1, t2, t1 // merge address, depth, and sequence
  916. stq_c t1, 0(a0) // store next entry address and sequence
  917. beq t1, 15f // if eq, store conditional failed
  918. #if !defined(NT_UP)
  919. mb // MP only - order pop before later accesses
  920. #endif
  921. 30: ret zero, (ra) // return removed entry (or NULL) in v0
  922. //
  923. // Conditional store attempt failed or listhead changed.
  924. //
  925. 15: br zero, 10b // retry entire pop sequence
  926. .end ExpInterlockedPopEntrySList
  927. SBTTL("Interlocked Push Entry Sequenced List")
  928. //++
  929. //
  930. // PSINGLE_LIST_ENTRY
  931. // ExpInterlockedPushEntrySList (
  932. // IN PSLIST_HEADER ListHead,
  933. // IN PSINGLE_LIST_ENTRY ListEntry
  934. // )
  935. //
  936. // Routine Description:
  937. //
  938. // This function inserts an entry at the head of a sequenced singly linked
  939. // list so that access to the list is synchronized in an MP system.
  940. //
  941. // Arguments:
  942. //
  943. // ListHead (a0) - Supplies a pointer to the sequenced listhead into which
  944. // an entry is to be inserted.
  945. //
  946. // ListEntry (a1) - Supplies a pointer to the entry to be inserted at the
  947. // head of the list.
  948. //
  949. // Return Value:
  950. //
  951. // Previous contents of ListHead. NULL implies list went from empty
  952. // to not empty.
  953. //
  954. //--
  955. LEAF_ENTRY(ExpInterlockedPushEntrySList)
  956. 10: ldq t0, 0(a0) // get 64-bit listhead: entry address + depth/sequence
  957. #if defined(_AXP64_)
  958. sra t0, 63 - 42, v0 // extract next entry address
  959. bic v0, 7, v0 // clear low packing bits of address
  960. bis t0, zero, t1 // copy depth and sequence number
  961. #else
  962. addl t0, zero, v0 // sign extend next entry address
  963. srl t0, 32, t1 // shift depth/sequence to low 32-bits
  964. #endif
  965. STP v0, 0(a1) // set next link in new first entry
  966. #if !defined(NT_UP)
  967. mb // MP only - order next link write before listhead update
  968. #endif
  969. #if defined(_AXP64_)
  970. sll a1, 63 - 42, t2 // shift address into position
  971. #else
  972. zapnot a1, 0xf, t2 // zero extend new first entry
  973. #endif
  974. ldq_l t3, 0(a0) // reload next entry address and sequence
  975. ldah t5, 1(zero) // sequence adjustment value (0x10000)
  976. addl t1, 1, t1 // increment list depth
  977. addl t1, t5, t1 // increment sequence number
  978. #if defined(_AXP64_)
  979. zapnot t1, 0x7, t1 // clear upper five bytes
  980. #else
  981. sll t1, 32, t1 // shift depth and sequence into position
  982. #endif
  983. cmpeq t0, t3, t4 // check if listhead changed
  984. beq t4, 15f // if eq, listhead changed
  985. bis t1, t2, t2 // merge address, depth, and sequence
  986. stq_c t2, 0(a0) // store next entry address and sequence
  987. beq t2, 15f // if eq, store conditional failed
  988. ret zero, (ra) // return previous first entry in v0
  989. //
  990. // Conditional store attempt failed or listhead changed.
  991. //
  992. 15: br zero, 10b // retry entire push sequence
  993. .end ExpInterlockedPushEntrySList
  994. SBTTL("Interlocked Flush Sequenced List")
  995. //++
  996. //
  997. // PSINGLE_LIST_ENTRY
  998. // ExpInterlockedFlushSList (
  999. // IN PSLIST_HEADER ListHead
  1000. // )
  1001. //
  1002. // Routine Description:
  1003. //
  1004. // This function flushes the entire list of entries on a sequenced singly
  1005. // linked list so that access to the list is synchronized in a MP system.
  1006. // If there are no entries in the list, then a value of NULL is returned.
  1007. // Otherwise, the address of the 1st entry on the list is returned as the
  1008. // function value.
  1009. //
  1010. // Arguments:
  1011. //
  1012. // ListHead (a0) - Supplies a pointer to the sequenced listhead from which
  1013. // all entries are to be removed.
  1014. //
  1015. // Return Value:
  1016. //
  1017. // The address of the first entry that was on the list, or NULL if the
  1018. // list was empty.
  1019. //
  1020. //--
  1021. LEAF_ENTRY(ExpInterlockedFlushSList)
  1022. and t1, zero, t1 // t1 = 0 - empty listhead (NULL, zero depth/sequence)
  1023. 10: ldq_l t0, 0(a0) // get next entry address and sequence - locked
  1024. stq_c t1, 0(a0) // store new (empty) listhead value
  1025. beq t1, 15f // if eq, store conditional failed
  1026. #if defined(_AXP64_)
  1027. sra t0, 63 - 42, v0 // extract next entry address
  1028. bic v0, 7, v0 // clear low packing bits of address
  1029. #else
  1030. addl t0, zero, v0 // sign extend next entry address
  1031. #endif
  1032. ret zero, (ra) // return old first entry (or NULL) in v0
  1033. //
  1034. // Conditional store attempt failed. N.B. a failed stq_c leaves t1 = 0,
  1035. // which is still the desired new listhead value, so no reinitialization.
  1036. 15: br zero, 10b // retry, store conditional failed
  1037. .end ExpInterlockedFlushSList
  1038. SBTTL("Interlocked Compare Exchange 64-bits")
  1039. //++
  1040. //
  1041. // LONGLONG
  1042. // ExpInterlockedCompareExchange64 (
  1043. // IN PLONGLONG Destination,
  1044. // IN PLONGLONG Exchange,
  1045. // IN PLONGLONG Comperand
  1046. // )
  1047. //
  1048. // Routine Description:
  1049. //
  1050. // This function performs an interlocked compare and exchange of 64-bits.
  1051. //
  1052. // Arguments:
  1053. //
  1054. // Destination (a0) - Supplies a pointer to the destination variable.
  1055. //
  1056. // Exchange (a1) - Supplies a pointer to the exchange value.
  1057. //
  1058. // Comperand (a2) - Supplies a pointer to the comperand value.
  1059. //
  1060. // Return Value:
  1061. //
  1062. // The initial destination value is returned as the function value.
  1063. //
  1064. //--
  1065. LEAF_ENTRY(ExpInterlockedCompareExchange64)
  1066. ldq t0, 0(a1) // get exchange value
  1067. ldq t1, 0(a2) // get comperand value
  1068. 10: ldq_l v0, 0(a0) // get current destination value - locked
  1069. bis t0, zero, t2 // set exchange value for conditional store
  1070. cmpeq v0, t1, t3 // check if current and comperand match
  1071. beq t3, 20f // if eq, current and comperand mismatch
  1072. stq_c t2, 0(a0) // store exchange value - conditionally
  1073. beq t2, 30f // if eq, store conditional failed
  1074. 20: ret zero, (ra) // return initial destination value in v0
  1075. //
  1076. // Conditional store attempt failed.
  1077. //
  1078. 30: br zero, 10b // retry load locked/store conditional
  1079. .end ExpInterlockedCompareExchange64
  1080. SBTTL("Interlocked Compare Exchange 64-bits")
  1081. //++
  1082. //
  1083. // LONGLONG
  1084. // InterlockedCompareExchange64 (
  1085. // IN PLONGLONG Destination,
  1086. // IN LONGLONG Exchange,
  1087. // IN LONGLONG Comperand
  1088. // )
  1089. //
  1090. // Routine Description:
  1091. //
  1092. // This function performs an interlocked compare and exchange of 64-bits.
  1093. //
  1094. // Arguments:
  1095. //
  1096. // Destination (a0) - Supplies a pointer to the destination variable.
  1097. //
  1098. // Exchange (a1) - Supplies the exchange value.
  1099. //
  1100. // Comperand (a2) - Supplies the comperand value.
  1101. //
  1102. // Return Value:
  1103. //
  1104. // The initial destination value is returned as the function value.
  1105. //
  1106. //--
  1107. #if !defined(_AXP64_)
  1108. LEAF_ENTRY(InterlockedCompareExchange64)
  1109. 10: ldq_l v0, 0(a0) // get current destination value - locked
  1110. bis a1, zero, t2 // set exchange value for conditional store
  1111. cmpeq v0, a2, t3 // check if current and comperand match
  1112. beq t3, 20f // if eq, current and comperand mismatch
  1113. stq_c t2, 0(a0) // store exchange value - conditionally
  1114. beq t2, 10b // if eq, store conditional failed - retry
  1115. 20: ret zero, (ra) // return initial destination value in v0
  1116. .end InterlockedCompareExchange64
  1117. #endif