Source code of Windows XP (NT5)

#include "common.h"
#include "rtinfo.h"
#include <rt.h>
#include "rtp.h"

//#define SPEW 1

extern volatile ULONG currentthread;
extern volatile ULONG windowsthread;

#define INITIALSPINLOCKVALUE 0

// We need to make the Acquire and Release spinlock calls safe for
// real time threads. We do this by ALWAYS acquiring
// the spinlock. We have to acquire spinlocks now since real time threads
// can run when normal windows threads are running at raised irql.

volatile ULONG *pCurrentRtThread=&(volatile ULONG)currentthread;

BOOL (*TransferControl)(WORD State, ULONG Data, BOOL (*DoTransfer)(PVOID), PVOID Context)=RtpTransferControl;

VOID (*ForceAtomic)(VOID (*AtomicOperation)(PVOID), PVOID Context)=RtpForceAtomic;
// Trivial DoTransfer callback passed to TransferControl when the caller
// always wants the transfer to proceed.
BOOL True(PVOID Context)
{
    return TRUE;
}
// Warning 4035 (no return value) is disabled because CompareExchange
// returns its result in eax directly from the inline assembly.
#pragma warning( disable : 4035 )

// If value==*destination, then *destination=source and function returns
// true. Otherwise *destination is unchanged and function returns false.

ULONG __inline CompareExchange(ULONG *destination, ULONG source, ULONG value)
{
    ASSERT( destination!=NULL );
    ASSERT( source!=value );

    __asm {
        mov eax,value           // eax = value we expect to find at *destination
        mov ecx,source          // ecx = new value to store on success
        mov edx,destination
        lock cmpxchg [edx],ecx  // if (*destination==eax) *destination=ecx and ZF is set
        mov eax,0
        jnz done                // ZF clear: compare failed, return 0
        inc eax                 // ZF set: exchange done, return 1
done:
    }
}

#pragma warning( default : 4035 )
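// Illustrative note (not from the original source): on compilers where the
// interlocked intrinsics are available, the boolean compare-exchange above
// could be sketched without inline assembly roughly as follows, assuming
// InterlockedCompareExchange returns the prior contents of *destination.
// CompareExchangePortable is a made-up name used only for this sketch.
//
//     ULONG __inline CompareExchangePortable(ULONG *destination, ULONG source, ULONG value)
//     {
//         return (ULONG)((ULONG)InterlockedCompareExchange((volatile LONG *)destination,
//                                                           (LONG)source,
//                                                           (LONG)value) == value);
//     }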
/*

To enable realtime threads to be switched out while they are holding spinlocks,
I had to extend the significance and use of the KSPIN_LOCK variable.

Previous NT spinlock code only ever sets or clears the bottom bit of a spinlock.
Previous 9x spinlock code never touched the spinlock at all, since 9x is a
uniprocessor platform - spinlocks simply raised and lowered irql. That is
no longer true. Spinlocks in the rt world do more than they ever did on either
NT or 9x.

The bottom 2 bits (bits 0 and 1) mean the following:

If bit 0 is set, the spinlock is claimed. This is compatible with existing NT usage.
If bit 1 is set, the spinlock has a next in line claim.

The bottom 2 bits can transition between the following states:

00 -> 01 ; spinlock free -> spinlock claimed
01 -> 00 ; spinlock claimed -> spinlock free
01 -> 11 ; spinlock claimed -> spinlock claimed and next in line claimed
11 -> 10 ; spinlock claimed and next in line claimed -> spinlock not claimed and next in line claimed
10 -> 01 ; spinlock not claimed and next in line claimed -> spinlock claimed

The top 30 bits hold a realtime thread handle. They identify which
realtime thread is either holding the lock or is next in line.
If bit 1 is set, then the top 30 bits identify the next in line thread,
otherwise the top 30 bits identify the current owner.

Normally we have the following state transitions:

00->01 followed by 01->00

That is the no contention for the lock case.

Otherwise we have the following sequence:

00->01, 01->11, 11->10, 10->01

after which we can have either 01->00 or 01->11

*/
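// Illustrative example (hypothetical thread handle values, not from the
// original source): if realtime thread handle 0x00001000 owns the lock, the
// spinlock word holds 0x00001001 (owner handle | bit 0). If thread 0x00002000
// then queues up as next in line, the word becomes 0x00002003 (next in line
// handle | bits 0 and 1). When the owner releases, it becomes 0x00002002, and
// the queued thread finally claims the lock as 0x00002001.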
typedef struct {
    PKSPIN_LOCK SpinLock;
    ULONG YieldToThisThread;
} YIELDINFO, *PYIELDINFO;

// Atomically claim the next in line slot on the spinlock: move it from
// claimed (01) to claimed with next in line claimed (11), putting our
// thread handle in the top 30 bits.
BOOL NextInLine(PVOID Context)
{
    PYIELDINFO YieldInfo=(PYIELDINFO)Context;

    return CompareExchange(YieldInfo->SpinLock, 3|*pCurrentRtThread, 1|(YieldInfo->YieldToThisThread&~(3)));
}

// Atomically release the spinlock to the thread queued up behind us: move it
// from claimed with next in line claimed (11) to not claimed with next in
// line claimed (10), leaving the next in line thread handle in place.
BOOL YieldToNextInLine(PVOID Context)
{
    PYIELDINFO YieldInfo=(PYIELDINFO)Context;

    return CompareExchange(YieldInfo->SpinLock, YieldInfo->YieldToThisThread&~(1), YieldInfo->YieldToThisThread);
}
VOID
FASTCALL
RtKfAcquireLock(
    IN PKSPIN_LOCK SpinLock
    )
{
    YIELDINFO YieldInfo;
    ULONG SpinCount;

    SpinCount=0;

    while (TRUE) {

        if (CompareExchange(SpinLock, 1|*pCurrentRtThread, INITIALSPINLOCKVALUE) ||
            CompareExchange(SpinLock, 1|*pCurrentRtThread, 2|*pCurrentRtThread)) {
            // We got the spinlock. We're outa here.
            break;
        }

        // If we get here, then someone else is holding the spinlock. We will
        // try to queue up after them and ensure that we get it next.

        YieldInfo.SpinLock=SpinLock;
        YieldInfo.YieldToThisThread=*(volatile ULONG *)SpinLock;

        // Make sure the spinlock is not currently free.
        if (YieldInfo.YieldToThisThread==INITIALSPINLOCKVALUE) {
            continue;
        }

        // Make sure that someone is NOT trying to acquire a spinlock they already
        // acquired.
        if (((YieldInfo.YieldToThisThread^*pCurrentRtThread)&~(3))==0) {
            // Someone is trying to acquire a spinlock more than once.
#ifdef SPEW
            if (!RtThread()) {
                DbgPrint("BugCheck 0xf: Acquiring already owned spinlock 0x%x.\n", SpinLock);
            }
            Break();
#endif
            break;
        }

        // Make sure the spinlock is not in an invalid state.
        // ie: Make sure it has been initialized properly.
        if ((YieldInfo.YieldToThisThread&(3))==0 ||
            !(YieldInfo.YieldToThisThread&~(3))) {
            // Spinlock has either been trashed, or was not initialized properly.
#ifdef SPEW
            if (!RtThread()) {
                DbgPrint("BugCheck 0x81: Invalid spinlock state 0x%x.\n", SpinLock);
            }
            Break();
#endif
            break;
        }

        if (TransferControl!=NULL) {

            // Try to claim the lock next - so that we will get
            // yielded to when the lock is released.

            if ((*TransferControl)(BLOCKEDONSPINLOCK, YieldInfo.YieldToThisThread, NextInLine, &YieldInfo)) {

                // We successfully queued up to get control when the lock is released,
                // and we updated our state atomically and transferred control up to the
                // realtime executive. All with one nasty call.
                // That means when control comes back, we should be able to get the spinlock
                // on the next try.
                // UNLESS this is the windows thread. In that case, we CAN come back
                // so that interrupts can get serviced.

                if (!RtThread()) {
                    // Windows thread. We allow windows threads to get switched to even
                    // when they are blocked on a spinlock so that interrupts can be
                    // serviced.
                    while (*SpinLock!=(2|*pCurrentRtThread)) {
                        // If we get here, then the spinlock is still held by the realtime
                        // thread, so we simply yield back again to it - now that interrupts
                        // have all been serviced.
                        if (SpinCount++>100) {
                            Break();
                        }
                        (*TransferControl)(BLOCKEDONSPINLOCK, YieldInfo.YieldToThisThread, True, NULL);
                    }
                }

                if (*SpinLock!=(2|*pCurrentRtThread)) {
                    // If the spinlock doesn't have above state at this point, something
                    // is horribly wrong.
                    Break();
                }

                continue;
            }
            else {
                // We failed to get in line behind the owner. So, see if the owner
                // just released the lock.
                if (!SpinCount++) {
                    continue;
                }
                else {
                    //Break(); - TURN OFF FOR NOW - ADD a test based on whether windows
                    // or not.
                    // There must have been multiple threads queued up on this lock.
                    // Yield.
                    // OR the other possibility is that we yielded the spinlock to windows
                    // which is servicing interrupts before it marks the spinlock as ready
                    // to be queued up on - so we are stuck waiting for Windows to get around
                    // to the compare exchange where it claims the spinlock.
                    (*TransferControl)(SPINNINGONSPINLOCK, YieldInfo.YieldToThisThread, True, NULL);
                }
            }
        }
        else {
            // If we get here, then we are NOT running RT, and someone is trying to
            // acquire a held spinlock. That is a fatal error.
            // Break into debugger if present.
#ifdef SPEW
            DbgPrint("BugCheck 0xf: Acquiring already owned spinlock 0x%x.\n", SpinLock);
            Break();
#endif
            break;
        }
    }
}
VOID
FASTCALL
RtKfReleaseLock(
    IN PKSPIN_LOCK SpinLock
    )
{
    // Release the spinlock. Break if spinlock not owned.

    if (!CompareExchange(SpinLock, INITIALSPINLOCKVALUE, 1|*pCurrentRtThread)) {

        // If we get here, then someone queued up behind us and tried to acquire
        // the lock while we were holding it - and they yielded their processor
        // time to us, so we must yield back to them. The nice thing about this
        // is that this yield happens before IRQL is lowered - so when this is the
        // windows thread we do not have to wait for all of the DPCs and preemptible
        // events to be processed before the realtime thread waiting on the lock
        // gets to run. He runs as soon as we yield, and when control again
        // comes around to us, we can then continue on and lower irql.

        if (TransferControl!=NULL) {

            YIELDINFO YieldInfo;

            YieldInfo.SpinLock=SpinLock;
            YieldInfo.YieldToThisThread=*SpinLock;

            if ((YieldInfo.YieldToThisThread&3)!=3) {
                // It is an ERROR if we get here and both low order bits are not set
                // in the spinlock.
#ifdef SPEW
                if (!RtThread()) {
                    DbgPrint("BugCheck 0x10: Releasing unowned spinlock 0x%x.\n", SpinLock);
                }
                Break();
#endif
                return;
            }

            // Try to release the lock to the thread queued up behind us and then
            // transfer control to him and we're done. When he wakes up he will
            // claim the lock and someone else can get in behind him if needed.

            if ((*TransferControl)(YIELDAFTERSPINLOCKRELEASE, YieldInfo.YieldToThisThread, YieldToNextInLine, &YieldInfo)) {
                return;
            }
            else {
                // It is an ERROR to get here. We should never fail to release
                // the thread blocked on us.
                Break();
            }
        }
        else {
            // We get here if the realtime executive is not running, but we could
            // not release the spinlock. That will only happen if either the spinlock
            // was not owned, or InitialWindowsThread or pCurrentRtThread have
            // been corrupted.
#ifdef SPEW
            DbgPrint("BugCheck 0x10: Releasing unowned spinlock 0x%x.\n", SpinLock);
            Break();
#endif
        }
    }
}
KIRQL
FASTCALL
RtKfAcquireSpinLock(
    IN PKSPIN_LOCK SpinLock
    )
{
    KIRQL OldIrql;

    //ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );
    //KeRaiseIrql( DISPATCH_LEVEL, &OldIrql );

    OldIrql=*pCurrentIrql;

    if (OldIrql>DISPATCH_LEVEL) {
        DbgPrint("Acquiring spinlock 0x%x with IRQL == %d.\n", SpinLock, (ULONG)OldIrql);
        Trap();
    }

    *pCurrentIrql=DISPATCH_LEVEL;

    RtKfAcquireLock(SpinLock);

    return ( OldIrql );
}

VOID
FASTCALL
RtKefAcquireSpinLockAtDpcLevel(
    IN PKSPIN_LOCK SpinLock
    )
{
    ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );

    RtKfAcquireLock(SpinLock);
}

KIRQL
FASTCALL
RtKeAcquireSpinLockRaiseToSynch(
    IN PKSPIN_LOCK SpinLock
    )
{
    KIRQL OldIrql;

    ASSERT( KeGetCurrentIrql() <= SYNCH_LEVEL );

    KeRaiseIrql( SYNCH_LEVEL, &OldIrql );

    RtKfAcquireLock(SpinLock);

    return ( OldIrql );
}

VOID
FASTCALL
RtKfReleaseSpinLock(
    IN PKSPIN_LOCK SpinLock,
    IN KIRQL NewIrql
    )
{
    KIRQL OldIrql;

    // We better be at DISPATCH_LEVEL if we are releasing a spinlock.

    OldIrql=*pCurrentIrql;

    if (OldIrql!=DISPATCH_LEVEL) {
        DbgPrint("Releasing spinlock 0x%x with IRQL == %d.\n", SpinLock, (ULONG)OldIrql);
        Trap();
    }

    // First release the spinlock.

    RtKfReleaseLock(SpinLock);

    // Set the new IRQL level.

    //ASSERT( NewIrql >= 0 && NewIrql < 32 );
    if ( !(NewIrql >= 0 && NewIrql < 32) ) {
        Trap();
    }

    // We only lower irql on non RT threads, since RT threads should always be running
    // at DISPATCH_LEVEL anyway.

    if (currentthread==windowsthread) {
        KeLowerIrql( NewIrql );
    }
}

VOID
FASTCALL
RtKefReleaseSpinLockFromDpcLevel (
    IN PKSPIN_LOCK SpinLock
    )
{
    // Release the spinlock.

    RtKfReleaseLock(SpinLock);

    ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );
}
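// Illustrative usage sketch (not part of the original file): a hypothetical
// caller protecting shared state with one of these realtime-safe spinlocks
// would pair the acquire and release wrappers as shown below. ExampleLock,
// ExampleSharedCounter and ExampleTouchSharedState are made-up names, and the
// block is disabled so it does not change what gets built.
#if 0
KSPIN_LOCK ExampleLock;         // assumed to be initialized with KeInitializeSpinLock
ULONG ExampleSharedCounter;

VOID ExampleTouchSharedState(VOID)
{
    KIRQL OldIrql;

    // Raises *pCurrentIrql to DISPATCH_LEVEL and spins in RtKfAcquireLock,
    // queueing as next in line if another thread already holds the lock.
    OldIrql=RtKfAcquireSpinLock(&ExampleLock);

    ExampleSharedCounter++;

    // Releases the lock, yielding to any queued thread, then lowers IRQL
    // back to OldIrql on the windows thread.
    RtKfReleaseSpinLock(&ExampleLock, OldIrql);
}
#endif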