Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

963 lines
22 KiB

  1. // A realtime audio glitch detector.
  2. // This will be used to make measurements of audio glitches.
  3. // Author: Joseph Ballantyne
  4. // Date: 11/17/99
  5. // This will first work with DMA based devices.
  6. // I will add PCI support as well once the DMA glitch detection is working
  7. // properly.
  8. #ifdef UNDER_NT
  9. #include <nthal.h>
  10. #include <ntmmapi.h>
  11. #define IO_NO_INCREMENT 0
  12. HANDLE IoGetCurrentProcess(VOID);
  13. #else
  14. #include <wdm.h>
  15. #include <windef.h>
  16. #include <winerror.h>
  17. #endif
  18. #include "common.h"
  19. #include <string.h>
  20. #include <rt.h>
  21. #include "glitch.h"
  22. #ifdef UNDER_NT
  23. #include "mapview.h"
  24. #else
  25. #include <vmm.h>
  26. #include <vwin32.h>
  27. #endif
  28. #pragma intrinsic ( strcpy )
  29. // Everything we touch HAS to be locked down.
  30. #pragma LOCKED_CODE
  31. #pragma LOCKED_DATA
  32. // This header file has CODE and DATA in it. It MUST be included after above pragmas.
  33. // The code and data in the following header MUST be LOCKED.
  34. #include "dma.h"
  35. PGLITCHDATA GlitchInfo;
  36. #pragma warning ( disable : 4035 )
  37. #define rdtsc __asm _emit 0x0f __asm _emit 0x31
  38. #define rdprf __asm _emit 0x0f __asm _emit 0x33
// Read the processor's time stamp counter.
// The rdtsc macro above emits opcode 0F 31 (RDTSC), which leaves the
// 64 bit cycle count in edx:eax - exactly where MSVC expects a LONGLONG
// return value, so no explicit return statement is needed.
// (Warning 4035 "no return value" is disabled above for this reason.)
LONGLONG
__inline
ReadCycleCounter (
    VOID
    )
{
    __asm {
        rdtsc
    }
}
// Read one of the processor's performance monitoring counters.
// index - the performance counter to read, loaded into ecx.
// The rdprf macro above emits opcode 0F 33 (RDPMC), which reads the
// counter selected by ecx into edx:eax - where MSVC expects the
// ULONGLONG return value (warning 4035 is disabled above).
ULONGLONG
__inline
ReadPerformanceCounter (
    ULONG index
    )
{
    __asm {
        mov ecx,index
        rdprf
    }
}
// Return the current value of the cr3 register (the physical address of
// the current page directory). The result is left in eax, which is the
// MSVC return register (warning 4035 is disabled above).
ULONG
GetCR3 (
    VOID
    )
{
    __asm mov eax, cr3;
}
// Read a single byte from an I/O port.
// address - the I/O port address (loaded into dx).
// Returns the byte read, zero extended to 32 bits in eax (warning 4035
// is disabled above, so the value in eax is the return value).
ULONG
InB (
    ULONG address
    )
{
    __asm {
        mov edx,address
        xor eax,eax
        in al,dx
    }
}
  78. #pragma PAGEABLE_CODE
// Map a range of physical addresses into linear (virtual) address space.
// physicaladdress - start of the physical range to map.
// numbytes - size of the range in bytes.
// flags - passed through to the VMM _MapPhysToLinear service on Win9x;
//         unused on NT.
// Returns the linear address of the mapping.
PVOID
MapPhysicalToLinear (
    VOID *physicaladdress,
    ULONG numbytes,
    ULONG flags
    )
{
#ifndef UNDER_NT
    // Win9x: call the VMM _MapPhysToLinear service directly. The service
    // uses cdecl convention, so we push the arguments by hand and clean
    // up the stack afterwards. The result comes back in eax, which is
    // also the MSVC return register (warning 4035 is disabled above).
    __asm push flags
    __asm push numbytes
    __asm push physicaladdress
    VMMCall( _MapPhysToLinear );
    __asm add esp,12
#else
    PHYSICAL_ADDRESS Address;
    Address.QuadPart=(ULONG_PTR)physicaladdress;
    // NT: FALSE is MmNonCached (0), so this creates a non cached mapping.
    return (PVOID)MmMapIoSpace(Address, numbytes, FALSE);
#endif
}
PVOID
__cdecl
ReservePages (
    ULONG page,
    ULONG npages,
    ULONG flags
    )
/*++
Routine Description:
    Obtains a range of pages. On Win9x this calls the VMM _PageReserve
    service directly (cdecl, so the arguments are pushed by hand and the
    stack is cleaned up after the call; the result is returned in eax -
    warning 4035 is disabled above). On NT it allocates the pages from
    non paged pool instead.
Arguments:
    page - Win9x: where to reserve (a _PageReserve page value - presumably
        PR_SYSTEM or similar; confirm against the VMM docs). Ignored on NT.
    npages - number of PROCPAGESIZE byte pages.
    flags - Win9x _PageReserve flags. Ignored on NT.
Return Value:
    Pointer to the reserved/allocated memory.
    NOTE(review): ExAllocatePool can return NULL - callers must check.
--*/
{
#ifndef UNDER_NT
    __asm {
        push flags
        push npages
        push page
        VMMCall( _PageReserve )
        __asm add esp, 12
    }
#else
    return ExAllocatePool( NonPagedPool, npages*PROCPAGESIZE );
#endif
}
ULONG
__cdecl
FreePages (
    PVOID hmem,
    ULONG flags
    )
/*++
Routine Description:
    Releases pages previously obtained from ReservePages. On Win9x this
    calls the VMM _PageFree service directly (cdecl, hand pushed args,
    result in eax - warning 4035 is disabled above). On NT it frees the
    pool allocation.
Arguments:
    hmem - pointer previously returned by ReservePages.
    flags - Win9x _PageFree flags. Ignored on NT.
Return Value:
    Nonzero on success. Always TRUE on NT.
--*/
{
#ifndef UNDER_NT
    __asm {
        push flags
        push hmem
        VMMCall( _PageFree )
        __asm add esp, 8
    }
#else
    ExFreePool( hmem );
    return TRUE;
#endif
}
  147. #ifndef UNDER_NT
// Win9x only: lock a range of reserved pages into physical memory via
// the VMM _PageLock service (cdecl, hand pushed arguments, result in
// eax - warning 4035 is disabled above).
// page - linear page number of the first page to lock.
// npages - number of pages to lock.
// pageoffset - _PageLock page offset parameter.
// flags - _PageLock flags.
// Returns the _PageLock result (in eax).
PVOID
__cdecl
LockPages (
    ULONG page,
    ULONG npages,
    ULONG pageoffset,
    ULONG flags
    )
{
    __asm {
        push flags
        push pageoffset
        push npages
        push page
        VMMCall( _PageLock )
        __asm add esp, 16
    }
}
  166. #endif
  167. #pragma warning( default : 4035 )
// IOCTL control codes shared with the user mode measurement app.
enum ControlCodes {
    CHECKLOADED,    // 0: presence check - success alone means the driver is loaded.
    GETVERSION,     // 1: returns the driver version (0x0100) as a DWORD.
    GETBASEADDRESS  // 2: returns the address of the shared GlitchInfo buffer.
};
  173. #ifndef UNDER_NT
  174. DWORD __stdcall GlitchWin32API(PDIOCPARAMETERS p)
  175. {
  176. switch (p->dwIoControlCode) {
  177. case CHECKLOADED:
  178. break;
  179. case GETVERSION:
  180. // Get version.
  181. if (!p->lpvOutBuffer || p->cbOutBuffer<4)
  182. return ERROR_INVALID_PARAMETER;
  183. *(PDWORD)p->lpvOutBuffer=0x0100;
  184. if (p->lpcbBytesReturned)
  185. *(PDWORD)p->lpcbBytesReturned=4;
  186. break;
  187. case GETBASEADDRESS:
  188. // Get base address.
  189. if (!p->lpvOutBuffer || p->cbOutBuffer<4)
  190. return ERROR_INVALID_PARAMETER;
  191. *(PDWORD)p->lpvOutBuffer=(DWORD)GlitchInfo;
  192. if (p->lpcbBytesReturned)
  193. *(PDWORD)p->lpcbBytesReturned=4;
  194. break;
  195. default:
  196. return ERROR_INVALID_PARAMETER;
  197. }
  198. return 0;
  199. }
  200. #else
  201. PVOID MappedBuffer=NULL;
  202. NTSTATUS
  203. DeviceIoCreate(
  204. IN PDEVICE_OBJECT DeviceObject,
  205. IN PIRP pIrp
  206. )
  207. {
  208. NTSTATUS Status=STATUS_SUCCESS;
  209. pIrp->IoStatus.Status = Status;
  210. pIrp->IoStatus.Information = 0;
  211. IoCompleteRequest(pIrp, IO_NO_INCREMENT);
  212. return Status;
  213. }
// IRP_MJ_CLOSE handler. Tears down the user mode mapping of the glitch
// data buffer when the owning process closes us.
// DeviceObject - our device object (unused).
// pIrp - the close IRP to complete.
// Returns STATUS_SUCCESS.
NTSTATUS
DeviceIoClose(
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP pIrp
    )
{
    NTSTATUS Status=STATUS_SUCCESS;
    PVOID Pointer;

    // Make sure that we release our mapped buffer view when the user mode
    // app that opened our section handle closes its handle or goes away.
    // We screen these calls based on the handle we get to the process that
    // successfully opened a section handle. We only make the unmap call
    // if we are being called by the same process that made the map call.
    // NOTE(review): Process is defined elsewhere (presumably mapview.c) and
    // should hold the process that performed the map - confirm.
    if (IoGetCurrentProcess()==Process) {
        // Atomically take ownership of MappedBuffer so the unmap happens
        // exactly once even if we get multiple close calls.
        if ((Pointer=InterlockedExchangePointer(&MappedBuffer, NULL))!=NULL &&
            UnMapContiguousBufferFromUserModeProcess(Pointer)!=STATUS_SUCCESS) {
            Trap();
        }
    }

    pIrp->IoStatus.Status = Status;
    pIrp->IoStatus.Information = 0;
    IoCompleteRequest(pIrp, IO_NO_INCREMENT);
    return Status;
}
// IRP_MJ_DEVICE_CONTROL handler for the NT build.
// Control code 2 (== GETBASEADDRESS) maps the glitch data buffer into the
// calling user mode process and returns the user mode base address through
// the caller's 4 byte output buffer (passed via MDL, so this is a direct
// I/O control code).
// DeviceObject - our device object (unused).
// pIrp - the device control IRP.
// Returns STATUS_SUCCESS, or STATUS_INVALID_PARAMETER for unknown codes
// or malformed buffers.
NTSTATUS
DeviceIoControl(
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP pIrp
    )
{
    PVOID BaseAddress=NULL;
    PIO_STACK_LOCATION pIrpStack;
    NTSTATUS Status=STATUS_SUCCESS;

    pIrpStack = IoGetCurrentIrpStackLocation(pIrp);

    switch (pIrpStack->Parameters.DeviceIoControl.IoControlCode) {

    case 2:
        // The output buffer must be exactly one DWORD.
        if (!pIrp->MdlAddress || pIrp->MdlAddress->ByteCount!=4) {
            Status=STATUS_INVALID_PARAMETER;
            break;
        }
        Status=MapContiguousBufferToUserModeProcess(GlitchInfo, &BaseAddress);
        // Remember the address of the mapped buffer.
        // We depend on the fact that BaseAddress will be NULL for requests
        // to map the buffer when it is already mapped.
        if (BaseAddress) {
            MappedBuffer=BaseAddress;
        }
        // NOTE(review): MmGetSystemAddressForMdl can return NULL under low
        // resources; also a DWORD_PTR store into a 4 byte buffer is only
        // safe on 32 bit builds - confirm both assumptions.
        *(PDWORD_PTR)(MmGetSystemAddressForMdl(pIrp->MdlAddress))=(DWORD_PTR)BaseAddress;
        break;

    default:
        Status=STATUS_INVALID_PARAMETER;
        break;
    }

    pIrp->IoStatus.Status = Status;
    pIrp->IoStatus.Information = 0;
    IoCompleteRequest(pIrp, IO_NO_INCREMENT);
    return Status;
}
  272. #endif
  273. // All the rest of the code in this file MUST be locked as it is all called from within
  274. // the realtime thread.
  275. #pragma LOCKED_CODE
// Write a single byte to an I/O port.
// address - the I/O port address (loaded into dx).
// data - the value to write; only the low byte (al) is sent.
VOID
OutB (
    ULONG address,
    ULONG data
    )
{
    __asm {
        mov edx,address
        mov eax,data
        out dx,al
    }
}
  288. // This function loads the DMA buffer just played with our starvation fill pattern.
  289. // Which is 0xffffffff. This is a nice pattern because for signed 16 bit data it
  290. // is a DC value close to zero. Moreover, it is DC for both stereo 16 bit, mono 16
  291. // bit, stereo 8 bit and mono 8 bit. For 8 bit data it is at the max of the data
  292. // range - so is pegged to max volume - for most cards. Since most cards do unsigned
  293. // 8 bit samples. This value MUST be different from the KMIXER starvation pattern
  294. // which is zero.
  295. // Note that this routine assumes that CurrentDmaPosition and LastDmaPosition are
  296. // multiples of 4 on entry. It also assumes that the DmaBufferSize is a
  297. // power of 2 and that DmaBufferSize is greater than 4 bytes.
  298. VOID
  299. FillDmaBuffer (
  300. ULONG CurrentDmaPosition,
  301. ULONG LastDmaPosition,
  302. PDMAINFO Context
  303. )
  304. {
  305. // Make positions relative to start of dma buffer.
  306. CurrentDmaPosition-=Context->PhysicalDmaBufferStart;
  307. LastDmaPosition-=Context->PhysicalDmaBufferStart;
  308. while (LastDmaPosition!=CurrentDmaPosition) {
  309. Context->pDmaBuffer[LastDmaPosition/4]=0xffffffff;
  310. LastDmaPosition+=4;
  311. LastDmaPosition&=Context->DmaBufferSize-1;
  312. }
  313. }
  314. // Glitches start whenever all of the samples in the buffer since we last checked
  315. // match our starvation fill pattern. This will happen whenever there is at least
  316. // 1 ms of starvation assuming that we run our starvation detection with a 1ms period.
  317. // This means that we WILL miss glitches that are for less than 1ms, but it also
  318. // means that we won't be prone to false positives.
  319. // Note that this routine assumes that CurrentDmaPosition and LastDmaPosition are
  320. // multiples of 4 on entry. It also assumes that the DmaBufferSize is a
  321. // power of 2 and that DmaBufferSize is greater than 4 bytes.
  322. BOOLEAN
  323. GlitchStarted (
  324. ULONG CurrentDmaPosition,
  325. ULONG LastDmaPosition,
  326. PDMAINFO Context
  327. )
  328. {
  329. if (CurrentDmaPosition==LastDmaPosition) {
  330. return FALSE;
  331. }
  332. // Make positions relative to start of dma buffer.
  333. CurrentDmaPosition-=Context->PhysicalDmaBufferStart;
  334. LastDmaPosition-=Context->PhysicalDmaBufferStart;
  335. while (LastDmaPosition!=CurrentDmaPosition) {
  336. if (Context->pDmaBuffer[LastDmaPosition/4]!=0xffffffff) {
  337. return FALSE;
  338. }
  339. LastDmaPosition+=4;
  340. LastDmaPosition&=Context->DmaBufferSize-1;
  341. }
  342. return TRUE;
  343. }
  344. // Glitches end as soon as any value in the buffer does not match
  345. // our starvation fill pattern.
  346. // Note that this routine assumes that CurrentDmaPosition and LastDmaPosition are
  347. // multiples of 4 on entry. It also assumes that the DmaBufferSize is a
  348. // power of 2 and that DmaBufferSize is greater than 4 bytes.
  349. BOOLEAN
  350. GlitchEnded (
  351. ULONG CurrentDmaPosition,
  352. ULONG LastDmaPosition,
  353. PDMAINFO Context
  354. )
  355. {
  356. // Make positions relative to start of dma buffer.
  357. CurrentDmaPosition-=Context->PhysicalDmaBufferStart;
  358. LastDmaPosition-=Context->PhysicalDmaBufferStart;
  359. while (LastDmaPosition!=CurrentDmaPosition) {
  360. if (Context->pDmaBuffer[LastDmaPosition/4]!=0xffffffff) {
  361. return TRUE;
  362. }
  363. LastDmaPosition+=4;
  364. LastDmaPosition&=Context->DmaBufferSize-1;
  365. }
  366. return FALSE;
  367. }
  368. ULONG UnmaskedChannels=0;
  369. ULONG DmaBufferRemapCount=0;
  370. ULONG gCurrentDmaPosition=0;
  371. ULONG gCurrentDmaCount=0;
// Make sure the physical region the DMA controller is currently playing
// is mapped into our view of the DMA buffer, remapping our page table
// entries if the position/count fall outside the current mapping.
// Context - per channel DMA tracking state (mapping start/size, pages).
// CurrentDmaPosition - current physical DMA address (0 means invalid).
// CurrentDmaCount - current DMA transfer count in bytes (0 means invalid).
// Returns STATUS_SUCCESS, or STATUS_INSUFFICIENT_RESOURCES when the
// required mapping would exceed MAXDMABUFFERSIZE.
NTSTATUS MapDmaBuffer(PDMAINFO Context, ULONG CurrentDmaPosition, ULONG CurrentDmaCount)
{
    ULONG i;

    // Whenever I get a non zero DMA position AND a non zero count, I have to
    // make sure that I have that location properly mapped.
    if (CurrentDmaPosition!=0 && CurrentDmaCount!=0) {
        // Check if this position and count are properly mapped.
        // If not, then we must remap the buffer.
        if (CurrentDmaPosition<Context->PhysicalDmaBufferStart ||
            CurrentDmaPosition>=Context->PhysicalDmaBufferStart+Context->DmaBufferSize ||
            CurrentDmaPosition+CurrentDmaCount<Context->PhysicalDmaBufferStart ||
            CurrentDmaPosition+CurrentDmaCount>Context->PhysicalDmaBufferStart+Context->DmaBufferSize
            ) {
            // Position or size is outside current mapping. Remap the buffer.
            DmaBufferRemapCount++;
            // Recalculate start and size of where the buffer should be.
            // WARNING: We ASSUME all buffers we map are multiples of 4k bytes in size.
            // For WDM audio drivers this assumption is currently valid.
            Context->PhysicalDmaBufferStart=CurrentDmaPosition&(~(PROCPAGESIZE-1));
            Context->DmaBufferSize=(CurrentDmaCount+PROCPAGESIZE-1)&(~(PROCPAGESIZE-1));
            if (Context->DmaBufferSize>MAXDMABUFFERSIZE) {
                // Clear start and size since we cannot map them anyway.
                // Even more important, we must clear them so we will try to remap
                // again the next time this routine is called.
                Context->PhysicalDmaBufferStart=0;
                Context->DmaBufferSize=0;
                return STATUS_INSUFFICIENT_RESOURCES;
            }
            // Walk the pages we have for mapping the DMA buffer and remap them
            // to our DMA channel buffer. Each PTE is marked present, writable,
            // user accessible and cache disabled.
            for (i=0; i<(Context->DmaBufferSize/PROCPAGESIZE); i++) {
                Context->PageTable[i+(((ULONG)(Context->pDmaBuffer)>>12)&1023)]=
                    (Context->PhysicalDmaBufferStart+(i*PROCPAGESIZE))|(PRESENT|WRITABLE|USER|CACHEDISABLED);
            }
            // Now flush the TLBs.
            // If cr3 changes - which it DOES on NT, then I MUST make
            // sure that we don't get thread switched between the 2
            // assembly instructions - otherwise we will corrupt cr3.
            // Not a good thing.
            // NOTE(review): this loop appears to yield until enough of the
            // timeslice remains to run the two instructions uninterrupted;
            // the exact meaning of counter 0 and the +50 margin depends on
            // the rt executive - confirm against rt.h.
            while (((LONG)ReadPerformanceCounter(0)+50)>0) {
                Trap();
                RtYield(0, 0);
            }
            __asm mov eax, cr3;
            __asm mov cr3, eax;
        }
    }
    return STATUS_SUCCESS;
}
// Atomically claim the next PACKETSIZE sized slot in the shared print
// buffer and return its byte offset within the buffer.
// Context - per channel state holding the print buffer and load pointer.
// The compare exchange loop lets concurrent realtime threads claim
// distinct slots without ever blocking each other.
ULONG
GetNextPrintPosition (
    PDMAINFO Context
    )
{
    ULONG PrintLocation, NextLocation;

    // Claim a slot: advance *pPrintLoad by PACKETSIZE, retrying if some
    // other thread advanced it first.
    NextLocation=*Context->pPrintLoad;
    do {
        PrintLocation=NextLocation;
        NextLocation=InterlockedCompareExchange((PULONG)Context->pPrintLoad, PrintLocation+PACKETSIZE, PrintLocation);
    } while (PrintLocation!=NextLocation);

    // Now we clear out the opposite half of the print buffer. We do this all in kernel mode.
    // This means that we have data only in 1/2 of the buffer. As we add new data, we
    // delete the old data. We do the deletion of data in kernel mode so that we only
    // need to read data from user mode. I do NOT want user mode code to be writing to
    // this buffer. User mode code can read out of the output buffer, but NOT write into
    // it. This means we MUST both fill and clear this buffer ourselves. Since user
    // mode code is dependent on the fact that all slots will be marked as having
    // NODATA in them until they have been completely loaded with data, at which point
    // they will be marked with something other than NODATA. We guarantee that
    // every slot we are loading starts as NODATA by simply clearing the print slots
    // in kernel mode before we fill them. The easiest way to do this is to start
    // by marking all entries in the buffer as NODATA, and then by continuing to make
    // sure that for every print slot we are going to fill with data, we clear the corresponding
    // print slot halfway around the buffer.
    // That simple algorithm guarantees that every slot starts out marked as NODATA and
    // then transitions to some other state after it is filled.
    ((ULONG *)Context->pPrintBuffer)[((PrintLocation+Context->PrintBufferSize/2)%Context->PrintBufferSize)/sizeof(ULONG)]=NODATA;

    PrintLocation%=Context->PrintBufferSize;

    return PrintLocation;
}
// The realtime glitch detection loop for one ISA DMA channel. Runs
// forever (once per RtYield period), sampling the channel's DMA position
// and count, mapping the buffer as needed, detecting starvation glitches
// via the fill pattern, and logging holdoff/glitch/mask-change packets
// into the shared print buffer. Exits (killing the thread) only on
// fatal inconsistencies, each marked with Trap().
// Context - per channel DMA tracking state.
// Statistics - per thread timing statistics (timeslice start times and
//     period index) maintained by the realtime executive.
VOID
GlitchDetect (
    PDMAINFO Context,
    ThreadStats *Statistics
    )
{
    ULONG DmaMask, LastDmaMask;
    KIRQL OldIrql;
    ULONG LastDmaPosition;
    ULONG CurrentDmaPosition;
    ULONG CurrentDmaCount;
    ULONGLONG GlitchStart;
    ULONGLONG LastGlitchStart;
    ULONG GlitchLength;
    ULONG PrintLocation;
    ULONGLONG LastTimesliceStartTime;
    ULONG Owner;

    GlitchStart=0;
    GlitchLength=0;
    LastDmaMask=0;
    LastDmaPosition=0;
    LastGlitchStart=0;
    LastTimesliceStartTime=0;

    while (TRUE) {
        // If any other channels are unmasked. Punt.
        // Until I figure out what is broken, we only support tracking 1 channel
        // at a time.
        if (UnmaskedChannels&(~(1<<Context->Channel))) {
            goto ReleaseCurrentTimeslice;
        }
        if (LastTimesliceStartTime) {
            LastTimesliceStartTime=Statistics->ThisTimesliceStartTime-LastTimesliceStartTime;
            // At this point LastTimesliceStartTime is really the time between the
            // last timeslice start time, and the current timeslice start time.
        }
        // CR3 DOES change under NT! However, although there are multiple
        // page directories, they alias the same page tables - at least the
        // system non paged page tables. (They have to since otherwise the
        // kernel mode locked code would not work.)
#ifndef UNDER_NT
        // On Win9x the page directory must be the one we set up; bail if not.
        if (GetCR3()!=Context->CR3) {
            Trap();
            break;
        }
#endif
        // Grab the DMA master adapter spinlock.
        KeAcquireSpinLock(Context->pMasterAdapterSpinLock, &OldIrql);
        // Check mask state of our channel.
        DmaMask=ReadDMAMask();
        // Unmasked.
        if ((~DmaMask)&(1<<Context->Channel)) {
            // Lock out glitch tracking on other channels. If this fails, then
            // someone else is already doing glitch tracking, so release the
            // spinlock and release the current timeslice.
            Owner=InterlockedCompareExchange(&UnmaskedChannels, 1<<Context->Channel, 0);
            if (!(Owner==(1<<Context->Channel) || Owner==0)) {
                // Release the DMA master adapter spinlock.
                KeReleaseSpinLock(Context->pMasterAdapterSpinLock, OldIrql);
                goto ReleaseCurrentTimeslice;
            }
            // Mask the DMA channel.
            MaskDmaChannel(Context->Channel);
            // Read the position.
            ReadDmaPosition(Context->Channel, &CurrentDmaPosition);
            // Align it.
            CurrentDmaPosition&=~3;
            // Read the count.
            ReadDmaCount(Context->Channel, &CurrentDmaCount);
            // Align it (round up to a multiple of 4).
            CurrentDmaCount+=3;
            CurrentDmaCount&=~3;
            // Unmask the DMA channel.
            UnmaskDmaChannel(Context->Channel);
        }
        // Masked.
        else {
#if 0
            // Read the position.
            ReadDmaPosition(Context->Channel, &CurrentDmaPosition);
            // Align it.
            CurrentDmaPosition&=~3;
            // Read the count.
            ReadDmaCount(Context->Channel, &CurrentDmaCount);
            // Align it.
            CurrentDmaCount+=3;
            CurrentDmaCount&=~3;
#else
            CurrentDmaPosition=0;
            CurrentDmaCount=0;
            LastDmaPosition=0;
            // Release our lockout of glitch tracking on other channels.
            InterlockedCompareExchange(&UnmaskedChannels, 0, 1<<Context->Channel);
#endif
        }
        // Release the DMA master adapter spinlock.
        KeReleaseSpinLock(Context->pMasterAdapterSpinLock, OldIrql);
        // Load globals - so I can see what they are.
        gCurrentDmaPosition=CurrentDmaPosition;
        gCurrentDmaCount=CurrentDmaCount;
        // Now find and map the physical DMA buffer.
        // Punt and exit thread if we cannot map the buffer.
        if (MapDmaBuffer(Context, CurrentDmaPosition, CurrentDmaCount)!=STATUS_SUCCESS) {
            Trap();
            break;
        }
        // If LastDmaPosition and CurrentDmaPosition are valid and different,
        // then look for glitches.
        if (CurrentDmaPosition!=0 &&
            LastDmaPosition!=0 &&
            CurrentDmaPosition!=LastDmaPosition) {
            // Make sure our position is within our mapped buffer. If not, log that
            // info and exit. That will kill this thread.
            if (CurrentDmaPosition<Context->PhysicalDmaBufferStart ||
                CurrentDmaPosition>=(Context->PhysicalDmaBufferStart+Context->DmaBufferSize)) {
                Trap();
                break;
            }
            // Make sure both current and last dma positions are DWORD aligned. Punt if not.
            if ((CurrentDmaPosition|LastDmaPosition)&3) {
                Trap();
                break;
            }
            // Make sure the dma buffer size is a power of 2. Punt if not.
            if (Context->DmaBufferSize&(Context->DmaBufferSize-1)) {
                Trap();
                break;
            }
            // Check if we see our FLAG value in the DMA buffer. Log glitch start time if so.
            if (!GlitchStart) {
                if (GlitchStarted(CurrentDmaPosition, LastDmaPosition, Context)) {
                    GlitchStart=Statistics->PeriodIndex;
                }
            }
            // If we are tracking a glitch, then see if there is valid data now. Log glitch
            // stop time if so.
            else {
                if (GlitchEnded(CurrentDmaPosition, LastDmaPosition, Context)) {
                    GlitchLength=(ULONG)(Statistics->PeriodIndex-GlitchStart);
                }
            }
            // Fill in with our flag value behind the DMA pointer back to previous DMA pointer.
            FillDmaBuffer(CurrentDmaPosition, LastDmaPosition, Context);
        }
        // Print interrupt holdoff time if we have been held off.
        // We do this only for channels that are unmasked.
        if ((~DmaMask)&(1<<Context->Channel) && LastTimesliceStartTime>=2000*USEC) {
            PrintLocation=GetNextPrintPosition(Context);
            // Load the packet type last. When the ring 3 code sees a packet
            // type that is not NODATA it assumes the rest of the packet has already
            // been written.
            ((ULONGLONG *)Context->pPrintBuffer)[1+PrintLocation/sizeof(ULONGLONG)]=LastTimesliceStartTime-MSEC;
            ((ULONG *)Context->pPrintBuffer)[PrintLocation/sizeof(ULONG)]=HELDOFF|(Context->Channel<<8);
        }
        // Print glitch information if any.
        if (GlitchLength) {
            PrintLocation=GetNextPrintPosition(Context);
            // Load the packet type last. When the ring 3 code sees a packet
            // type that is not NODATA it assumes the rest of the packet has already
            // been written.
            // We put the DMA channel in byte 1 of the packet type.
            ((ULONGLONG *)Context->pPrintBuffer)[1+PrintLocation/sizeof(ULONGLONG)]=GlitchStart-LastGlitchStart;
            ((ULONG *)Context->pPrintBuffer)[1+PrintLocation/sizeof(ULONG)]=GlitchLength;
            ((ULONG *)Context->pPrintBuffer)[PrintLocation/sizeof(ULONG)]=GLITCHED|(Context->Channel<<8);
            LastGlitchStart=GlitchStart;
            GlitchStart=0;
            GlitchLength=0;
        }
        // Print pause/running state changes.
        if ((LastDmaMask^DmaMask)&(1<<Context->Channel)) {
            if (DmaMask&(1<<Context->Channel)) {
                PrintLocation=GetNextPrintPosition(Context);
                // Load the packet type last. When the ring 3 code sees a packet
                // type that is not NODATA it assumes the rest of the packet has already
                // been written.
                // We put the DMA channel in byte 1 of the packet type.
                ((ULONG *)Context->pPrintBuffer)[PrintLocation/sizeof(ULONG)]=MASKED|(Context->Channel<<8);
            }
            else {
                PrintLocation=GetNextPrintPosition(Context);
                // Load the packet type last. When the ring 3 code sees a packet
                // type that is not NODATA it assumes the rest of the packet has already
                // been written.
                // We put the DMA channel in byte 1 of the packet type.
                ((ULONG *)Context->pPrintBuffer)[PrintLocation/sizeof(ULONG)]=UNMASKED|(Context->Channel<<8);
            }
        }
        // Update LastDmaPosition;
        LastDmaPosition=CurrentDmaPosition;
        LastDmaMask=DmaMask;
ReleaseCurrentTimeslice:
        LastTimesliceStartTime=Statistics->ThisTimesliceStartTime;
        // Yield till next ms.
        RtYield(0, 0);
    }
}