Leaked source code of Windows Server 2003


/*==========================================================================
 *
 *  Copyright (C) 1994-1999 Microsoft Corporation.  All Rights Reserved.
 *
 *  File:       ddheap.c
 *  Content:    Top-level heap routines.
 *  History:
 *   Date       By      Reason
 *   ====       ==      ======
 *   06-dec-94  craige  initial implementation
 *   06-jan-95  craige  integrated into DDRAW
 *   20-mar-95  craige  prepare for rectangular memory manager
 *   27-mar-95  craige  linear or rectangular vidmem
 *   01-apr-95  craige  happy fun joy updated header file
 *   06-apr-95  craige  fill in free video memory
 *   15-may-95  craige  made separate VMEM struct for rect & linear
 *   10-jun-95  craige  exported fns
 *   02-jul-95  craige  fail if VidMemInit if linear or rect. fail;
 *                      removed linFindMemBlock
 *   17-jul-95  craige  added VidMemLargestFree
 *   01-dec-95  colinmc added VidMemAmountAllocated
 *   11-dec-95  kylej   added VidMemGetRectStride
 *   05-jul-96  colinmc Work Item: Removing the restriction on taking Win16
 *                      lock on VRAM surfaces (not including the primary)
 *   03-mar-97  jeffno  Work item: Extended surface memory alignment
 *   13-mar-97  colinmc Bug 6533: Pass uncached flag to VMM correctly
 *   03-Feb-98  DrewB   Made portable between user and kernel.
 *
 ***************************************************************************/
#include "precomp.hxx"

/*
 * VidMemInit - initialize video memory manager heap
 */
LPVMEMHEAP WINAPI VidMemInit(
        DWORD   flags,
        FLATPTR start,
        FLATPTR width_or_end,
        DWORD   height,
        DWORD   pitch )
{
    LPVMEMHEAP pvmh;

    pvmh = (LPVMEMHEAP)MemAlloc( sizeof( VMEMHEAP ) );
    if( pvmh == NULL )
    {
        return NULL;
    }
    pvmh->dwFlags = flags;

    ZeroMemory( &pvmh->Alignment.ddsCaps, sizeof(pvmh->Alignment.ddsCaps) );

    if( pvmh->dwFlags & VMEMHEAP_LINEAR )
    {
        if( !linVidMemInit( pvmh, start, width_or_end ) )
        {
            MemFree( pvmh );
            return NULL;
        }
    }
    else
    {
        if( !rectVidMemInit( pvmh, start, (DWORD) width_or_end, height,
                             pitch ) )
        {
            MemFree( pvmh );
            return NULL;
        }
    }
    return pvmh;

} /* VidMemInit */

/*
 * VidMemFini - done with video memory manager
 */
void WINAPI VidMemFini( LPVMEMHEAP pvmh )
{
    if( pvmh->dwFlags & VMEMHEAP_LINEAR )
    {
        linVidMemFini( pvmh );
    }
    else
    {
        rectVidMemFini( pvmh );
    }

} /* VidMemFini */

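/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * creating and tearing down a linear heap with the two routines above.
 * The base address and 4 MB extent are hypothetical placeholders; for a
 * linear heap, width_or_end is the inclusive end address.
 */
static void ExampleLinearHeapLifetime( FLATPTR fpBase )
{
    LPVMEMHEAP pvmh;

    pvmh = VidMemInit( VMEMHEAP_LINEAR, fpBase,
                       fpBase + 0x400000 - 1,   /* inclusive end address */
                       0, 0 );                  /* height/pitch unused   */
    if( pvmh == NULL )
    {
        return;     /* heap creation failed */
    }
    VidMemFini( pvmh );
}
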
/*
 * InternalVidMemAlloc - alloc some flat video memory and give us back the
 * size we allocated
 */
FLATPTR WINAPI InternalVidMemAlloc( LPVMEMHEAP pvmh, DWORD x, DWORD y,
                                    LPDWORD lpdwSize,
                                    LPSURFACEALIGNMENT lpAlignment,
                                    LPLONG lpNewPitch )
{
    if( pvmh->dwFlags & VMEMHEAP_LINEAR )
    {
        return linVidMemAlloc( pvmh, x, y, lpdwSize, lpAlignment, lpNewPitch );
    }
    else
    {
        FLATPTR lp = rectVidMemAlloc( pvmh, x, y, lpdwSize, lpAlignment );
        if( lp && lpNewPitch )
        {
            *lpNewPitch = (LONG) pvmh->stride;
        }
        return lp;
    }
    return (FLATPTR) NULL;

} /* InternalVidMemAlloc */

/*
 * VidMemAlloc - alloc some flat video memory
 */
FLATPTR WINAPI VidMemAlloc( LPVMEMHEAP pvmh, DWORD x, DWORD y )
{
    DWORD dwSize;

    /*
     * We are not interested in the size here.
     */
    return InternalVidMemAlloc( pvmh, x, y, &dwSize, NULL, NULL );

} /* VidMemAlloc */

/*
 * DxDdHeapVidMemFree - free some flat video memory
 */
void WINAPI DxDdHeapVidMemFree( LPVMEMHEAP pvmh, FLATPTR ptr )
{
    if( pvmh->dwFlags & VMEMHEAP_LINEAR )
    {
        linVidMemFree( pvmh, ptr );
    }
    else
    {
        rectVidMemFree( pvmh, ptr );
    }

} /* DxDdHeapVidMemFree */

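/*
 * Illustrative sketch (editor's addition): a plain allocate/free round trip
 * through the heap. The 640x480 extents are hypothetical.
 */
static void ExampleAllocFree( LPVMEMHEAP pvmh )
{
    FLATPTR ptr;

    ptr = VidMemAlloc( pvmh, 640, 480 );    /* hypothetical extents */
    if( ptr != (FLATPTR) NULL )
    {
        DxDdHeapVidMemFree( pvmh, ptr );
    }
}
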
/*
 * VidMemAmountAllocated
 */
DWORD WINAPI VidMemAmountAllocated( LPVMEMHEAP pvmh )
{
    if( pvmh->dwFlags & VMEMHEAP_LINEAR )
    {
        return linVidMemAmountAllocated( pvmh );
    }
    else
    {
        return rectVidMemAmountAllocated( pvmh );
    }

} /* VidMemAmountAllocated */

/*
 * VidMemAmountFree
 */
DWORD WINAPI VidMemAmountFree( LPVMEMHEAP pvmh )
{
    if( pvmh->dwFlags & VMEMHEAP_LINEAR )
    {
        return linVidMemAmountFree( pvmh );
    }
    else
    {
        return rectVidMemAmountFree( pvmh );
    }

} /* VidMemAmountFree */

/*
 * VidMemLargestFree
 */
DWORD WINAPI VidMemLargestFree( LPVMEMHEAP pvmh )
{
    if( pvmh->dwFlags & VMEMHEAP_LINEAR )
    {
        return linVidMemLargestFree( pvmh );
    }
    else
    {
        return 0;
    }

} /* VidMemLargestFree */

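/*
 * Illustrative sketch (editor's addition): reporting heap statistics with
 * the three query routines above. Note that VidMemLargestFree reports 0 for
 * rectangular heaps, so "largest free" is only meaningful for linear ones.
 */
static void ExampleReportHeapUsage( LPVMEMHEAP pvmh )
{
    DWORD dwAllocated = VidMemAmountAllocated( pvmh );
    DWORD dwFree      = VidMemAmountFree( pvmh );
    DWORD dwLargest   = VidMemLargestFree( pvmh );

    VDPF(( 1, V, "heap: %ld allocated, %ld free (largest block %ld)",
           dwAllocated, dwFree, dwLargest ));
}
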
/*
 * HeapVidMemInit
 *
 * Top level heap initialization code which handles AGP stuff.
 */
LPVMEMHEAP WINAPI HeapVidMemInit( LPVIDMEM lpVidMem,
                                  DWORD pitch,
                                  HANDLE hdev,
                                  LPHEAPALIGNMENT pgad )
{
    DWORD dwSize;

    DDASSERT( NULL != lpVidMem );

    if( lpVidMem->dwFlags & VIDMEM_ISNONLOCAL )
    {
        /*
         * We do not actually call AGPReserve at this time since that would
         * mean calling it each time a mode change occurred. Instead, we
         * defer this until later when we call InitAgpHeap.
         */

        /*
         * Compute the size of the heap.
         */
        if( lpVidMem->dwFlags & VIDMEM_ISLINEAR )
        {
            dwSize = (DWORD)(lpVidMem->fpEnd - lpVidMem->fpStart) + 1UL;
        }
        else
        {
            DDASSERT( lpVidMem->dwFlags & VIDMEM_ISRECTANGULAR );
            dwSize = (pitch * lpVidMem->dwHeight);
        }
        DDASSERT( 0UL != dwSize );

        /*
         * Update the heap for the new start address
         * (and end address for a linear heap).
         */
        lpVidMem->fpStart = 0;
        if( lpVidMem->dwFlags & VIDMEM_ISLINEAR )
        {
            lpVidMem->fpEnd = dwSize - 1UL;
        }
        else
        {
            DDASSERT( lpVidMem->dwFlags & VIDMEM_ISRECTANGULAR );
            DDASSERT( pitch );
            lpVidMem->dwHeight = dwSize / pitch;
        }
    }

    if( lpVidMem->dwFlags & VIDMEM_ISLINEAR )
    {
        VDPF(( 1,V, "VidMemInit: Linear: fpStart = 0x%08x fpEnd = 0x%08x",
               lpVidMem->fpStart, lpVidMem->fpEnd ));
        lpVidMem->lpHeap = VidMemInit( VMEMHEAP_LINEAR, lpVidMem->fpStart,
                                       lpVidMem->fpEnd, 0, 0 );
    }
    else
    {
        // We have no way of testing a rectangular AGP heap, so I'm disabling
        // it for now.
        if( !( lpVidMem->dwFlags & VIDMEM_ISNONLOCAL ) )
        {
            VDPF(( 1,V, "VidMemInit: Rectangular: fpStart = 0x%08x "
                   "dwWidth = %ld dwHeight = %ld, pitch = %ld",
                   lpVidMem->fpStart, lpVidMem->dwWidth, lpVidMem->dwHeight,
                   pitch ));
            lpVidMem->lpHeap = VidMemInit( VMEMHEAP_RECTANGULAR,
                                           lpVidMem->fpStart,
                                           lpVidMem->dwWidth,
                                           lpVidMem->dwHeight,
                                           pitch );
        }
    }

    /*
     * Modify the caps and alt-caps so that you don't allocate local
     * video memory surfaces out of AGP memory and vice versa.
     */
    if( lpVidMem->dwFlags & VIDMEM_ISNONLOCAL )
    {
        /*
         * It's an AGP heap, so don't let explicit LOCAL video memory
         * be allocated out of this heap.
         */
        lpVidMem->ddsCaps.dwCaps    |= DDSCAPS_LOCALVIDMEM;
        lpVidMem->ddsCapsAlt.dwCaps |= DDSCAPS_LOCALVIDMEM;
    }
    else
    {
        /*
         * It's a local video memory heap, so don't let explicit NON-LOCAL
         * video memory be allocated out of this heap.
         */
        lpVidMem->ddsCaps.dwCaps    |= DDSCAPS_NONLOCALVIDMEM;
        lpVidMem->ddsCapsAlt.dwCaps |= DDSCAPS_NONLOCALVIDMEM;
    }

    /*
     * Copy any extended alignment data into the private heap structure
     */
    if( lpVidMem->lpHeap )
    {
        if( pgad )
        {
            lpVidMem->lpHeap->dwFlags |= VMEMHEAP_ALIGNMENT;
            lpVidMem->lpHeap->Alignment = *pgad;
            VDPF((5,V,"Extended alignment turned on for this heap."));
            VDPF((6,V,"Alignments are turned on for:"));
            VDPF((6,V,"  %08X",pgad->ddsCaps));
        }
        else
        {
            /*
             * This means the allocation routines will do no alignment
             * modifications
             */
            VDPF((5,V,"Extended alignment turned OFF for this heap."));
            lpVidMem->lpHeap->dwFlags &= ~VMEMHEAP_ALIGNMENT;
        }
    }
    return lpVidMem->lpHeap;

} /* HeapVidMemInit */

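/*
 * Illustrative sketch (editor's addition): the caps bits ORed in above act
 * as an exclusion mask. DdHeapAlloc (later in this file) admits a request
 * into a heap only when the requested caps and the heap's disallowed caps
 * share no bits, so an AGP heap that ORed in DDSCAPS_LOCALVIDMEM rejects
 * any surface explicitly requested in local video memory.
 */
static BOOL ExampleHeapAdmitsRequest( DWORD dwRequestedCaps,
                                      DWORD dwHeapDisallowedCaps )
{
    return ( dwRequestedCaps & dwHeapDisallowedCaps ) == 0;
}
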
/*
 * HeapVidMemFini
 *
 * Top level heap release code. Handles AGP stuff.
 */
void WINAPI HeapVidMemFini( LPVIDMEM lpVidMem, HANDLE hdev )
{
    DWORD dwCommittedSize = 0UL;
    PVOID pvReservation;
    BYTE* pAgpCommitMask = NULL;
    DWORD dwAgpCommitMaskSize;
    DWORD dwTotalSize;

    /*
     * Remember how much memory we committed to the AGP heap.
     */
    DDASSERT( NULL != lpVidMem->lpHeap );
    if( lpVidMem->dwFlags & VIDMEM_ISNONLOCAL )
    {
        dwCommittedSize     = lpVidMem->lpHeap->dwCommitedSize;
        pvReservation       = lpVidMem->lpHeap->pvPhysRsrv;
        pAgpCommitMask      = lpVidMem->lpHeap->pAgpCommitMask;
        dwAgpCommitMaskSize = lpVidMem->lpHeap->dwAgpCommitMaskSize;
        dwTotalSize         = lpVidMem->lpHeap->dwTotalSize;
    }

    /*
     * Free the memory manager
     */
    VidMemFini( lpVidMem->lpHeap );
    lpVidMem->lpHeap = NULL;

    if( lpVidMem->dwFlags & VIDMEM_ISNONLOCAL )
    {
        BOOL fSuccess = TRUE;

        /*
         * If this is a non-local (AGP) heap then decommit and
         * free the GART memory now.
         */
        if( ( 0UL != dwCommittedSize ) &&
            ( pAgpCommitMask != NULL ) )
        {
            DWORD dwTemp;

            /*
             * Only decommit if we actually bothered to commit something
             * in the first place.
             */
            fSuccess = AGPDecommitAll( hdev, pvReservation,
                                       pAgpCommitMask,
                                       dwAgpCommitMaskSize,
                                       &dwTemp,
                                       dwTotalSize );

            /*
             * Should never fail and not much we can do if it does apart
             * from assert that something bad is happening.
             */
            DDASSERT( fSuccess );
        }
        if( pAgpCommitMask != NULL )
        {
            VFREEMEM( pAgpCommitMask );
        }
        if( pvReservation != NULL )
        {
            fSuccess = AGPFree( hdev, pvReservation );
        }

        /*
         * Again this should only fail if the OS is in an unstable state
         * or if I have screwed up (sadly the latter is all too likely)
         * so assert.
         */
        DDASSERT( fSuccess );
    }

} /* HeapVidMemFini */

/*
 * This is an external entry point which can be used by drivers to allocate
 * aligned surfaces.
 */
FLATPTR WINAPI DxDdHeapVidMemAllocAligned(
                LPVIDMEM lpVidMem,
                DWORD dwWidth,
                DWORD dwHeight,
                LPSURFACEALIGNMENT lpAlignment,
                LPLONG lpNewPitch )
{
    HANDLE  hdev;
    FLATPTR ptr;
    DWORD   dwSize;

    if( lpVidMem == NULL ||
        lpVidMem->lpHeap == NULL ||
        (lpVidMem->dwFlags & VIDMEM_HEAPDISABLED) )
    {
        return (FLATPTR) NULL;
    }

    if( lpVidMem->dwFlags & VIDMEM_ISNONLOCAL )
    {
        if( lpVidMem->lpHeap->pvPhysRsrv == NULL )
        {
            LPVIDMEM pHeap;
            DWORD    dwHeap;

            // If we haven't yet initialized the AGP heap, then we will
            // do so now. This could be dangerous since initializing the
            // heap causes the driver to get re-entered by the
            // UpdateNonLocalVidMemHeap call.
            EDD_DIRECTDRAW_GLOBAL* peDirectDrawGlobal =
                (EDD_DIRECTDRAW_GLOBAL*) OsGetAGPDeviceHandle(lpVidMem->lpHeap);

            pHeap = peDirectDrawGlobal->pvmList;
            for (dwHeap = 0;
                 dwHeap < peDirectDrawGlobal->dwNumHeaps;
                 pHeap++, dwHeap++)
            {
                if( pHeap == lpVidMem )
                {
                    break;
                }
            }
            if( dwHeap < peDirectDrawGlobal->dwNumHeaps )
            {
                InitAgpHeap( peDirectDrawGlobal,
                             dwHeap,
                             (HANDLE) peDirectDrawGlobal );
            }
            if( ( lpVidMem->lpHeap->pvPhysRsrv == NULL ) ||
                ( lpVidMem->dwFlags & VIDMEM_HEAPDISABLED ) )
            {
                return (FLATPTR) NULL;
            }
        }

        /*
         * As we may need to commit AGP memory we need a device handle
         * to communicate with the AGP controller. Rather than hunting
         * through the driver object list hoping we will find a
         * local object for this process we just create a handle
         * and discard it after the allocation. This should not be
         * performance critical code to start with.
         */
        hdev = OsGetAGPDeviceHandle( lpVidMem->lpHeap );
        if( hdev == NULL )
        {
            return 0;
        }
    }
    else
    {
        hdev = NULL;
    }

    /* Pass the caller's alignment and new pitch pointer through */
    ptr = HeapVidMemAlloc( lpVidMem, dwWidth, dwHeight,
                           hdev, lpAlignment, lpNewPitch, &dwSize );

    if( hdev != NULL )
    {
        OsCloseAGPDeviceHandle( hdev );
    }
    return ptr;
}

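/*
 * Illustrative sketch (editor's addition): a driver-side call through the
 * entry point above. The dimensions and the 64-byte start alignment are
 * hypothetical, and the Linear.dwStartAlignment field is assumed from the
 * usual SURFACEALIGNMENT layout; passing NULL for lpAlignment requests an
 * unaligned allocation. The zeroed dwFlags/dwReserved2 fields satisfy the
 * validation in HeapVidMemAlloc below.
 */
static FLATPTR ExampleDriverAlloc( LPVIDMEM pvm, LONG* plPitch )
{
    SURFACEALIGNMENT align;

    ZeroMemory( &align, sizeof( align ) );
    align.Linear.dwStartAlignment = 64;     /* hypothetical alignment */

    return DxDdHeapVidMemAllocAligned( pvm, 256, 256, &align, plPitch );
}
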
/*
 * HeapVidMemAlloc
 *
 * Top level video memory allocation function. Handles AGP stuff.
 */
FLATPTR WINAPI HeapVidMemAlloc( LPVIDMEM lpVidMem, DWORD x, DWORD y,
                                HANDLE hdev, LPSURFACEALIGNMENT lpAlignment,
                                LPLONG lpNewPitch, LPDWORD pdwSize )
{
    FLATPTR fpMem;
    DWORD   dwSize;

    DDASSERT( NULL != lpVidMem );
    DDASSERT( NULL != lpVidMem->lpHeap );

    /*
     * Validate the reserved and flags fields of the alignment structure
     */
    if( lpAlignment )
    {
        if( ( lpAlignment->Linear.dwReserved2 != 0 ) ||
            ( lpAlignment->Linear.dwFlags & ~( SURFACEALIGN_DISCARDABLE ) ) )
        {
            return NULL;
        }
    }

    if( ( lpVidMem->dwFlags & VIDMEM_ISNONLOCAL ) &&
        ( lpVidMem->lpHeap->pvPhysRsrv == NULL ) )
    {
        return NULL;
    }

    fpMem = InternalVidMemAlloc( lpVidMem->lpHeap, x, y, &dwSize,
                                 lpAlignment, lpNewPitch );
    if( 0UL == fpMem )
    {
        return fpMem;
    }

    if( lpVidMem->dwFlags & VIDMEM_ISNONLOCAL )
    {
        DWORD dwCommittedSize;
        BOOL  fSuccess;
        DWORD dwOffset;

        dwOffset = (DWORD)(fpMem - lpVidMem->fpStart);

        /*
         * Okay, we have the offset and the size we need to commit. So ask
         * the OS to commit memory to that portion of this previously
         * reserved GART range.
         */
        fSuccess = AGPCommit( hdev, lpVidMem->lpHeap->pvPhysRsrv,
                              dwOffset, dwSize,
                              lpVidMem->lpHeap->pAgpCommitMask,
                              &dwCommittedSize,
                              lpVidMem->lpHeap->dwTotalSize );
        if( !fSuccess )
        {
            /*
             * Couldn't commit. Must be out of memory.
             * Put the allocated memory back and fail.
             */
            DxDdHeapVidMemFree( lpVidMem->lpHeap, fpMem );
            return (FLATPTR) NULL;
        }
        lpVidMem->lpHeap->dwCommitedSize += dwCommittedSize;

        /*
         * Now we need to virtually commit the memory for all of the
         * DirectDrawLocals that we have. This is because some drivers
         * (nvidia) allocate a single buffer and then hand out pointers
         * into that buffer to multiple processes.
         */
        EDD_DIRECTDRAW_GLOBAL* peDirectDrawGlobal =
            (EDD_DIRECTDRAW_GLOBAL*) OsGetAGPDeviceHandle(lpVidMem->lpHeap);
        EDD_DIRECTDRAW_LOCAL*  peDirectDrawLocal;
        LPVIDMEM pHeap;
        DWORD    dwHeap;

        pHeap = peDirectDrawGlobal->pvmList;
        for (dwHeap = 0;
             dwHeap < peDirectDrawGlobal->dwNumHeaps;
             pHeap++, dwHeap++)
        {
            if( pHeap == lpVidMem )
            {
                break;
            }
        }
        if( dwHeap < peDirectDrawGlobal->dwNumHeaps )
        {
            peDirectDrawLocal = peDirectDrawGlobal->peDirectDrawLocalList;
            while( ( peDirectDrawLocal != NULL ) && fSuccess )
            {
                if( !( peDirectDrawLocal->fl & DD_LOCAL_DISABLED ) )
                {
                    fSuccess = AGPCommitVirtual( peDirectDrawLocal,
                                                 lpVidMem,
                                                 dwHeap,
                                                 dwOffset,
                                                 dwSize );
                }
                peDirectDrawLocal = peDirectDrawLocal->peDirectDrawLocalNext;
            }
        }
        else
        {
            fSuccess = FALSE;
        }
        if( !fSuccess )
        {
            /*
             * Something went wrong on the virtual commit, so fail the
             * allocation.
             */
            DxDdHeapVidMemFree( lpVidMem->lpHeap, fpMem );
            return (FLATPTR) NULL;
        }
    }

    if( pdwSize != NULL )
    {
        *pdwSize = dwSize;
    }
    return fpMem;

} /* HeapVidMemAlloc */

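/*
 * Illustrative sketch (editor's addition): the commit bookkeeping above in
 * miniature. AGPCommit marks the touched range of the reserved GART region
 * as physically committed in a bitmask; this standalone model assumes one
 * mask bit per 4 KB page (the real mask granularity is a 16-page chunk,
 * per GetHeapSizeInPages below).
 */
static void ExampleMarkCommitted( BYTE* pMask, DWORD dwOffset, DWORD dwSize )
{
    DWORD dwPage     = dwOffset / 4096;
    DWORD dwLastPage = ( dwOffset + dwSize - 1 ) / 4096;

    for( ; dwPage <= dwLastPage; dwPage++ )
    {
        pMask[dwPage / 8] |= (BYTE)( 1 << ( dwPage % 8 ) );
    }
}
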
/*
 * IsDifferentPixelFormat
 *
 * determine if two pixel formats are the same or not
 *
 * (CMcC) 12/14/95 Really useful - so no longer static
 *
 * This function really shouldn't be in a heap file but it's
 * needed by both the user and kernel code so this is a convenient
 * place to put it to have it shared.
 */
BOOL IsDifferentPixelFormat( LPDDPIXELFORMAT pdpf1, LPDDPIXELFORMAT pdpf2 )
{
    /*
     * same flags?
     */
    if( pdpf1->dwFlags != pdpf2->dwFlags )
    {
        VDPF(( 8, S, "Flags differ!" ));
        return TRUE;
    }

    /*
     * same bitcount for non-YUV surfaces?
     */
    if( !(pdpf1->dwFlags & (DDPF_YUV | DDPF_FOURCC)) )
    {
        if( pdpf1->dwRGBBitCount != pdpf2->dwRGBBitCount )
        {
            VDPF(( 8, S, "RGB Bitcount differs!" ));
            return TRUE;
        }
    }

    /*
     * same RGB properties?
     */
    if( pdpf1->dwFlags & DDPF_RGB )
    {
        if( pdpf1->dwRBitMask != pdpf2->dwRBitMask )
        {
            VDPF(( 8, S, "RBitMask differs!" ));
            return TRUE;
        }
        if( pdpf1->dwGBitMask != pdpf2->dwGBitMask )
        {
            VDPF(( 8, S, "GBitMask differs!" ));
            return TRUE;
        }
        if( pdpf1->dwBBitMask != pdpf2->dwBBitMask )
        {
            VDPF(( 8, S, "BBitMask differs!" ));
            return TRUE;
        }
        if( pdpf1->dwRGBAlphaBitMask != pdpf2->dwRGBAlphaBitMask )
        {
            VDPF(( 8, S, "RGBAlphaBitMask differs!" ));
            return TRUE;
        }
    }

    /*
     * same YUV properties?
     */
    if( pdpf1->dwFlags & DDPF_YUV )
    {
        VDPF(( 8, S, "YUV???" ));
        if( pdpf1->dwFourCC != pdpf2->dwFourCC )
        {
            return TRUE;
        }
        if( pdpf1->dwYUVBitCount != pdpf2->dwYUVBitCount )
        {
            return TRUE;
        }
        if( pdpf1->dwYBitMask != pdpf2->dwYBitMask )
        {
            return TRUE;
        }
        if( pdpf1->dwUBitMask != pdpf2->dwUBitMask )
        {
            return TRUE;
        }
        if( pdpf1->dwVBitMask != pdpf2->dwVBitMask )
        {
            return TRUE;
        }
        if( pdpf1->dwYUVAlphaBitMask != pdpf2->dwYUVAlphaBitMask )
        {
            return TRUE;
        }
    }
    /*
     * Possible to use FOURCCs w/o setting the DDPF_YUV flag
     * ScottM 7/11/96
     */
    else if( pdpf1->dwFlags & DDPF_FOURCC )
    {
        VDPF(( 8, S, "FOURCC???" ));
        if( pdpf1->dwFourCC != pdpf2->dwFourCC )
        {
            return TRUE;
        }
    }

    /*
     * If Interleaved Z then check Z bit masks are the same
     */
    if( pdpf1->dwFlags & DDPF_ZPIXELS )
    {
        VDPF(( 8, S, "ZPIXELS???" ));
        if( pdpf1->dwRGBZBitMask != pdpf2->dwRGBZBitMask )
            return TRUE;
    }

    return FALSE;

} /* IsDifferentPixelFormat */

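/*
 * Illustrative sketch (editor's addition): the typical caller's test, as
 * used by SurfaceCapsToAlignment below. A TRUE return means the formats
 * differ, so surfaces matching the primary are detected by negating it.
 */
static BOOL ExampleMatchesDisplayFormat( LPDDPIXELFORMAT pdpfDisplay,
                                         LPDDPIXELFORMAT pdpfSurface )
{
    return !IsDifferentPixelFormat( pdpfDisplay, pdpfSurface );
}
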
/*
 * SurfaceCapsToAlignment
 *
 * Return a pointer to the appropriate alignment element in a VMEMHEAP
 * structure given surface caps.
 */
LPSURFACEALIGNMENT SurfaceCapsToAlignment(
        LPVIDMEM lpVidmem,
        LPDDRAWI_DDRAWSURFACE_LCL lpSurfaceLcl,
        LPVIDMEMINFO lpVidMemInfo )
{
    LPVMEMHEAP lpHeap;
    LPDDSCAPS  lpCaps;
    LPDDRAWI_DDRAWSURFACE_GBL lpSurfaceGbl;

    DDASSERT( lpVidmem );
    DDASSERT( lpSurfaceLcl );
    DDASSERT( lpVidMemInfo );
    DDASSERT( lpVidmem->lpHeap );

    if( !lpVidmem->lpHeap )
        return NULL;

    lpCaps = &lpSurfaceLcl->ddsCaps;
    lpHeap = lpVidmem->lpHeap;
    lpSurfaceGbl = lpSurfaceLcl->lpGbl;

    if( (lpHeap->dwFlags & VMEMHEAP_ALIGNMENT) == 0 )
        return NULL;

    if( lpCaps->dwCaps & DDSCAPS_EXECUTEBUFFER )
    {
        if( lpHeap->Alignment.ddsCaps.dwCaps & DDSCAPS_EXECUTEBUFFER )
        {
            VDPF((6,V,"Aligning surface as execute buffer"));
            return &lpHeap->Alignment.ExecuteBuffer;
        }
        /*
         * If the surface is an execute buffer, then no other
         * alignment can apply
         */
        return NULL;
    }

    if( lpCaps->dwCaps & DDSCAPS_OVERLAY )
    {
        if( lpHeap->Alignment.ddsCaps.dwCaps & DDSCAPS_OVERLAY )
        {
            VDPF((6,V,"Aligning surface as overlay"));
            return &lpHeap->Alignment.Overlay;
        }
        /*
         * If the surface is an overlay, then no other alignment can apply
         */
        return NULL;
    }

    if( lpCaps->dwCaps & DDSCAPS_TEXTURE )
    {
        if( lpHeap->Alignment.ddsCaps.dwCaps & DDSCAPS_TEXTURE )
        {
            VDPF((6,V,"Aligning surface as texture"));
            return &lpHeap->Alignment.Texture;
        }
        /*
         * If it's a texture, it can't be an offscreen or any of the others
         */
        return NULL;
    }

    if( lpCaps->dwCaps & DDSCAPS_ZBUFFER )
    {
        if( lpHeap->Alignment.ddsCaps.dwCaps & DDSCAPS_ZBUFFER )
        {
            VDPF((6,V,"Aligning surface as Z buffer"));
            return &lpHeap->Alignment.ZBuffer;
        }
        return NULL;
    }

    if( lpCaps->dwCaps & DDSCAPS_ALPHA )
    {
        if( lpHeap->Alignment.ddsCaps.dwCaps & DDSCAPS_ALPHA )
        {
            VDPF((6,V,"Aligning surface as alpha buffer"));
            return &lpHeap->Alignment.AlphaBuffer;
        }
        return NULL;
    }

    /*
     * We need to give a surface which may potentially become a back buffer
     * the alignment which is reserved for potentially visible back buffers.
     * This includes any surface which has made it through the above checks
     * and has the same dimensions as the primary.
     * Note we check only the dimensions of the primary. There's an outside
     * chance that an app could create its back buffer before it creates
     * the primary.
     */
    do
    {
        if( lpSurfaceLcl->dwFlags & DDRAWISURF_HASPIXELFORMAT )
        {
            if( IsDifferentPixelFormat( &lpVidMemInfo->ddpfDisplay,
                                        &lpSurfaceGbl->ddpfSurface ) )
            {
                /*
                 * Different pixel format from primary means this surface
                 * cannot be part of primary chain
                 */
                break;
            }
        }
        if( (DWORD)lpSurfaceGbl->wWidth != lpVidMemInfo->dwDisplayWidth )
            break;
        if( (DWORD)lpSurfaceGbl->wHeight != lpVidMemInfo->dwDisplayHeight )
            break;

        /*
         * This surface could potentially be part of primary chain.
         * It has the same pixel format as the primary and the same
         * dimensions.
         */
        if( lpHeap->Alignment.ddsCaps.dwCaps & DDSCAPS_FLIP )
        {
            VDPF((6,V,"Aligning surface as potential primary surface"));
            return &lpHeap->Alignment.FlipTarget;
        }
        /*
         * Drop through and check for offscreen if driver specified no
         * part-of-primary-chain alignment
         */
        break;
    } while (0);

    if( lpCaps->dwCaps & DDSCAPS_OFFSCREENPLAIN )
    {
        if( lpHeap->Alignment.ddsCaps.dwCaps & DDSCAPS_OFFSCREENPLAIN )
        {
            VDPF((6,V,"Aligning surface as offscreen plain"));
            return &lpHeap->Alignment.Offscreen;
        }
    }

    VDPF((6,V,"No extended alignment for surface"));
    return NULL;
}

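/*
 * Illustrative sketch (editor's addition): how the alignment lookup above
 * combines with the aligned allocator, mirroring the call pattern used by
 * DdHeapAlloc below.
 */
static FLATPTR ExampleAllocWithCapsAlignment(
        LPVIDMEM lpVidmem,
        LPDDRAWI_DDRAWSURFACE_LCL lpSurfaceLcl,
        LPVIDMEMINFO lpVidMemInfo,
        DWORD dwWidth,
        DWORD dwHeight,
        LPLONG lpNewPitch )
{
    LPSURFACEALIGNMENT lpAlign;

    lpAlign = SurfaceCapsToAlignment( lpVidmem, lpSurfaceLcl, lpVidMemInfo );
    return DxDdHeapVidMemAllocAligned( lpVidmem, dwWidth, dwHeight,
                                       lpAlign, lpNewPitch );
}
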
/*
 * DdHeapAlloc
 *
 * Search all heaps for one that has space and the appropriate
 * caps for the requested surface type and size.
 *
 * We AND the caps bits required and the caps bits not allowed
 * by the video memory. If the result is zero, it is OK.
 *
 * This is called in 2 passes. Pass 1 is the preferred memory state,
 * pass 2 is the "oh no, no memory" state.
 *
 * On pass 1, we use ddsCaps in the VIDMEM struct.
 * On pass 2, we use ddsCapsAlt in the VIDMEM struct.
 */
FLATPTR DdHeapAlloc( DWORD dwNumHeaps,
                     LPVIDMEM pvmHeaps,
                     HANDLE hdev,
                     LPVIDMEMINFO lpVidMemInfo,
                     DWORD dwWidth,
                     DWORD dwHeight,
                     LPDDRAWI_DDRAWSURFACE_LCL lpSurfaceLcl,
                     DWORD dwFlags,
                     LPVIDMEM *ppvmHeap,
                     LPLONG plNewPitch,
                     LPDWORD pdwNewCaps,
                     LPDWORD pdwSize )
{
    LPVIDMEM pvm;
    DWORD    vm_caps;
    int      i;
    FLATPTR  pvidmem;
    HANDLE   hvxd;
    LPDDSCAPS   lpCaps;
    LPDDSCAPSEX lpExtendedRestrictions;
    LPDDSCAPSEX lpExtendedCaps;

    DDASSERT( NULL != pdwNewCaps );
    DDASSERT( NULL != lpSurfaceLcl );

    lpCaps = &lpSurfaceLcl->ddsCaps;
    lpExtendedCaps = &lpSurfaceLcl->lpSurfMore->ddsCapsEx;

    for( i = 0; i < (int)dwNumHeaps; i++ )
    {
        pvm = &pvmHeaps[i];

        // Skip disabled heaps.
        if( pvm->dwFlags & VIDMEM_HEAPDISABLED )
        {
            continue;
        }

        /*
         * Skip rectangular heaps if we were told to.
         */
        if( dwFlags & DDHA_SKIPRECTANGULARHEAPS )
        {
            if( pvm->dwFlags & VIDMEM_ISRECTANGULAR )
            {
                continue;
            }
        }

        /*
         * If local or non-local video memory has been explicitly
         * specified then ignore heaps which don't match the required
         * memory type.
         */
        if( ( lpCaps->dwCaps & DDSCAPS_LOCALVIDMEM ) &&
            ( pvm->dwFlags & VIDMEM_ISNONLOCAL ) )
        {
            VDPF(( 4, V, "Local video memory was requested but heap is "
                   "non local. Ignoring heap %d", i ));
            continue;
        }
        if( ( lpCaps->dwCaps & DDSCAPS_NONLOCALVIDMEM ) &&
            !( pvm->dwFlags & VIDMEM_ISNONLOCAL ) )
        {
            VDPF(( 4, V, "Non-local video memory was requested but "
                   "heap is local. Ignoring heap %d", i ));
            continue;
        }
        if( !( lpCaps->dwCaps & DDSCAPS_NONLOCALVIDMEM ) &&
            ( pvm->dwFlags & VIDMEM_ISNONLOCAL ) &&
            ( dwFlags & DDHA_ALLOWNONLOCALMEMORY ) )
        {
            /*
             * We can allow textures to fail over to DMA model cards
             * if the card exposes an appropriate heap. This won't
             * affect cards which can't texture from nonlocal, because
             * they won't expose such a heap. This mod doesn't affect
             * execute model because all surfaces fail over to nonlocal
             * for them.
             * Note that we should only fail over to nonlocal if the
             * surface wasn't explicitly requested in local. There is a
             * clause a few lines up which guarantees this.
             */
            if( !(lpCaps->dwCaps & DDSCAPS_TEXTURE) )
            {
                VDPF(( 4, V, "Non-local memory not explicitly requested "
                       "for non-texture surface. Ignoring non-local heap %d",
                       i ));
                continue;
            }

            /*
             * If the device can't texture out of AGP, we need to fail this
             * heap, since the app is probably expecting to texture out of
             * this surface.
             */
            if( !(dwFlags & DDHA_ALLOWNONLOCALTEXTURES) )
            {
                continue;
            }
        }

        if( dwFlags & DDHA_USEALTCAPS )
        {
            vm_caps = pvm->ddsCapsAlt.dwCaps;
            lpExtendedRestrictions = &(pvm->lpHeap->ddsCapsExAlt);
        }
        else
        {
            vm_caps = pvm->ddsCaps.dwCaps;
            lpExtendedRestrictions = &(pvm->lpHeap->ddsCapsEx);
        }

        if( ((lpCaps->dwCaps & vm_caps) == 0) &&
            ((lpExtendedRestrictions->dwCaps2 & lpExtendedCaps->dwCaps2) == 0) &&
            ((lpExtendedRestrictions->dwCaps3 & lpExtendedCaps->dwCaps3) == 0) &&
            ((lpExtendedRestrictions->dwCaps4 & lpExtendedCaps->dwCaps4) == 0) )
        {
            pvidmem = HeapVidMemAlloc(
                            pvm,
                            dwWidth,
                            dwHeight,
                            hdev,
                            SurfaceCapsToAlignment(pvm, lpSurfaceLcl, lpVidMemInfo),
                            plNewPitch,
                            pdwSize );
            if( pvidmem != (FLATPTR) NULL )
            {
                *ppvmHeap = pvm;
                if( pvm->dwFlags & VIDMEM_ISNONLOCAL )
                    *pdwNewCaps |= DDSCAPS_NONLOCALVIDMEM;
                else
                    *pdwNewCaps |= DDSCAPS_LOCALVIDMEM;
                return pvidmem;
            }
        }
    }
    return (FLATPTR) NULL;

} /* DdHeapAlloc */

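/*
 * Illustrative sketch (editor's addition): the two-pass calling pattern
 * described in the comment block above. The caller tries the preferred
 * caps first and retries with the alternate caps (DDHA_USEALTCAPS) only
 * when every heap refused the request. The wrapper itself is hypothetical.
 */
static FLATPTR ExampleTwoPassAlloc( DWORD dwNumHeaps,
                                    LPVIDMEM pvmHeaps,
                                    HANDLE hdev,
                                    LPVIDMEMINFO lpVidMemInfo,
                                    DWORD dwWidth,
                                    DWORD dwHeight,
                                    LPDDRAWI_DDRAWSURFACE_LCL lpSurfaceLcl,
                                    LPVIDMEM *ppvmHeap,
                                    LPLONG plNewPitch,
                                    LPDWORD pdwNewCaps,
                                    LPDWORD pdwSize )
{
    FLATPTR ptr;

    /* Pass 1: preferred placement (ddsCaps). */
    ptr = DdHeapAlloc( dwNumHeaps, pvmHeaps, hdev, lpVidMemInfo,
                       dwWidth, dwHeight, lpSurfaceLcl,
                       0, ppvmHeap, plNewPitch, pdwNewCaps, pdwSize );
    if( ptr == (FLATPTR) NULL )
    {
        /* Pass 2: fall back to the alternate caps (ddsCapsAlt). */
        ptr = DdHeapAlloc( dwNumHeaps, pvmHeaps, hdev, lpVidMemInfo,
                           dwWidth, dwHeight, lpSurfaceLcl,
                           DDHA_USEALTCAPS, ppvmHeap, plNewPitch,
                           pdwNewCaps, pdwSize );
    }
    return ptr;
}
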
/*
 * GetHeapSizeInPages
 *
 * This is called to determine how much memory should be allocated
 * for the commit mask. Initially the mask had an entry for each page,
 * so it returned the number of pages in the heap, but the commit mask
 * now has a bit for each 16-page chunk, so we now return the number of
 * chunks in the heap.
 */
DWORD GetHeapSizeInPages( LPVIDMEM lpVidMem, LONG pitch )
{
    DWORD dwSize;

    if( lpVidMem->dwFlags & VIDMEM_ISLINEAR )
    {
        dwSize = (DWORD)(lpVidMem->fpEnd - lpVidMem->fpStart) + 1UL;
    }
    else
    {
        DDASSERT( lpVidMem->dwFlags & VIDMEM_ISRECTANGULAR );
        dwSize = (pitch * lpVidMem->dwHeight);
    }
    return AGPGetChunkCount( dwSize );
}

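/*
 * Illustrative sketch (editor's addition) of the chunk arithmetic the
 * comment above describes, assuming 4 KB pages and 16-page chunks. The
 * real computation lives in AGPGetChunkCount, which is not in this file.
 */
static DWORD ExampleChunkCount( DWORD dwHeapBytes )
{
    DWORD dwChunkBytes = 4096UL * 16UL;     /* assumed chunk size */

    return ( dwHeapBytes + dwChunkBytes - 1 ) / dwChunkBytes;   /* round up */
}
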
/*
 * CleanupAgpCommits
 *
 * Some drivers leave outstanding allocations after an app exits, so we
 * cannot completely clean up the AGP heap. What this function does is
 * decommit as much of the physical AGP memory as it can: it determines
 * what memory is still allocated and then decommits everything else.
 */
VOID CleanupAgpCommits( LPVIDMEM lpVidMem, HANDLE hdev,
                        EDD_DIRECTDRAW_GLOBAL* peDirectDrawGlobal,
                        int iHeapIndex )
{
    BYTE* pGoodCommits;
    BYTE* pAgpCommitMask;
    BYTE* pVirtCommitMask;
    BYTE* pTempMask;
    DWORD dwAgpCommitMaskSize;
    DWORD i;
    DWORD dwOffset;
    DWORD dwSize;
    DWORD dwDecommittedSize;
    EDD_DIRECTDRAW_LOCAL* peDirectDrawLocal;
    EDD_VMEMMAPPING*      peMap;

    if( ( lpVidMem->lpHeap == NULL ) ||
        ( lpVidMem->lpHeap->pAgpCommitMask == NULL ) ||
        ( lpVidMem->lpHeap->dwAgpCommitMaskSize == 0 ) ||
        ( lpVidMem->lpHeap->dwCommitedSize == 0 ) ||
        ( lpVidMem->lpHeap->pvPhysRsrv == NULL ) )
    {
        return;
    }

    pAgpCommitMask      = lpVidMem->lpHeap->pAgpCommitMask;
    dwAgpCommitMaskSize = lpVidMem->lpHeap->dwAgpCommitMaskSize;

    pGoodCommits = (BYTE*) PALLOCMEM( dwAgpCommitMaskSize, 'pddG' );
    if( pGoodCommits == NULL )
    {
        return;
    }
    pVirtCommitMask = (BYTE*) PALLOCMEM( dwAgpCommitMaskSize, 'pddG' );
    if( pVirtCommitMask == NULL )
    {
        VFREEMEM( pGoodCommits );
        return;
    }

    /*
     * Walk the alloc list and build the list of all the pages that should
     * be committed.
     */
    if( lpVidMem->lpHeap->dwFlags & VMEMHEAP_LINEAR )
    {
        LPVMEML pAlloc = (LPVMEML) lpVidMem->lpHeap->allocList;

        while( pAlloc != NULL )
        {
            dwOffset = (DWORD)(pAlloc->ptr - lpVidMem->fpStart);
            dwSize   = pAlloc->size;
            AGPUpdateCommitMask( pGoodCommits, dwOffset, dwSize,
                                 lpVidMem->lpHeap->dwTotalSize );
            pAlloc = pAlloc->next;
        }
    }
    else
    {
        LPVMEMR pAlloc = (LPVMEMR) lpVidMem->lpHeap->allocList;

        /*
         * This is a circular list where the end of the list is denoted by
         * a node containing a sentinel value of 0x7fffffff.
         */
        while( ( pAlloc != NULL ) &&
               ( pAlloc->size != 0x7fffffff ) )
        {
            dwOffset = (DWORD)(pAlloc->ptr - lpVidMem->fpStart);
            dwSize   = (lpVidMem->lpHeap->stride *
                        (pAlloc->cy - 1)) + pAlloc->cx;
            AGPUpdateCommitMask( pGoodCommits, dwOffset, dwSize,
                                 lpVidMem->lpHeap->dwTotalSize );
            pAlloc = pAlloc->next;
        }
    }

    /*
     * Check here to verify that every page that we think should be
     * committed actually is committed.
     */
#if DBG
    {
        BYTE  bit;
        DWORD dwPage;
        DWORD dwNumPages;

        bit = 1;
        dwNumPages = dwAgpCommitMaskSize * BITS_IN_BYTE;
        dwPage = 0;
        while( dwPage < dwNumPages )
        {
            ASSERTGDI( !( !(pAgpCommitMask[dwPage/BITS_IN_BYTE] & bit) &&
                          (pGoodCommits[dwPage/BITS_IN_BYTE] & bit) ),
                       "Page not committed when we think it should be!");
            if( bit == 0x80 )
            {
                bit = 1;
            }
            else
            {
                bit <<= 1;
            }
            dwPage++;
        }
    }
#endif

    /*
     * Now build a list of pages that are committed but that don't need to
     * be. To save space, we re-use pAgpCommitMask for this purpose.
     */
    for( i = 0; i < dwAgpCommitMaskSize; i++ )
    {
        pAgpCommitMask[i] ^= pGoodCommits[i];
    }

    /*
     * We don't want to physically decommit the memory w/o first virtually
     * decommitting it for each process.
     */
    peDirectDrawLocal = peDirectDrawGlobal->peDirectDrawLocalList;
    while( peDirectDrawLocal != NULL )
    {
        if( peDirectDrawLocal->ppeMapAgp != NULL )
        {
            peMap = peDirectDrawLocal->ppeMapAgp[iHeapIndex];
            if( (peMap != NULL) && !(peDirectDrawLocal->fl & DD_LOCAL_DISABLED) )
            {
                // Replace the committed mask with the mask that we want to
                // decommit.
                ASSERTGDI( (dwAgpCommitMaskSize == peMap->dwAgpVirtualCommitMaskSize),
                           "Virtual AGP mask size does not equal physical mask size");
                memcpy( pVirtCommitMask, pAgpCommitMask, dwAgpCommitMaskSize );
                pTempMask = peMap->pAgpVirtualCommitMask;
                peMap->pAgpVirtualCommitMask = pVirtCommitMask;

                // pVirtCommitMask now contains all of the pages that are
                // physically committed but that don't need to be, but that
                // doesn't mean that these are virtually committed, so we AND
                // out the pages that are not virtually committed.
                for( i = 0; i < dwAgpCommitMaskSize; i++ )
                {
                    pVirtCommitMask[i] &= pTempMask[i];
                }
                AGPDecommitVirtual( peMap,
                                    peDirectDrawLocal->peDirectDrawGlobal,
                                    peDirectDrawLocal,
                                    lpVidMem->lpHeap->dwTotalSize );

                // pTempMask contains all of the pages that used to be
                // virtually committed but are not necessarily committed now.
                // pGoodCommits contains all of the physical pages that need
                // to be kept, so ANDing them gives us the pages that are
                // currently committed.
                peMap->pAgpVirtualCommitMask = pTempMask;
                for( i = 0; i < dwAgpCommitMaskSize; i++ )
                {
                    pTempMask[i] &= pGoodCommits[i];
                }
            }
        }
        peDirectDrawLocal = peDirectDrawLocal->peDirectDrawLocalNext;
    }

    AGPDecommitAll( hdev,
                    lpVidMem->lpHeap->pvPhysRsrv,
                    pAgpCommitMask,
                    dwAgpCommitMaskSize,
                    &dwDecommittedSize,
                    lpVidMem->lpHeap->dwTotalSize );
    lpVidMem->lpHeap->dwCommitedSize -= dwDecommittedSize;
    memcpy( pAgpCommitMask, pGoodCommits, dwAgpCommitMaskSize );

    VFREEMEM( pGoodCommits );
    VFREEMEM( pVirtCommitMask );
}

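/*
 * Illustrative sketch (editor's addition) of the mask algebra used above:
 * when every bit of "needed" is also set in "committed" (the DBG block
 * asserts exactly this), committed XOR needed leaves precisely the bits
 * that are safe to decommit.
 */
static void ExampleBuildDecommitMask( BYTE* pCommitted, const BYTE* pNeeded,
                                      DWORD cb )
{
    DWORD i;

    for( i = 0; i < cb; i++ )
    {
        pCommitted[i] ^= pNeeded[i];    /* now: committed AND NOT needed */
    }
}
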
/*
 * SwapHeaps
 *
 * During a mode change, we create a new heap and copy it over the old heap.
 * For AGP heaps, however, we really only want one instance of the heap
 * active at any given time, so the new heap is not fully initialized. This
 * means that we just swapped our good heap with a bad one, which means that
 * we need to selectively swap certain elements of the heap back.
 */
void SwapHeaps( LPVIDMEM pOldVidMem, LPVIDMEM pNewVidMem )
{
    LPVMEMHEAP pOldHeap = pOldVidMem->lpHeap;
    LPVMEMHEAP pNewHeap = pNewVidMem->lpHeap;
    FLATPTR    fpTemp;
    DWORD      dwTemp;
    LPVOID     pvTemp;
    LARGE_INTEGER liTemp;

    fpTemp = pOldVidMem->fpStart;
    pOldVidMem->fpStart = pNewVidMem->fpStart;
    pNewVidMem->fpStart = fpTemp;

    fpTemp = pOldVidMem->fpEnd;
    pOldVidMem->fpEnd = pNewVidMem->fpEnd;
    pNewVidMem->fpEnd = fpTemp;

    dwTemp = pOldHeap->stride;
    pOldHeap->stride = pNewHeap->stride;
    pNewHeap->stride = dwTemp;

    pvTemp = pOldHeap->freeList;
    pOldHeap->freeList = pNewHeap->freeList;
    pNewHeap->freeList = pvTemp;

    pvTemp = pOldHeap->allocList;
    pOldHeap->allocList = pNewHeap->allocList;
    pNewHeap->allocList = pvTemp;

    pOldHeap->dwTotalSize = min(pOldHeap->dwTotalSize, pNewHeap->dwTotalSize);

    fpTemp = pOldHeap->fpGARTLin;
    pOldHeap->fpGARTLin = pNewHeap->fpGARTLin;
    pNewHeap->fpGARTLin = fpTemp;

    fpTemp = pOldHeap->fpGARTDev;
    pOldHeap->fpGARTDev = pNewHeap->fpGARTDev;
    pNewHeap->fpGARTDev = fpTemp;

    dwTemp = pOldHeap->dwCommitedSize;
    pOldHeap->dwCommitedSize = pNewHeap->dwCommitedSize;
    pNewHeap->dwCommitedSize = dwTemp;

    liTemp = pOldHeap->liPhysAGPBase;
    pOldHeap->liPhysAGPBase = pNewHeap->liPhysAGPBase;
    pNewHeap->liPhysAGPBase = liTemp;

    pvTemp = pOldHeap->pvPhysRsrv;
    pOldHeap->pvPhysRsrv = pNewHeap->pvPhysRsrv;
    pNewHeap->pvPhysRsrv = pvTemp;

    pvTemp = (LPVOID) pOldHeap->pAgpCommitMask;
    pOldHeap->pAgpCommitMask = pNewHeap->pAgpCommitMask;
    pNewHeap->pAgpCommitMask = (BYTE*) pvTemp;

    dwTemp = pOldHeap->dwAgpCommitMaskSize;
    pOldHeap->dwAgpCommitMaskSize = pNewHeap->dwAgpCommitMaskSize;
    pNewHeap->dwAgpCommitMaskSize = dwTemp;
}