Leaked source code of Windows Server 2003


/******************************Module*Header*******************************\
* Module Name: genaccel.c                                                  *
*                                                                          *
* This module provides support routines for acceleration functions.       *
*                                                                          *
* Created: 18-Feb-1994                                                     *
* Author: Otto Berkes [ottob]                                              *
*                                                                          *
* Copyright (c) 1994 Microsoft Corporation                                 *
\**************************************************************************/

#include "precomp.h"
#pragma hdrstop

#include "genline.h"

#ifdef GL_WIN_specular_fog
#define DO_NICEST_FOG(gc)\
    ((gc->state.hints.fog == GL_NICEST) && !(gc->polygon.shader.modeFlags & __GL_SHADE_SPEC_FOG))
#else //GL_WIN_specular_fog
#define DO_NICEST_FOG(gc)\
    (gc->state.hints.fog == GL_NICEST)
#endif //GL_WIN_specular_fog

static ULONG internalSolidTexture[4] = {0xffffffff, 0xffffffff,
                                        0xffffffff, 0xffffffff};

GENTEXCACHE *GetGenTexCache(__GLcontext *gc, __GLtexture *tex)
{
    ULONG size;
    GENTEXCACHE *pGenTex;
    ULONG internalFormat;
    GLuint modeFlags = gc->polygon.shader.modeFlags;

    // Replace maps are only used for a subset of possible modes
    //   8 or 16bpp
    //   16-bit Z
    //
    //   No dithering.  Since dithering can turn on and off there
    //   are two cases:
    //     Dither off at TexImage time but on at texturing time -
    //       We create a map that's unused
    //     Dither on and then off - We won't create a map at
    //       TexImage time but it'll be created on the fly when
    //       dithering is turned on and everything is repicked
    //
    // Replace maps aren't created for DirectDraw textures because
    // the data isn't constant
    if (GENACCEL(gc).bpp < 8 ||
        GENACCEL(gc).bpp > 16 ||
        ((modeFlags & (__GL_SHADE_DEPTH_TEST | __GL_SHADE_DEPTH_ITER)) &&
         gc->modes.depthBits > 16) ||
        (modeFlags & __GL_SHADE_DITHER) ||
        gc->texture.ddtex.levels > 0)
    {
        return NULL;
    }

    internalFormat = tex->level[0].internalFormat;

    // We only support 8-bit palettes that are fully populated
    if (internalFormat == GL_COLOR_INDEX16_EXT ||
        (internalFormat == GL_COLOR_INDEX8_EXT &&
         tex->paletteSize != 256))
    {
        return NULL;
    }

    pGenTex = tex->pvUser;

    // Check and see whether the cached information can be reused
    // for the texture passed in
    if (pGenTex != NULL)
    {
        // gc's don't match so this must be a shared texture
        // Don't attempt to create a replace map for this gc
        if (gc != pGenTex->gc)
        {
            return NULL;
        }

        // Size and format must match to reuse the existing data area
        // If they don't, release the existing buffer.  A new one
        // will then be allocated
        if (internalFormat == GL_COLOR_INDEX8_EXT)
        {
            if (pGenTex->internalFormat != internalFormat ||
                pGenTex->width != tex->paletteTotalSize)
            {
                GCFREE(gc, pGenTex);
                tex->pvUser = NULL;
            }
        }
        else
        {
            if (pGenTex->internalFormat != internalFormat ||
                pGenTex->width != tex->level[0].width ||
                pGenTex->height != tex->level[0].height)
            {
                GCFREE(gc, pGenTex);
                tex->pvUser = NULL;
            }
        }
    }

    if (tex->pvUser == NULL)
    {
        if (internalFormat == GL_COLOR_INDEX8_EXT)
        {
            size = tex->paletteTotalSize * sizeof(DWORD);
        }
        else
        {
            size = tex->level[0].width * tex->level[0].height *
                GENACCEL(gc).xMultiplier;
        }

        pGenTex = (GENTEXCACHE *)GCALLOC(gc, size + sizeof(GENTEXCACHE));
        if (pGenTex != NULL)
        {
            tex->pvUser = pGenTex;
            pGenTex->gc = gc;
            pGenTex->paletteTimeStamp =
                ((__GLGENcontext *)gc)->PaletteTimestamp;
            if (internalFormat == GL_COLOR_INDEX8_EXT)
            {
                pGenTex->height = 0;
                pGenTex->width = tex->paletteTotalSize;
            }
            else
            {
                pGenTex->height = tex->level[0].height;
                pGenTex->width = tex->level[0].width;
            }
            pGenTex->internalFormat = internalFormat;
            pGenTex->texImageReplace = (UCHAR *)(pGenTex+1);
        }
    }

    return pGenTex;
}

BOOL FASTCALL __fastGenLoadTexImage(__GLcontext *gc, __GLtexture *tex)
{
    UCHAR *texBuffer;
    GLint internalFormat = tex->level[0].internalFormat;
    GENTEXCACHE *pGenTex;

    if (tex->level[0].buffer == NULL ||
        ((internalFormat != GL_BGR_EXT) &&
         (internalFormat != GL_BGRA_EXT) &&
         (internalFormat != GL_COLOR_INDEX8_EXT)))
    {
        return FALSE;
    }

    // OK, the texture doesn't have a compressed replace-mode format, so
    // make one...

    if ((internalFormat == GL_BGR_EXT) ||
        (internalFormat == GL_BGRA_EXT)) {
        ULONG size;
        UCHAR *replaceBuffer;
        ULONG bytesPerPixel = GENACCEL(gc).xMultiplier;

        pGenTex = GetGenTexCache(gc, tex);
        if (pGenTex == NULL)
        {
            return FALSE;
        }

        texBuffer = (UCHAR *)tex->level[0].buffer;
        replaceBuffer = pGenTex->texImageReplace;

        {
            __GLcolorBuffer *cfb = gc->drawBuffer;
            ULONG rShift = cfb->redShift;
            ULONG gShift = cfb->greenShift;
            ULONG bShift = cfb->blueShift;
            ULONG rBits = ((__GLGENcontext *)gc)->gsurf.pfd.cRedBits;
            ULONG gBits = ((__GLGENcontext *)gc)->gsurf.pfd.cGreenBits;
            ULONG bBits = ((__GLGENcontext *)gc)->gsurf.pfd.cBlueBits;
            BYTE *pXlat = ((__GLGENcontext *)gc)->pajTranslateVector;
            ULONG i;

            size = tex->level[0].width * tex->level[0].height;

            for (i = 0; i < size; i++, texBuffer += 4) {
                ULONG color;

                color = ((((ULONG)texBuffer[2] << rBits) >> 8) << rShift) |
                        ((((ULONG)texBuffer[1] << gBits) >> 8) << gShift) |
                        ((((ULONG)texBuffer[0] << bBits) >> 8) << bShift);

                if (GENACCEL(gc).bpp == 8)
                    *replaceBuffer = pXlat[color & 0xff];
                else
                    *((USHORT *)replaceBuffer) = (USHORT)color;
                replaceBuffer += bytesPerPixel;
            }
        }
    } else {
        ULONG size;
        ULONG *replaceBuffer;

        // If we don't have palette data yet we can't create the
        // fast version.  It will be created when the ColorTable
        // call happens
        if (tex->paletteTotalData == NULL)
        {
            return FALSE;
        }

        pGenTex = GetGenTexCache(gc, tex);
        if (pGenTex == NULL)
        {
            return FALSE;
        }

        texBuffer = (UCHAR *)tex->paletteTotalData;
        replaceBuffer = (ULONG *)pGenTex->texImageReplace;
        size = tex->paletteTotalSize;

        {
            __GLcolorBuffer *cfb = gc->drawBuffer;
            ULONG rShift = cfb->redShift;
            ULONG gShift = cfb->greenShift;
            ULONG bShift = cfb->blueShift;
            ULONG rBits = ((__GLGENcontext *)gc)->gsurf.pfd.cRedBits;
            ULONG gBits = ((__GLGENcontext *)gc)->gsurf.pfd.cGreenBits;
            ULONG bBits = ((__GLGENcontext *)gc)->gsurf.pfd.cBlueBits;
            BYTE *pXlat = ((__GLGENcontext *)gc)->pajTranslateVector;
            ULONG i;

            for (i = 0; i < size; i++, texBuffer += 4) {
                ULONG color;

                color = ((((ULONG)texBuffer[2] << rBits) >> 8) << rShift) |
                        ((((ULONG)texBuffer[1] << gBits) >> 8) << gShift) |
                        ((((ULONG)texBuffer[0] << bBits) >> 8) << bShift);

                if (GENACCEL(gc).bpp == 8)
                    color = pXlat[color & 0xff];

                *replaceBuffer++ = (color | ((ULONG)texBuffer[3] << 24));
            }
        }
    }

    GENACCEL(gc).texImageReplace =
        ((GENTEXCACHE *)tex->pvUser)->texImageReplace;
    return TRUE;
}
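
Editor's note: the two loops above collapse an 8-bit-per-channel BGRA texel (or palette entry) into the destination pixel format with the expression ((channel << cBits) >> 8) << shift, i.e. they keep the top cBits of each channel and move them to the surface's bit position. A minimal standalone sketch of that arithmetic, assuming a 5-6-5 surface (redShift 11, greenShift 5, blueShift 0); the helper name PackChannel is ours, not part of this file:

#include <stdio.h>

static unsigned int PackChannel(unsigned int c8, unsigned int cBits,
                                unsigned int shift)
{
    /* Keep the top cBits of the 8-bit channel and move them into position. */
    return ((c8 << cBits) >> 8) << shift;
}

int main(void)
{
    unsigned char texel[4] = { 0x20, 0x80, 0xff, 0xff };   /* B, G, R, A */
    unsigned int color = PackChannel(texel[2], 5, 11) |    /* red   */
                         PackChannel(texel[1], 6, 5)  |    /* green */
                         PackChannel(texel[0], 5, 0);      /* blue  */
    printf("565 pixel = 0x%04x\n", color);                 /* prints 0xfc04 */
    return 0;
}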

/*
** Pick the fastest triangle rendering implementation available based on
** the current mode set.  Use any available accelerated resources if
** available, or use the generic routines for unsupported modes.
*/

void FASTCALL __fastGenCalcDeltas(__GLcontext *gc, __GLvertex *a, __GLvertex *b, __GLvertex *c);
void FASTCALL __fastGenCalcDeltasTexRGBA(__GLcontext *gc, __GLvertex *a, __GLvertex *b, __GLvertex *c);
void FASTCALL __fastGenDrvCalcDeltas(__GLcontext *gc, __GLvertex *a, __GLvertex *b, __GLvertex *c);

void __fastGenSetInitialParameters(__GLcontext *gc, const __GLvertex *a,
                                   __GLfloat dx, __GLfloat dy);
void __fastGenSetInitialParametersTexRGBA(__GLcontext *gc, const __GLvertex *a,
                                          __GLfloat dx, __GLfloat dy);

void __ZippyFT(
    __GLcontext *gc,
    __GLvertex *a,
    __GLvertex *b,
    __GLvertex *c,
    GLboolean ccw);

VOID FASTCALL InitAccelTextureValues(__GLcontext *gc, __GLtexture *tex)
{
    ULONG wLog2;
    ULONG hLog2;

    GENACCEL(gc).tex = tex;
    GENACCEL(gc).texImage = (ULONG *)tex->level[0].buffer;

    if (tex->level[0].internalFormat == GL_COLOR_INDEX8_EXT ||
        tex->level[0].internalFormat == GL_COLOR_INDEX16_EXT)
    {
        GENACCEL(gc).texPalette = (ULONG *)tex->paletteTotalData;
    }
    else
    {
        GENACCEL(gc).texPalette = NULL;
    }

    wLog2 = tex->level[0].widthLog2;
    hLog2 = tex->level[0].heightLog2;

    GENACCEL(gc).sMask = (~(~0 << wLog2)) << TEX_SCALESHIFT;
    GENACCEL(gc).tMask = (~(~0 << hLog2)) << TEX_SCALESHIFT;
    GENACCEL(gc).tShift = TEX_SCALESHIFT - (wLog2 + TEX_SHIFTPER4BPPTEXEL);
    GENACCEL(gc).tMaskSubDiv =
        (~(~0 << hLog2)) << (wLog2 + TEX_T_FRAC_BITS + TEX_SHIFTPER1BPPTEXEL);
    GENACCEL(gc).tShiftSubDiv =
        TEX_SCALESHIFT - (wLog2 + TEX_T_FRAC_BITS + TEX_SHIFTPER1BPPTEXEL);
    GENACCEL(gc).texXScale = (__GLfloat)tex->level[0].width * TEX_SCALEFACT;
    GENACCEL(gc).texYScale = (__GLfloat)tex->level[0].height * TEX_SCALEFACT;
}
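
Editor's note: the sMask/tMask values built above use ~(~0 << wLog2) to form a wLog2-bit mask, shifted up to the fixed-point position of the texel coordinate, so ANDing a scaled s coordinate wraps it modulo the texture width (the GL_REPEAT-only case this path requires). A small standalone illustration, with the TEX_SCALESHIFT value assumed to be 16 purely for the example:

#include <stdio.h>

#define ASSUMED_SCALESHIFT 16   /* stand-in for TEX_SCALESHIFT, value assumed */

int main(void)
{
    unsigned int wLog2 = 8;                                     /* 256-texel-wide level */
    unsigned int sMask = (~(~0u << wLog2)) << ASSUMED_SCALESHIFT;
    unsigned int s     = (300u << ASSUMED_SCALESHIFT) | 0x1234; /* fixed-point s coordinate */

    /* Masking wraps the integer part modulo the texture width (GL_REPEAT). */
    printf("sMask = 0x%08x, wrapped s index = %u\n",
           sMask, (s & sMask) >> ASSUMED_SCALESHIFT);           /* prints 300 mod 256 = 44 */
    return 0;
}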

BOOL FASTCALL bUseGenTriangles(__GLcontext *gc)
{
    GLuint modeFlags = gc->polygon.shader.modeFlags;
    GLuint enables = gc->state.enables.general;
    __GLGENcontext *gengc = (__GLGENcontext *)gc;
    ULONG bpp = GENACCEL(gc).bpp;
    int iType;
    BOOL fZippy;
    BOOL bTryFastTexRGBA;
    PFNZIPPYSUB pfnZippySub;
    BOOL fUseFastGenSpan;
    GLboolean bMcdZ;
    ULONG internalFormat;
    ULONG textureMode;
    BOOL bRealTexture;
    BOOL bAccelDecal;

    if ((enables & (__GL_ALPHA_TEST_ENABLE |
                    __GL_STENCIL_TEST_ENABLE)) ||
        (modeFlags & (__GL_SHADE_STENCIL_TEST | __GL_SHADE_LOGICOP |
                      __GL_SHADE_ALPHA_TEST | __GL_SHADE_SLOW_FOG
#ifdef GL_WIN_specular_fog
                      | __GL_SHADE_SPEC_FOG
#endif //GL_WIN_specular_fog
                      )) ||
        !gc->state.raster.rMask ||
        !gc->state.raster.gMask ||
        !gc->state.raster.bMask ||
        (gc->drawBuffer->buf.flags & COLORMASK_ON) ||
        ALPHA_WRITE_ENABLED( gc->drawBuffer ) ||
        (gengc->gsurf.pfd.cColorBits < 8) ||
        ((modeFlags & __GL_SHADE_DEPTH_TEST) && (!gc->state.depth.writeEnable))
       )
        return FALSE;

    if (modeFlags & __GL_SHADE_TEXTURE) {
        internalFormat = gc->texture.currentTexture->level[0].internalFormat;
        textureMode = gc->state.texture.env[0].mode;

        bAccelDecal = (gc->texture.currentTexture->level[0].baseFormat !=
                       GL_RGBA);

        if (!((((textureMode == GL_DECAL) && bAccelDecal) ||
               (textureMode == GL_REPLACE) ||
               (textureMode == GL_MODULATE)) &&
              (gc->texture.currentTexture &&
               (gc->texture.currentTexture->params.minFilter == GL_NEAREST) &&
               (gc->texture.currentTexture->params.magFilter == GL_NEAREST) &&
               (gc->texture.currentTexture->params.sWrapMode == GL_REPEAT) &&
               (gc->texture.currentTexture->params.tWrapMode == GL_REPEAT) &&
               (gc->texture.currentTexture->level[0].border == 0) &&
               (internalFormat == GL_BGR_EXT ||
                internalFormat == GL_BGRA_EXT ||
                internalFormat == GL_COLOR_INDEX8_EXT))))
            return FALSE;

        InitAccelTextureValues(gc, gc->texture.currentTexture);
    }

    bMcdZ = ((((__GLGENcontext *)gc)->pMcdState != NULL) &&
             (((__GLGENcontext *)gc)->pMcdState->pDepthSpan != NULL) &&
             (((__GLGENcontext *)gc)->pMcdState->pMcdSurf != NULL) &&
             !(((__GLGENcontext *)gc)->pMcdState->McdBuffers.mcdDepthBuf.bufFlags & MCDBUF_ENABLED));

    bTryFastTexRGBA = ((gc->state.raster.drawBuffer != GL_FRONT_AND_BACK) &&
                       ((modeFlags & __GL_SHADE_DEPTH_TEST &&
                         modeFlags & __GL_SHADE_DEPTH_ITER)
                        || (!(modeFlags & __GL_SHADE_DEPTH_TEST) &&
                            !(modeFlags & __GL_SHADE_DEPTH_ITER))) &&
                       (modeFlags & __GL_SHADE_STIPPLE) == 0);

    fZippy = (bTryFastTexRGBA &&
              ((gc->drawBuffer->buf.flags & DIB_FORMAT) != 0) &&
              ((gc->drawBuffer->buf.flags & MEMORY_DC) != 0) &&
              gc->transform.reasonableViewport);

    GENACCEL(gc).flags &= ~(
        GEN_DITHER | GEN_RGBMODE | GEN_TEXTURE | GEN_SHADE |
        GEN_FASTZBUFFER | GEN_LESS | SURFACE_TYPE_DIB | GEN_TEXTURE_ORTHO
        );

    if ((enables & __GL_BLEND_ENABLE) ||
        (modeFlags & __GL_SHADE_TEXTURE)) {
        GENACCEL(gc).__fastCalcDeltaPtr = __fastGenCalcDeltasTexRGBA;
        GENACCEL(gc).__fastSetInitParamPtr = __fastGenSetInitialParametersTexRGBA;
    } else {
        GENACCEL(gc).__fastCalcDeltaPtr = __fastGenCalcDeltas;
        GENACCEL(gc).__fastSetInitParamPtr = __fastGenSetInitialParameters;
    }

#ifdef GL_WIN_phong_shading
    if (modeFlags & __GL_SHADE_PHONG)
    {
        gc->procs.fillTriangle = __glFillPhongTriangle;
    }
    else
#endif //GL_WIN_phong_shading
    {
#ifdef _MCD_
        // If MCD driver is being used, then we need to call the "floating
        // point state safe" version of fillTriangle.  This version will
        // not attempt to span floating point operations over a call that
        // may invoke the MCD driver (which will corrupt the FP state).
        if (gengc->pMcdState)
        {
            gc->procs.fillTriangle = __fastGenMcdFillTriangle;
        }
        else
        {
            gc->procs.fillTriangle = __fastGenFillTriangle;
        }
#else //_MCD_
        gc->procs.fillTriangle = __fastGenFillTriangle;
#endif //_MCD_
    }

    // If we're doing perspective-corrected texturing, we will support
    // the following combinations:
    //   z....... <, <=
    //   alpha... src, 1-src
    //   dither.. on/off
    //   bpp..... 332, 555, 565, 888
    // NOTE: We will always try this path first for general texturing.

    if ((modeFlags & __GL_SHADE_TEXTURE) || (enables & __GL_BLEND_ENABLE)) {
        LONG pixType = -1;

        if (gc->state.hints.perspectiveCorrection != GL_NICEST)
            GENACCEL(gc).flags |= GEN_TEXTURE_ORTHO;

        if (!bTryFastTexRGBA)
            goto perspTexPathFail;

        if ((enables & __GL_BLEND_ENABLE) &&
            ((gc->state.raster.blendSrc != GL_SRC_ALPHA) ||
             (gc->state.raster.blendDst != GL_ONE_MINUS_SRC_ALPHA)))
            return FALSE;

        if (!(modeFlags & __GL_SHADE_TEXTURE)) {
            if (!(modeFlags & __GL_SHADE_RGB))
                goto perspTexPathFail;

            bRealTexture = FALSE;
            GENACCEL(gc).flags |= GEN_TEXTURE_ORTHO;
            GENACCEL(gc).texPalette = NULL;
            textureMode = GL_MODULATE;
            internalFormat = GL_BGRA_EXT;
            GENACCEL(gc).texImage = (ULONG *)internalSolidTexture;
            GENACCEL(gc).sMask = 0;
            GENACCEL(gc).tMask = 0;
            GENACCEL(gc).tShift = 0;
            GENACCEL(gc).tMaskSubDiv = 0;
            GENACCEL(gc).tShiftSubDiv = 0;
        }
        else
        {
            bRealTexture = TRUE;
        }

        if (bpp == 8) {
            if ((gengc->gc.drawBuffer->redShift == 0) &&
                (gengc->gc.drawBuffer->greenShift == 3) &&
                (gengc->gc.drawBuffer->blueShift == 6))
                pixType = 0;
        } else if (bpp == 16) {
            if ((gengc->gc.drawBuffer->greenShift == 5) &&
                (gengc->gc.drawBuffer->blueShift == 0)) {
                if (gengc->gc.drawBuffer->redShift == 10)
                    pixType = 1;
                else if (gengc->gc.drawBuffer->redShift == 11)
                    pixType = 2;
            }
        } else if ((bpp == 32) || (bpp == 24)) {
            if ((gengc->gc.drawBuffer->redShift == 16) &&
                (gengc->gc.drawBuffer->greenShift == 8) &&
                (gengc->gc.drawBuffer->blueShift == 0))
                pixType = 3;
        }

        if (pixType < 0)
            goto perspTexPathFail;
        pixType *= 6;

        if (modeFlags & __GL_SHADE_DEPTH_ITER) {
            if (bMcdZ)
                goto perspTexPathFail;

            if (!((gc->state.depth.testFunc == GL_LESS) ||
                  (gc->state.depth.testFunc == GL_LEQUAL)))
                goto perspTexPathFail;

            if (gc->modes.depthBits > 16)
                goto perspTexPathFail;

            if (gc->state.depth.testFunc == GL_LEQUAL)
                pixType += 1;
            else
                pixType += 2;

            GENACCEL(gc).__fastFillSubTrianglePtr = __ZippyFSTZ;
        }

        if (enables & __GL_BLEND_ENABLE)
            pixType += 3;

        // Note: For selecting the sub-triangle filling routine, assume
        // that we will use one of the "zippy" routines.  Then, check at the
        // end whether or not we can actually do this, or if we have to fall
        // back to a more generic (and slower) routine.

        if (internalFormat != GL_COLOR_INDEX8_EXT &&
            internalFormat != GL_COLOR_INDEX16_EXT) {
            //
            // Handle full RGB(A) textures
            //

            // Check if we can support the size...
            if (bRealTexture &&
                GENACCEL(gc).tex &&
                ((GENACCEL(gc).tex->level[0].widthLog2 > TEX_MAX_SIZE_LOG2) ||
                 (GENACCEL(gc).tex->level[0].heightLog2 > TEX_MAX_SIZE_LOG2)))
                goto perspTexPathFail;

            if ((textureMode == GL_DECAL) ||
                (textureMode == GL_REPLACE)) {
                // we don't handle the goofy alpha case for decal...
                if ((textureMode == GL_DECAL) &&
                    (enables & __GL_BLEND_ENABLE))
                    return FALSE;

                // If we're not dithering, we can go with the compressed
                // texture format.  Otherwise, we're forced to use flat-shading
                // procs to get the texture colors to dither properly.  Ouch...
                // We'd like to also go through this path if a DirectDraw
                // texture is used because replace maps can't be created,
                // but they only work with dithering
                if (modeFlags & __GL_SHADE_DITHER) {
                    GENACCEL(gc).__fastTexSpanFuncPtr =
                        __fastPerspTexFlatFuncs[pixType];
                } else {
                    if ((bpp >= 8 && bpp <= 16) &&
                        !(enables & __GL_BLEND_ENABLE)) {
                        // handle the case where we can use compressed textures
                        // for optimal performance.  We do this for bit depths
                        // <= 16 bits, no dithering, and no blending.
                        if (!GENACCEL(gc).tex->pvUser) {
                            if (!__fastGenLoadTexImage(gc, GENACCEL(gc).tex))
                                return FALSE;
                        } else {
                            // If the compressed texture image was created for
                            // another gc, revert to using the RGBA image.
                            // We do this by using the alpha paths.
                            //
                            // NOTE: This logic depends on A being forced to
                            // 1 for all RGB textures.
                            if (gc != ((GENTEXCACHE *)GENACCEL(gc).tex->pvUser)->gc)
                            {
                                pixType += 3;
                            }
                            else
                            {
                                // Check that the cached data is the right size
                                ASSERTOPENGL(((GENTEXCACHE *)GENACCEL(gc).tex->pvUser)->width == GENACCEL(gc).tex->level[0].width &&
                                             ((GENTEXCACHE *)GENACCEL(gc).tex->pvUser)->height == GENACCEL(gc).tex->level[0].height,
                                             "Cached texture size mismatch\n");
                            }
                        }
                    }
                    GENACCEL(gc).__fastTexSpanFuncPtr =
                        __fastPerspTexReplaceFuncs[pixType];
                }

                if (!(modeFlags & __GL_SHADE_DEPTH_ITER))
                    GENACCEL(gc).__fastFillSubTrianglePtr = __ZippyFSTTex;
            } else if (textureMode == GL_MODULATE) {
                if (modeFlags & __GL_SHADE_SMOOTH) {
                    GENACCEL(gc).__fastTexSpanFuncPtr =
                        __fastPerspTexSmoothFuncs[pixType];
                    if (!(modeFlags & __GL_SHADE_DEPTH_ITER))
                        GENACCEL(gc).__fastFillSubTrianglePtr = __ZippyFSTRGBTex;
                } else {
                    GENACCEL(gc).__fastTexSpanFuncPtr =
                        __fastPerspTexFlatFuncs[pixType];
                    if (!(modeFlags & __GL_SHADE_DEPTH_ITER))
                        GENACCEL(gc).__fastFillSubTrianglePtr = __ZippyFSTTex;
                }
            }
        } else {
            //
            // Handle palettized textures
            //

            // Check if we can support the size...
            if (bRealTexture &&
                GENACCEL(gc).tex &&
                ((GENACCEL(gc).tex->level[0].widthLog2 > TEX_MAX_SIZE_LOG2) ||
                 (GENACCEL(gc).tex->level[0].heightLog2 > TEX_MAX_SIZE_LOG2)))
                return FALSE;

            if ((textureMode == GL_DECAL) ||
                (textureMode == GL_REPLACE)) {
                // we don't handle the goofy alpha case for decal...
                if ((textureMode == GL_DECAL) &&
                    (enables & __GL_BLEND_ENABLE))
                    return FALSE;

                // If we're not dithering, we can go with the compressed
                // texture format.  Otherwise, we're forced to use flat-shading
                // procs to get the texture colors to dither properly.  Ouch...
                // We'd like to also go through this path if a DirectDraw
                // texture is used because replace maps can't be created,
                // but they only work with dithering
                if (modeFlags & __GL_SHADE_DITHER) {
                    GENACCEL(gc).__fastTexSpanFuncPtr =
                        __fastPerspTexFlatFuncs[pixType];
                } else {
                    GENACCEL(gc).__fastTexSpanFuncPtr =
                        __fastPerspTexPalReplaceFuncs[pixType];
                    if (bpp >= 8 && bpp <= 16) {
                        // handle the case where we can use compressed paletted
                        // textures for optimal performance.  We do this for
                        // bit depths <= 16 bits with no dithering.
                        if (!GENACCEL(gc).tex->pvUser) {
                            if (!__fastGenLoadTexImage(gc, GENACCEL(gc).tex))
                                return FALSE;
                        } else {
                            // If the compressed texture image was created for
                            // another gc, we have no choice but to fall back to flat shading.
                            // We should find a better solution for this...
                            if (gc != ((GENTEXCACHE *)GENACCEL(gc).tex->pvUser)->gc)
                            {
                                GENACCEL(gc).__fastTexSpanFuncPtr =
                                    __fastPerspTexFlatFuncs[pixType];
                            }
                            else
                            {
                                ASSERTOPENGL(((GENTEXCACHE *)GENACCEL(gc).tex->pvUser)->width == GENACCEL(gc).tex->paletteTotalSize,
                                             "Cached texture size mismatch\n");
                            }
                        }
                    }
                }

                if (!(modeFlags & __GL_SHADE_DEPTH_ITER))
                    GENACCEL(gc).__fastFillSubTrianglePtr = __ZippyFSTTex;
            } else if (textureMode == GL_MODULATE) {
                if (modeFlags & __GL_SHADE_SMOOTH) {
                    GENACCEL(gc).__fastTexSpanFuncPtr =
                        __fastPerspTexSmoothFuncs[pixType];
                    if (!(modeFlags & __GL_SHADE_DEPTH_ITER))
                        GENACCEL(gc).__fastFillSubTrianglePtr = __ZippyFSTRGBTex;
                } else {
                    GENACCEL(gc).__fastTexSpanFuncPtr =
                        __fastPerspTexFlatFuncs[pixType];
                    if (!(modeFlags & __GL_SHADE_DEPTH_ITER))
                        GENACCEL(gc).__fastFillSubTrianglePtr = __ZippyFSTTex;
                }
            }
        }

        if (!fZippy)
            GENACCEL(gc).__fastFillSubTrianglePtr = __fastGenFillSubTriangleTexRGBA;
        else
            GENACCEL(gc).flags |= SURFACE_TYPE_DIB;

        return TRUE;
    }
perspTexPathFail:

    // We don't support any alpha modes yet...
    if (enables & __GL_BLEND_ENABLE)
        return FALSE;

    fUseFastGenSpan = FALSE;

    if (bpp == 8) {
        iType = 2;
        if (
            (gengc->gc.drawBuffer->redShift != 0)
            || (gengc->gc.drawBuffer->greenShift != 3)
            || (gengc->gc.drawBuffer->blueShift != 6)
           ) {
            fUseFastGenSpan = TRUE;
        }
    } else if (bpp == 16) {
        if (
            (gengc->gc.drawBuffer->greenShift == 5)
            && (gengc->gc.drawBuffer->blueShift == 0)
           ) {
            if (gengc->gc.drawBuffer->redShift == 10) {
                iType = 3;
            } else if (gengc->gc.drawBuffer->redShift == 11) {
                iType = 4;
            } else {
                iType = 3;
                fUseFastGenSpan = TRUE;
            }
        } else {
            iType = 3;
            fUseFastGenSpan = TRUE;
        }
    } else {
        if (bpp == 24) {
            iType = 0;
        } else {
            iType = 1;
        }
        if (
            (gengc->gc.drawBuffer->redShift != 16)
            || (gengc->gc.drawBuffer->greenShift != 8)
            || (gengc->gc.drawBuffer->blueShift != 0)
           ) {
            fUseFastGenSpan = TRUE;
        }
    }

    if (modeFlags & __GL_SHADE_DITHER) {
        if ( (bpp == 8)
             || (bpp == 16)
             || ((modeFlags & __GL_SHADE_DEPTH_ITER) == 0)
           ) {
            GENACCEL(gc).flags |= GEN_DITHER;
        }
        iType += 5;
    }

    // Use the accelerated span functions (with no inline z-buffering) if
    // we support the z-buffer function AND we're not using hardware
    // z-buffering:
    if (modeFlags & __GL_SHADE_DEPTH_ITER) {
        if (bMcdZ) {
            fUseFastGenSpan = TRUE;
        } else if (!fZippy) {
            fUseFastGenSpan = TRUE;
        } else if (gc->state.depth.testFunc == GL_LESS) {
            GENACCEL(gc).flags |= GEN_LESS;
        } else if (gc->state.depth.testFunc != GL_LEQUAL) {
            fUseFastGenSpan = TRUE;
        }
        iType += 10;
    }

    if (modeFlags & __GL_SHADE_RGB) {
        GENACCEL(gc).flags |= GEN_RGBMODE;
        pfnZippySub = __ZippyFSTRGB;

        if (modeFlags & __GL_SHADE_TEXTURE) {
            GENACCEL(gc).flags |= (GEN_TEXTURE | GEN_TEXTURE_ORTHO);

            if (gc->state.hints.perspectiveCorrection == GL_NICEST)
                return FALSE;

            if (internalFormat == GL_COLOR_INDEX8_EXT ||
                internalFormat == GL_COLOR_INDEX16_EXT)
                return FALSE;

            if (textureMode == GL_DECAL) {
                if (modeFlags & __GL_SHADE_DITHER)
                    GENACCEL(gc).__fastTexSpanFuncPtr =
                        __fastGenTexFuncs[iType];
                else
                    GENACCEL(gc).__fastTexSpanFuncPtr =
                        __fastGenTexDecalFuncs[iType];
                pfnZippySub = __ZippyFSTTex;
            } else {
                GENACCEL(gc).flags |= GEN_SHADE;
                pfnZippySub = __ZippyFSTRGBTex;
                GENACCEL(gc).__fastTexSpanFuncPtr =
                    __fastGenTexFuncs[iType];
            }
            if (GENACCEL(gc).__fastTexSpanFuncPtr == __fastGenSpan) {
                fUseFastGenSpan = TRUE;
            }
        } else {
            GENACCEL(gc).__fastSmoothSpanFuncPtr = __fastGenRGBFuncs[iType];
            GENACCEL(gc).__fastFlatSpanFuncPtr = __fastGenRGBFlatFuncs[iType];
            if (GENACCEL(gc).__fastSmoothSpanFuncPtr == __fastGenSpan) {
                fUseFastGenSpan = TRUE;
            }
        }
    } else {
        pfnZippySub = __ZippyFSTCI;
        GENACCEL(gc).__fastSmoothSpanFuncPtr = __fastGenCIFuncs[iType];
        GENACCEL(gc).__fastFlatSpanFuncPtr = __fastGenCIFlatFuncs[iType];
    }

    if (modeFlags & __GL_SHADE_STIPPLE)
    {
        fUseFastGenSpan = TRUE;
    }

    if (fUseFastGenSpan) {
        GENACCEL(gc).__fastTexSpanFuncPtr = __fastGenSpan;
        GENACCEL(gc).__fastSmoothSpanFuncPtr = __fastGenSpan;
        GENACCEL(gc).__fastFlatSpanFuncPtr = __fastGenSpan;
        GENACCEL(gc).__fastFillSubTrianglePtr = __fastGenFillSubTriangle;
    } else {
        if (fZippy) {
            GENACCEL(gc).flags |= SURFACE_TYPE_DIB;
            if ( (iType == 2)
                 && (
                     (modeFlags
                      & (__GL_SHADE_RGB | __GL_SHADE_SMOOTH)
                     ) == 0
                    )
               ) {
                GENACCEL(gc).__fastFillSubTrianglePtr = __ZippyFSTCI8Flat;
            } else if (iType >= 10) {
                GENACCEL(gc).__fastFillSubTrianglePtr = __ZippyFSTZ;
                GENACCEL(gc).flags |= GEN_FASTZBUFFER;
            } else {
                GENACCEL(gc).flags &= ~(HAVE_STIPPLE);
                GENACCEL(gc).__fastFillSubTrianglePtr = pfnZippySub;
            }
        } else {
            GENACCEL(gc).__fastFillSubTrianglePtr = __fastGenFillSubTriangle;
        }
    }

    return TRUE;
}
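
Editor's note: the pixType arithmetic in the perspective-texture path above encodes one of 24 span variants: a base pixel format (332, 555, 565, 888) times six, plus 1 or 2 for a LEQUAL or LESS depth test, plus 3 when blending is enabled. A tiny standalone sketch of that indexing; the helper and its layout comments are inferred from this function only, not taken from elsewhere in the source:

#include <stdio.h>

/* Assumed layout, per bUseGenTriangles above: 6 entries per pixel format,
 * covering {no Z, Z LEQUAL, Z LESS} x {no blend, blend}. */
static int PerspIndex(int format /*0=332,1=555,2=565,3=888*/,
                      int zTest  /*0=none,1=LEQUAL,2=LESS*/,
                      int blend  /*0 or 1*/)
{
    return format * 6 + zTest + blend * 3;
}

int main(void)
{
    printf("565, LEQUAL, blended -> table index %d\n", PerspIndex(2, 1, 1));  /* 16 */
    return 0;
}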

void FASTCALL __fastGenPickTriangleProcs(__GLcontext *gc)
{
    GLuint modeFlags = gc->polygon.shader.modeFlags;
    __GLGENcontext *genGc = (__GLGENcontext *)gc;

    CASTINT(gc->polygon.shader.rLittle) = 0;
    CASTINT(gc->polygon.shader.rBig) = 0;
    CASTINT(gc->polygon.shader.gLittle) = 0;
    CASTINT(gc->polygon.shader.gBig) = 0;
    CASTINT(gc->polygon.shader.bLittle) = 0;
    CASTINT(gc->polygon.shader.bBig) = 0;
    CASTINT(gc->polygon.shader.sLittle) = 0;
    CASTINT(gc->polygon.shader.sBig) = 0;
    CASTINT(gc->polygon.shader.tLittle) = 0;
    CASTINT(gc->polygon.shader.tBig) = 0;

    GENACCEL(gc).spanDelta.r = 0;
    GENACCEL(gc).spanDelta.g = 0;
    GENACCEL(gc).spanDelta.b = 0;
    GENACCEL(gc).spanDelta.a = 0;

    /*
    ** Setup cullFace so that a single test will do the cull check.
    */
    if (modeFlags & __GL_SHADE_CULL_FACE) {
        switch (gc->state.polygon.cull) {
          case GL_FRONT:
            gc->polygon.cullFace = __GL_CULL_FLAG_FRONT;
            break;
          case GL_BACK:
            gc->polygon.cullFace = __GL_CULL_FLAG_BACK;
            break;
          case GL_FRONT_AND_BACK:
            gc->procs.renderTriangle = __glDontRenderTriangle;
            gc->procs.fillTriangle = 0;     /* Done to find bugs */
            return;
        }
    } else {
        gc->polygon.cullFace = __GL_CULL_FLAG_DONT;
    }

    /* Build lookup table for face direction */
    switch (gc->state.polygon.frontFaceDirection) {
      case GL_CW:
        if (gc->constants.yInverted) {
            gc->polygon.face[__GL_CW] = __GL_BACKFACE;
            gc->polygon.face[__GL_CCW] = __GL_FRONTFACE;
        } else {
            gc->polygon.face[__GL_CW] = __GL_FRONTFACE;
            gc->polygon.face[__GL_CCW] = __GL_BACKFACE;
        }
        break;
      case GL_CCW:
        if (gc->constants.yInverted) {
            gc->polygon.face[__GL_CW] = __GL_FRONTFACE;
            gc->polygon.face[__GL_CCW] = __GL_BACKFACE;
        } else {
            gc->polygon.face[__GL_CW] = __GL_BACKFACE;
            gc->polygon.face[__GL_CCW] = __GL_FRONTFACE;
        }
        break;
    }

    /* Make polygon mode indexable and zero based */
    gc->polygon.mode[__GL_FRONTFACE] =
        (GLubyte) (gc->state.polygon.frontMode & 0xf);
    gc->polygon.mode[__GL_BACKFACE] =
        (GLubyte) (gc->state.polygon.backMode & 0xf);

    if (gc->renderMode == GL_FEEDBACK) {
        gc->procs.renderTriangle = __glFeedbackTriangle;
        gc->procs.fillTriangle = 0;     /* Done to find bugs */
        return;
    }
    if (gc->renderMode == GL_SELECT) {
        gc->procs.renderTriangle = __glSelectTriangle;
        gc->procs.fillTriangle = 0;     /* Done to find bugs */
        return;
    }

    if ((gc->state.polygon.frontMode == gc->state.polygon.backMode) &&
        (gc->state.polygon.frontMode == GL_FILL)) {
        if (modeFlags & __GL_SHADE_SMOOTH_LIGHT) {
            gc->procs.renderTriangle = __glRenderSmoothTriangle;
#ifdef GL_WIN_phong_shading
        } else if (modeFlags & __GL_SHADE_PHONG) {
            gc->procs.renderTriangle = __glRenderPhongTriangle;
#endif //GL_WIN_phong_shading
        } else {
            gc->procs.renderTriangle = __glRenderFlatTriangle;
        }
    } else {
        gc->procs.renderTriangle = __glRenderTriangle;
    }

    if (gc->state.enables.general & __GL_POLYGON_SMOOTH_ENABLE) {
#ifdef GL_WIN_phong_shading
        if (modeFlags & __GL_SHADE_PHONG)
            gc->procs.fillTriangle = __glFillAntiAliasedPhongTriangle;
        else
#endif //GL_WIN_phong_shading
            gc->procs.fillTriangle = __glFillAntiAliasedTriangle;
    } else {
        if ((gc->state.raster.drawBuffer == GL_NONE) ||
            !bUseGenTriangles(gc))
#ifdef GL_WIN_phong_shading
            if (modeFlags & __GL_SHADE_PHONG)
                gc->procs.fillTriangle = __glFillPhongTriangle;
            else
#endif //GL_WIN_phong_shading
                gc->procs.fillTriangle = __glFillTriangle;
    }

    if ((modeFlags & __GL_SHADE_CHEAP_FOG) &&
        !(modeFlags & __GL_SHADE_SMOOTH_LIGHT)) {
        gc->procs.fillTriangle2 = gc->procs.fillTriangle;
        gc->procs.fillTriangle = __glFillFlatFogTriangle;
    }

#ifdef GL_WIN_specular_fog
    /*
    ** The case where 1) Specular fog is enabled AND 2) flat-shaded
    */
    if ((modeFlags & (__GL_SHADE_SPEC_FOG |
                      __GL_SHADE_SMOOTH_LIGHT |
                      __GL_SHADE_PHONG)) == __GL_SHADE_SPEC_FOG)
    {
        gc->procs.fillTriangle2 = gc->procs.fillTriangle;
        gc->procs.fillTriangle = __glFillFlatSpecFogTriangle;
    }
#endif //GL_WIN_specular_fog
}

void FASTCALL __fastGenPickSpanProcs(__GLcontext *gc)
{
    __GLGENcontext *genGc = (__GLGENcontext *)gc;
    GLuint enables = gc->state.enables.general;
    GLuint modeFlags = gc->polygon.shader.modeFlags;
    __GLcolorBuffer *cfb = gc->drawBuffer;
    __GLspanFunc *sp;
    __GLstippledSpanFunc *ssp;
    int spanCount;
    GLboolean replicateSpan;
    GLboolean bMcdZ = ((((__GLGENcontext *)gc)->pMcdState != NULL) &&
                       (((__GLGENcontext *)gc)->pMcdState->pDepthSpan != NULL) &&
                       (((__GLGENcontext *)gc)->pMcdState->pMcdSurf != NULL) &&
                       !(((__GLGENcontext *)gc)->pMcdState->McdBuffers.mcdDepthBuf.bufFlags & MCDBUF_ENABLED));

    // Always reset the color scale values at the beginning of the pick
    // procs.  Lines, triangles, and spans may all use these values...
    GENACCEL(gc).rAccelScale = (GLfloat)ACCEL_FIX_SCALE;
    GENACCEL(gc).gAccelScale = (GLfloat)ACCEL_FIX_SCALE;
    GENACCEL(gc).bAccelScale = (GLfloat)ACCEL_FIX_SCALE;

    // Note: we need to scale between 0 and 255 to get proper alpha
    // blending.  The software-accelerated blending code assumes this
    // scaling for simplicity...
    GENACCEL(gc).aAccelScale = (GLfloat)(ACCEL_FIX_SCALE) *
        (GLfloat)255.0 / gc->drawBuffer->alphaScale;

    replicateSpan = GL_FALSE;
    sp = gc->procs.span.spanFuncs;
    ssp = gc->procs.span.stippledSpanFuncs;

    /* Load phase one procs */
    if (!gc->transform.reasonableViewport) {
        *sp++ = __glClipSpan;
        *ssp++ = NULL;
    }

    if (modeFlags & __GL_SHADE_STIPPLE) {
        *sp++ = __glStippleSpan;
        *ssp++ = __glStippleStippledSpan;

        if (modeFlags & __GL_SHADE_DEPTH_TEST)
        {
            if (bMcdZ)
            {
                GENACCEL(gc).__fastStippleDepthTestSpan =
                    GenMcdStippleAnyDepthTestSpan;
            }
            else
            {
                if (gc->state.depth.testFunc == GL_LESS)
                {
                    if (gc->modes.depthBits == 32)
                    {
                        GENACCEL(gc).__fastStippleDepthTestSpan =
                            __fastGenStippleLt32Span;
                    }
                    else
                    {
                        GENACCEL(gc).__fastStippleDepthTestSpan =
                            __fastGenStippleLt16Span;
                    }
                }
                else
                {
                    GENACCEL(gc).__fastStippleDepthTestSpan =
                        __fastGenStippleAnyDepthTestSpan;
                }
            }
        }
        else
        {
            GENACCEL(gc).__fastStippleDepthTestSpan = __glStippleSpan;
        }
    }

    /* Load phase three procs */
    if (modeFlags & __GL_SHADE_RGB) {
        if (modeFlags & __GL_SHADE_SMOOTH) {
            *sp = __glShadeRGBASpan;
            *ssp = __glShadeRGBASpan;
#ifdef GL_WIN_phong_shading
        } else if (modeFlags & __GL_SHADE_PHONG) {
            *sp = __glPhongRGBASpan;
            *ssp = __glPhongRGBASpan;
#endif //GL_WIN_phong_shading
        } else {
            *sp = __glFlatRGBASpan;
            *ssp = __glFlatRGBASpan;
        }
    } else {
        if (modeFlags & __GL_SHADE_SMOOTH) {
            *sp = __glShadeCISpan;
            *ssp = __glShadeCISpan;
#ifdef GL_WIN_phong_shading
        } else if (modeFlags & __GL_SHADE_PHONG) {
            *sp = __glPhongCISpan;
            *ssp = __glPhongCISpan;
#endif //GL_WIN_phong_shading
        } else {
            *sp = __glFlatCISpan;
            *ssp = __glFlatCISpan;
        }
    }
    sp++;
    ssp++;

    if (modeFlags & __GL_SHADE_TEXTURE) {
        *sp++ = __glTextureSpan;
        *ssp++ = __glTextureStippledSpan;
    }

#ifdef GL_WIN_specular_fog
    if (modeFlags & (__GL_SHADE_SLOW_FOG | __GL_SHADE_SPEC_FOG))
#else //GL_WIN_specular_fog
    if (modeFlags & __GL_SHADE_SLOW_FOG)
#endif //GL_WIN_specular_fog
    {
        if (DO_NICEST_FOG (gc)) {
            *sp = __glFogSpanSlow;
            *ssp = __glFogStippledSpanSlow;
        } else {
            *sp = __glFogSpan;
            *ssp = __glFogStippledSpan;
        }
        sp++;
        ssp++;
    }

    if (modeFlags & __GL_SHADE_ALPHA_TEST) {
        *sp++ = __glAlphaTestSpan;
        *ssp++ = __glAlphaTestStippledSpan;
    }

    /* Load phase two procs */
    if (modeFlags & __GL_SHADE_STENCIL_TEST) {
        *sp++ = __glStencilTestSpan;
        *ssp++ = __glStencilTestStippledSpan;
        if (modeFlags & __GL_SHADE_DEPTH_TEST) {
            if (bMcdZ) {
                *sp = GenMcdDepthTestStencilSpan;
                *ssp = GenMcdDepthTestStencilStippledSpan;
            } else {
                *sp = __glDepthTestStencilSpan;
                *ssp = __glDepthTestStencilStippledSpan;
            }
        } else {
            *sp = __glDepthPassSpan;
            *ssp = __glDepthPassStippledSpan;
        }
        sp++;
        ssp++;
    } else {
        if (modeFlags & __GL_SHADE_DEPTH_TEST) {
            if (bMcdZ) {
                *sp++ = GenMcdDepthTestSpan;
                *ssp++ = GenMcdDepthTestStippledSpan;

                if (gc->state.depth.writeEnable)
                    ((__GLGENcontext *)gc)->pMcdState->softZSpanFuncPtr =
                        __fastDepthFuncs[gc->state.depth.testFunc & 0x7];
                else
                    ((__GLGENcontext *)gc)->pMcdState->softZSpanFuncPtr =
                        (__GLspanFunc)NULL;

                GENACCEL(gc).__fastZSpanFuncPtr = GenMcdDepthTestSpan;
            } else {
                if (gc->state.depth.writeEnable) {
                    if( gc->modes.depthBits == 32 ) {
                        *sp++ = GENACCEL(gc).__fastZSpanFuncPtr =
                            __fastDepthFuncs[gc->state.depth.testFunc & 0x7];
                    } else {
                        *sp++ = GENACCEL(gc).__fastZSpanFuncPtr =
                            __fastDepth16Funcs[gc->state.depth.testFunc & 0x7];
                    }
                } else {
                    *sp++ = GENACCEL(gc).__fastZSpanFuncPtr =
                        __glDepthTestSpan;
                }
                *ssp++ = __glDepthTestStippledSpan;
            }
        }
    }

    if (gc->state.raster.drawBuffer == GL_FRONT_AND_BACK) {
        spanCount = (int)((ULONG_PTR)(sp - gc->procs.span.spanFuncs));
        gc->procs.span.n = spanCount;
        replicateSpan = GL_TRUE;
    }

    /* Span routines deal with masking, dithering, logicop, blending */
    *sp++ = cfb->storeSpan;
    *ssp++ = cfb->storeStippledSpan;
    spanCount = (int)((ULONG_PTR)(sp - gc->procs.span.spanFuncs));
    gc->procs.span.m = spanCount;

    if (replicateSpan) {
        gc->procs.span.processSpan = __glProcessReplicateSpan;
    } else {
        gc->procs.span.processSpan = __glProcessSpan;
        gc->procs.span.n = spanCount;
    }
}

// These are the bits in modeFlags that affect lines

#ifdef GL_WIN_specular_fog
#define __FAST_LINE_SPEC_FOG    __GL_SHADE_SPEC_FOG
#else
#define __FAST_LINE_SPEC_FOG    0
#endif //GL_WIN_specular_fog

#ifdef GL_WIN_phong_shading
#define __FAST_LINE_PHONG       __GL_SHADE_PHONG
#else
#define __FAST_LINE_PHONG       0
#endif //GL_WIN_phong_shading

#define __FAST_LINE_MODE_FLAGS \
    (__GL_SHADE_DEPTH_TEST | __GL_SHADE_SMOOTH | __GL_SHADE_TEXTURE | \
     __GL_SHADE_LINE_STIPPLE | __GL_SHADE_STENCIL_TEST | __GL_SHADE_LOGICOP | \
     __GL_SHADE_BLEND | __GL_SHADE_ALPHA_TEST | __GL_SHADE_MASK | \
     __GL_SHADE_SLOW_FOG | __GL_SHADE_CHEAP_FOG | __FAST_LINE_SPEC_FOG | \
     __FAST_LINE_PHONG)

/******************************Public*Routine******************************\
* __fastGenPickLineProcs
*
* Picks the line-rendering procedures.  Most of this function was copied from
* the soft code.  Some differences include:
*   1. The beginPrim function pointers are hooked by the accelerated code
*   2. If the attribute state is such that acceleration can be used,
*      __fastGenLineSetup is called to initialize the state machine.
*
* History:
*  22-Mar-1994 -by- Eddie Robinson [v-eddier]
* Wrote it.
\**************************************************************************/
void FASTCALL __fastGenPickLineProcs(__GLcontext *gc)
{
    __GLGENcontext *genGc = (__GLGENcontext *) gc;
    GENACCEL *genAccel;
    GLuint enables = gc->state.enables.general;
    GLuint modeFlags = gc->polygon.shader.modeFlags;
    __GLspanFunc *sp;
    __GLstippledSpanFunc *ssp;
    int spanCount;
    GLboolean wideLine;
    GLboolean replicateLine;
    GLuint aaline;
    GLboolean bMcdZ = ((genGc->pMcdState != NULL) &&
                       (genGc->pMcdState->pDepthSpan != NULL) &&
                       (genGc->pMcdState->pMcdSurf != NULL) &&
                       !(genGc->pMcdState->McdBuffers.mcdDepthBuf.bufFlags & MCDBUF_ENABLED));

    /*
    ** The fast line code replaces the line function pointers, so reset them
    ** to a good state
    */
    gc->procs.lineBegin = __glNopLineBegin;
    gc->procs.lineEnd = __glNopLineEnd;

    if (gc->renderMode == GL_FEEDBACK) {
        gc->procs.renderLine = __glFeedbackLine;
    } else if (gc->renderMode == GL_SELECT) {
        gc->procs.renderLine = __glSelectLine;
    } else {
        if (genAccel = (GENACCEL *) genGc->pPrivateArea) {
            if (!(modeFlags & __FAST_LINE_MODE_FLAGS & ~genAccel->flLineAccelModes) &&
                !(gc->state.enables.general & __GL_LINE_SMOOTH_ENABLE) &&
                !(gc->state.enables.general & __GL_SCISSOR_TEST_ENABLE) &&
                !(gc->state.raster.drawBuffer == GL_NONE) &&
                !gc->buffers.doubleStore &&
                !genGc->pMcdState &&
                (genGc->dwCurrentFlags & (GLSURF_HDC | GLSURF_METAFILE)) ==
                GLSURF_HDC)
            {
                __fastLineComputeOffsets(genGc);

#if NT_NO_BUFFER_INVARIANCE
                if (!(gc->drawBuffer->buf.flags & DIB_FORMAT)) {
                    if (genAccel->bFastLineDispAccel) {
                        if (__fastGenLineSetupDisplay(gc))
                            return;
                    }
                } else {
                    if (genAccel->bFastLineDIBAccel) {
                        if (__fastGenLineSetupDIB(gc))
                            return;
                    }
                }
#else
                if (genAccel->bFastLineDispAccel) {
                    if (__fastGenLineSetupDisplay(gc))
                        return;
                }
#endif
            }
        }

        if (__glGenSetupEitherLines(gc))
        {
            return;
        }

        replicateLine = wideLine = GL_FALSE;
        aaline = gc->state.enables.general & __GL_LINE_SMOOTH_ENABLE;

        if (aaline)
        {
            gc->procs.renderLine = __glRenderAntiAliasLine;
        }
        else
        {
            gc->procs.renderLine = __glRenderAliasLine;
        }

        sp = gc->procs.line.lineFuncs;
        ssp = gc->procs.line.stippledLineFuncs;

        if (!aaline && (modeFlags & __GL_SHADE_LINE_STIPPLE)) {
            *sp++ = __glStippleLine;
            *ssp++ = NULL;
        }

        if (!aaline && gc->state.line.aliasedWidth > 1) {
            wideLine = GL_TRUE;
        }

        spanCount = (int)((ULONG_PTR)(sp - gc->procs.line.lineFuncs));
        gc->procs.line.n = spanCount;

        *sp++ = __glScissorLine;
        *ssp++ = __glScissorStippledLine;

        if (!aaline) {
            if (modeFlags & __GL_SHADE_STENCIL_TEST) {
                *sp++ = __glStencilTestLine;
                *ssp++ = __glStencilTestStippledLine;
                if (modeFlags & __GL_SHADE_DEPTH_TEST) {
                    if (bMcdZ) {
                        *sp = GenMcdDepthTestStencilLine;
                        *ssp = GenMcdDepthTestStencilStippledLine;
                    } else if( gc->modes.depthBits == 32 ) {
                        *sp = __glDepthTestStencilLine;
                        *ssp = __glDepthTestStencilStippledLine;
                    }
                    else {
                        *sp = __glDepth16TestStencilLine;
                        *ssp = __glDepth16TestStencilStippledLine;
                    }
                } else {
                    *sp = __glDepthPassLine;
                    *ssp = __glDepthPassStippledLine;
                }
                sp++;
                ssp++;
            } else {
                if (modeFlags & __GL_SHADE_DEPTH_TEST) {
                    if (gc->state.depth.testFunc == GL_NEVER) {
                        /* Unexpected end of line routine picking! */
                        spanCount = (int)((ULONG_PTR)(sp - gc->procs.line.lineFuncs));
                        gc->procs.line.m = spanCount;
                        gc->procs.line.l = spanCount;
                        goto pickLineProcessor;
#ifdef __GL_USEASMCODE
                    } else {
                        unsigned long ix;

                        if (gc->state.depth.writeEnable) {
                            ix = 0;
                        } else {
                            ix = 8;
                        }
                        ix += gc->state.depth.testFunc & 0x7;
                        if (ix == (GL_LEQUAL & 0x7)) {
                            *sp++ = __glDepthTestLine_LEQ_asm;
                        } else {
                            *sp++ = __glDepthTestLine_asm;
                            gc->procs.line.depthTestPixel = LDepthTestPixel[ix];
                        }
#else
                    } else {
                        if (bMcdZ) {
                            *sp++ = GenMcdDepthTestLine;
                        } else {
                            if( gc->modes.depthBits == 32 )
                                *sp++ = __glDepthTestLine;
                            else
                                *sp++ = __glDepth16TestLine;
                        }
#endif
                    }
                    if (bMcdZ) {
                        *ssp++ = GenMcdDepthTestStippledLine;
                    } else {
                        if( gc->modes.depthBits == 32 )
                            *ssp++ = __glDepthTestStippledLine;
                        else
                            *ssp++ = __glDepth16TestStippledLine;
                    }
                }
            }
        }
        /* Load phase three procs */
        if (modeFlags & __GL_SHADE_RGB) {
            if (modeFlags & __GL_SHADE_SMOOTH) {
                *sp = __glShadeRGBASpan;
                *ssp = __glShadeRGBASpan;
#ifdef GL_WIN_phong_shading
            } else if (modeFlags & __GL_SHADE_PHONG) {
                *sp = __glPhongRGBASpan;
                *ssp = __glPhongRGBASpan;
#endif //GL_WIN_phong_shading
            } else {
                *sp = __glFlatRGBASpan;
                *ssp = __glFlatRGBASpan;
            }
        } else {
            if (modeFlags & __GL_SHADE_SMOOTH) {
                *sp = __glShadeCISpan;
                *ssp = __glShadeCISpan;
#ifdef GL_WIN_phong_shading
            } else if (modeFlags & __GL_SHADE_PHONG) {
                *sp = __glPhongCISpan;
                *ssp = __glPhongCISpan;
#endif //GL_WIN_phong_shading
            } else {
                *sp = __glFlatCISpan;
                *ssp = __glFlatCISpan;
            }
        }
        sp++;
        ssp++;

        if (modeFlags & __GL_SHADE_TEXTURE) {
            *sp++ = __glTextureSpan;
            *ssp++ = __glTextureStippledSpan;
        }

#ifdef GL_WIN_specular_fog
        if (modeFlags & (__GL_SHADE_SLOW_FOG | __GL_SHADE_SPEC_FOG))
#else //GL_WIN_specular_fog
        if (modeFlags & __GL_SHADE_SLOW_FOG)
#endif //GL_WIN_specular_fog
        {
            if (DO_NICEST_FOG (gc)) {
                *sp = __glFogSpanSlow;
                *ssp = __glFogStippledSpanSlow;
            } else {
                *sp = __glFogSpan;
                *ssp = __glFogStippledSpan;
            }
            sp++;
            ssp++;
        }

        if (aaline) {
            *sp++ = __glAntiAliasLine;
            *ssp++ = __glAntiAliasStippledLine;
        }

        if (aaline) {
            if (modeFlags & __GL_SHADE_STENCIL_TEST) {
                *sp++ = __glStencilTestLine;
                *ssp++ = __glStencilTestStippledLine;
                if (modeFlags & __GL_SHADE_DEPTH_TEST) {
                    if (bMcdZ) {
                        *sp = GenMcdDepthTestStencilLine;
                        *ssp = GenMcdDepthTestStencilStippledLine;
                    } else if( gc->modes.depthBits == 32 ) {
                        *sp = __glDepthTestStencilLine;
                        *ssp = __glDepthTestStencilStippledLine;
                    }
                    else {
                        *sp = __glDepth16TestStencilLine;
                        *ssp = __glDepth16TestStencilStippledLine;
                    }
                } else {
                    *sp = __glDepthPassLine;
                    *ssp = __glDepthPassStippledLine;
                }
                sp++;
                ssp++;
            } else {
                if (modeFlags & __GL_SHADE_DEPTH_TEST) {
                    if (gc->state.depth.testFunc == GL_NEVER) {
                        /* Unexpected end of line routine picking! */
                        spanCount = (int)((ULONG_PTR)(sp - gc->procs.line.lineFuncs));
                        gc->procs.line.m = spanCount;
                        gc->procs.line.l = spanCount;
                        goto pickLineProcessor;
#ifdef __GL_USEASMCODE
                    } else {
                        unsigned long ix;

                        if (gc->state.depth.writeEnable) {
                            ix = 0;
                        } else {
                            ix = 8;
                        }
                        ix += gc->state.depth.testFunc & 0x7;
                        *sp++ = __glDepthTestLine_asm;
                        gc->procs.line.depthTestPixel = LDepthTestPixel[ix];
#else
                    } else {
                        if (bMcdZ)
                            *sp++ = GenMcdDepthTestLine;
                        else if( gc->modes.depthBits == 32 )
                            *sp++ = __glDepthTestLine;
                        else
                            *sp++ = __glDepth16TestLine;
#endif
                    }
                    if (bMcdZ)
                        *ssp++ = GenMcdDepthTestStippledLine;
                    else if (gc->modes.depthBits == 32)
                        *ssp++ = __glDepthTestStippledLine;
                    else
                        *ssp++ = __glDepth16TestStippledLine;
                }
            }
        }
        if (modeFlags & __GL_SHADE_ALPHA_TEST) {
            *sp++ = __glAlphaTestSpan;
            *ssp++ = __glAlphaTestStippledSpan;
        }

        if (gc->buffers.doubleStore) {
            replicateLine = GL_TRUE;
        }

        spanCount = (int)((ULONG_PTR)(sp - gc->procs.line.lineFuncs));
        gc->procs.line.m = spanCount;

        *sp++ = __glStoreLine;
        *ssp++ = __glStoreStippledLine;

        spanCount = (int)((ULONG_PTR)(sp - gc->procs.line.lineFuncs));
        gc->procs.line.l = spanCount;

        sp = &gc->procs.line.wideLineRep;
        ssp = &gc->procs.line.wideStippledLineRep;

        if (wideLine) {
            *sp = __glWideLineRep;
            *ssp = __glWideStippleLineRep;
            sp = &gc->procs.line.drawLine;
            ssp = &gc->procs.line.drawStippledLine;
        }

        if (replicateLine) {
            *sp = __glDrawBothLine;
            *ssp = __glDrawBothStippledLine;
        } else {
            *sp = __glNopGCBOOL;
            *ssp = __glNopGCBOOL;
            gc->procs.line.m = gc->procs.line.l;
        }

        if (!wideLine) {
            gc->procs.line.n = gc->procs.line.m;
        }

pickLineProcessor:
        if (!wideLine && !replicateLine && spanCount == 3) {
            gc->procs.line.processLine = __glProcessLine3NW;
        } else {
            gc->procs.line.processLine = __glProcessLine;
        }

        if ((modeFlags & __GL_SHADE_CHEAP_FOG) &&
            !(modeFlags & __GL_SHADE_SMOOTH_LIGHT)) {
            gc->procs.renderLine2 = gc->procs.renderLine;
            gc->procs.renderLine = __glRenderFlatFogLine;
        }
    }
}

BOOL FASTCALL __glGenCreateAccelContext(__GLcontext *gc)
{
    __GLGENcontext *genGc = (__GLGENcontext *)gc;
    PIXELFORMATDESCRIPTOR *pfmt;
    ULONG bpp;

    pfmt = &genGc->gsurf.pfd;
    bpp = pfmt->cColorBits;

    genGc->pPrivateArea = (VOID *)(&genGc->genAccel);

    __glQueryLineAcceleration(gc);

    gc->procs.pickTriangleProcs = __fastGenPickTriangleProcs;
    gc->procs.pickSpanProcs = __fastGenPickSpanProcs;

    // Set up constant-color values:
    GENACCEL(gc).constantR = ((1 << pfmt->cRedBits) - 1) << 16;
    GENACCEL(gc).constantG = ((1 << pfmt->cGreenBits) - 1) << 16;
    GENACCEL(gc).constantB = ((1 << pfmt->cBlueBits) - 1) << 16;
    if( pfmt->cAlphaBits )
        GENACCEL(gc).constantA = ((1 << pfmt->cAlphaBits) - 1) << 16;
    else
        GENACCEL(gc).constantA = 0xff << 16;

    GENACCEL(gc).bpp = bpp;
    GENACCEL(gc).xMultiplier = ((bpp + 7) / 8);

    if (gc->modes.depthBits == 16 )
        GENACCEL(gc).zScale = (__GLfloat)65536.0;
    else
        GENACCEL(gc).zScale = (__GLfloat)1.0;

    return TRUE;
}
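
Editor's note: the constant-color setup above appears to store each channel's maximum value in 16.16 fixed point; ((1 << cBits) - 1) << 16 is the largest n-bit channel value shifted into the integer half of the word. A one-line standalone check, assuming a 5-bit red channel:

#include <stdio.h>

int main(void)
{
    unsigned int cRedBits = 5;                              /* e.g. a 555/565 surface */
    unsigned int constantR = ((1u << cRedBits) - 1) << 16;  /* max red in 16.16 fixed point */

    printf("constantR = 0x%08x (integer part %u)\n",
           constantR, constantR >> 16);                     /* prints 0x001f0000, 31 */
    return 0;
}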

MCDHANDLE FASTCALL __glGenLoadTexture(__GLcontext *gc, __GLtexture *tex,
                                      ULONG flags)
{
    __GLGENcontext *gengc = (__GLGENcontext *)gc;
    MCDHANDLE texHandle;
    DWORD texKey;

#ifdef _MCD_
    if (gengc->pMcdState) {
        texHandle = GenMcdCreateTexture(gengc, tex, flags);

        if (texHandle) {
            tex->textureKey = GenMcdTextureKey(gengc, texHandle);
            gc->textureKey = tex->textureKey;
        }

        return texHandle;
    } else
#endif
        return 0;
}

BOOL FASTCALL __glGenMakeTextureCurrent(__GLcontext *gc, __GLtexture *tex, MCDHANDLE loadKey)
{
    GLint internalFormat;

    if (!tex)
        return FALSE;

    InitAccelTextureValues(gc, tex);

    // Update the driver texture key in the context:
    if (((__GLGENcontext *)gc)->pMcdState && (gc->textureKey = tex->textureKey)) {
        GenMcdUpdateTextureState((__GLGENcontext *)gc, tex, loadKey);
    }

    // Previously we called bUseGenTriangles here to determine whether we were
    // doing 'fast' texturing, and if so, setup the texture cache pointers
    // below.  But this slowed down texture bind time, so for now we always
    // execute this next section of code (safe, since we check for valid ptrs).
    if (tex->level[0].internalFormat == GL_COLOR_INDEX8_EXT)
    {
        if (tex->pvUser)
            GENACCEL(gc).texImageReplace =
                ((GENTEXCACHE *)tex->pvUser)->texImageReplace;
    }
    else if (tex->level[0].internalFormat != GL_COLOR_INDEX16_EXT)
    {
        if (tex->pvUser)
            GENACCEL(gc).texImageReplace =
                ((GENTEXCACHE *)tex->pvUser)->texImageReplace;
        GENACCEL(gc).texPalette = NULL;
    }

    return TRUE;
}

BOOL FASTCALL __glGenUpdateTexture(__GLcontext *gc, __GLtexture *tex, MCDHANDLE loadKey)
{
    //!! NOTE !!
    //!! This should really be broken into separate load and update calls since
    //!! loading and updating are different operations.  The texture
    //!! data cache will never shrink with the current implementation.

    // Do not quit if the load fails because we want the repick to occur
    // in MakeTextureCurrent in both the success and failure cases
    __fastGenLoadTexImage(gc, tex);
    __glGenMakeTextureCurrent(gc, tex, loadKey);

    return TRUE;
}

void FASTCALL __glGenFreeTexture(__GLcontext *gc, __GLtexture *tex, MCDHANDLE loadKey)
{
    __GLGENcontext *gengc = (__GLGENcontext *)gc;

    if (GENACCEL(gc).texImage)
        GENACCEL(gc).texImage = NULL;

    if (tex->pvUser) {
        GCFREE(gc, tex->pvUser);
        tex->pvUser = NULL;
    }

#ifdef _MCD_
    if (gengc->pMcdState && loadKey) {
        GenMcdDeleteTexture(gengc, loadKey);
    }
#endif
}

void FASTCALL __glGenUpdateTexturePalette(__GLcontext *gc, __GLtexture *tex,
                                          MCDHANDLE loadKey, ULONG start,
                                          ULONG count)
{
    UCHAR *texBuffer;
    GENTEXCACHE *pGenTex;
    __GLcolorBuffer *cfb = gc->drawBuffer;
    BYTE *pXlat = ((__GLGENcontext *)gc)->pajTranslateVector;
    ULONG rBits, gBits, bBits;
    ULONG rShift, gShift, bShift;
    ULONG i, end;
    ULONG *replaceBuffer;

    ASSERTOPENGL(tex->paletteTotalData != NULL,
                 "__GenUpdateTexturePalette: null texture data\n");

#ifdef _MCD_
    if (((__GLGENcontext *)gc)->pMcdState && loadKey) {
        GenMcdUpdateTexturePalette((__GLGENcontext *)gc, tex, loadKey, start,
                                   count);
    }
#endif

    pGenTex = GetGenTexCache(gc, tex);
    if (!pGenTex)
        return;

    GENACCEL(gc).texImageReplace = pGenTex->texImageReplace;

    replaceBuffer = (ULONG *)(pGenTex->texImageReplace) + start;
    texBuffer = (UCHAR *)(tex->paletteTotalData + start);

    rShift = cfb->redShift;
    gShift = cfb->greenShift;
    bShift = cfb->blueShift;
    rBits = ((__GLGENcontext *)gc)->gsurf.pfd.cRedBits;
    gBits = ((__GLGENcontext *)gc)->gsurf.pfd.cGreenBits;
    bBits = ((__GLGENcontext *)gc)->gsurf.pfd.cBlueBits;

    end = start + count;

    for (i = start; i < end; i++, texBuffer += 4) {
        ULONG color;

        color = ((((ULONG)texBuffer[2] << rBits) >> 8) << rShift) |
                ((((ULONG)texBuffer[1] << gBits) >> 8) << gShift) |
                ((((ULONG)texBuffer[0] << bBits) >> 8) << bShift);

        if (GENACCEL(gc).bpp == 8)
            color = pXlat[color & 0xff];

        *replaceBuffer++ = (color | ((ULONG)texBuffer[3] << 24));
    }
}

#ifdef GL_EXT_flat_paletted_lighting
void FASTCALL __glGenSetPaletteOffset(__GLcontext *gc, __GLtexture *tex,
                                      GLint offset)
{
    GENTEXCACHE *pGenTex;

    if (GENACCEL(gc).texPalette == NULL)
    {
        return;
    }

    GENACCEL(gc).texPalette = (ULONG *)tex->paletteTotalData+offset;

    pGenTex = GetGenTexCache(gc, tex);
    if (pGenTex == NULL)
    {
        return;
    }

    // Replace map for paletted textures is a replace map of the
    // entire palette, so offset it
    if (GENACCEL(gc).texImageReplace != NULL)
    {
        GENACCEL(gc).texImageReplace = (UCHAR *)
            ((ULONG *)pGenTex->texImageReplace+offset);
    }

    // Consider - Call MCD
}
#endif

void FASTCALL __glGenDestroyAccelContext(__GLcontext *gc)
{
    __GLGENcontext *genGc = (__GLGENcontext *)gc;

    /* Free any platform-specific private data area */
    if (genGc->pPrivateArea) {
        if (GENACCEL(gc).pFastLineBuffer) {
            GCFREE(gc, GENACCEL(gc).pFastLineBuffer);
#ifndef _CLIENTSIDE_
            wglDeletePath(GENACCEL(gc).pFastLinePathobj);
#endif
        }
        genGc->pPrivateArea = NULL;
    }
}