Counter Strike : Global Offensive Source Code
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

621 lines
26 KiB

  1. // BE VERY VERY CAREFUL what you do in these function. They are extremely hot, and calling the wrong GL API's in here will crush perf. (especially on NVidia threaded drivers).
  2. FORCEINLINE uint32 bitmix32(uint32 a)
  3. {
  4. a -= (a<<6);
  5. //a ^= (a>>17);
  6. //a -= (a<<9);
  7. a ^= (a<<4);
  8. //a -= (a<<3);
  9. //a ^= (a<<10);
  10. a ^= (a>>15);
  11. return a;
  12. }
#ifndef OSX
// Returns the GL sampler object whose state matches desiredParams, creating /
// configuring one on first use. m_samplerObjectHash is an open-addressed hash
// table with linear probing; the mask below requires cSamplerObjectHashSize to
// be a power of two. Called per dirty sampler per batch from FlushDrawStates,
// so it must stay branch-light and make no GL calls on the hit path.
FORCEINLINE GLuint GLMContext::FindSamplerObject( const GLMTexSamplingParams &desiredParams )
{
	// Hash both packed state words down to a starting bucket.
	int h = bitmix32( desiredParams.m_bits + desiredParams.m_borderColor ) & ( cSamplerObjectHashSize - 1 );

	// Probe forward (wrapping at the end) until we find either an exact match or
	// an unused (invalid) slot. Termination relies on the table never becoming
	// completely full -- see the Error() below, which fires (and presumably does
	// not return) the moment the last slot is consumed.
	while ( ( m_samplerObjectHash[h].m_params.m_bits != desiredParams.m_bits ) || ( m_samplerObjectHash[h].m_params.m_borderColor != desiredParams.m_borderColor ) )
	{
		if ( !m_samplerObjectHash[h].m_params.m_packed.m_isValid )
			break;
		if ( ++h >= cSamplerObjectHashSize )
			h = 0;
	}

	// Miss: claim this empty slot, remember the params, and push them into the
	// slot's pre-created GL sampler object.
	if ( !m_samplerObjectHash[h].m_params.m_packed.m_isValid )
	{
		GLMTexSamplingParams &hashParams = m_samplerObjectHash[h].m_params;
		hashParams = desiredParams;
		hashParams.SetToSamplerObject( m_samplerObjectHash[h].m_samplerObject );
		if ( ++m_nSamplerObjectHashNumEntries == cSamplerObjectHashSize )
		{
			// TODO: Support resizing
			Error( "Sampler object hash is full, increase cSamplerObjectHashSize" );
		}
	}
	return m_samplerObjectHash[h].m_samplerObject;
}
#endif
// BE VERY CAREFUL WHAT YOU DO IN HERE. This is called on every batch, even seemingly simple changes can kill perf.
//
// Flushes all deferred GL state ahead of a draw call: selects/binds the GLSL
// program pair, applies dirty sampler state, uploads dirty float/bool/int
// uniform ranges for both shader stages, and rebinds vertex attribute arrays
// when the vertex declaration, buffers, or shader attrib map changed.
// NOTE(review): nStartIndex/nEndIndex/nBaseVertex are not referenced anywhere
// in this body, and the old "shadersOn" trailing comment matched no parameter
// here -- presumably stale from an earlier signature; confirm against callers.
FORCEINLINE void GLMContext::FlushDrawStates( uint nStartIndex, uint nEndIndex, uint nBaseVertex )
{
	Assert( m_drawingLang == kGLMGLSL ); // no support for ARB shaders right now (and NVidia reports that they aren't worth targeting under Windows/Linux for various reasons anyway)
	Assert( ( m_drawingFBO == m_boundDrawFBO ) && ( m_drawingFBO == m_boundReadFBO ) ); // this check MUST succeed
	Assert( m_pDevice->m_pVertDecl );

#if GLMDEBUG
	GLM_FUNC;
#endif

	GL_BATCH_PERF( m_FlushStats.m_nTotalBatchFlushes++; )

#if GLMDEBUG
	// Debug-only sanity check: does the client's sRGB-write blend state agree
	// with the sRGB-ness of color attachment 0?
	bool tex0_srgb = (m_boundDrawFBO[0].m_attach[0].m_tex->m_layout->m_key.m_texFlags & kGLMTexSRGB) != 0;
	// you can only actually use the sRGB FB state on some systems.. check caps
	if (m_caps.m_hasGammaWrites)
	{
		GLBlendEnableSRGB_t writeSRGBState;
		m_BlendEnableSRGB.Read( &writeSRGBState, 0 ); // the client set value, not the API-written value yet..
		bool draw_srgb = writeSRGBState.enable != 0;
		if (draw_srgb)
		{
			if (tex0_srgb)
			{
				// good - draw mode and color tex agree
			}
			else
			{
				// bad
				// Client has asked to write sRGB into a texture that can't do it.
				// there is no way to satisfy this unless we change the RT tex and we avoid doing that.
				// (although we might consider a ** ONE TIME ** promotion.
				// this shouldn't be a big deal if the tex format is one where it doesn't matter like 32F.
				GLMPRINTF(("-Z- srgb-enabled FBO conflict: attached tex %08x [%s] is not SRGB", m_boundDrawFBO[0].m_attach[0].m_tex, m_boundDrawFBO[0].m_attach[0].m_tex->m_layout->m_layoutSummary ));
				// do we shoot down the srgb-write state for this batch?
				// I think the runtime will just ignore it.
			}
		}
		else
		{
			if (tex0_srgb)
			{
				// odd - client is not writing sRGB into a texture which *can* do it.
				//GLMPRINTF(( "-Z- srgb-disabled FBO conflict: attached tex %08x [%s] is SRGB", m_boundFBO[0].m_attach[0].m_tex, m_boundFBO[0].m_attach[0].m_tex->m_layout->m_layoutSummary ));
				//writeSRGBState.enable = true;
				//m_BlendEnableSRGB.Write( &writeSRGBState );
			}
			else
			{
				// good - draw mode and color tex agree
			}
		}
	}
#endif

	Assert( m_drawingProgram[ kGLMVertexProgram ] );
	Assert( m_drawingProgram[ kGLMFragmentProgram ] );
	Assert( ( m_drawingProgram[kGLMVertexProgram]->m_type == kGLMVertexProgram ) && ( m_drawingProgram[kGLMFragmentProgram]->m_type == kGLMFragmentProgram ) );
	Assert( m_drawingProgram[ kGLMVertexProgram ]->m_bTranslatedProgram && m_drawingProgram[ kGLMFragmentProgram ]->m_bTranslatedProgram );

#if GLMDEBUG
	// Debug-only pass over all samplers: warn about mip filtering on mipless
	// textures, and about mismatches between the pixel shader's shadow-depth
	// sampler mask and the bound texture / compare-mode state.
	// Depth compare mode check
	uint nCurMask = 1, nShaderSamplerMask = m_drawingProgram[kGLMFragmentProgram]->m_samplerMask;
	for ( int nSamplerIndex = 0; nSamplerIndex < GLM_SAMPLER_COUNT; ++nSamplerIndex, nCurMask <<= 1 )
	{
		if ( !m_samplers[nSamplerIndex].m_pBoundTex )
			continue;

		if ( m_samplers[nSamplerIndex].m_pBoundTex->m_layout->m_mipCount == 1 )
		{
			if ( m_samplers[nSamplerIndex].m_samp.m_packed.m_mipFilter == D3DTEXF_LINEAR )
			{
				GLMDebugPrintf( "Sampler %u has mipmap filtering enabled on a texture without mipmaps! (texture name: %s, pixel shader: %s)!\n",
					nSamplerIndex,
					m_samplers[nSamplerIndex].m_pBoundTex->m_debugLabel ? m_samplers[nSamplerIndex].m_pBoundTex->m_debugLabel : "?",
					m_drawingProgram[kGLMFragmentProgram]->m_shaderName );
			}
		}

		if ( ( nShaderSamplerMask & nCurMask ) == 0 )
			continue;

		// NOTE(review): this second check and message are identical to the one
		// above (just restricted to samplers the shader actually reads); given
		// the shadow-buffer comment below it looks like a copy/paste remnant
		// that was perhaps meant to test something else -- confirm intent.
		if ( m_samplers[nSamplerIndex].m_pBoundTex->m_layout->m_mipCount == 1 )
		{
			if ( m_samplers[nSamplerIndex].m_samp.m_packed.m_mipFilter == D3DTEXF_LINEAR )
			{
				// Note this is not always an error - shadow buffer debug visualization shaders purposely want to read shadow depths (and not do the comparison)
				GLMDebugPrintf( "Sampler %u has mipmap filtering enabled on a texture without mipmaps! (texture name: %s, pixel shader: %s)!\n",
					nSamplerIndex,
					m_samplers[nSamplerIndex].m_pBoundTex->m_debugLabel ? m_samplers[nSamplerIndex].m_pBoundTex->m_debugLabel : "?",
					m_drawingProgram[kGLMFragmentProgram]->m_shaderName );
			}
		}

		bool bSamplerIsDepth = ( m_samplers[nSamplerIndex].m_pBoundTex->m_layout->m_key.m_texFlags & kGLMTexIsDepth ) != 0;
		bool bSamplerShadow = m_samplers[nSamplerIndex].m_samp.m_packed.m_compareMode != 0;

		bool bShaderShadow = ( m_drawingProgram[kGLMFragmentProgram]->m_nShadowDepthSamplerMask & nCurMask ) != 0;

		// NOTE - Check shader name hardcoded into ShadowDepthSamplerMaskFromName() in dxabstract.cpp!!
		if ( bShaderShadow )
		{
			// Shader expects shadow depth sampling at this sampler index
			// Must have a depth texture and compare mode must be enabled
			if ( !bSamplerIsDepth || !bSamplerShadow )
			{
				// FIXME: This occasionally occurs in L4D2 when CShaderAPIDx8::ExecuteCommandBuffer() sets the TEXTURE_WHITE texture in the flashlight depth texture slot.
				GLMDebugPrintf( "Sampler %u's compare mode (%u) or format (depth=%u) is not consistent with pixel shader's compare mode (%u) (texture name: %s, pixel shader: %s)!\n",
					nSamplerIndex, bSamplerShadow, bSamplerIsDepth, bShaderShadow,
					m_samplers[nSamplerIndex].m_pBoundTex->m_debugLabel ? m_samplers[nSamplerIndex].m_pBoundTex->m_debugLabel : "?",
					m_drawingProgram[kGLMFragmentProgram]->m_shaderName );
			}
		}
		else
		{
			// Shader does not expect shadow depth sampling as this sampler index
			// We don't care if comparemode is enabled, but we can't have a depth texture in this sampler
			if ( bSamplerIsDepth )
			{
				GLMDebugPrintf( "Sampler %u is a depth texture but the pixel shader's shadow depth sampler mask does not expect depth here (texture name: %s, pixel shader: %s)!\n",
					nSamplerIndex,
					m_samplers[nSamplerIndex].m_pBoundTex->m_debugLabel ? m_samplers[nSamplerIndex].m_pBoundTex->m_debugLabel : "?",
					m_drawingProgram[kGLMFragmentProgram]->m_shaderName );
			}
		}
	}
#endif

	// ------- program pair selection ---------- //
	// If either shader changed since the last flush, look up (or link) the
	// matching GLSL program pair and bind it. On a pair change we also mark
	// every uniform range dirty, since the new program has seen none of the
	// current values.
	if ( m_bDirtyPrograms )
	{
		m_bDirtyPrograms = false;

		CGLMShaderPair *pNewPair = m_pairCache->SelectShaderPair( m_drawingProgram[ kGLMVertexProgram ], m_drawingProgram[ kGLMFragmentProgram ], 0 );

		if ( pNewPair != m_pBoundPair )
		{
#if GL_BATCH_TELEMETRY_ZONES
			tmZone( TELEMETRY_LEVEL2, TMZF_NONE, "NewProgram" );
#endif
			// Lazily link/validate the pair; on failure bail out through the
			// error exit, which re-marks programs dirty for the next batch.
			if ( !pNewPair->m_valid )
			{
				if ( !pNewPair->ValidateProgramPair() )
				{
					goto flush_error_exit;
				}
			}

			gGL->glUseProgram( (GLuint)pNewPair->m_program );

			GL_BATCH_PERF( m_FlushStats.m_nTotalProgramPairChanges++; )

			if ( !m_pBoundPair )
			{
				GL_BATCH_PERF( m_FlushStats.m_nNewPS++; )
				GL_BATCH_PERF( m_FlushStats.m_nNewVS++; )
			}
			else
			{
				GL_BATCH_PERF( if ( pNewPair->m_fragmentProg != m_pBoundPair->m_fragmentProg ) m_FlushStats.m_nNewPS++; )
				GL_BATCH_PERF( if ( pNewPair->m_vertexProg != m_pBoundPair->m_vertexProg ) m_FlushStats.m_nNewVS++; )
			}

#if GL_BATCH_PERF_ANALYSIS
			tmMessage( TELEMETRY_LEVEL2, TMMF_ICON_NOTE, "V:%s (V Regs:%u V Bone Regs:%u) F:%s (F Regs:%u)",
				m_drawingProgram[ kGLMVertexProgram ]->m_shaderName,
				m_drawingProgram[ kGLMVertexProgram ]->m_descs[kGLMGLSL].m_highWater,
				m_drawingProgram[ kGLMVertexProgram ]->m_descs[kGLMGLSL].m_VSHighWaterBone,
				m_drawingProgram[ kGLMFragmentProgram ]->m_shaderName,
				m_drawingProgram[ kGLMFragmentProgram ]->m_descs[kGLMGLSL].m_highWater );
#endif

			m_pBoundPair = pNewPair;

			// set the dirty levels appropriately since the program changed and has never seen any of the current values.
			m_programParamsF[kGLMVertexProgram].m_firstDirtySlotNonBone = 0;
			m_programParamsF[kGLMVertexProgram].m_dirtySlotHighWaterNonBone = m_drawingProgram[ kGLMVertexProgram ]->m_descs[kGLMGLSL].m_highWater;
			m_programParamsF[kGLMVertexProgram].m_dirtySlotHighWaterBone = m_drawingProgram[ kGLMVertexProgram ]->m_descs[kGLMGLSL].m_VSHighWaterBone;

			m_programParamsF[kGLMFragmentProgram].m_firstDirtySlotNonBone = 0;
			m_programParamsF[kGLMFragmentProgram].m_dirtySlotHighWaterNonBone = m_drawingProgram[ kGLMFragmentProgram ]->m_descs[kGLMGLSL].m_highWater;

			// bool and int dirty levels get set to max, we don't have actual high water marks for them
			// code which sends the values must clamp on these types.
			m_programParamsB[kGLMVertexProgram].m_dirtySlotCount = kGLMProgramParamBoolLimit;
			m_programParamsB[kGLMFragmentProgram].m_dirtySlotCount = kGLMProgramParamBoolLimit;

			// NOTE(review): vertex int params get the full limit but fragment
			// int params are reset to 0 -- presumably fragment int uniforms are
			// never uploaded (the int push below only handles i0 on the vertex
			// side), but confirm the asymmetry is intentional.
			m_programParamsI[kGLMVertexProgram].m_dirtySlotCount = kGLMProgramParamInt4Limit;
			m_programParamsI[kGLMFragmentProgram].m_dirtySlotCount = 0;

			// check fragment buffers used (MRT)
			if( pNewPair->m_fragmentProg->m_fragDataMask != m_fragDataMask )
			{
				gGL->glDrawBuffers( pNewPair->m_fragmentProg->m_numDrawBuffers, pNewPair->m_fragmentProg->m_drawBuffers );
				m_fragDataMask = pNewPair->m_fragmentProg->m_fragDataMask;
			}
		}
	}

	// widthheight packs width in the low 16 bits and height in the high 16.
	Assert( m_ViewportBox.GetData().width == (int)( m_ViewportBox.GetData().widthheight & 0xFFFF ) );
	Assert( m_ViewportBox.GetData().height == (int)( m_ViewportBox.GetData().widthheight >> 16 ) );

	m_pBoundPair->UpdateScreenUniform( m_ViewportBox.GetData().widthheight );

	GL_BATCH_PERF( m_FlushStats.m_nNumChangedSamplers += m_nNumDirtySamplers );

	// ------- dirty sampler state ---------- //
	// m_nDirtySamplers is a stack of sampler indices queued since the last
	// flush; a zero entry in m_nDirtySamplerFlags marks "pending in the dirty
	// list" (assumed from the assert + set-to-1 below -- confirm).
#if !defined( OSX ) // no support for sampler objects in OSX 10.6 (GL 2.1 profile)
	if ( m_bUseSamplerObjects)
	{
		// Sampler-object path: bind a cached GL sampler object per dirty unit.
		while ( m_nNumDirtySamplers )
		{
			const uint nSamplerIndex = m_nDirtySamplers[--m_nNumDirtySamplers];
			Assert( ( nSamplerIndex < GLM_SAMPLER_COUNT ) && ( !m_nDirtySamplerFlags[nSamplerIndex]) );
			m_nDirtySamplerFlags[nSamplerIndex] = 1;

			gGL->glBindSampler( nSamplerIndex, FindSamplerObject( m_samplers[nSamplerIndex].m_samp ) );
			GL_BATCH_PERF( m_FlushStats.m_nNumSamplingParamsChanged++ );

			// NOTE(review): this whole branch is inside #if !defined( OSX ), so
			// the #if defined( OSX ) region below is never compiled -- dead code.
#if defined( OSX ) // valid for OSX only if using GL 3.3 context
			CGLMTex *pTex = m_samplers[nSamplerIndex].m_pBoundTex;
			if( pTex && !( gGL->m_bHave_GL_EXT_texture_sRGB_decode ) )
			{
				// see if requested SRGB state differs from the known one
				bool texSRGB = ( pTex->m_layout->m_key.m_texFlags & kGLMTexSRGB ) != 0;
				bool glSampSRGB = m_samplers[nSamplerIndex].m_samp.m_packed.m_srgb;

				if ( texSRGB != glSampSRGB ) // mismatch
				{
					pTex->HandleSRGBMismatch( glSampSRGB, pTex->m_srgbFlipCount );
				}
			}
#endif
		}
	}
	else
#endif // if !defined( OSX )
	{
		// Legacy path (no sampler objects): push deltas straight onto the
		// bound texture's per-texture sampling state.
		while ( m_nNumDirtySamplers )
		{
			const uint nSamplerIndex = m_nDirtySamplers[--m_nNumDirtySamplers];
			Assert( ( nSamplerIndex < GLM_SAMPLER_COUNT ) && ( !m_nDirtySamplerFlags[nSamplerIndex]) );
			m_nDirtySamplerFlags[nSamplerIndex] = 1;

			CGLMTex *pTex = m_samplers[nSamplerIndex].m_pBoundTex;
			if ( ( pTex ) && ( !( pTex->m_SamplingParams == m_samplers[nSamplerIndex].m_samp ) ) )
			{
				SelectTMU( nSamplerIndex );
				m_samplers[nSamplerIndex].m_samp.DeltaSetToTarget( pTex->m_texGLTarget, pTex->m_SamplingParams );
				pTex->m_SamplingParams = m_samplers[nSamplerIndex].m_samp;
#if defined( OSX )
				if( pTex && !( gGL->m_bHave_GL_EXT_texture_sRGB_decode ) )
				{
					// see if requested SRGB state differs from the known one
					bool texSRGB = ( pTex->m_layout->m_key.m_texFlags & kGLMTexSRGB ) != 0;
					bool glSampSRGB = m_samplers[nSamplerIndex].m_samp.m_packed.m_srgb;

					if ( texSRGB != glSampSRGB ) // mismatch
					{
						pTex->HandleSRGBMismatch( glSampSRGB, pTex->m_srgbFlipCount );
					}
				}
#endif
			}
		}
	}

	// vertex stage --------------------------------------------------------------------
	if ( m_bUseBoneUniformBuffers )
	{
		// Bone-uniform path: the vertex float constants live in one logical
		// array with bone matrices occupying [DXABSTRACT_VS_FIRST_BONE_SLOT,
		// DXABSTRACT_VS_LAST_BONE_SLOT]; non-bone dirty ranges may straddle
		// that window and are uploaded in up to two glUniform4fv calls.
		// vertex stage --------------------------------------------------------------------
		if ( m_programParamsF[kGLMVertexProgram].m_dirtySlotHighWaterNonBone )
		{
			int firstDirtySlot = m_programParamsF[kGLMVertexProgram].m_firstDirtySlotNonBone;
			int dirtySlotHighWater = MIN( m_drawingProgram[kGLMVertexProgram]->m_descs[kGLMGLSL].m_highWater, m_programParamsF[kGLMVertexProgram].m_dirtySlotHighWaterNonBone );

			GLint vconstLoc = m_pBoundPair->m_locVertexParams;
			if ( ( vconstLoc >= 0 ) && ( dirtySlotHighWater > firstDirtySlot ) )
			{
#if GL_BATCH_TELEMETRY_ZONES
				tmZone( TELEMETRY_LEVEL2, TMZF_NONE, "VSNonBoneUniformUpdate %u %u", firstDirtySlot, dirtySlotHighWater );
#endif
				int numSlots = dirtySlotHighWater - DXABSTRACT_VS_FIRST_BONE_SLOT;

				// consts after the bones (c217 onwards), since we use the concatenated destination array vc[], upload these consts starting from vc[58]
				if( numSlots > 0 )
				{
					gGL->glUniform4fv( m_pBoundPair->m_UniformBufferParams[kGLMVertexProgram][DXABSTRACT_VS_FIRST_BONE_SLOT], numSlots, &m_programParamsF[kGLMVertexProgram].m_values[(DXABSTRACT_VS_LAST_BONE_SLOT+1)][0] );

					// Clamp the remaining range so the pre-bone upload below
					// stops at the start of the bone window.
					dirtySlotHighWater = DXABSTRACT_VS_FIRST_BONE_SLOT;

					GL_BATCH_PERF( m_nTotalVSUniformCalls++; )
					GL_BATCH_PERF( m_nTotalVSUniformsSet += numSlots; )

					GL_BATCH_PERF( m_FlushStats.m_nFirstVSConstant = DXABSTRACT_VS_FIRST_BONE_SLOT; )
					GL_BATCH_PERF( m_FlushStats.m_nNumVSConstants += numSlots; )
				}

				numSlots = dirtySlotHighWater - firstDirtySlot;

				// consts before the bones (c0-c57)
				if( numSlots > 0 )
				{
					gGL->glUniform4fv( m_pBoundPair->m_UniformBufferParams[kGLMVertexProgram][firstDirtySlot], dirtySlotHighWater - firstDirtySlot, &m_programParamsF[kGLMVertexProgram].m_values[firstDirtySlot][0] );

					GL_BATCH_PERF( m_nTotalVSUniformCalls++; )
					GL_BATCH_PERF( m_nTotalVSUniformsSet += dirtySlotHighWater - firstDirtySlot; )

					GL_BATCH_PERF( m_FlushStats.m_nFirstVSConstant = firstDirtySlot; )
					GL_BATCH_PERF( m_FlushStats.m_nNumVSConstants += (dirtySlotHighWater - firstDirtySlot); )
				}
			}

			// Reset dirty range to "empty" (first > any possible high water).
			m_programParamsF[kGLMVertexProgram].m_firstDirtySlotNonBone = 256;
			m_programParamsF[kGLMVertexProgram].m_dirtySlotHighWaterNonBone = 0;
		}

		if ( m_programParamsF[kGLMVertexProgram].m_dirtySlotHighWaterBone )
		{
			const GLint vconstBoneLoc = m_pBoundPair->m_locVertexBoneParams;
			if ( vconstBoneLoc >= 0 )
			{
				// Upload only as many bone registers as both the shader and the
				// client's usage hint say are live.
				int shaderSlotsBone = 0;
				if ( ( m_drawingProgram[kGLMVertexProgram]->m_descs[kGLMGLSL].m_VSHighWaterBone > 0 ) && ( m_nMaxUsedVertexProgramConstantsHint > DXABSTRACT_VS_FIRST_BONE_SLOT ) )
				{
					shaderSlotsBone = MIN( m_drawingProgram[kGLMVertexProgram]->m_descs[kGLMGLSL].m_VSHighWaterBone, m_nMaxUsedVertexProgramConstantsHint - DXABSTRACT_VS_FIRST_BONE_SLOT );
				}

				int dirtySlotHighWaterBone = MIN( shaderSlotsBone, m_programParamsF[kGLMVertexProgram].m_dirtySlotHighWaterBone );
				if ( dirtySlotHighWaterBone )
				{
					uint nNumBoneRegs = dirtySlotHighWaterBone;

#if GL_BATCH_TELEMETRY_ZONES
					tmZone( TELEMETRY_LEVEL2, TMZF_NONE, "VSBoneUniformUpdate %u", nNumBoneRegs );
#endif

					gGL->glUniform4fv( vconstBoneLoc, nNumBoneRegs, &m_programParamsF[kGLMVertexProgram].m_values[DXABSTRACT_VS_FIRST_BONE_SLOT][0] );

					GL_BATCH_PERF( m_nTotalVSUniformBoneCalls++; )
					GL_BATCH_PERF( m_nTotalVSUniformsBoneSet += nNumBoneRegs; )
					GL_BATCH_PERF( m_FlushStats.m_nNumVSBoneConstants += nNumBoneRegs; )
				}

				m_programParamsF[kGLMVertexProgram].m_dirtySlotHighWaterBone = 0;
			}
		}
	}
	else
	{
		// Non-bone-buffer path: one contiguous dirty range for all VS floats.
		if ( m_programParamsF[kGLMVertexProgram].m_dirtySlotHighWaterNonBone )
		{
			const int nMaxUsedShaderSlots = m_drawingProgram[kGLMVertexProgram]->m_descs[kGLMGLSL].m_highWater;

			int firstDirtySlot = m_programParamsF[kGLMVertexProgram].m_firstDirtySlotNonBone;
			int dirtySlotHighWater = MIN( nMaxUsedShaderSlots, m_programParamsF[kGLMVertexProgram].m_dirtySlotHighWaterNonBone );

			GLint vconstLoc = m_pBoundPair->m_locVertexParams;
			if ( ( vconstLoc >= 0 ) && ( dirtySlotHighWater > firstDirtySlot ) )
			{
#if GL_BATCH_TELEMETRY_ZONES
				tmZone( TELEMETRY_LEVEL2, TMZF_NONE, "VSNonBoneUniformUpdate %u %u", firstDirtySlot, dirtySlotHighWater );
#endif
				gGL->glUniform4fv( m_pBoundPair->m_UniformBufferParams[kGLMVertexProgram][firstDirtySlot], dirtySlotHighWater - firstDirtySlot, &m_programParamsF[kGLMVertexProgram].m_values[firstDirtySlot][0] );

				GL_BATCH_PERF( m_nTotalVSUniformCalls++; )
				GL_BATCH_PERF( m_nTotalVSUniformsSet += dirtySlotHighWater - firstDirtySlot; )

				GL_BATCH_PERF( m_FlushStats.m_nFirstVSConstant = firstDirtySlot; )
				GL_BATCH_PERF( m_FlushStats.m_nNumVSConstants += (dirtySlotHighWater - firstDirtySlot); )
			}

			m_programParamsF[kGLMVertexProgram].m_firstDirtySlotNonBone = 256;
			m_programParamsF[kGLMVertexProgram].m_dirtySlotHighWaterNonBone = 0;
		}
	}

	// see if VS uses i0, b0, b1, b2, b3.
	// use a glUniform1i to set any one of these if active. skip all of them if no dirties reported.
	// my kingdom for the UBO extension!

	// ------- bools ---------- //
	if ( m_pBoundPair->m_bHasBoolOrIntUniforms )
	{
		if ( m_programParamsB[kGLMVertexProgram].m_dirtySlotCount ) // optimize this later after the float param pushes are proven out
		{
			const uint nLimit = MIN( CGLMShaderPair::cMaxVertexShaderBoolUniforms, m_programParamsB[kGLMVertexProgram].m_dirtySlotCount );
			for ( uint i = 0; i < nLimit; ++i )
			{
				GLint constBoolLoc = m_pBoundPair->m_locVertexBool[i];
				if ( constBoolLoc >= 0 )
					gGL->glUniform1i( constBoolLoc, m_programParamsB[kGLMVertexProgram].m_values[i] );
			}

			m_programParamsB[kGLMVertexProgram].m_dirtySlotCount = 0;
		}

		if ( m_programParamsB[kGLMFragmentProgram].m_dirtySlotCount ) // optimize this later after the float param pushes are proven out
		{
			const uint nLimit = MIN( CGLMShaderPair::cMaxFragmentShaderBoolUniforms, m_programParamsB[kGLMFragmentProgram].m_dirtySlotCount );
			for ( uint i = 0; i < nLimit; ++i )
			{
				GLint constBoolLoc = m_pBoundPair->m_locFragmentBool[i];
				if ( constBoolLoc >= 0 )
					gGL->glUniform1i( constBoolLoc, m_programParamsB[kGLMFragmentProgram].m_values[i] );
			}

			m_programParamsB[kGLMFragmentProgram].m_dirtySlotCount = 0;
		}

		// Only i0 (vertex shader integer register 0) is ever pushed.
		if ( m_programParamsI[kGLMVertexProgram].m_dirtySlotCount )
		{
			GLint vconstInt0Loc = m_pBoundPair->m_locVertexInteger0; //glGetUniformLocationARB( prog, "i0");
			if ( vconstInt0Loc >= 0 )
			{
				gGL->glUniform1i( vconstInt0Loc, m_programParamsI[kGLMVertexProgram].m_values[0][0] ); //FIXME magic number
			}

			m_programParamsI[kGLMVertexProgram].m_dirtySlotCount = 0;
		}
	}

	// Each stream either points at a real vertex buffer or at the shared dummy buffer.
	Assert( ( m_pDevice->m_streams[0].m_vtxBuffer && ( m_pDevice->m_streams[0].m_vtxBuffer->m_vtxBuffer == m_pDevice->m_vtx_buffers[0] ) ) || ( ( !m_pDevice->m_streams[0].m_vtxBuffer ) && ( m_pDevice->m_vtx_buffers[0] == m_pDevice->m_pDummy_vtx_buffer ) ) );
	Assert( ( m_pDevice->m_streams[1].m_vtxBuffer && ( m_pDevice->m_streams[1].m_vtxBuffer->m_vtxBuffer == m_pDevice->m_vtx_buffers[1] ) ) || ( ( !m_pDevice->m_streams[1].m_vtxBuffer ) && ( m_pDevice->m_vtx_buffers[1] == m_pDevice->m_pDummy_vtx_buffer ) ) );
	Assert( ( m_pDevice->m_streams[2].m_vtxBuffer && ( m_pDevice->m_streams[2].m_vtxBuffer->m_vtxBuffer == m_pDevice->m_vtx_buffers[2] ) ) || ( ( !m_pDevice->m_streams[2].m_vtxBuffer ) && ( m_pDevice->m_vtx_buffers[2] == m_pDevice->m_pDummy_vtx_buffer ) ) );
	Assert( ( m_pDevice->m_streams[3].m_vtxBuffer && ( m_pDevice->m_streams[3].m_vtxBuffer->m_vtxBuffer == m_pDevice->m_vtx_buffers[3] ) ) || ( ( !m_pDevice->m_streams[3].m_vtxBuffer ) && ( m_pDevice->m_vtx_buffers[3] == m_pDevice->m_pDummy_vtx_buffer ) ) );

	// Declared separately from its assignment so the earlier goto (which jumps
	// past this point) does not skip an initialization, which C++ forbids.
	uint nCurTotalBufferRevision;
	nCurTotalBufferRevision = m_pDevice->m_vtx_buffers[0]->m_nRevision + m_pDevice->m_vtx_buffers[1]->m_nRevision + m_pDevice->m_vtx_buffers[2]->m_nRevision + m_pDevice->m_vtx_buffers[3]->m_nRevision;

	// If any of these inputs have changed, we need to enumerate through all of the expected GL vertex attribs and modify anything in the GL layer that have changed.
	// This is not always a win, but it is a net win on NVidia (by 1-4.8% depending on whether driver threading is enabled).
	if ( ( nCurTotalBufferRevision != m_CurAttribs.m_nTotalBufferRevision ) ||
		( m_CurAttribs.m_pVertDecl != m_pDevice->m_pVertDecl ) ||
		( m_CurAttribs.m_vtxAttribMap[0] != reinterpret_cast<const uint64 *>(m_pDevice->m_vertexShader->m_vtxAttribMap)[0] ) ||
		( m_CurAttribs.m_vtxAttribMap[1] != reinterpret_cast<const uint64 *>(m_pDevice->m_vertexShader->m_vtxAttribMap)[1] ) ||
		( memcmp( m_CurAttribs.m_streams, m_pDevice->m_streams, sizeof( m_pDevice->m_streams ) ) != 0 ) )
	{
		// This branch is taken 52.2% of the time in the L4D2 test1 (long) timedemo.
#if GL_BATCH_TELEMETRY_ZONES
		tmZone( TELEMETRY_LEVEL2, TMZF_NONE, "SetVertexAttribs" );
#endif
		// Snapshot the new inputs so the next flush can early-out.
		m_CurAttribs.m_nTotalBufferRevision = nCurTotalBufferRevision;
		m_CurAttribs.m_pVertDecl = m_pDevice->m_pVertDecl;
		m_CurAttribs.m_vtxAttribMap[0] = reinterpret_cast<const uint64 *>(m_pDevice->m_vertexShader->m_vtxAttribMap)[0];
		m_CurAttribs.m_vtxAttribMap[1] = reinterpret_cast<const uint64 *>(m_pDevice->m_vertexShader->m_vtxAttribMap)[1];
		memcpy( m_CurAttribs.m_streams, m_pDevice->m_streams, sizeof( m_pDevice->m_streams ) );

		unsigned char *pVertexShaderAttribMap = m_pDevice->m_vertexShader->m_vtxAttribMap;
		const int nMaxVertexAttributesToCheck = m_drawingProgram[ kGLMVertexProgram ]->m_maxVertexAttrs;

		IDirect3DVertexDeclaration9 *pVertDecl = m_pDevice->m_pVertDecl;
		const uint8 *pVertexAttribDescToStreamIndex = pVertDecl->m_VertexAttribDescToStreamIndex;

		// Walk each GL vertex attrib the shader can consume; each map entry packs
		// D3D usage in the high nibble and usage index in the low nibble (per the
		// Assert on pDeclElem below).
		for( int nMask = 1, nIndex = 0; nIndex < nMaxVertexAttributesToCheck; ++nIndex, nMask <<= 1 )
		{
			uint8 vertexShaderAttrib = pVertexShaderAttribMap[ nIndex ];

			uint nDeclIndex = pVertexAttribDescToStreamIndex[vertexShaderAttrib];
			if ( nDeclIndex == 0xFF )
			{
				// Not good - the vertex shader has an attribute which can't be located in the decl!
				// The D3D9 debug runtime is also going to complain.
				Assert( 0 );

				if ( m_lastKnownVertexAttribMask & nMask )
				{
					m_lastKnownVertexAttribMask &= ~nMask;

					gGL->glDisableVertexAttribArray( nIndex );
				}
				continue;
			}

			D3DVERTEXELEMENT9_GL *pDeclElem = &pVertDecl->m_elements[nDeclIndex];

			Assert( ( ( vertexShaderAttrib >> 4 ) == pDeclElem->m_dxdecl.Usage ) && ( ( vertexShaderAttrib & 0x0F ) == pDeclElem->m_dxdecl.UsageIndex) );

			const uint nStreamIndex = pDeclElem->m_dxdecl.Stream;
			const D3DStreamDesc *pStream = &m_pDevice->m_streams[ nStreamIndex ];

			CGLMBuffer *pBuf = m_pDevice->m_vtx_buffers[ nStreamIndex ];
			if ( pBuf == m_pDevice->m_pDummy_vtx_buffer )
			{
				Assert( pStream->m_vtxBuffer == NULL );

				// this shader doesn't use that pair.
				if ( m_lastKnownVertexAttribMask & nMask )
				{
					m_lastKnownVertexAttribMask &= ~nMask;

					gGL->glDisableVertexAttribArray( nIndex );
				}
				continue;
			}

			Assert( pStream->m_vtxBuffer->m_vtxBuffer == pBuf );

			int nBufOffset = pDeclElem->m_gldecl.m_offset + pStream->m_offset;
			Assert( nBufOffset >= 0 );
			Assert( nBufOffset < (int)pBuf->m_nSize );
			if ( pBuf->m_bUsingPersistentBuffer )
			{
				nBufOffset += pBuf->m_nPersistentBufferStartOffset;
			}

			SetBufAndVertexAttribPointer( nIndex, pBuf->GetHandle(),
				pStream->m_stride, pDeclElem->m_gldecl.m_datatype, pDeclElem->m_gldecl.m_normalized, pDeclElem->m_gldecl.m_nCompCount,
				reinterpret_cast< const GLvoid * >( reinterpret_cast< intp >( pBuf->m_pPseudoBuf ) + nBufOffset ),
				pBuf->m_nRevision );

			if ( !( m_lastKnownVertexAttribMask & nMask ) )
			{
				m_lastKnownVertexAttribMask |= nMask;

				gGL->glEnableVertexAttribArray( nIndex );
			}
		}

		// Disable any attribs beyond what this shader consumes that were left
		// enabled by a previous (wider) shader.
		for( int nIndex = nMaxVertexAttributesToCheck; nIndex < m_nNumSetVertexAttributes; nIndex++ )
		{
			gGL->glDisableVertexAttribArray( nIndex );
			m_lastKnownVertexAttribMask &= ~(1 << nIndex);
		}

		m_nNumSetVertexAttributes = nMaxVertexAttributesToCheck;
	}

	// fragment stage --------------------------------------------------------------------
	if ( m_programParamsF[kGLMFragmentProgram].m_dirtySlotHighWaterNonBone )
	{
		GLint fconstLoc;
		fconstLoc = m_pBoundPair->m_locFragmentParams;
		if ( fconstLoc >= 0 )
		{
			const int nMaxUsedShaderSlots = m_drawingProgram[kGLMFragmentProgram]->m_descs[kGLMGLSL].m_highWater;

			int firstDirtySlot = m_programParamsF[kGLMFragmentProgram].m_firstDirtySlotNonBone;
			int dirtySlotHighWater = MIN( nMaxUsedShaderSlots, m_programParamsF[kGLMFragmentProgram].m_dirtySlotHighWaterNonBone );

			if ( dirtySlotHighWater > firstDirtySlot )
			{
#if GL_BATCH_TELEMETRY_ZONES
				tmZone( TELEMETRY_LEVEL2, TMZF_NONE, "PSUniformUpdate %u %u", firstDirtySlot, dirtySlotHighWater );
#endif
				gGL->glUniform4fv( m_pBoundPair->m_UniformBufferParams[kGLMFragmentProgram][firstDirtySlot], dirtySlotHighWater - firstDirtySlot, &m_programParamsF[kGLMFragmentProgram].m_values[firstDirtySlot][0] );

				GL_BATCH_PERF( m_nTotalPSUniformCalls++; )
				GL_BATCH_PERF( m_nTotalPSUniformsSet += dirtySlotHighWater - firstDirtySlot; )

				GL_BATCH_PERF( m_FlushStats.m_nFirstPSConstant = firstDirtySlot; )
				GL_BATCH_PERF( m_FlushStats.m_nNumPSConstants += (dirtySlotHighWater - firstDirtySlot); )
			}

			m_programParamsF[kGLMFragmentProgram].m_firstDirtySlotNonBone = 256;
			m_programParamsF[kGLMFragmentProgram].m_dirtySlotHighWaterNonBone = 0;
		}
	}

	return;

	// Program pair validation failed: unbind and re-mark programs dirty so the
	// next batch retries pair selection.
flush_error_exit:
	m_pBoundPair = NULL;
	m_bDirtyPrograms = true;
	return;
}