Team Fortress 2 Source Code as of 22/4/2020

  1. //========= Copyright Valve Corporation, All rights reserved. ============//
  2. //
  3. // Purpose: Queued (thread-deferred) material system render context. Records IMatRenderContext calls and mesh data for later execution on the material system thread.
  4. //
  5. //=============================================================================
  6. #include "pch_materialsystem.h"
  7. #include "tier1/functors.h"
  8. #include "itextureinternal.h"
  9. #define MATSYS_INTERNAL
  10. #include "cmatqueuedrendercontext.h"
  11. #include "cmaterialsystem.h" // @HACKHACK
  12. // NOTE: This has to be the last file included!
  13. #include "tier0/memdbgon.h"
  14. ConVar mat_report_queue_status( "mat_report_queue_status", "0", FCVAR_MATERIAL_SYSTEM_THREAD );
  15. //-----------------------------------------------------------------------------
  16. // FastCopy: streaming copy used to move recorded vertex and index data into locked hardware mesh buffers
  17. //-----------------------------------------------------------------------------
  18. #if defined( _WIN32 )
  19. void FastCopy( byte *pDest, const byte *pSrc, size_t nBytes )
  20. {
  21. if ( !nBytes )
  22. {
  23. return;
  24. }
  25. #if !defined( _X360 )
  26. if ( (size_t)pDest % 16 == 0 && (size_t)pSrc % 16 == 0 )
  27. {
  28. const int BYTES_PER_FULL = 128;
  29. int nBytesFull = nBytes - ( nBytes % BYTES_PER_FULL );
  30. for ( byte *pLimit = pDest + nBytesFull; pDest < pLimit; pDest += BYTES_PER_FULL, pSrc += BYTES_PER_FULL )
  31. {
  32. // memcpy( pDest, pSrc, BYTES_PER_FULL);
  33. __asm
  34. {
  35. mov esi, pSrc
  36. mov edi, pDest
  37. movaps xmm0, [esi + 0]
  38. movaps xmm1, [esi + 16]
  39. movaps xmm2, [esi + 32]
  40. movaps xmm3, [esi + 48]
  41. movaps xmm4, [esi + 64]
  42. movaps xmm5, [esi + 80]
  43. movaps xmm6, [esi + 96]
  44. movaps xmm7, [esi + 112]
  45. movntps [edi + 0], xmm0
  46. movntps [edi + 16], xmm1
  47. movntps [edi + 32], xmm2
  48. movntps [edi + 48], xmm3
  49. movntps [edi + 64], xmm4
  50. movntps [edi + 80], xmm5
  51. movntps [edi + 96], xmm6
  52. movntps [edi + 112], xmm7
  53. }
  54. }
  55. nBytes -= nBytesFull;
  56. }
  57. if ( nBytes )
  58. {
  59. memcpy( pDest, pSrc, nBytes );
  60. }
  61. #else
  62. if ( (size_t)pDest % 4 == 0 && nBytes % 4 == 0 )
  63. {
  64. XMemCpyStreaming_WriteCombined( pDest, pSrc, nBytes );
  65. }
  66. else
  67. {
  68. // work around a bug in memcpy
  69. if ((size_t)pDest % 2 == 0 && nBytes == 4)
  70. {
  71. *(reinterpret_cast<short *>(pDest)) = *(reinterpret_cast<const short *>(pSrc));
  72. *(reinterpret_cast<short *>(pDest)+1) = *(reinterpret_cast<const short *>(pSrc)+1);
  73. }
  74. else
  75. {
  76. memcpy( pDest, pSrc, nBytes );
  77. }
  78. }
  79. #endif
  80. }
  81. #else
  82. #define FastCopy memcpy
  83. #endif
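// The aligned path above copies 128 bytes per iteration with movaps loads and movntps
// non-temporal stores (which avoid polluting the cache); the unaligned case and any
// tail bytes fall back to plain memcpy. A rough intrinsics equivalent of the unrolled
// inner loop, shown only as a sketch and not part of the original source:
//
//     #include <xmmintrin.h>
//     for ( int i = 0; i < BYTES_PER_FULL; i += 16 )
//     {
//         __m128 v = _mm_load_ps( (const float *)( pSrc + i ) );   // 16-byte aligned load
//         _mm_stream_ps( (float *)( pDest + i ), v );              // non-temporal store
//     }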
  84. //-----------------------------------------------------------------------------
  85. // CMatQueuedMesh: proxy mesh handed out by the queued render context. It records vertex and index data into system memory and defers the real dynamic-mesh build to the material system thread.
  86. //-----------------------------------------------------------------------------
  87. enum MatQueuedMeshFlags_t
  88. {
  89. MQM_BUFFERED = ( 1 << 0 ),
  90. MQM_FLEX = ( 1 << 1 ),
  91. };
  92. class CMatQueuedMesh : public IMesh
  93. {
  94. public:
  95. CMatQueuedMesh( CMatQueuedRenderContext *pOwner, IMatRenderContextInternal *pHardwareContext )
  96. : m_pLateBoundMesh( &m_pActualMesh ),
  97. m_pOwner( pOwner ),
  98. m_pCallQueue( pOwner->GetCallQueueInternal() ),
  99. m_pHardwareContext( pHardwareContext ),
  100. m_pVertexData( NULL ),
  101. m_pIndexData( NULL ),
  102. m_nVerts( 0 ),
  103. m_nIndices( 0 ),
  104. m_VertexSize( 0 ),
  105. m_Type(MATERIAL_TRIANGLES),
  106. m_pVertexOverride( NULL ),
  107. m_pIndexOverride ( NULL ),
  108. m_pActualMesh( NULL ),
  109. m_nActualVertexOffsetInBytes( 0 ),
  110. m_VertexFormat( 0 ),
  111. m_MorphFormat( 0 )
  112. {
  113. }
  114. CLateBoundPtr<IMesh> &AccessLateBoundMesh()
  115. {
  116. return m_pLateBoundMesh;
  117. }
  118. byte *GetVertexData() { return m_pVertexData; }
  119. uint16 *GetIndexData() { return m_pIndexData; }
  120. IMesh *DetachActualMesh() { IMesh *p = m_pActualMesh; m_pActualMesh = NULL; return p; }
  121. IMesh *GetActualMesh() { return m_pActualMesh; }
  122. int GetActualVertexOffsetInBytes() { return m_nActualVertexOffsetInBytes; }
  123. void DeferredGetDynamicMesh( VertexFormat_t vertexFormat, unsigned flags, IMesh* pVertexOverride, IMesh* pIndexOverride, IMaterialInternal *pMaterial )
  124. {
  125. if ( !( flags & MQM_FLEX ))
  126. {
  127. if ( vertexFormat == 0 )
  128. {
  129. m_pActualMesh = m_pHardwareContext->GetDynamicMesh( ( ( flags & MQM_BUFFERED ) != 0 ), pVertexOverride, pIndexOverride, pMaterial );
  130. }
  131. else
  132. {
  133. m_pActualMesh = m_pHardwareContext->GetDynamicMeshEx( vertexFormat, ( ( flags & MQM_BUFFERED ) != 0 ), pVertexOverride, pIndexOverride, pMaterial );
  134. }
  135. }
  136. else
  137. {
  138. m_pActualMesh = m_pHardwareContext->GetFlexMesh();
  139. }
  140. }
  141. bool OnGetDynamicMesh( VertexFormat_t vertexFormat, unsigned flags, IMesh* pVertexOverride, IMesh* pIndexOverride, IMaterialInternal *pMaterial, int nHWSkinBoneCount )
  142. {
  143. if ( !m_pVertexOverride && ( m_pVertexData || m_pIndexData ) )
  144. {
  145. CannotSupport();
  146. if ( IsDebug() )
  147. {
  148. Assert( !"Getting a dynamic mesh without resolving the previous one" );
  149. }
  150. else
  151. {
  152. Error( "Getting a dynamic mesh without resolving the previous one" );
  153. }
  154. }
  155. FreeBuffers();
  156. m_pVertexOverride = pVertexOverride;
  157. m_pIndexOverride = pIndexOverride;
  158. if ( !( flags & MQM_FLEX ) )
  159. {
  160. if ( pVertexOverride )
  161. {
  162. m_VertexFormat = pVertexOverride->GetVertexFormat();
  163. }
  164. else
  165. {
  166. // Remove VERTEX_FORMAT_COMPRESSED from the material's format (dynamic meshes don't
  167. // support compression, and all materials should support uncompressed verts too)
  168. m_VertexFormat = ( vertexFormat == 0 ) ? ( pMaterial->GetVertexFormat() & ~VERTEX_FORMAT_COMPRESSED ) : vertexFormat;
  169. if ( vertexFormat != 0 )
  170. {
  171. int nVertexFormatBoneWeights = NumBoneWeights( vertexFormat );
  172. if ( nHWSkinBoneCount < nVertexFormatBoneWeights )
  173. {
  174. nHWSkinBoneCount = nVertexFormatBoneWeights;
  175. }
  176. }
  177. // Force the requested number of bone weights
  178. m_VertexFormat &= ~VERTEX_BONE_WEIGHT_MASK;
  179. m_VertexFormat |= VERTEX_BONEWEIGHT( nHWSkinBoneCount );
  180. if ( nHWSkinBoneCount > 0 )
  181. {
  182. m_VertexFormat |= VERTEX_BONE_INDEX;
  183. }
  184. }
  185. }
  186. else
  187. {
  188. m_VertexFormat = VERTEX_POSITION | VERTEX_NORMAL | VERTEX_FORMAT_USE_EXACT_FORMAT;
  189. if ( g_pMaterialSystemHardwareConfig->SupportsPixelShaders_2_b() )
  190. {
  191. m_VertexFormat |= VERTEX_WRINKLE;
  192. }
  193. }
  194. MeshDesc_t temp;
  195. g_pShaderAPI->ComputeVertexDescription( 0, m_VertexFormat, temp );
  196. m_VertexSize = temp.m_ActualVertexSize;
  197. // queue up get of real dynamic mesh, allocate space for verts & indices
  198. m_pCallQueue->QueueCall( this, &CMatQueuedMesh::DeferredGetDynamicMesh, vertexFormat, flags, pVertexOverride, pIndexOverride, pMaterial );
  199. return true;
  200. }
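// Note on OnGetDynamicMesh(): nothing above touches the hardware mesh on the calling
// thread. The vertex format and per-vertex size are resolved locally (via
// ComputeVertexDescription with a null base pointer) so LockMesh/UnlockMesh can size
// the staging allocations, while the real GetDynamicMesh/GetFlexMesh call is queued as
// DeferredGetDynamicMesh for the material system thread.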
  201. void ModifyBegin( int firstVertex, int numVerts, int firstIndex, int numIndices, MeshDesc_t& desc )
  202. {
  203. CannotSupport();
  204. }
  205. void ModifyBeginEx( bool bReadOnly, int firstVertex, int numVerts, int firstIndex, int numIndices, MeshDesc_t& desc )
  206. {
  207. CannotSupport();
  208. }
  209. void ModifyEnd( MeshDesc_t& desc )
  210. {
  211. CannotSupport();
  212. }
  213. void GenerateSequentialIndexBuffer( unsigned short* pIndexMemory, int numIndices, int firstVertex )
  214. {
  215. Assert( pIndexMemory == m_pIndexData );
  216. m_pCallQueue->QueueCall( &::GenerateSequentialIndexBuffer, pIndexMemory, numIndices, firstVertex );
  217. }
  218. void GenerateQuadIndexBuffer( unsigned short* pIndexMemory, int numIndices, int firstVertex )
  219. {
  220. Assert( pIndexMemory == m_pIndexData );
  221. m_pCallQueue->QueueCall( &::GenerateQuadIndexBuffer, pIndexMemory, numIndices, firstVertex );
  222. }
  223. void GeneratePolygonIndexBuffer( unsigned short* pIndexMemory, int numIndices, int firstVertex )
  224. {
  225. Assert( pIndexMemory == m_pIndexData );
  226. m_pCallQueue->QueueCall( &::GeneratePolygonIndexBuffer, pIndexMemory, numIndices, firstVertex );
  227. }
  228. void GenerateLineStripIndexBuffer( unsigned short* pIndexMemory, int numIndices, int firstVertex )
  229. {
  230. Assert( pIndexMemory == m_pIndexData );
  231. m_pCallQueue->QueueCall( &::GenerateLineStripIndexBuffer, pIndexMemory, numIndices, firstVertex );
  232. }
  233. void GenerateLineLoopIndexBuffer( unsigned short* pIndexMemory, int numIndices, int firstVertex )
  234. {
  235. Assert( pIndexMemory == m_pIndexData );
  236. m_pCallQueue->QueueCall( &::GenerateLineLoopIndexBuffer, pIndexMemory, numIndices, firstVertex );
  237. }
  238. int VertexCount() const
  239. {
  240. return m_VertexSize ? m_nVerts : 0;
  241. }
  242. int IndexCount() const
  243. {
  244. return m_nIndices;
  245. }
  246. int GetVertexSize()
  247. {
  248. return m_VertexSize;
  249. }
  250. void SetPrimitiveType( MaterialPrimitiveType_t type )
  251. {
  252. m_Type = type;
  253. m_pCallQueue->QueueCall( m_pLateBoundMesh, &IMesh::SetPrimitiveType, type );
  254. }
  255. void SetColorMesh( IMesh *pColorMesh, int nVertexOffset )
  256. {
  257. m_pCallQueue->QueueCall( m_pLateBoundMesh, &IMesh::SetColorMesh, pColorMesh, nVertexOffset );
  258. }
  259. void Draw( CPrimList *pLists, int nLists )
  260. {
  261. CannotSupport();
  262. }
  263. void CopyToMeshBuilder( int iStartVert, int nVerts, int iStartIndex, int nIndices, int indexOffset, CMeshBuilder &builder )
  264. {
  265. CannotSupport();
  266. }
  267. void Spew( int numVerts, int numIndices, const MeshDesc_t & desc )
  268. {
  269. }
  270. void ValidateData( int numVerts, int numIndices, const MeshDesc_t & desc )
  271. {
  272. }
  273. void LockMesh( int numVerts, int numIndices, MeshDesc_t& desc )
  274. {
  275. if ( !m_pVertexOverride )
  276. {
  277. m_nVerts = numVerts;
  278. }
  279. else
  280. {
  281. m_nVerts = 0;
  282. }
  283. if ( !m_pIndexOverride )
  284. {
  285. m_nIndices = numIndices;
  286. }
  287. else
  288. {
  289. m_nIndices = 0;
  290. }
  291. if( numVerts > 0 )
  292. {
  293. Assert( m_VertexSize );
  294. Assert( !m_pVertexData );
  295. m_pVertexData = (byte *)m_pOwner->AllocVertices( numVerts, m_VertexSize );
  296. Assert( (unsigned)m_pVertexData % 16 == 0 );
  297. // Compute the vertex index..
  298. desc.m_nFirstVertex = 0;
  299. static_cast< VertexDesc_t* >( &desc )->m_nOffset = 0;
  300. // Set up the mesh descriptor
  301. g_pShaderAPI->ComputeVertexDescription( m_pVertexData, m_VertexFormat, desc );
  302. }
  303. else
  304. {
  305. desc.m_nFirstVertex = 0;
  306. static_cast< VertexDesc_t* >( &desc )->m_nOffset = 0;
  307. // Set up the mesh descriptor
  308. g_pShaderAPI->ComputeVertexDescription( 0, 0, desc );
  309. }
  310. if ( m_Type != MATERIAL_POINTS && numIndices > 0 )
  311. {
  312. Assert( !m_pIndexData );
  313. m_pIndexData = m_pOwner->AllocIndices( numIndices );
  314. desc.m_pIndices = m_pIndexData;
  315. desc.m_nIndexSize = 1;
  316. desc.m_nFirstIndex = 0;
  317. static_cast< IndexDesc_t* >( &desc )->m_nOffset = 0;
  318. }
  319. else
  320. {
  321. desc.m_pIndices = &gm_ScratchIndexBuffer[0];
  322. desc.m_nIndexSize = 0;
  323. desc.m_nFirstIndex = 0;
  324. static_cast< IndexDesc_t* >( &desc )->m_nOffset = 0;
  325. }
  326. }
  327. void UnlockMesh( int numVerts, int numIndices, MeshDesc_t& desc )
  328. {
  329. if ( m_pVertexData && numVerts < m_nVerts )
  330. {
  331. m_pVertexData = m_pOwner->ReallocVertices( m_pVertexData, m_nVerts, numVerts, m_VertexSize );
  332. }
  333. m_nVerts = numVerts;
  334. if ( m_pIndexData && numIndices < m_nIndices )
  335. {
  336. m_pIndexData = m_pOwner->ReallocIndices( m_pIndexData, m_nIndices, numIndices );
  337. }
  338. m_nIndices = numIndices;
  339. }
  340. void SetFlexMesh( IMesh *pMesh, int nVertexOffset )
  341. {
  342. m_pCallQueue->QueueCall( m_pLateBoundMesh, &IMesh::SetFlexMesh, pMesh, nVertexOffset );
  343. }
  344. void DisableFlexMesh()
  345. {
  346. m_pCallQueue->QueueCall( m_pLateBoundMesh, &IMesh::DisableFlexMesh );
  347. }
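// ExecuteDefferredBuild runs from the call queue on the material system thread: it
// locks the real mesh, block-copies the recorded vertex data (using the lowest-addressed
// attribute pointer when the format has no position stream), rebases the recorded
// indices by desc.m_nFirstVertex when the lock didn't start at vertex zero, then
// unlocks and returns the staging vertex/index memory to the owning context.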
  348. void ExecuteDefferredBuild( byte *pVertexData, int nVerts, int nBytesVerts, uint16 *pIndexData, int nIndices )
  349. {
  350. Assert( m_pActualMesh );
  351. MeshDesc_t desc;
  352. m_pActualMesh->LockMesh( nVerts, nIndices, desc );
  353. m_nActualVertexOffsetInBytes = desc.m_nFirstVertex * desc.m_ActualVertexSize;
  354. if ( pVertexData && desc.m_ActualVertexSize ) // if !desc.m_ActualVertexSize, device lost
  355. {
  356. void *pDest;
  357. if ( desc.m_VertexSize_Position != 0 )
  358. {
  359. pDest = desc.m_pPosition;
  360. }
  361. else
  362. {
  363. #define FindMin( desc, pCurrent, tag ) ( ( desc.m_VertexSize_##tag != 0 ) ? min( pCurrent, (void *)desc.m_p##tag ) : pCurrent )
  364. pDest = (void *)(((byte *)0) - 1);
  365. pDest = FindMin( desc, pDest, BoneWeight );
  366. pDest = FindMin( desc, pDest, BoneMatrixIndex );
  367. pDest = FindMin( desc, pDest, Normal );
  368. pDest = FindMin( desc, pDest, Color );
  369. pDest = FindMin( desc, pDest, Specular );
  370. pDest = FindMin( desc, pDest, TangentS );
  371. pDest = FindMin( desc, pDest, TangentT );
  372. pDest = FindMin( desc, pDest, Wrinkle );
  373. for ( int i = 0; i < VERTEX_MAX_TEXTURE_COORDINATES; i++ )
  374. {
  375. if ( desc.m_VertexSize_TexCoord[i] && desc.m_pTexCoord < pDest )
  376. {
  377. pDest = desc.m_pTexCoord;
  378. }
  379. }
  380. #undef FindMin
  381. }
  382. Assert( pDest );
  383. if ( pDest )
  384. {
  385. FastCopy( (byte *)pDest, pVertexData, nBytesVerts );
  386. }
  387. }
  388. if ( pIndexData && pIndexData != &gm_ScratchIndexBuffer[0] && desc.m_nIndexSize )
  389. {
  390. if ( !desc.m_nFirstVertex )
  391. {
  392. // AssertMsg(desc.m_pIndices & 0x03 == 0,"desc.m_pIndices is misaligned in CMatQueuedMesh::ExecuteDefferedBuild\n");
  393. FastCopy( (byte *)desc.m_pIndices, (byte *)pIndexData, nIndices * sizeof(*pIndexData) );
  394. }
  395. else
  396. {
  397. ALIGN16 uint16 tempIndices[16];
  398. int i = 0;
  399. if ( (size_t)desc.m_pIndices % 4 == 2 )
  400. {
  401. desc.m_pIndices[i] = pIndexData[i] + desc.m_nFirstVertex;
  402. i++;
  403. }
  404. while ( i < nIndices )
  405. {
  406. int nToCopy = min( (int)ARRAYSIZE(tempIndices), nIndices - i );
  407. for ( int j = 0; j < nToCopy; j++ )
  408. {
  409. tempIndices[j] = pIndexData[i+j] + desc.m_nFirstVertex;
  410. }
  411. FastCopy( (byte *)(desc.m_pIndices + i), (byte *)tempIndices, nToCopy * sizeof(uint16) );
  412. i += nToCopy;
  413. }
  414. }
  415. }
  416. m_pActualMesh->UnlockMesh( nVerts, nIndices, desc );
  417. if ( pIndexData && pIndexData != &gm_ScratchIndexBuffer[0])
  418. {
  419. m_pOwner->FreeIndices( pIndexData, nIndices );
  420. }
  421. if ( pVertexData )
  422. {
  423. m_pOwner->FreeVertices( pVertexData, nVerts, desc.m_ActualVertexSize );
  424. }
  425. }
  426. void QueueBuild( bool bDetachBuffers = true )
  427. {
  428. m_pCallQueue->QueueCall( this, &CMatQueuedMesh::ExecuteDefferredBuild, m_pVertexData, m_nVerts, m_nVerts * m_VertexSize, m_pIndexData, m_nIndices );
  429. if ( bDetachBuffers )
  430. {
  431. DetachBuffers();
  432. m_Type = MATERIAL_TRIANGLES;
  433. }
  434. }
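// Draw() decides whether the staging buffers can be detached: they are handed off when
// the call draws everything (firstIndex == -1 or numIndices == 0) or when the draw
// reaches the end of the recorded (or overridden) index data, and kept otherwise so
// several partial Draw() calls can consume the same recorded mesh. The IMesh::Draw
// itself is queued against the late-bound mesh pointer, which DeferredGetDynamicMesh
// fills in when it runs.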
  435. void Draw( int firstIndex = -1, int numIndices = 0 )
  436. {
  437. if ( !m_nVerts && !m_nIndices )
  438. {
  439. MarkAsDrawn();
  440. return;
  441. }
  442. void (IMesh::*pfnDraw)( int, int) = &IMesh::Draw; // need assignment to disambiguate overloaded function
  443. bool bDetachBuffers;
  444. if ( firstIndex == -1 || numIndices == 0 )
  445. {
  446. bDetachBuffers = true;
  447. }
  448. else if ( m_pIndexOverride )
  449. {
  450. bDetachBuffers = ( firstIndex + numIndices == m_pIndexOverride->IndexCount() );
  451. }
  452. else if ( !m_nIndices || firstIndex + numIndices == m_nIndices )
  453. {
  454. bDetachBuffers = true;
  455. }
  456. else
  457. {
  458. bDetachBuffers = false;
  459. }
  460. QueueBuild( bDetachBuffers );
  461. m_pCallQueue->QueueCall( m_pLateBoundMesh, pfnDraw, firstIndex, numIndices );
  462. }
  463. void MarkAsDrawn()
  464. {
  465. FreeBuffers();
  466. m_pCallQueue->QueueCall( m_pLateBoundMesh, &IMesh::MarkAsDrawn );
  467. }
  468. void FreeBuffers()
  469. {
  470. if ( m_pIndexData && m_pIndexData != &gm_ScratchIndexBuffer[0])
  471. {
  472. m_pOwner->FreeIndices( m_pIndexData, m_nIndices );
  473. m_pIndexData = NULL;
  474. }
  475. if ( m_pVertexData )
  476. {
  477. m_pOwner->FreeVertices( m_pVertexData, m_nVerts, m_VertexSize );
  478. m_pVertexData = NULL;
  479. }
  480. }
  481. void DetachBuffers()
  482. {
  483. m_pVertexData = NULL;
  484. m_pIndexData = NULL;
  485. }
  486. unsigned ComputeMemoryUsed()
  487. {
  488. return 0;
  489. }
  490. virtual VertexFormat_t GetVertexFormat() const
  491. {
  492. return m_VertexFormat;
  493. }
  494. virtual IMesh *GetMesh()
  495. {
  496. return this;
  497. }
  498. // FIXME: Implement!
  499. virtual bool Lock( int nMaxIndexCount, bool bAppend, IndexDesc_t& desc )
  500. {
  501. Assert( 0 );
  502. return false;
  503. }
  504. virtual void Unlock( int nWrittenIndexCount, IndexDesc_t& desc )
  505. {
  506. Assert( 0 );
  507. }
  508. virtual void ModifyBegin( bool bReadOnly, int nFirstIndex, int nIndexCount, IndexDesc_t& desc )
  509. {
  510. CannotSupport();
  511. }
  512. void ModifyEnd( IndexDesc_t& desc )
  513. {
  514. CannotSupport();
  515. }
  516. virtual void Spew( int nIndexCount, const IndexDesc_t & desc )
  517. {
  518. Assert( 0 );
  519. }
  520. virtual void ValidateData( int nIndexCount, const IndexDesc_t &desc )
  521. {
  522. Assert( 0 );
  523. }
  524. virtual bool Lock( int nVertexCount, bool bAppend, VertexDesc_t &desc )
  525. {
  526. Assert( 0 );
  527. return false;
  528. }
  529. virtual void Unlock( int nVertexCount, VertexDesc_t &desc )
  530. {
  531. Assert( 0 );
  532. }
  533. virtual void Spew( int nVertexCount, const VertexDesc_t &desc )
  534. {
  535. Assert( 0 );
  536. }
  537. virtual void ValidateData( int nVertexCount, const VertexDesc_t & desc )
  538. {
  539. Assert( 0 );
  540. }
  541. virtual bool IsDynamic() const
  542. {
  543. Assert( 0 );
  544. return false;
  545. }
  546. virtual MaterialIndexFormat_t IndexFormat() const
  547. {
  548. Assert( 0 );
  549. return MATERIAL_INDEX_FORMAT_UNKNOWN;
  550. }
  551. virtual void BeginCastBuffer( VertexFormat_t format )
  552. {
  553. Assert( 0 );
  554. }
  555. virtual void BeginCastBuffer( MaterialIndexFormat_t format )
  556. {
  557. Assert( 0 );
  558. }
  559. virtual void EndCastBuffer( )
  560. {
  561. Assert( 0 );
  562. }
  563. // Returns the number of vertices that can still be written into the buffer
  564. virtual int GetRoomRemaining() const
  565. {
  566. Assert( 0 );
  567. return 0;
  568. }
  569. //----------------------------------------------------------------------------
  570. static void DoDraw( int firstIndex = -1, int numIndices = 0 )
  571. {
  572. }
  573. private:
  574. IMesh *m_pActualMesh;
  575. int m_nActualVertexOffsetInBytes;
  576. CLateBoundPtr<IMesh> m_pLateBoundMesh;
  577. CMatQueuedRenderContext *m_pOwner;
  578. CMatCallQueue *m_pCallQueue;
  579. IMatRenderContextInternal *m_pHardwareContext;
  580. //-----------------------------------------------------
  581. // The vertex format we're using...
  582. VertexFormat_t m_VertexFormat;
  583. // The morph format we're using
  584. MorphFormat_t m_MorphFormat;
  585. byte *m_pVertexData;
  586. uint16 *m_pIndexData;
  587. int m_nVerts;
  588. int m_nIndices;
  589. unsigned short m_VertexSize;
  590. MaterialPrimitiveType_t m_Type;
  591. // Used in rendering sub-parts of the mesh
  592. //static unsigned int s_NumIndices;
  593. //static unsigned int s_FirstIndex;
  594. IMesh *m_pVertexOverride;
  595. IMesh *m_pIndexOverride;
  596. static unsigned short gm_ScratchIndexBuffer[6];
  597. };
  598. unsigned short CMatQueuedMesh::gm_ScratchIndexBuffer[6];
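// How a queued dynamic mesh flows through the proxy above, end to end (a minimal usage
// sketch for orientation only; pRenderContext is assumed to be a CMatQueuedRenderContext
// and the CMeshBuilder calls follow the usual Source mesh-building pattern):
//
//     IMesh *pMesh = pRenderContext->GetDynamicMesh( true, NULL, NULL, pMaterial );
//     CMeshBuilder builder;
//     builder.Begin( pMesh, MATERIAL_TRIANGLES, nVertexCount, nIndexCount );
//     // ... vertex and index writes land in the staging memory set up by LockMesh() ...
//     builder.End();
//     pMesh->Draw();  // queues ExecuteDefferredBuild plus the real IMesh::Draw
//
// The hardware mesh is only obtained and filled when the queue is replayed in
// CMatQueuedRenderContext::CallQueued().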
  599. //-----------------------------------------------------------------------------
  600. // Queued context setup: creates the shared CMatQueuedMesh proxy and the vertex/index staging memory
  601. //-----------------------------------------------------------------------------
  602. bool CMatQueuedRenderContext::Init( CMaterialSystem *pMaterialSystem, CMatRenderContextBase *pHardwareContext )
  603. {
  604. BaseClass::Init();
  605. m_pMaterialSystem = pMaterialSystem;
  606. m_pHardwareContext = pHardwareContext;
  607. m_pQueuedMesh = new CMatQueuedMesh( this, pHardwareContext );
  608. MEM_ALLOC_CREDIT();
  609. int nSize = 16 * 1024 * 1024;
  610. int nCommitSize = 128 * 1024;
  611. #if defined(DEDICATED)
  612. Assert( !"CMatQueuedRenderContext shouldn't be initialized on dedicated servers..." );
  613. nSize = nCommitSize = 1024;
  614. #endif
  615. bool bVerticesInit = m_Vertices.Init( nSize, nCommitSize );
  616. bool bIndicesInit = m_Indices.Init( nSize, nCommitSize );
  617. if ( !bVerticesInit || !bIndicesInit )
  618. {
  619. return false;
  620. }
  621. return true;
  622. }
  623. //-----------------------------------------------------------------------------
  624. //
  625. //-----------------------------------------------------------------------------
  626. void CMatQueuedRenderContext::Shutdown()
  627. {
  628. if ( !m_pHardwareContext )
  629. {
  630. return;
  631. }
  632. Assert( !m_pCurrentMaterial );
  633. delete m_pQueuedMesh;
  634. m_pMaterialSystem = NULL;
  635. m_pHardwareContext = NULL;
  636. m_pQueuedMesh = NULL;
  637. m_Vertices.Term();
  638. m_Indices.Term();
  639. BaseClass::Shutdown();
  640. Assert(m_queue.Count() == 0);
  641. }
  642. //-----------------------------------------------------------------------------
  643. //
  644. //-----------------------------------------------------------------------------
  645. void CMatQueuedRenderContext::Free()
  646. {
  647. m_Vertices.FreeAll();
  648. m_Indices.FreeAll();
  649. }
  650. //-----------------------------------------------------------------------------
  651. //
  652. //-----------------------------------------------------------------------------
  653. void CMatQueuedRenderContext::CompactMemory()
  654. {
  655. BaseClass::CompactMemory();
  656. m_Vertices.FreeAll();
  657. m_Indices.FreeAll();
  658. }
  659. //-----------------------------------------------------------------------------
  660. // BeginQueue: snapshot the initial state (back buffer size, fog, bone count) so reads on the queuing thread return sensible values
  661. //-----------------------------------------------------------------------------
  662. void CMatQueuedRenderContext::BeginQueue( CMatRenderContextBase *pInitialState )
  663. {
  664. if ( !pInitialState )
  665. {
  666. pInitialState = m_pHardwareContext;
  667. }
  668. CMatRenderContextBase::InitializeFrom( pInitialState );
  669. g_pShaderAPI->GetBackBufferDimensions( m_WidthBackBuffer, m_HeightBackBuffer );
  670. m_FogMode = pInitialState->GetFogMode();
  671. m_nBoneCount = pInitialState->GetCurrentNumBones();
  672. pInitialState->GetFogDistances( &m_flFogStart, &m_flFogEnd, &m_flFogZ );
  673. }
  674. //-----------------------------------------------------------------------------
  675. // EndQueue: optionally dispatch the queue, then drop the references held by the queued state (materials, lightmaps, frame buffer copies, render targets)
  676. //-----------------------------------------------------------------------------
  677. void CMatQueuedRenderContext::EndQueue( bool bCallQueued )
  678. {
  679. if ( bCallQueued )
  680. {
  681. CallQueued();
  682. }
  683. int i;
  684. if ( m_pCurrentMaterial )
  685. {
  686. m_pCurrentMaterial = NULL;
  687. }
  688. if ( m_pUserDefinedLightmap )
  689. {
  690. m_pUserDefinedLightmap = NULL;
  691. }
  692. if ( m_pLocalCubemapTexture )
  693. {
  694. m_pLocalCubemapTexture = NULL;
  695. }
  696. for ( i = 0; i < MAX_FB_TEXTURES; i++ )
  697. {
  698. if ( m_pCurrentFrameBufferCopyTexture[i] )
  699. {
  700. m_pCurrentFrameBufferCopyTexture[i] = NULL;
  701. }
  702. }
  703. for ( i = 0; i < m_RenderTargetStack.Count(); i++ )
  704. {
  705. for ( int j = 0; j < MAX_RENDER_TARGETS; j++ )
  706. {
  707. if ( m_RenderTargetStack[i].m_pRenderTargets[j] )
  708. {
  709. m_RenderTargetStack[i].m_pRenderTargets[j] = NULL;
  710. }
  711. }
  712. }
  713. m_RenderTargetStack.Clear();
  714. }
  715. void CMatQueuedRenderContext::Bind( IMaterial *iMaterial, void *proxyData )
  716. {
  717. if ( !iMaterial )
  718. {
  719. if( !g_pErrorMaterial )
  720. return;
  721. }
  722. else
  723. {
  724. iMaterial = ((IMaterialInternal *)iMaterial)->GetRealTimeVersion(); //always work with the real time versions of materials internally
  725. }
  726. CMatRenderContextBase::Bind( iMaterial, proxyData );
  727. // We've always gotta call the bind proxy (assuming there is one)
  728. // so we can copy off the material vars at this point.
  729. IMaterialInternal* pIMaterial = GetCurrentMaterialInternal();
  730. pIMaterial->CallBindProxy( proxyData );
  731. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::Bind, iMaterial, proxyData );
  732. }
  733. //-----------------------------------------------------------------------------
  734. //
  735. //-----------------------------------------------------------------------------
  736. void CMatQueuedRenderContext::BeginRender()
  737. {
  738. if ( ++m_iRenderDepth == 1 )
  739. {
  740. VPROF_INCREMENT_GROUP_COUNTER( "render/CMatQBeginRender", COUNTER_GROUP_TELEMETRY, 1 );
  741. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::BeginRender );
  742. }
  743. }
  744. //-----------------------------------------------------------------------------
  745. //
  746. //-----------------------------------------------------------------------------
  747. void CMatQueuedRenderContext::EndRender()
  748. {
  749. if ( --m_iRenderDepth == 0 )
  750. {
  751. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::EndRender );
  752. }
  753. }
  754. //-----------------------------------------------------------------------------
  755. // CallQueued: replay every recorded call, then release the per-frame vertex and index staging memory
  756. //-----------------------------------------------------------------------------
  757. void CMatQueuedRenderContext::CallQueued( bool bTermAfterCall )
  758. {
  759. if ( mat_report_queue_status.GetBool() )
  760. {
  761. Msg( "%d calls queued for %llu bytes in parameters and overhead, %d bytes verts, %d bytes indices, %d bytes other\n", m_queue.Count(), (uint64)(m_queue.GetMemoryUsed()), m_Vertices.GetUsed(), m_Indices.GetUsed(), RenderDataSizeUsed() );
  762. }
  763. m_queue.CallQueued();
  764. m_Vertices.FreeAll( false );
  765. m_Indices.FreeAll( false );
  766. if ( bTermAfterCall )
  767. {
  768. Shutdown();
  769. }
  770. }
  771. //-----------------------------------------------------------------------------
  772. //
  773. //-----------------------------------------------------------------------------
  774. void CMatQueuedRenderContext::FlushQueued()
  775. {
  776. m_queue.Flush();
  777. }
  778. //-----------------------------------------------------------------------------
  779. //
  780. //-----------------------------------------------------------------------------
  781. ICallQueue *CMatQueuedRenderContext::GetCallQueue()
  782. {
  783. return &m_CallQueueExternal;
  784. }
  785. //-----------------------------------------------------------------------------
  786. //
  787. //-----------------------------------------------------------------------------
  788. void CMatQueuedRenderContext::SetRenderTargetEx( int nRenderTargetID, ITexture *pNewTarget )
  789. {
  790. CMatRenderContextBase::SetRenderTargetEx( nRenderTargetID, pNewTarget );
  791. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetRenderTargetEx, nRenderTargetID, pNewTarget );
  792. }
  793. //-----------------------------------------------------------------------------
  794. //
  795. //-----------------------------------------------------------------------------
  796. void CMatQueuedRenderContext::GetRenderTargetDimensions( int &width, int &height) const
  797. {
  798. // Target at top of stack
  799. ITexture *pTOS = NULL;
  800. if ( m_RenderTargetStack.Count() )
  801. {
  802. pTOS = m_RenderTargetStack.Top().m_pRenderTargets[ 0 ];
  803. }
  804. // If top of stack isn't the back buffer, get dimensions from the texture
  805. if ( pTOS != NULL )
  806. {
  807. width = pTOS->GetActualWidth();
  808. height = pTOS->GetActualHeight();
  809. }
  810. else // otherwise, get them from the shader API
  811. {
  812. width = m_WidthBackBuffer;
  813. height = m_HeightBackBuffer;
  814. }
  815. }
  816. //-----------------------------------------------------------------------------
  817. //
  818. //-----------------------------------------------------------------------------
  819. void CMatQueuedRenderContext::Viewport( int x, int y, int width, int height )
  820. {
  821. CMatRenderContextBase::Viewport( x, y, width, height );
  822. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::Viewport, x, y, width, height );
  823. }
  824. //-----------------------------------------------------------------------------
  825. //
  826. //-----------------------------------------------------------------------------
  827. void CMatQueuedRenderContext::SetLight( int i, const LightDesc_t &desc )
  828. {
  829. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetLight, i, RefToVal( desc ) );
  830. }
  831. //-----------------------------------------------------------------------------
  832. //
  833. //-----------------------------------------------------------------------------
  834. void CMatQueuedRenderContext::SetLightingOrigin( Vector vLightingOrigin )
  835. {
  836. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetLightingOrigin, vLightingOrigin );
  837. }
  838. //-----------------------------------------------------------------------------
  839. //
  840. //-----------------------------------------------------------------------------
  841. void CMatQueuedRenderContext::SetAmbientLightCube( LightCube_t cube )
  842. {
  843. // FIXME: does compiler do the right thing, is envelope needed?
  844. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetAmbientLightCube, CUtlEnvelope<Vector4D>( &cube[0], 6 ) );
  845. }
  846. //-----------------------------------------------------------------------------
  847. // Bone count
  848. //-----------------------------------------------------------------------------
  849. void CMatQueuedRenderContext::SetNumBoneWeights( int nBoneCount )
  850. {
  851. m_nBoneCount = nBoneCount;
  852. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetNumBoneWeights, nBoneCount );
  853. }
  854. int CMatQueuedRenderContext::GetCurrentNumBones( ) const
  855. {
  856. return m_nBoneCount;
  857. }
  858. //-----------------------------------------------------------------------------
  859. // Fog state: mirrored locally so reads don't stall, and forwarded to the hardware context
  860. //-----------------------------------------------------------------------------
  861. void CMatQueuedRenderContext::FogMode( MaterialFogMode_t fogMode )
  862. {
  863. m_FogMode = fogMode;
  864. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::FogMode, fogMode );
  865. }
  866. void CMatQueuedRenderContext::FogStart( float fStart )
  867. {
  868. m_flFogStart = fStart;
  869. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::FogStart, fStart );
  870. }
  871. void CMatQueuedRenderContext::FogEnd( float fEnd )
  872. {
  873. m_flFogEnd = fEnd;
  874. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::FogEnd, fEnd );
  875. }
  876. void CMatQueuedRenderContext::FogMaxDensity( float flMaxDensity )
  877. {
  878. m_flFogMaxDensity = flMaxDensity;
  879. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::FogMaxDensity, flMaxDensity );
  880. }
  881. void CMatQueuedRenderContext::SetFogZ( float fogZ )
  882. {
  883. m_flFogZ = fogZ;
  884. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetFogZ, fogZ );
  885. }
  886. MaterialFogMode_t CMatQueuedRenderContext::GetFogMode( void )
  887. {
  888. return m_FogMode;
  889. }
  890. void CMatQueuedRenderContext::FogColor3f( float r, float g, float b )
  891. {
  892. FogColor3ub( clamp( (int)(r * 255.0f), 0, 255 ), clamp( (int)(g * 255.0f), 0, 255 ), clamp( (int)(b * 255.0f), 0, 255 ) );
  893. }
  894. void CMatQueuedRenderContext::FogColor3fv( float const* rgb )
  895. {
  896. FogColor3ub( clamp( (int)(rgb[0] * 255.0f), 0, 255 ), clamp( (int)(rgb[1] * 255.0f), 0, 255 ), clamp( (int)(rgb[2] * 255.0f), 0, 255 ) );
  897. }
  898. void CMatQueuedRenderContext::FogColor3ub( unsigned char r, unsigned char g, unsigned char b )
  899. {
  900. m_FogColor.r = r;
  901. m_FogColor.g = g;
  902. m_FogColor.b = b;
  903. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::FogColor3ub, r, g, b );
  904. }
  905. void CMatQueuedRenderContext::FogColor3ubv( unsigned char const* rgb )
  906. {
  907. FogColor3ub( rgb[0], rgb[1], rgb[2] );
  908. }
  909. void CMatQueuedRenderContext::GetFogColor( unsigned char *rgb )
  910. {
  911. rgb[0] = m_FogColor.r;
  912. rgb[1] = m_FogColor.g;
  913. rgb[2] = m_FogColor.b;
  914. }
  915. void CMatQueuedRenderContext::GetFogDistances( float *fStart, float *fEnd, float *fFogZ )
  916. {
  917. if( fStart )
  918. *fStart = m_flFogStart;
  919. if( fEnd )
  920. *fEnd = m_flFogEnd;
  921. if( fFogZ )
  922. *fFogZ = m_flFogZ;
  923. }
  924. //-----------------------------------------------------------------------------
  925. //
  926. //-----------------------------------------------------------------------------
  927. void CMatQueuedRenderContext::GetViewport( int& x, int& y, int& width, int& height ) const
  928. {
  929. // Verify valid top of RT stack
  930. Assert ( m_RenderTargetStack.Count() > 0 );
  931. // Grab the top of stack
  932. const RenderTargetStackElement_t& element = m_RenderTargetStack.Top();
  933. // If either dimension is negative, set to full bounds of current target
  934. if ( (element.m_nViewW < 0) || (element.m_nViewH < 0) )
  935. {
  936. // Viewport origin at target origin
  937. x = y = 0;
  938. // If target is back buffer
  939. if ( element.m_pRenderTargets[0] == NULL )
  940. {
  941. width = m_WidthBackBuffer;
  942. height = m_HeightBackBuffer;
  943. }
  944. else // if target is texture
  945. {
  946. width = element.m_pRenderTargets[0]->GetActualWidth();
  947. height = element.m_pRenderTargets[0]->GetActualHeight();
  948. }
  949. }
  950. else // use the bounds from the stack directly
  951. {
  952. x = element.m_nViewX;
  953. y = element.m_nViewY;
  954. width = element.m_nViewW;
  955. height = element.m_nViewH;
  956. }
  957. }
  958. //-----------------------------------------------------------------------------
  959. //
  960. //-----------------------------------------------------------------------------
  961. void CMatQueuedRenderContext::SyncToken( const char *p )
  962. {
  963. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SyncToken, CUtlEnvelope<const char *>( p ) );
  964. }
  965. //-----------------------------------------------------------------------------
  966. // Dynamic mesh access: every path returns the single CMatQueuedMesh proxy owned by this context
  967. //-----------------------------------------------------------------------------
  968. IMesh* CMatQueuedRenderContext::GetDynamicMesh( bool buffered, IMesh* pVertexOverride, IMesh* pIndexOverride, IMaterial *pAutoBind )
  969. {
  970. if( pAutoBind )
  971. Bind( pAutoBind, NULL );
  972. if ( pVertexOverride && pIndexOverride )
  973. {
  974. // Use the new batch API
  975. DebuggerBreak();
  976. return NULL;
  977. }
  978. if ( pVertexOverride )
  979. {
  980. if ( CompressionType( pVertexOverride->GetVertexFormat() ) != VERTEX_COMPRESSION_NONE )
  981. {
  982. // UNDONE: support compressed dynamic meshes if needed (pro: less VB memory, con: time spent compressing)
  983. DebuggerBreak();
  984. return NULL;
  985. }
  986. }
  987. // With more than 1 bone, the last weight is implied (1 minus the sum of the others), so it is not counted here.
  988. int nCurrentBoneCount = GetCurrentNumBones();
  989. Assert( nCurrentBoneCount <= 4 );
  990. if ( nCurrentBoneCount > 1 )
  991. {
  992. --nCurrentBoneCount;
  993. }
  994. m_pQueuedMesh->OnGetDynamicMesh( 0, ( buffered ) ? MQM_BUFFERED : 0, pVertexOverride, pIndexOverride, GetCurrentMaterialInternal(), nCurrentBoneCount );
  995. return m_pQueuedMesh;
  996. }
  997. IMesh* CMatQueuedRenderContext::GetDynamicMeshEx( VertexFormat_t vertexFormat, bool bBuffered, IMesh* pVertexOverride, IMesh* pIndexOverride, IMaterial *pAutoBind )
  998. {
  999. if( pAutoBind )
  1000. {
  1001. Bind( pAutoBind, NULL );
  1002. }
  1003. if ( pVertexOverride && pIndexOverride )
  1004. {
  1005. // Use the new batch API
  1006. DebuggerBreak();
  1007. return NULL;
  1008. }
  1009. if ( pVertexOverride )
  1010. {
  1011. if ( CompressionType( pVertexOverride->GetVertexFormat() ) != VERTEX_COMPRESSION_NONE )
  1012. {
  1013. // UNDONE: support compressed dynamic meshes if needed (pro: less VB memory, con: time spent compressing)
  1014. DebuggerBreak();
  1015. return NULL;
  1016. }
  1017. }
  1018. // With more than 1 bone, the last weight is implied (1 minus the sum of the others), so it is not counted here.
  1019. int nCurrentBoneCount = GetCurrentNumBones();
  1020. Assert( nCurrentBoneCount <= 4 );
  1021. if ( nCurrentBoneCount > 1 )
  1022. {
  1023. --nCurrentBoneCount;
  1024. }
  1025. m_pQueuedMesh->OnGetDynamicMesh( vertexFormat, ( bBuffered ) ? MQM_BUFFERED : 0, pVertexOverride, pIndexOverride, GetCurrentMaterialInternal(), nCurrentBoneCount );
  1026. return m_pQueuedMesh;
  1027. }
  1028. //-----------------------------------------------------------------------------
  1029. // Worst-case vertex budget, computed from the current dynamic VB size and the material's uncompressed vertex size
  1030. //-----------------------------------------------------------------------------
  1031. int CMatQueuedRenderContext::GetMaxVerticesToRender( IMaterial *pMaterial )
  1032. {
  1033. pMaterial = ((IMaterialInternal *)pMaterial)->GetRealTimeVersion(); //always work with the real time version of materials internally.
  1034. MeshDesc_t temp;
  1035. // Be conservative, assume no compression (in here, we don't know if the caller will use a compressed VB or not)
  1036. // FIXME: allow the caller to specify which compression type should be used to compute size from the vertex format
  1037. // (this can vary between multiple VBs/Meshes using the same material)
  1038. VertexFormat_t materialFormat = pMaterial->GetVertexFormat() & ~VERTEX_FORMAT_COMPRESSED;
  1039. g_pShaderAPI->ComputeVertexDescription( 0, materialFormat, temp );
  1040. int maxVerts = g_pShaderAPI->GetCurrentDynamicVBSize() / temp.m_ActualVertexSize;
  1041. if ( maxVerts > 65535 )
  1042. {
  1043. maxVerts = 65535;
  1044. }
  1045. return maxVerts;
  1046. }
  1047. //-----------------------------------------------------------------------------
  1048. //
  1049. //-----------------------------------------------------------------------------
  1050. void CMatQueuedRenderContext::GetMaxToRender( IMesh *pMesh, bool bMaxUntilFlush, int *pMaxVerts, int *pMaxIndices )
  1051. {
  1052. Assert( !bMaxUntilFlush );
  1053. *pMaxVerts = g_pShaderAPI->GetCurrentDynamicVBSize() / m_pQueuedMesh->GetVertexSize();
  1054. if ( *pMaxVerts > 65535 )
  1055. {
  1056. *pMaxVerts = 65535;
  1057. }
  1058. *pMaxIndices = INDEX_BUFFER_SIZE;
  1059. }
  1060. //-----------------------------------------------------------------------------
  1061. //
  1062. //-----------------------------------------------------------------------------
  1063. IMesh *CMatQueuedRenderContext::GetFlexMesh()
  1064. {
  1065. m_pQueuedMesh->OnGetDynamicMesh( 0, MQM_FLEX, NULL, NULL, NULL, 0 );
  1066. return m_pQueuedMesh;
  1067. }
  1068. //-----------------------------------------------------------------------------
  1069. //
  1070. //-----------------------------------------------------------------------------
  1071. OcclusionQueryObjectHandle_t CMatQueuedRenderContext::CreateOcclusionQueryObject()
  1072. {
  1073. OcclusionQueryObjectHandle_t h = g_pOcclusionQueryMgr->CreateOcclusionQueryObject();
  1074. m_queue.QueueCall( g_pOcclusionQueryMgr, &COcclusionQueryMgr::OnCreateOcclusionQueryObject, h );
  1075. return h;
  1076. }
  1077. int CMatQueuedRenderContext::OcclusionQuery_GetNumPixelsRendered( OcclusionQueryObjectHandle_t h )
  1078. {
  1079. m_queue.QueueCall( g_pOcclusionQueryMgr, &COcclusionQueryMgr::OcclusionQuery_IssueNumPixelsRenderedQuery, h );
  1080. return g_pOcclusionQueryMgr->OcclusionQuery_GetNumPixelsRendered( h, false );
  1081. }
  1082. //-----------------------------------------------------------------------------
  1083. //
  1084. //-----------------------------------------------------------------------------
  1085. void CMatQueuedRenderContext::SetFlashlightState( const FlashlightState_t &s, const VMatrix &m )
  1086. {
  1087. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetFlashlightState, RefToVal( s ), RefToVal( m ) );
  1088. }
  1089. //-----------------------------------------------------------------------------
  1090. //
  1091. //-----------------------------------------------------------------------------
  1092. bool CMatQueuedRenderContext::EnableClipping( bool bEnable )
  1093. {
  1094. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::EnableClipping, bEnable );
  1095. return BaseClass::EnableClipping( bEnable );
  1096. }
  1097. //-----------------------------------------------------------------------------
  1098. //
  1099. //-----------------------------------------------------------------------------
  1100. void CMatQueuedRenderContext::UserClipTransform( const VMatrix &m )
  1101. {
  1102. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::UserClipTransform, RefToVal( m ) );
  1103. }
  1104. //-----------------------------------------------------------------------------
  1105. //
  1106. //-----------------------------------------------------------------------------
  1107. void CMatQueuedRenderContext::GetWindowSize( int &width, int &height ) const
  1108. {
  1109. width = m_WidthBackBuffer;
  1110. height = m_HeightBackBuffer;
  1111. }
  1112. //-----------------------------------------------------------------------------
  1113. //
  1114. //-----------------------------------------------------------------------------
  1115. void CMatQueuedRenderContext::DrawScreenSpaceRectangle(
  1116. IMaterial *pMaterial,
  1117. int destx, int desty,
  1118. int width, int height,
  1119. float src_texture_x0, float src_texture_y0, // which texel you want to appear at
  1120. // destx/y
  1121. float src_texture_x1, float src_texture_y1, // which texel you want to appear at
  1122. // destx+width-1, desty+height-1
  1123. int src_texture_width, int src_texture_height, // needed for fixup
  1124. void *pClientRenderable,
  1125. int nXDice, int nYDice ) // Amount to tessellate the quad
  1126. {
  1127. IMaterial *pRealTimeVersionMaterial = ((IMaterialInternal *)pMaterial)->GetRealTimeVersion();
  1128. pRealTimeVersionMaterial->CallBindProxy( pClientRenderable );
  1129. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::DrawScreenSpaceRectangle, pMaterial, destx, desty, width, height, src_texture_x0, src_texture_y0, src_texture_x1, src_texture_y1, src_texture_width, src_texture_height, pClientRenderable, nXDice, nYDice );
  1130. }
  1131. //-----------------------------------------------------------------------------
  1132. //
  1133. //-----------------------------------------------------------------------------
  1134. void CMatQueuedRenderContext::LoadBoneMatrix( int i, const matrix3x4_t &m )
  1135. {
  1136. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::LoadBoneMatrix, i, RefToVal( m ) );
  1137. }
  1138. //-----------------------------------------------------------------------------
  1139. //
  1140. //-----------------------------------------------------------------------------
  1141. void CMatQueuedRenderContext::CopyRenderTargetToTextureEx( ITexture *pTexture, int i, Rect_t *pSrc, Rect_t *pDst )
  1142. {
  1143. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::CopyRenderTargetToTextureEx, pTexture, i, CUtlEnvelope<Rect_t>(pSrc), CUtlEnvelope<Rect_t>(pDst) );
  1144. }
  1145. //-----------------------------------------------------------------------------
  1146. //
  1147. //-----------------------------------------------------------------------------
  1148. void CMatQueuedRenderContext::CopyTextureToRenderTargetEx( int i, ITexture *pTexture, Rect_t *pSrc, Rect_t *pDst )
  1149. {
  1150. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::CopyTextureToRenderTargetEx, i, pTexture, CUtlEnvelope<Rect_t>(pSrc), CUtlEnvelope<Rect_t>(pDst) );
  1151. }
  1152. //-----------------------------------------------------------------------------
  1153. //
  1154. //-----------------------------------------------------------------------------
  1155. bool CMatQueuedRenderContext::OnDrawMesh( IMesh *pMesh, int firstIndex, int numIndices )
  1156. {
  1157. void (IMesh::*pfnDraw)( int, int) = &IMesh::Draw; // need assignment to disambiguate overloaded function
  1158. m_queue.QueueCall( pMesh, pfnDraw, firstIndex, numIndices );
  1159. return false;
  1160. }
  1161. //-----------------------------------------------------------------------------
  1162. //
  1163. //-----------------------------------------------------------------------------
  1164. bool CMatQueuedRenderContext::OnDrawMesh( IMesh *pMesh, CPrimList *pLists, int nLists )
  1165. {
  1166. CMatRenderData< CPrimList > rdPrimList( this, nLists, pLists );
  1167. m_queue.QueueCall( this, &CMatQueuedRenderContext::DeferredDrawPrimList, pMesh, rdPrimList.Base(), nLists );
  1168. return false;
  1169. }
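// The CPrimList array is copied into per-frame render data above so it stays valid
// until DeferredDrawPrimList() replays the draw from the queue.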
  1170. void CMatQueuedRenderContext::DeferredDrawPrimList( IMesh *pMesh, CPrimList *pLists, int nLists )
  1171. {
  1172. pMesh->Draw( pLists, nLists );
  1173. }
  1174. //-----------------------------------------------------------------------------
  1175. //
  1176. //-----------------------------------------------------------------------------
  1177. void CMatQueuedRenderContext::DeferredSetFlexMesh( IMesh *pStaticMesh, int nVertexOffsetInBytes )
  1178. {
  1179. pStaticMesh->SetFlexMesh( m_pQueuedMesh->GetActualMesh(), m_pQueuedMesh->GetActualVertexOffsetInBytes() );
  1180. }
  1181. bool CMatQueuedRenderContext::OnSetFlexMesh( IMesh *pStaticMesh, IMesh *pMesh, int nVertexOffsetInBytes )
  1182. {
  1183. Assert( pMesh == m_pQueuedMesh || !pMesh );
  1184. if ( pMesh )
  1185. {
  1186. m_pQueuedMesh->QueueBuild();
  1187. m_queue.QueueCall( this, &CMatQueuedRenderContext::DeferredSetFlexMesh, pStaticMesh, nVertexOffsetInBytes );
  1188. }
  1189. else
  1190. {
  1191. m_queue.QueueCall( pStaticMesh, &IMesh::SetFlexMesh, (IMesh *)NULL, 0 );
  1192. }
  1193. return false;
  1194. }
  1195. //-----------------------------------------------------------------------------
  1196. //
  1197. //-----------------------------------------------------------------------------
  1198. bool CMatQueuedRenderContext::OnSetColorMesh( IMesh *pStaticMesh, IMesh *pMesh, int nVertexOffsetInBytes )
  1199. {
  1200. m_queue.QueueCall( pStaticMesh, &IMesh::SetColorMesh, pMesh, nVertexOffsetInBytes );
  1201. return false;
  1202. }
  1203. //-----------------------------------------------------------------------------
  1204. //
  1205. //-----------------------------------------------------------------------------
  1206. bool CMatQueuedRenderContext::OnSetPrimitiveType( IMesh *pMesh, MaterialPrimitiveType_t type )
  1207. {
  1208. m_queue.QueueCall( pMesh, &IMesh::SetPrimitiveType, type );
  1209. return false;
  1210. }
  1211. //-----------------------------------------------------------------------------
  1212. //
  1213. //-----------------------------------------------------------------------------
  1214. bool CMatQueuedRenderContext::OnFlushBufferedPrimitives()
  1215. {
  1216. m_queue.QueueCall( g_pShaderAPI, &IShaderAPI::FlushBufferedPrimitives );
  1217. return false;
  1218. }
  1219. //-----------------------------------------------------------------------------
  1220. //
  1221. //-----------------------------------------------------------------------------
  1222. inline void CMatQueuedRenderContext::QueueMatrixSync()
  1223. {
  1224. void (IMatRenderContext::*pfnLoadMatrix)( const VMatrix & ) = &IMatRenderContext::LoadMatrix; // need assignment to disambiguate overloaded function
  1225. m_queue.QueueCall( m_pHardwareContext, pfnLoadMatrix, RefToVal( AccessCurrentMatrix() ) );
  1226. }
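// The matrix mutators below all follow the same pattern: update the base class's local
// matrix stack immediately (so state reads on the queuing thread stay correct), then
// either queue the matching hardware call or, where the result depends on the
// accumulated matrix, queue a full LoadMatrix of the locally computed result via
// QueueMatrixSync().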
  1227. void CMatQueuedRenderContext::MatrixMode( MaterialMatrixMode_t mode )
  1228. {
  1229. CMatRenderContextBase::MatrixMode( mode );
  1230. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::MatrixMode, mode );
  1231. }
  1232. void CMatQueuedRenderContext::PushMatrix()
  1233. {
  1234. CMatRenderContextBase::PushMatrix();
  1235. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::PushMatrix );
  1236. }
  1237. void CMatQueuedRenderContext::PopMatrix()
  1238. {
  1239. CMatRenderContextBase::PopMatrix();
  1240. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::PopMatrix );
  1241. }
  1242. void CMatQueuedRenderContext::LoadMatrix( const VMatrix& matrix )
  1243. {
  1244. CMatRenderContextBase::LoadMatrix( matrix );
  1245. QueueMatrixSync();
  1246. }
  1247. void CMatQueuedRenderContext::LoadMatrix( const matrix3x4_t& matrix )
  1248. {
  1249. CMatRenderContextBase::LoadMatrix( matrix );
  1250. QueueMatrixSync();
  1251. }
  1252. void CMatQueuedRenderContext::MultMatrix( const VMatrix& matrix )
  1253. {
  1254. CMatRenderContextBase::MultMatrix( matrix );
  1255. QueueMatrixSync();
  1256. }
  1257. void CMatQueuedRenderContext::MultMatrix( const matrix3x4_t& matrix )
  1258. {
  1259. CMatRenderContextBase::MultMatrix( VMatrix( matrix ) );
  1260. QueueMatrixSync();
  1261. }
  1262. void CMatQueuedRenderContext::MultMatrixLocal( const VMatrix& matrix )
  1263. {
  1264. CMatRenderContextBase::MultMatrixLocal( matrix );
  1265. QueueMatrixSync();
  1266. }
  1267. void CMatQueuedRenderContext::MultMatrixLocal( const matrix3x4_t& matrix )
  1268. {
  1269. CMatRenderContextBase::MultMatrixLocal( VMatrix( matrix ) );
  1270. QueueMatrixSync();
  1271. }
  1272. void CMatQueuedRenderContext::LoadIdentity()
  1273. {
  1274. CMatRenderContextBase::LoadIdentity();
  1275. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::LoadIdentity );
  1276. }
  1277. void CMatQueuedRenderContext::Ortho( double left, double top, double right, double bottom, double zNear, double zFar )
  1278. {
  1279. CMatRenderContextBase::Ortho( left, top, right, bottom, zNear, zFar );
  1280. QueueMatrixSync();
  1281. }
  1282. void CMatQueuedRenderContext::PerspectiveX( double flFovX, double flAspect, double flZNear, double flZFar )
  1283. {
  1284. CMatRenderContextBase::PerspectiveX( flFovX, flAspect, flZNear, flZFar );
  1285. QueueMatrixSync();
  1286. }
  1287. void CMatQueuedRenderContext::PerspectiveOffCenterX( double flFovX, double flAspect, double flZNear, double flZFar, double bottom, double top, double left, double right )
  1288. {
  1289. CMatRenderContextBase::PerspectiveOffCenterX( flFovX, flAspect, flZNear, flZFar, bottom, top, left, right );
  1290. QueueMatrixSync();
  1291. }
  1292. void CMatQueuedRenderContext::PickMatrix( int x, int y, int nWidth, int nHeight )
  1293. {
  1294. CMatRenderContextBase::PickMatrix( x, y, nWidth, nHeight );
  1295. QueueMatrixSync();
  1296. }
  1297. void CMatQueuedRenderContext::Rotate( float flAngle, float x, float y, float z )
  1298. {
  1299. CMatRenderContextBase::Rotate( flAngle, x, y, z );
  1300. QueueMatrixSync();
  1301. }
  1302. void CMatQueuedRenderContext::Translate( float x, float y, float z )
  1303. {
  1304. CMatRenderContextBase::Translate( x, y, z );
  1305. QueueMatrixSync();
  1306. }
  1307. void CMatQueuedRenderContext::Scale( float x, float y, float z )
  1308. {
  1309. CMatRenderContextBase::Scale( x, y, z );
  1310. QueueMatrixSync();
  1311. }
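// Batch API: BeginBatch() captures the index data recorded into the queued mesh and
// defers the real work to DeferredBeginBatch(), which, on the material system thread,
// grabs a fresh dynamic mesh, copies just the indices into it via ExecuteDefferredBuild,
// and hands the detached mesh to the hardware context's BeginBatch. BindBatch, DrawBatch
// and EndBatch are straight pass-throughs to the hardware context.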
  1312. void CMatQueuedRenderContext::BeginBatch( IMesh* pIndices )
  1313. {
  1314. Assert( pIndices == (IMesh *)m_pQueuedMesh );
  1315. m_queue.QueueCall( this, &CMatQueuedRenderContext::DeferredBeginBatch, m_pQueuedMesh->GetIndexData(), m_pQueuedMesh->IndexCount() );
  1316. m_pQueuedMesh->DetachBuffers();
  1317. }
  1318. void CMatQueuedRenderContext::BindBatch( IMesh* pVertices, IMaterial *pAutoBind )
  1319. {
  1320. Assert( pVertices != (IMesh *)m_pQueuedMesh );
  1321. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::BindBatch, pVertices, pAutoBind );
  1322. }
  1323. void CMatQueuedRenderContext::DrawBatch(int firstIndex, int numIndices )
  1324. {
  1325. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::DrawBatch, firstIndex, numIndices );
  1326. }
  1327. void CMatQueuedRenderContext::EndBatch()
  1328. {
  1329. m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::EndBatch );
  1330. }
  1331. void CMatQueuedRenderContext::DeferredBeginBatch( uint16 *pIndexData, int nIndices )
  1332. {
  1333. m_pQueuedMesh->DeferredGetDynamicMesh( 0, false, NULL, NULL, NULL );
  1334. m_pQueuedMesh->ExecuteDefferredBuild( NULL, 0, 0, pIndexData, nIndices );
  1335. m_pHardwareContext->BeginBatch( m_pQueuedMesh->DetachActualMesh() );
  1336. }
  1337. //-----------------------------------------------------------------------------
  1338. // Memory allocation calls for the queued mesh, et al.
  1339. //-----------------------------------------------------------------------------
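// The staging memory handed out below comes from the m_Vertices / m_Indices stacks set
// up in Init() (16 MB reserved, 128 KB commit granularity). If an allocation fails on
// Win32, the code gathers diagnostics (commit limit, whether VirtualAlloc would have
// been called and what it returned) and then deliberately writes through a null pointer
// so the failure lands in a minidump with the status encoded in the stored value.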
  1340. byte *CMatQueuedRenderContext::AllocVertices( int nVerts, int nVertexSize )
  1341. {
  1342. MEM_ALLOC_CREDIT();
  1343. #if defined(_WIN32) && !defined(_X360)
  1344. const byte *pNextAlloc = (const byte *)(m_Vertices.GetBase()) + m_Vertices.GetUsed() + AlignValue( nVerts * nVertexSize, 16 );
  1345. const byte *pCommitLimit = (const byte *)(m_Vertices.GetBase()) + m_Vertices.GetSize();
  1346. #endif
  1347. void *pResult = m_Vertices.Alloc( nVerts * nVertexSize, false );
  1348. #if defined(_WIN32) && !defined(_X360)
  1349. if ( !pResult )
  1350. {
  1351. // Force a crash with useful minidump info in the registers.
  1352. uint64 status = 0x31415926;
  1353. // Print some information to the console so that it's picked up in the minidump comment.
  1354. Msg( "AllocVertices( %d, %d ) on %p failed. m_Vertices is based at %p with a size of 0x%x.\n", nVerts, nVertexSize, this, m_Vertices.GetBase(), m_Vertices.GetSize() );
  1355. Msg( "%d vertices used.\n", m_Vertices.GetUsed() );
  1356. if ( pNextAlloc > pCommitLimit )
  1357. {
  1358. Msg( "VirtualAlloc would have been called. %p > %p.\n", pNextAlloc, pCommitLimit );
  1359. const byte *pNewCommitLimit = AlignValue( pNextAlloc, 128 * 1024 );
  1360. const uint32 commitSize = pNewCommitLimit - pCommitLimit;
  1361. const void *pRet = VirtualAlloc( (void *)pCommitLimit, commitSize, MEM_COMMIT, PAGE_READWRITE );
  1362. if ( !pRet )
  1363. status = GetLastError();
  1364. Msg( "VirtualAlloc( %p, %d ) returned %p on repeat. VirtualAlloc %s with code %x.\n", pCommitLimit, commitSize, pRet, (pRet != NULL) ? "succeeded" : "failed", (uint32) status );
  1365. }
  1366. else
  1367. {
  1368. Msg( "VirtualAlloc would not have been called. %p <= %p.\n", pNextAlloc, pCommitLimit );
  1369. }
  1370. // Now crash.
  1371. *(volatile uint64 *)0 = status << 32 | m_Vertices.GetUsed();
  1372. }
  1373. #endif
  1374. return (byte *) pResult;
  1375. }
  1376. uint16 *CMatQueuedRenderContext::AllocIndices( int nIndices )
  1377. {
  1378. MEM_ALLOC_CREDIT();
  1379. #if defined(_WIN32) && !defined(_X360)
  1380. const byte *pNextAlloc = (const byte *)(m_Indices.GetBase()) + m_Indices.GetUsed() + AlignValue( nIndices * sizeof(uint16), 16 );
  1381. const byte *pCommitLimit = (const byte *)(m_Indices.GetBase()) + m_Indices.GetSize();
  1382. #endif
  1383. void *pResult = m_Indices.Alloc( nIndices * sizeof(uint16), false );
  1384. #if defined(_WIN32) && !defined(_X360)
  1385. if ( !pResult )
  1386. {
  1387. // Force a crash with useful minidump info in the registers.
  1388. uint64 status = 0x31415926;
  1389. // Print some information to the console so that it's picked up in the minidump comment.
  1390. Msg( "AllocIndices( %d ) on %p failed. m_Indices is based at %p with a size of 0x%x.\n", nIndices, this, m_Indices.GetBase(), m_Indices.GetSize() );
  1391. Msg( "%d indices used.\n", m_Indices.GetUsed() );
  1392. if ( pNextAlloc > pCommitLimit )
  1393. {
  1394. Msg( "VirtualAlloc would have been called. %p > %p.\n", pNextAlloc, pCommitLimit );
  1395. const byte *pNewCommitLimit = AlignValue( pNextAlloc, 128 * 1024 );
  1396. const uint32 commitSize = pNewCommitLimit - pCommitLimit;
  1397. const void *pRet = VirtualAlloc( (void *)pCommitLimit, commitSize, MEM_COMMIT, PAGE_READWRITE );
  1398. if ( !pRet )
  1399. status = GetLastError();
  1400. Msg( "VirtualAlloc( %p, %d ) returned %p on repeat. VirtualAlloc %s with code %x.\n", pCommitLimit, commitSize, pRet, (pRet != NULL) ? "succeeded" : "failed", (uint32) status );
  1401. }
  1402. else
  1403. {
  1404. Msg( "VirtualAlloc would not have been called. %p <= %p.\n", pNextAlloc, pCommitLimit );
  1405. }
  1406. // Now crash.
  1407. *(volatile uint64 *)0 = status << 32 | m_Indices.GetUsed();
  1408. }
  1409. #endif
  1410. return (uint16 *) pResult;
  1411. }
  1412. byte *CMatQueuedRenderContext::ReallocVertices( byte *pVerts, int nVertsOld, int nVertsNew, int nVertexSize )
  1413. {
  1414. Assert( nVertsNew <= nVertsOld );
  1415. if ( nVertsNew < nVertsOld )
  1416. {
  1417. unsigned nBytes = ( ( nVertsOld - nVertsNew ) * nVertexSize );
  1418. m_Vertices.FreeToAllocPoint( AlignValue( m_Vertices.GetCurrentAllocPoint() - nBytes, 16), false ); // memstacks 128 bit aligned
  1419. }
  1420. return pVerts;
  1421. }
  1422. uint16 *CMatQueuedRenderContext::ReallocIndices( uint16 *pIndices, int nIndicesOld, int nIndicesNew )
  1423. {
  1424. Assert( nIndicesNew <= nIndicesOld );
  1425. if ( nIndicesNew < nIndicesOld )
  1426. {
  1427. unsigned nBytes = ( ( nIndicesOld - nIndicesNew ) * sizeof(uint16) );
  1428. m_Indices.FreeToAllocPoint( AlignValue( m_Indices.GetCurrentAllocPoint() - nBytes, 16 ), false ); // memstacks 128 bit aligned
  1429. }
  1430. return pIndices;
  1431. }
  1432. void CMatQueuedRenderContext::FreeVertices( byte *pVerts, int nVerts, int nVertexSize )
  1433. {
  1434. // free at end of call dispatch
  1435. }
  1436. void CMatQueuedRenderContext::FreeIndices( uint16 *pIndices, int nIndices )
  1437. {
  1438. // free at end of call dispatch
  1439. }
  1440. //------------------------------------------------------------------------------
  1441. // Color correction related methods
  1442. //------------------------------------------------------------------------------
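// Color correction lookups are handled synchronously: each call takes the material
// system lock and talks to ColorCorrectionSystem() directly instead of going through
// the call queue.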
  1443. ColorCorrectionHandle_t CMatQueuedRenderContext::AddLookup( const char *pName )
  1444. {
  1445. MaterialLock_t hLock = m_pMaterialSystem->Lock();
  1446. ColorCorrectionHandle_t hCC = ColorCorrectionSystem()->AddLookup( pName );
  1447. m_pMaterialSystem->Unlock( hLock );
  1448. return hCC;
  1449. }
  1450. bool CMatQueuedRenderContext::RemoveLookup( ColorCorrectionHandle_t handle )
  1451. {
  1452. MaterialLock_t hLock = m_pMaterialSystem->Lock();
  1453. bool bRemoved = ColorCorrectionSystem()->RemoveLookup( handle );
  1454. m_pMaterialSystem->Unlock( hLock );
  1455. return bRemoved;
  1456. }
  1457. void CMatQueuedRenderContext::LockLookup( ColorCorrectionHandle_t handle )
  1458. {
  1459. MaterialLock_t hLock = m_pMaterialSystem->Lock();
  1460. ColorCorrectionSystem()->LockLookup( handle );
  1461. m_pMaterialSystem->Unlock( hLock );
  1462. }
  1463. void CMatQueuedRenderContext::LoadLookup( ColorCorrectionHandle_t handle, const char *pLookupName )
  1464. {
  1465. MaterialLock_t hLock = m_pMaterialSystem->Lock();
  1466. ColorCorrectionSystem()->LoadLookup( handle, pLookupName );
  1467. m_pMaterialSystem->Unlock( hLock );
  1468. }
  1469. void CMatQueuedRenderContext::UnlockLookup( ColorCorrectionHandle_t handle )
  1470. {
  1471. MaterialLock_t hLock = m_pMaterialSystem->Lock();
  1472. ColorCorrectionSystem()->UnlockLookup( handle );
  1473. m_pMaterialSystem->Unlock( hLock );
  1474. }
  1475. // NOTE: These are synchronous calls! The rendering thread is stopped, the current queue is drained and the pixels are read
  1476. // NOTE: We should also have a queued read pixels in the API for doing mid frame reads (as opposed to screenshots)
  1477. void CMatQueuedRenderContext::ReadPixels( int x, int y, int width, int height, unsigned char *data, ImageFormat dstFormat )
  1478. {
  1479. EndRender();
  1480. MaterialLock_t hLock = m_pMaterialSystem->Lock();
  1481. this->CallQueued(false);
  1482. g_pShaderAPI->ReadPixels( x, y, width, height, data, dstFormat );
  1483. m_pMaterialSystem->Unlock( hLock );
  1484. BeginRender();
  1485. }
  1486. void CMatQueuedRenderContext::ReadPixelsAndStretch( Rect_t *pSrcRect, Rect_t *pDstRect, unsigned char *pBuffer, ImageFormat dstFormat, int nDstStride )
  1487. {
  1488. EndRender();
  1489. MaterialLock_t hLock = m_pMaterialSystem->Lock();
  1490. this->CallQueued(false);
  1491. g_pShaderAPI->ReadPixels( pSrcRect, pDstRect, pBuffer, dstFormat, nDstStride );
  1492. m_pMaterialSystem->Unlock( hLock );
  1493. BeginRender();
  1494. }