Counter Strike : Global Offensive Source Code
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

2502 lines
80 KiB

  1. //========== Copyright 2005, Valve Corporation, All rights reserved. ========
  2. //
  3. // Purpose:
  4. //
  5. //=============================================================================
  6. #include "pch_materialsystem.h"
  7. #include "tier1/functors.h"
  8. #include "tier1/fmtstr.h"
  9. #include "itextureinternal.h"
  10. #ifndef _PS3
  11. #define MATSYS_INTERNAL
  12. #endif
  13. #include "cmatqueuedrendercontext.h"
  14. #include "cmaterialsystem.h" // @HACKHACK
  15. // NOTE: This has to be the last file included!
  16. #include "tier0/memdbgon.h"
// Queued-rendering status reporting convar (default off). Flagged
// FCVAR_MATERIAL_SYSTEM_THREAD; presumably read from the material system
// thread -- its consumers are outside this file chunk, so verify there.
ConVar mat_report_queue_status( "mat_report_queue_status", "0", FCVAR_MATERIAL_SYSTEM_THREAD );

#if defined( _PS3 ) || defined( _OSX )
// These platforms reach the shader API through the accessor function
// rather than a global pointer.
#define g_pShaderAPI ShaderAPI()
#endif
  21. //-----------------------------------------------------------------------------
  22. //
  23. //-----------------------------------------------------------------------------
#if defined( _WIN32 ) && !defined( _WIN64 )
// Streaming copy used to move queued vertex/index data into locked dynamic
// buffers. On 32-bit Windows, when both pointers are 16-byte aligned, the
// bulk of the copy goes through SSE non-temporal stores (movntps), 128 bytes
// per iteration, to avoid polluting the cache with data we won't re-read;
// the tail (and any unaligned input) falls back to memcpy.
void FastCopy( byte *pDest, const byte *pSrc, size_t nBytes )
{
	if ( !nBytes )
	{
		return;
	}

#if !defined( _X360 )
	// SSE fast path requires 16-byte alignment of both source and destination
	if ( (size_t)pDest % 16 == 0 && (size_t)pSrc % 16 == 0 )
	{
		const int BYTES_PER_FULL = 128;
		int nBytesFull = nBytes - ( nBytes % BYTES_PER_FULL );
		for ( byte *pLimit = pDest + nBytesFull; pDest < pLimit; pDest += BYTES_PER_FULL, pSrc += BYTES_PER_FULL )
		{
			// memcpy( pDest, pSrc, BYTES_PER_FULL);
			// 8 aligned 16-byte loads into xmm0-7, then 8 non-temporal stores.
			__asm
			{
				mov esi, pSrc
				mov edi, pDest

				movaps xmm0, [esi + 0]
				movaps xmm1, [esi + 16]
				movaps xmm2, [esi + 32]
				movaps xmm3, [esi + 48]
				movaps xmm4, [esi + 64]
				movaps xmm5, [esi + 80]
				movaps xmm6, [esi + 96]
				movaps xmm7, [esi + 112]

				movntps [edi + 0], xmm0
				movntps [edi + 16], xmm1
				movntps [edi + 32], xmm2
				movntps [edi + 48], xmm3
				movntps [edi + 64], xmm4
				movntps [edi + 80], xmm5
				movntps [edi + 96], xmm6
				movntps [edi + 112], xmm7
			}
		}
		nBytes -= nBytesFull;
	}

	// Sub-128-byte remainder, or fully unaligned input
	if ( nBytes )
	{
		memcpy( pDest, pSrc, nBytes );
	}
#else
	// 360: use the XDK streaming copy when alignment/size permit
	if ( (size_t)pDest % 4 == 0 && nBytes % 4 == 0 )
	{
		XMemCpyStreaming_WriteCombined( pDest, pSrc, nBytes );
	}
	else
	{
		// work around a bug in memcpy
		if ((size_t)pDest % 2 == 0 && nBytes == 4)
		{
			*(reinterpret_cast<short *>(pDest)) = *(reinterpret_cast<const short *>(pSrc));
			*(reinterpret_cast<short *>(pDest)+1) = *(reinterpret_cast<const short *>(pSrc)+1);
		}
		else
		{
			memcpy( pDest, pSrc, nBytes );
		}
	}
#endif
}
#else
// All other platforms: plain memcpy
#define FastCopy memcpy
#endif
// Defined below; snapshots a CMatQueuedMesh's queued state for one frame
class CCachedPerFrameMeshData;

//-----------------------------------------------------------------------------
// Queued mesh, used for dynamic meshes
//-----------------------------------------------------------------------------
// Stand-in IMesh handed out by the queued render context. The main (client)
// thread locks and fills queue-local memory; the actual hardware mesh is
// built later on the material system thread (see the MST_* methods).
class CMatQueuedMesh : public IMesh
{
	// Methods of IVertexBuffer, called from the main (client) thread
public:
	virtual int VertexCount() const;
	virtual VertexFormat_t GetVertexFormat() const;
	virtual bool IsDynamic() const { return true; }
	// Buffer casting / raw locking is not meaningful on a queued mesh --
	// the real hardware buffers don't exist yet on the main thread.
	virtual void BeginCastBuffer( VertexFormat_t format ) { CannotSupport(); }
	virtual void EndCastBuffer( ) { CannotSupport(); }
	virtual int GetRoomRemaining() const { CannotSupport(); return 0; }
	virtual void * AccessRawHardwareDataStream( uint8 nRawStreamIndex, uint32 numBytes, uint32 uiFlags, void *pvContext ) { CannotSupport(); return NULL; }
	virtual bool Lock( int nVertexCount, bool bAppend, VertexDesc_t &desc ) { CannotSupport(); return false; }
	virtual void Unlock( int nVertexCount, VertexDesc_t &desc ) { CannotSupport(); }
	virtual void Spew( int nVertexCount, const VertexDesc_t &desc ) { }
	virtual void ValidateData( int nVertexCount, const VertexDesc_t & desc ) { }

	// Methods of IIndexBuffer, called from the main (client) thread
public:
	virtual int IndexCount() const;
	virtual MaterialIndexFormat_t IndexFormat() const { CannotSupport(); return MATERIAL_INDEX_FORMAT_16BIT; }
	virtual void BeginCastBuffer( MaterialIndexFormat_t format ) { CannotSupport(); }
	virtual bool Lock( int nMaxIndexCount, bool bAppend, IndexDesc_t& desc ) { CannotSupport(); return false; }
	virtual void Unlock( int nWrittenIndexCount, IndexDesc_t& desc ) { CannotSupport(); }
	virtual void ModifyBegin( bool bReadOnly, int nFirstIndex, int nIndexCount, IndexDesc_t& desc ) { CannotSupport(); }
	virtual void ModifyEnd( IndexDesc_t& desc ) { CannotSupport(); }
	virtual void Spew( int nIndexCount, const IndexDesc_t & desc ) { }
	virtual void ValidateData( int nIndexCount, const IndexDesc_t &desc ) { }
	virtual IMesh *GetMesh() { return this; }

	// Methods of IMesh, called from the main (client) thread
public:
	virtual void SetPrimitiveType( MaterialPrimitiveType_t type );
	virtual void Draw( int firstIndex = -1, int numIndices = 0 );
	virtual void SetColorMesh( IMesh *pColorMesh, int nVertexOffset );
	virtual void Draw( CPrimList *pLists, int nLists ) { CannotSupport(); }
	virtual void CopyToMeshBuilder( int iStartVert, int nVerts, int iStartIndex, int nIndices, int indexOffset, CMeshBuilder &builder ) { CannotSupport(); }
	virtual void Spew( int numVerts, int numIndices, const MeshDesc_t & desc ) { }
	virtual void ValidateData( int numVerts, int numIndices, const MeshDesc_t & desc ) { }
	virtual void LockMesh( int numVerts, int numIndices, MeshDesc_t& desc, MeshBuffersAllocationSettings_t *pSettings = 0 );
	virtual void ModifyBegin( int firstVertex, int numVerts, int firstIndex, int numIndices, MeshDesc_t& desc ) { CannotSupport(); }
	virtual void ModifyEnd( MeshDesc_t& desc ) { CannotSupport(); }
	virtual void UnlockMesh( int numVerts, int numIndices, MeshDesc_t& desc );
	virtual void ModifyBeginEx( bool bReadOnly, int firstVertex, int numVerts, int firstIndex, int numIndices, MeshDesc_t& desc ) { CannotSupport(); }
	virtual void SetFlexMesh( IMesh *pMesh, int nVertexOffset );
	virtual void DisableFlexMesh();
	virtual void MarkAsDrawn();
	virtual void DrawModulated( const Vector4D &vecDiffuseModulation, int firstIndex = -1, int numIndices = 0 );
	virtual unsigned int ComputeMemoryUsed() { return 0; }
	virtual ICachedPerFrameMeshData *GetCachedPerFrameMeshData();
	virtual void ReconstructFromCachedPerFrameMeshData( ICachedPerFrameMeshData *pData );

	// Other methods called from the main (client) thread
public:
	CMatQueuedMesh( CMatQueuedRenderContext *pOwner, IMatRenderContextInternal *pHardwareContext, bool bFlexMesh );
	byte *GetVertexData() { return m_pVertexData; }
	uint16 *GetIndexData() { return m_pIndexData; }
	void InvalidateAuxMeshSet() { m_bCanSetAuxMeshes = false; }
	int GetVertexSize() { return m_VertexSize; }
	bool OnGetDynamicMesh( VertexFormat_t vertexFormat, unsigned flags, IMesh* pVertexOverride, IMesh* pIndexOverride, IMaterialInternal *pMaterial, int nHWSkinBoneCount );
	void QueueBuild( );
	void FreeBuffers();

	// These methods are called from the material system thread
	// Using the prefix MST_ to indicate this.
public:
	// Snapshot of everything MST_BuildDynamicBuffers() needs; built on the
	// main thread (QueueBuild) and consumed on the material system thread.
	struct MST_MeshInfo_t
	{
		IMaterial *m_pMaterial;
		VertexFormat_t m_VertexFormat;
		uint8 m_nFlags;
		bool m_bExternalVB : 1;
		bool m_bExternalIB : 1;
		IMesh* m_pVertexOverride;
		IMesh* m_pIndexOverride;
		uint8 *m_pVertexData;
		int m_nVertexCount;
		int m_nVertexSizeInBytes;
		uint16 *m_pIndexData;
		int m_nIndexCount;
		MaterialPrimitiveType_t m_nPrimitiveType;
	};

	// Arguments of a queued Draw() call
	struct MST_DrawInfo_t
	{
		MaterialPrimitiveType_t m_Type;
		int m_nFirstIndex;
		int m_nIndexCount;
	};

	// Arguments of a queued DrawModulated() call
	struct MST_DrawModulatedInfo_t
	{
		MaterialPrimitiveType_t m_Type;
		Vector4D m_vecDiffuseModulation;
		int m_nFirstIndex;
		int m_nIndexCount;
	};

	IMesh *MST_DetachActualMesh() { IMesh *p = m_pMSTActualMesh; m_pMSTActualMesh = NULL; return p; }
	IMesh *MST_GetActualMesh() { return m_pMSTActualMesh; }
	IMesh *MST_SetupExternalMesh( const MST_MeshInfo_t &info );
	IMesh *MST_SetupDynamicMesh( const MST_MeshInfo_t &info, IMesh *pExternalMesh );
	int MST_GetActualVertexOffsetInBytes() const { return m_nMSTActualVertexOffsetInBytes; }
	void MST_CopyDynamicVB( const MeshDesc_t &desc, const uint8 *pVertexData, size_t nSizeInBytes );
	void MST_CopyDynamicIB( const MeshDesc_t &desc, const uint16 *pIndexData, int nIndices );
	void MST_BuildDynamicBuffers( const MST_MeshInfo_t &info );
	void MST_Draw( const MST_DrawInfo_t &info );
	void MST_DrawModulated( const MST_DrawModulatedInfo_t &info );

	// Member variables accessible from the material thread
private:
	int m_nMSTActualVertexOffsetInBytes;	// byte offset of our verts within the locked dynamic VB
	IMesh *m_pMSTActualMesh;				// mesh produced by MST_BuildDynamicBuffers()

	// Member variables accessible from the main thread
private:
	CLateBoundPtr<IMesh> m_pLateBoundMesh;	// bound to &m_pMSTActualMesh; lets queued calls target the not-yet-built mesh
	CMatQueuedRenderContext *m_pOwner;		// owning context; supplies the vertex/index allocator
	CMatCallQueue *m_pCallQueue;			// marshals work to the material system thread
	IMatRenderContextInternal *m_pHardwareContext;	// real render context used on the material thread
	// The vertex format we're using...
	VertexFormat_t m_VertexFormat;
	byte *m_pVertexData;			// queue-local vertex storage (NULL when not locked)
	uint16 *m_pIndexData;			// queue-local index storage (NULL when not locked)
	int m_nVerts;
	int m_nIndices;
	unsigned short m_VertexSize;	// bytes per vertex of m_VertexFormat
	uint8 m_nFlags;					// MatQueuedMeshFlags_t bits
	bool m_bExternalVB;				// vertex allocation is externally-owned memory
	bool m_bExternalIB;				// index allocation is externally-owned memory
	bool m_bFlexMesh;				// this instance stands in for the flex mesh
	bool m_bCanSetAuxMeshes;		// set by QueueBuild(), cleared by InvalidateAuxMeshSet()
	MaterialPrimitiveType_t m_Type;
	IMesh *m_pVertexOverride;
	IMesh *m_pIndexOverride;
	static unsigned short gm_ScratchIndexBuffer;	// dummy index target for index-less locks
};
// Flag bits carried in CMatQueuedMesh::m_nFlags / MST_MeshInfo_t::m_nFlags
enum MatQueuedMeshFlags_t
{
	MQM_BUFFERED = ( 1 << 0 ),	// request a buffered dynamic mesh (see GetDynamicMeshEx call)
};
//-----------------------------------------------------------------------------
// Used for caching off the memory pointers, and vertex/index data info for a
// CMatQueuedMesh.
//-----------------------------------------------------------------------------
class CCachedPerFrameMeshData : public ICachedPerFrameMeshData
{
public:
	// Self-deleting release; called by whoever holds the cached data
	virtual void Free()
	{
		delete this;
	}

	// Snapshot filled in by CMatQueuedMesh::GetCachedPerFrameMeshData()
	CMatQueuedMesh::MST_MeshInfo_t m_meshInfo;

	CCachedPerFrameMeshData()
	{
		// Only the material pointer needs a deterministic initial value;
		// the producer fills in the rest of m_meshInfo.
		m_meshInfo.m_pMaterial = NULL;
	}

	~CCachedPerFrameMeshData()
	{
	}
};
//----------------------------------------------------------------------------
// Static members
//----------------------------------------------------------------------------
// Dummy index slot handed out by LockMesh() when no index data is needed
// (index-less locks / MATERIAL_POINTS); lets callers write through
// desc.m_pIndices without a real allocation.
unsigned short CMatQueuedMesh::gm_ScratchIndexBuffer;
  249. //----------------------------------------------------------------------------
  250. // Constructor
  251. //----------------------------------------------------------------------------
  252. CMatQueuedMesh::CMatQueuedMesh( CMatQueuedRenderContext *pOwner, IMatRenderContextInternal *pHardwareContext, bool bFlexMesh )
  253. : m_pLateBoundMesh( &m_pMSTActualMesh ),
  254. m_pOwner( pOwner ),
  255. m_pCallQueue( pOwner->GetCallQueueInternal() ),
  256. m_pHardwareContext( pHardwareContext ),
  257. m_pVertexData( NULL ),
  258. m_pIndexData( NULL ),
  259. m_nVerts( 0 ),
  260. m_nIndices( 0 ),
  261. m_VertexSize( 0 ),
  262. m_Type( MATERIAL_TRIANGLES ),
  263. m_pVertexOverride( NULL ),
  264. m_pIndexOverride ( NULL ),
  265. m_pMSTActualMesh( NULL ),
  266. m_nMSTActualVertexOffsetInBytes( 0 ),
  267. m_VertexFormat( 0 ),
  268. m_bFlexMesh( bFlexMesh ),
  269. m_bCanSetAuxMeshes( false ),
  270. m_nFlags( 0 ),
  271. m_bExternalIB( false ),
  272. m_bExternalVB( false )
  273. {
  274. }
//----------------------------------------------------------------------------
//
// Methods that run in the material system thread
//
//----------------------------------------------------------------------------

//----------------------------------------------------------------------------
// Gets the external mesh + points it at the already-allocated data
//----------------------------------------------------------------------------
IMesh *CMatQueuedMesh::MST_SetupExternalMesh( const MST_MeshInfo_t &info )
{
#ifndef MS_NO_DYNAMIC_BUFFER_COPY
	// External-buffer support is compiled out: all queued data is copied
	// into the dynamic mesh instead (see MST_SetupDynamicMesh).
	return NULL;
#else
	// If we don't have real external data on either VB or IB channel, no dynamic mesh
	if ( !info.m_bExternalVB && !info.m_bExternalIB )
		return NULL;

	// Ok, at least one of VB or IB (maybe both) are external data.
	// In this case, the other buffer may be an override, NULL, or dynamic data
	// We are always going to treat external data as an override of a dynamic mesh
	// because that way colormesh/flexmesh state tracking is simpler: it
	// always goes through the dynamic mesh
	ExternalMeshInfo_t extInfo;
	extInfo.m_pMaterial = info.m_pMaterial;
	extInfo.m_bFlexMesh = m_bFlexMesh;
	extInfo.m_VertexFormat = info.m_VertexFormat;
	extInfo.m_pVertexOverride = NULL;
	extInfo.m_pIndexOverride = NULL;
	IMesh *pExternalMesh = g_pShaderAPI->GetExternalMesh( extInfo );

	// Now make the external mesh point at the externally allocated data
	ExternalMeshData_t data;
	memset( &data, 0, sizeof(data) );
	if ( info.m_bExternalVB )
	{
		data.m_pVertexData = info.m_pVertexData;
		data.m_nVertexCount = info.m_nVertexCount;
		data.m_nVertexSizeInBytes = info.m_nVertexSizeInBytes;
	}
	if ( info.m_bExternalIB )
	{
		data.m_pIndexData = info.m_pIndexData;
		data.m_nIndexCount = info.m_nIndexCount;
	}
	g_pShaderAPI->SetExternalMeshData( pExternalMesh, data );
	return pExternalMesh;
#endif
}
//-----------------------------------------------------------------------------
// Copies queued vertex buffer data into the actual dynamic buffer
//-----------------------------------------------------------------------------
FORCEINLINE void CMatQueuedMesh::MST_CopyDynamicVB( const MeshDesc_t &desc, const uint8 *pVertexData, size_t nSizeInBytes )
{
	// The queued data is one interleaved image of the whole vertex, so the
	// copy destination is the lowest address among the active vertex elements.
	void *pDest;
	if ( desc.m_VertexSize_Position != 0 )
	{
		// Position present: its pointer is used as the base of the layout
		pDest = desc.m_pPosition;
	}
	else
	{
		// No position element: take the minimum pointer over every element
		// that is actually present (non-zero size) in this format.
		#define FindMin( desc, pCurrent, tag ) ( ( desc.m_VertexSize_##tag != 0 ) ? MIN( pCurrent, desc.m_p##tag ) : pCurrent )

		pDest = (void *)((byte *)~0);	// start at the highest possible address
		pDest = FindMin( desc, pDest, BoneWeight );
		pDest = FindMin( desc, pDest, BoneMatrixIndex );
		pDest = FindMin( desc, pDest, Normal );
		pDest = FindMin( desc, pDest, Color );
		pDest = FindMin( desc, pDest, Specular );
		pDest = FindMin( desc, pDest, TangentS );
		pDest = FindMin( desc, pDest, TangentT );
		pDest = FindMin( desc, pDest, Wrinkle );
		for ( int i = 0; i < VERTEX_MAX_TEXTURE_COORDINATES; i++ )
		{
			if ( desc.m_VertexSize_TexCoord[i] && desc.m_pTexCoord[i] < pDest )
			{
				pDest = desc.m_pTexCoord[i];
			}
		}

		#undef FindMin
	}

	Assert( pDest );
	if ( pDest )
	{
		FastCopy( (byte *)pDest, pVertexData, nSizeInBytes );
	}
}
//-----------------------------------------------------------------------------
// Copies queued index buffer data into the actual dynamic buffer
//-----------------------------------------------------------------------------
FORCEINLINE void CMatQueuedMesh::MST_CopyDynamicIB( const MeshDesc_t &desc, const uint16 *pIndexData, int nIndices )
{
	if ( !desc.m_nFirstVertex )
	{
		// No vertex offset to add: straight bulk copy of the 16-bit indices
		// AssertMsg(desc.m_pIndices & 0x03 == 0,"desc.m_pIndices is misaligned in CMatQueuedMesh::ExecuteDefferedBuild\n");
		FastCopy( (byte *)desc.m_pIndices, (const byte *)pIndexData, nIndices * sizeof(*pIndexData) );
		return;
	}

	// Indices must be rebased by m_nFirstVertex. Stage batches through a
	// 16-byte-aligned temp array so the writes to the destination buffer go
	// out through FastCopy in bursts.
	ALIGN16 uint16 tempIndices[16];

	int i = 0;
	// If the destination starts on a 2-byte (not 4-byte) boundary, write one
	// index directly to realign the rest of the copy.
	if ( (size_t)desc.m_pIndices % 4 == 2 )
	{
		desc.m_pIndices[i] = pIndexData[i] + desc.m_nFirstVertex;
		i++;
	}
	while ( i < nIndices )
	{
		int nToCopy = MIN( ARRAYSIZE(tempIndices), nIndices - i );
		for ( int j = 0; j < nToCopy; j++ )
		{
			tempIndices[j] = pIndexData[i+j] + desc.m_nFirstVertex;
		}
		FastCopy( (byte *)(desc.m_pIndices + i), (byte *)tempIndices, nToCopy * sizeof(uint16) );
		i += nToCopy;
	}
}
//-----------------------------------------------------------------------------
// Gets the actual dynamic buffer (if necessary), and copies queued data into it
//-----------------------------------------------------------------------------
IMesh *CMatQueuedMesh::MST_SetupDynamicMesh( const MST_MeshInfo_t &info, IMesh *pExternalMesh )
{
	IMesh *pVertexOverride = info.m_pVertexOverride;
	IMesh *pIndexOverride = info.m_pIndexOverride;
	int nVerticesToLock = info.m_nVertexCount;
	int nIndicesToLock = info.m_nIndexCount;

#ifdef MS_NO_DYNAMIC_BUFFER_COPY
	// We are always going to treat external data as an override of a dynamic mesh
	// because that way colormesh/flexmesh state tracking is simpler: it
	// always goes through the dynamic mesh
	if ( info.m_bExternalVB )
	{
		pVertexOverride = pExternalMesh;
		nVerticesToLock = 0;
	}
	if ( info.m_bExternalIB )
	{
		pIndexOverride = pExternalMesh;
		nIndicesToLock = 0;
	}
#endif

	// Gets the dynamic mesh
	IMesh *pDynamicMesh;
	if ( !m_bFlexMesh )
	{
		pDynamicMesh = m_pHardwareContext->GetDynamicMeshEx( info.m_VertexFormat, ( ( info.m_nFlags & MQM_BUFFERED ) != 0 ), pVertexOverride, pIndexOverride, info.m_pMaterial );
	}
	else
	{
		pDynamicMesh = m_pHardwareContext->GetFlexMesh();
	}

	// Copies the buffers into the actual dynamic mesh
	if ( !nVerticesToLock && !nIndicesToLock )
		return pDynamicMesh;

	// Primitive type doesn't get sent down until the draw call.
	// Because we lock based upon the primitive type (whether we're points or instanced quads), we
	// don't have a valid primitive type for our dynamic mesh, so set it here. This will get
	// redundantly set on the draw call.
	pDynamicMesh->SetPrimitiveType( info.m_nPrimitiveType );

	MeshDesc_t desc;
	pDynamicMesh->LockMesh( nVerticesToLock, nIndicesToLock, desc );
	// Remember where our verts landed within the shared dynamic VB
	m_nMSTActualVertexOffsetInBytes = desc.m_nFirstVertex * desc.m_ActualVertexSize;
	if ( nVerticesToLock && desc.m_ActualVertexSize ) // if !desc.m_ActualVertexSize, device lost
	{
		MST_CopyDynamicVB( desc, info.m_pVertexData, info.m_nVertexCount * info.m_nVertexSizeInBytes );
	}
	// Skip the copy when the queued "index data" is just the scratch slot
	if ( nIndicesToLock && ( info.m_pIndexData != &gm_ScratchIndexBuffer ) && desc.m_nIndexSize )
	{
		MST_CopyDynamicIB( desc, info.m_pIndexData, info.m_nIndexCount );
	}
	pDynamicMesh->UnlockMesh( nVerticesToLock, nIndicesToLock, desc );
	return pDynamicMesh;
}
//-----------------------------------------------------------------------------
// Main entry point for setting up the dynamic buffers on the material thread
//-----------------------------------------------------------------------------
void CMatQueuedMesh::MST_BuildDynamicBuffers( const MST_MeshInfo_t &info )
{
	// Think about the 4 cases each for VB and IB:
	// 1) External, 2) Dynamic, 3) Override, 4) Empty
	// We identify them this way:
	//    if info.m_pVertex/IndexOverride, it's an override
	//    else if info.m_nVertex/IndexCount == 0 it's empty
	//    else if info.m_bExternalIB/VB, it's external
	//    else it's dynamic
	// In general, any time we encounter an external buffer, we always have to make one.
	// Any time we encounter a dynamic buffer, we have to make one.
	// The strange cases are
	//    * one is ext, the other is dynamic.
	//      In that case, we make both and pass the external as an override to the dynamic.
	//    * Both are overrides, or one is an override and the other is empty
	//      In that case, we make only a dynamic and use the override(s)
	//    * Both are empty
	//      This is an illegal case.
	// We need data at least *somewhere* for this to make sense.
	Assert( info.m_pVertexOverride || info.m_nVertexCount || info.m_pIndexOverride || info.m_nIndexCount );

	// First, see if we need any external buffers. Then see if we need
	// dynamic buffers also
	IMesh *pExternalMesh = MST_SetupExternalMesh( info );
	m_pMSTActualMesh = MST_SetupDynamicMesh( info, pExternalMesh );

	// At this point, we're done with the data. We can free it.
	// Only queue-local allocations are freed here: external buffers and the
	// scratch index slot are not owned by this snapshot.
	if ( info.m_pVertexData && !info.m_bExternalVB )
	{
		m_pOwner->FreeVertices( info.m_pVertexData, info.m_nVertexCount, info.m_nVertexSizeInBytes );
	}
	if ( info.m_pIndexData && !info.m_bExternalIB && ( info.m_pIndexData != &gm_ScratchIndexBuffer ) )
	{
		m_pOwner->FreeIndices( (uint8*)info.m_pIndexData, info.m_nIndexCount, sizeof(uint16) );
	}

	// Release the material reference taken in QueueBuild()
	if ( info.m_pMaterial )
	{
		info.m_pMaterial->DecrementReferenceCount();
	}
}
  484. //-----------------------------------------------------------------------------
  485. // Main entry point for calling draw
  486. //-----------------------------------------------------------------------------
  487. void CMatQueuedMesh::MST_Draw( const MST_DrawInfo_t &info )
  488. {
  489. m_pMSTActualMesh->SetPrimitiveType( info.m_Type );
  490. m_pMSTActualMesh->Draw( info.m_nFirstIndex, info.m_nIndexCount );
  491. }
  492. //-----------------------------------------------------------------------------
  493. // Main entry point for calling draw modulated
  494. //-----------------------------------------------------------------------------
  495. void CMatQueuedMesh::MST_DrawModulated( const MST_DrawModulatedInfo_t &info )
  496. {
  497. m_pMSTActualMesh->SetPrimitiveType( info.m_Type );
  498. m_pMSTActualMesh->DrawModulated( info.m_vecDiffuseModulation, info.m_nFirstIndex, info.m_nIndexCount );
  499. }
//-----------------------------------------------------------------------------
//
// Methods called by the main (client) thread
//
//-----------------------------------------------------------------------------

//-----------------------------------------------------------------------------
// Called when the owning context hands this queued mesh out as the dynamic
// mesh. Establishes the vertex format (and from it the vertex size) that a
// subsequent LockMesh() will allocate for. Always returns true.
//-----------------------------------------------------------------------------
bool CMatQueuedMesh::OnGetDynamicMesh( VertexFormat_t vertexFormat, unsigned flags, IMesh* pVertexOverride, IMesh* pIndexOverride, IMaterialInternal *pMaterial, int nHWSkinBoneCount )
{
	// Release any buffers still held from a previous lock
	FreeBuffers();

	m_pVertexOverride = pVertexOverride;
	m_pIndexOverride = pIndexOverride;
	m_nFlags = flags;

	if ( !m_bFlexMesh )
	{
		if ( pVertexOverride )
		{
			// Vertex data comes from the override mesh; adopt its format
			m_VertexFormat = pVertexOverride->GetVertexFormat();
		}
		else
		{
			// Remove VERTEX_FORMAT_COMPRESSED from the material's format (dynamic meshes don't
			// support compression, and all materials should support uncompressed verts too)
			VertexFormat_t materialFormat = pMaterial->GetVertexFormat() & ~VERTEX_FORMAT_COMPRESSED;
			m_VertexFormat = ( vertexFormat != 0 ) ? vertexFormat : materialFormat;

			if ( vertexFormat != 0 )
			{
				// An explicit format may request more bone weights than the
				// caller's HW skin bone count; honor the larger of the two
				int nVertexFormatBoneWeights = NumBoneWeights( vertexFormat );
				if ( nHWSkinBoneCount < nVertexFormatBoneWeights )
				{
					nHWSkinBoneCount = nVertexFormatBoneWeights;
				}
			}

			// Force the requested number of bone weights
			m_VertexFormat &= ~VERTEX_BONE_WEIGHT_MASK;
			m_VertexFormat |= VERTEX_BONEWEIGHT( nHWSkinBoneCount );
			if ( nHWSkinBoneCount > 0 )
			{
				m_VertexFormat |= VERTEX_BONE_INDEX;
			}
		}
	}
	else
	{
		// Flex meshes use a fixed position/normal format (+wrinkle on
		// dxlevel >= 92 hardware)
		m_VertexFormat = VERTEX_POSITION | VERTEX_NORMAL | VERTEX_FORMAT_USE_EXACT_FORMAT;
		if ( g_pMaterialSystemHardwareConfig->GetDXSupportLevel() >= 92 )
		{
			m_VertexFormat |= VERTEX_WRINKLE;
		}
	}

	m_VertexSize = g_pShaderAPI->VertexFormatSize( m_VertexFormat );
	return true;
}
  551. int CMatQueuedMesh::VertexCount() const
  552. {
  553. return m_VertexSize ? m_nVerts : 0;
  554. }
//-----------------------------------------------------------------------------
// Number of indices currently locked in this queued mesh
//-----------------------------------------------------------------------------
int CMatQueuedMesh::IndexCount() const
{
	return m_nIndices;
}
//-----------------------------------------------------------------------------
// Records the primitive type for the eventual draw; nothing is queued here.
//-----------------------------------------------------------------------------
void CMatQueuedMesh::SetPrimitiveType( MaterialPrimitiveType_t type )
{
	// NOTE: Have to just hold onto the type here. We might not actually
	// have our meshes set up in the material system thread at this point
	// because we don't know if it's an external or a dynamic mesh
	// until unlock.
	m_Type = type;
//	m_pCallQueue->QueueCall( m_pLateBoundMesh, &IMesh::SetPrimitiveType, type );
}
//-----------------------------------------------------------------------------
// Allocates queue-local vertex/index storage and fills in a MeshDesc_t that
// points the caller's mesh builder at it. The real hardware buffers are not
// touched until the material thread runs MST_BuildDynamicBuffers().
//-----------------------------------------------------------------------------
void CMatQueuedMesh::LockMesh( int numVerts, int numIndices, MeshDesc_t& desc, MeshBuffersAllocationSettings_t *pSettings )
{
	// Overridden channels supply their own data; don't allocate for them
	if ( m_pVertexOverride )
	{
		numVerts = 0;
	}
	if ( m_pIndexOverride )
	{
		numIndices = 0;
	}

	m_nVerts = numVerts;
	m_nIndices = numIndices;

	if( numVerts > 0 )
	{
		Assert( m_VertexSize );
		Assert( !m_pVertexData );
		// m_bExternalVB reports whether the allocator handed back
		// externally-owned memory
		m_pVertexData = (byte *)m_pOwner->AllocVertices( numVerts, m_VertexSize, &m_bExternalVB );
		Assert( (uintp)m_pVertexData % 16 == 0 );	// FastCopy's SSE path needs 16-byte alignment

		// Compute the vertex index..
		desc.m_nFirstVertex = 0;
		static_cast< VertexDesc_t* >( &desc )->m_nOffset = 0;
		// Set up the mesh descriptor
		g_pShaderAPI->ComputeVertexDescription( m_pVertexData, m_VertexFormat, desc );
	}
	else
	{
		m_bExternalVB = false;
		desc.m_nFirstVertex = 0;
		static_cast< VertexDesc_t* >( &desc )->m_nOffset = 0;
		// Set up the mesh descriptor (empty vertex channel)
		g_pShaderAPI->ComputeVertexDescription( 0, 0, desc );
	}

	if ( m_Type != MATERIAL_POINTS && numIndices > 0 )
	{
		Assert( !m_pIndexData );
		m_pIndexData = (uint16*)m_pOwner->AllocIndices( numIndices, sizeof(uint16), &m_bExternalIB );
		desc.m_pIndices = m_pIndexData;
		desc.m_nIndexSize = 1;
		desc.m_nFirstIndex = 0;
		static_cast< IndexDesc_t* >( &desc )->m_nOffset = 0;
	}
	else
	{
		// Points don't use indices; give the caller a scratch slot so stray
		// index writes land somewhere harmless (m_nIndexSize = 0 disables them)
		m_bExternalIB = false;
		desc.m_pIndices = &gm_ScratchIndexBuffer;
		desc.m_nIndexSize = 0;
		desc.m_nFirstIndex = 0;
		static_cast< IndexDesc_t* >( &desc )->m_nOffset = 0;
	}
}
//-----------------------------------------------------------------------------
// Trims the queue-local allocations down to what was actually written, then
// queues the build of the real mesh on the material system thread.
//-----------------------------------------------------------------------------
void CMatQueuedMesh::UnlockMesh( int numVerts, int numIndices, MeshDesc_t& desc )
{
	// Shrink the allocations if fewer verts/indices were written than locked
	if ( m_pVertexData && numVerts < m_nVerts )
	{
		m_pVertexData = m_pOwner->ReallocVertices( m_pVertexData, m_nVerts, numVerts, m_VertexSize, m_bExternalVB );
		m_nVerts = numVerts;
	}
	if ( m_pIndexData && numIndices < m_nIndices )
	{
		m_pIndexData = (uint16*)m_pOwner->ReallocIndices( (byte*)m_pIndexData, m_nIndices, numIndices, sizeof(uint16), m_bExternalIB );
		m_nIndices = numIndices;
	}

	// Once we've unlocked the mesh, fire it off to the materialsystem thread.
	// The rules of mesh locking are that once Lock() is called, there's no
	// guarantee that any of the previous mesh still exists. However, since
	// flex meshes + dynamic meshes are separate concepts, the *flex* mesh can
	// remain if the dynamic mesh is locked, hence the queued mat render context
	// maintains two queued meshes, one for the flex, one for normal.
	QueueBuild( );
}
//-----------------------------------------------------------------------------
// Queues the buffers up to be built on the material system thread
//-----------------------------------------------------------------------------
void CMatQueuedMesh::QueueBuild( )
{
	// Nothing to build if there's no data and no override on either channel
	if ( !m_pVertexOverride && !m_nVerts && !m_pIndexOverride && !m_nIndices )
		return;

	// Snapshot everything MST_BuildDynamicBuffers() needs. The queued copy
	// owns the vertex/index allocations (and a material reference) until the
	// material thread consumes and frees them.
	MST_MeshInfo_t info;
	info.m_pMaterial = m_pOwner->GetCurrentMaterialInternal();
	info.m_nFlags = 0; //m_nFlags;
	info.m_pVertexData = m_pVertexData;
	info.m_nVertexCount = m_nVerts;
	info.m_VertexFormat = m_VertexFormat;
	info.m_nVertexSizeInBytes = m_VertexSize;
	info.m_pVertexOverride = m_pVertexOverride;
	info.m_bExternalVB = m_bExternalVB;
	info.m_pIndexData = m_pIndexData;
	info.m_nIndexCount = m_nIndices;
	info.m_bExternalIB = m_bExternalIB;
	info.m_pIndexOverride = m_pIndexOverride;
	info.m_nPrimitiveType = m_Type;
	Assert( info.m_pIndexData || ( m_nIndices == 0 ) );

	// Hold a material reference until MST_BuildDynamicBuffers() releases it
	if ( info.m_pMaterial )
	{
		info.m_pMaterial->IncrementReferenceCount();
	}
	m_pCallQueue->QueueCall( this, &CMatQueuedMesh::MST_BuildDynamicBuffers, info );

	// Aux (color/flex) meshes may be attached now that the build is queued
	m_bCanSetAuxMeshes = true;
}
//-----------------------------------------------------------------------------
// Associates flex/color meshes with the dynamic mesh.
//-----------------------------------------------------------------------------
void CMatQueuedMesh::SetColorMesh( IMesh *pColorMesh, int nVertexOffset )
{
	// This only works on the main dynamic mesh
	Assert( !m_bFlexMesh );

	// This cannot be called in between the call to GetDynamicMesh + UnlockMesh;
	// the late bound mesh is in an indeterminant state during that point
	Assert( m_bCanSetAuxMeshes );

	// Queued through the late-bound pointer so it targets whatever actual
	// mesh MST_BuildDynamicBuffers() produces.
	m_pCallQueue->QueueCall( m_pLateBoundMesh, &IMesh::SetColorMesh, pColorMesh, nVertexOffset );
}
//-----------------------------------------------------------------------------
// Queues attachment of a flex mesh to the (late-bound) dynamic mesh.
//-----------------------------------------------------------------------------
void CMatQueuedMesh::SetFlexMesh( IMesh *pMesh, int nVertexOffset )
{
	// This only works on the main dynamic mesh
	Assert( !m_bFlexMesh );

	// This cannot be called in between the call to GetDynamicMesh + UnlockMesh;
	// the late bound mesh is in an indeterminant state during that point
	Assert( m_bCanSetAuxMeshes );
	m_pCallQueue->QueueCall( m_pLateBoundMesh, &IMesh::SetFlexMesh, pMesh, nVertexOffset );
}
//-----------------------------------------------------------------------------
// Queues detachment of any flex mesh from the (late-bound) dynamic mesh.
//-----------------------------------------------------------------------------
void CMatQueuedMesh::DisableFlexMesh()
{
	// This only works on the main dynamic mesh (original comment said "color
	// mesh", but the assert matches SetColorMesh/SetFlexMesh above)
	Assert( !m_bFlexMesh );

	// This cannot be called in between the call to GetDynamicMesh + UnlockMesh;
	// the late bound mesh is in an indeterminant state during that point
	Assert( m_bCanSetAuxMeshes );
	m_pCallQueue->QueueCall( m_pLateBoundMesh, &IMesh::DisableFlexMesh );
}
  697. void CMatQueuedMesh::Draw( int firstIndex, int numIndices )
  698. {
  699. if ( ( !m_nVerts && !m_pVertexOverride ) && ( !m_nIndices && !m_pIndexOverride ) )
  700. {
  701. MarkAsDrawn();
  702. return;
  703. }
  704. if ( ( m_Type == MATERIAL_INSTANCED_QUADS ) || ( m_Type == MATERIAL_POINTS ) )
  705. {
  706. if ( !m_nVerts && !m_pVertexOverride )
  707. {
  708. MarkAsDrawn();
  709. return;
  710. }
  711. }
  712. MST_DrawInfo_t info;
  713. info.m_Type = m_Type;
  714. info.m_nFirstIndex = firstIndex;
  715. info.m_nIndexCount = numIndices;
  716. m_pCallQueue->QueueCall( this, &CMatQueuedMesh::MST_Draw, info );
  717. }
  718. void CMatQueuedMesh::DrawModulated( const Vector4D &vecDiffuseModulation, int firstIndex, int numIndices )
  719. {
  720. if ( ( !m_nVerts && !m_pVertexOverride ) && ( !m_nIndices && !m_pIndexOverride ) )
  721. {
  722. MarkAsDrawn();
  723. return;
  724. }
  725. if ( ( m_Type == MATERIAL_INSTANCED_QUADS ) || ( m_Type == MATERIAL_POINTS ) )
  726. {
  727. if ( !m_nVerts && !m_pVertexOverride )
  728. {
  729. MarkAsDrawn();
  730. return;
  731. }
  732. }
  733. MST_DrawModulatedInfo_t info;
  734. info.m_Type = m_Type;
  735. info.m_vecDiffuseModulation = vecDiffuseModulation;
  736. info.m_nFirstIndex = firstIndex;
  737. info.m_nIndexCount = numIndices;
  738. m_pCallQueue->QueueCall( this, &CMatQueuedMesh::MST_DrawModulated, info );
  739. }
//-----------------------------------------------------------------------------
// Snapshots the current queued-mesh state (vertex/index pointers, counts,
// format, current material) into a heap-allocated cache object so the same
// mesh data can be re-submitted later via ReconstructFromCachedPerFrameMeshData.
// Returns NULL when there are no vertices to cache.
// NOTE(review): caller appears to own the returned object — confirm who frees it.
//-----------------------------------------------------------------------------
ICachedPerFrameMeshData *CMatQueuedMesh::GetCachedPerFrameMeshData()
{
	// Short-circuit if we have no vertices
	if ( m_nVerts == 0 )
		return NULL;
	CCachedPerFrameMeshData *pNewPerFrameData = new CCachedPerFrameMeshData();
	MST_MeshInfo_t &info = pNewPerFrameData->m_meshInfo;
	// Material currently bound on this (queued) context
	info.m_pMaterial = m_pOwner->GetCurrentMaterialInternal();
	info.m_nFlags = 0;
	// Vertex side of the snapshot
	info.m_pVertexData = m_pVertexData;
	info.m_nVertexCount = m_nVerts;
	info.m_VertexFormat = m_VertexFormat;
	info.m_nVertexSizeInBytes = m_VertexSize;
	info.m_pVertexOverride = m_pVertexOverride;
	info.m_bExternalVB = m_bExternalVB;
	// Index side of the snapshot
	info.m_pIndexData = m_pIndexData;
	info.m_nIndexCount = m_nIndices;
	info.m_bExternalIB = m_bExternalIB;
	info.m_pIndexOverride = m_pIndexOverride;
	info.m_nPrimitiveType = m_Type;
	Assert( info.m_pIndexData || ( m_nIndices == 0 ) );
	return pNewPerFrameData;
}
//-----------------------------------------------------------------------------
// Restores queued-mesh state from a snapshot made by GetCachedPerFrameMeshData
// and, if there is any renderable data, queues a rebuild of the dynamic
// hardware buffers on the render thread.
//-----------------------------------------------------------------------------
void CMatQueuedMesh::ReconstructFromCachedPerFrameMeshData( ICachedPerFrameMeshData *pData )
{
	CCachedPerFrameMeshData *pCachedData = (CCachedPerFrameMeshData*)pData;
	// Copy the snapshot back into our members
	m_pVertexData = pCachedData->m_meshInfo.m_pVertexData;
	m_nVerts = pCachedData->m_meshInfo.m_nVertexCount;
	m_VertexFormat = pCachedData->m_meshInfo.m_VertexFormat;
	m_pVertexOverride = pCachedData->m_meshInfo.m_pVertexOverride;
	m_bExternalVB = pCachedData->m_meshInfo.m_bExternalVB;
	m_pIndexData = pCachedData->m_meshInfo.m_pIndexData;
	m_nIndices = pCachedData->m_meshInfo.m_nIndexCount;
	m_bExternalIB = pCachedData->m_meshInfo.m_bExternalIB;
	m_pIndexOverride = pCachedData->m_meshInfo.m_pIndexOverride;
	m_Type = pCachedData->m_meshInfo.m_nPrimitiveType;
	// Only queue a buffer rebuild if the snapshot actually contains data
	if( pCachedData->m_meshInfo.m_pVertexOverride || pCachedData->m_meshInfo.m_nVertexCount || pCachedData->m_meshInfo.m_pIndexOverride || pCachedData->m_meshInfo.m_nIndexCount )
	{
		// Hold a ref on the material until the render thread consumes the info
		// (presumably released inside MST_BuildDynamicBuffers — confirm)
		if ( pCachedData->m_meshInfo.m_pMaterial )
		{
			pCachedData->m_meshInfo.m_pMaterial->IncrementReferenceCount();
		}
		m_pCallQueue->QueueCall( this, &CMatQueuedMesh::MST_BuildDynamicBuffers, pCachedData->m_meshInfo );
		// Aux meshes (color/flex) may now be attached to the late-bound mesh
		m_bCanSetAuxMeshes = true;
	}
}
//-----------------------------------------------------------------------------
// Marks the mesh as consumed: forwards MarkAsDrawn to the late-bound hardware
// mesh (only if dynamic buffers were actually built) and releases this mesh's
// queued CPU-side vertex/index storage.
//-----------------------------------------------------------------------------
void CMatQueuedMesh::MarkAsDrawn()
{
	if ( m_bCanSetAuxMeshes )
	{
		m_pCallQueue->QueueCall( m_pLateBoundMesh, &IMesh::MarkAsDrawn );
	}
	FreeBuffers();
}
//-----------------------------------------------------------------------------
// Returns the queued CPU-side vertex/index memory to the owning context and
// resets all per-draw mesh state to its empty defaults.
//-----------------------------------------------------------------------------
void CMatQueuedMesh::FreeBuffers()
{
	// The shared scratch index buffer is a static fallback, never freed here
	if ( m_pIndexData && ( m_pIndexData != &gm_ScratchIndexBuffer ) )
	{
		m_pOwner->FreeIndices( (byte*)m_pIndexData, m_nIndices, sizeof(uint16) );
	}
	m_nIndices = 0;
	m_pIndexData = NULL;
	if ( m_pVertexData )
	{
		// Must free with the same count/stride the block was allocated with
		m_pOwner->FreeVertices( m_pVertexData, m_nVerts, m_VertexSize );
	}
	m_nVerts = 0;
	m_VertexFormat = 0;
	m_VertexSize = 0;
	m_pVertexData = NULL;
	// Aux meshes may no longer be attached until the next successful build
	m_bCanSetAuxMeshes = false;
	m_nFlags = 0;
	m_bExternalIB = false;
	m_bExternalVB = false;
}
//-----------------------------------------------------------------------------
// Returns the vertex format of the currently queued mesh data.
//-----------------------------------------------------------------------------
VertexFormat_t CMatQueuedMesh::GetVertexFormat() const
{
	return m_VertexFormat;
}
  819. //-----------------------------------------------------------------------------
  820. // Index buffer
  821. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Queued-mode stand-in for the hardware dynamic index buffer. Lock/Unlock on
// the main thread write into CPU memory owned by the queued render context;
// Unlock queues an RT_CopyIndexData call that copies the data into the real
// dynamic index buffer on the render thread.
//-----------------------------------------------------------------------------
class CMatQueuedIndexBuffer : public IIndexBuffer
{
	// Inherited from IIndexBuffer
public:
	virtual int IndexCount() const;
	virtual MaterialIndexFormat_t IndexFormat() const;
	virtual bool IsDynamic() const;
	virtual void BeginCastBuffer( MaterialIndexFormat_t format );
	virtual void EndCastBuffer();
	virtual int GetRoomRemaining() const;
	virtual bool Lock( int nMaxIndexCount, bool bAppend, IndexDesc_t &desc );
	virtual void Unlock( int nWrittenIndexCount, IndexDesc_t &desc );
	virtual void ModifyBegin( bool bReadOnly, int nFirstIndex, int nIndexCount, IndexDesc_t& desc );
	virtual void ModifyEnd( IndexDesc_t& desc );
	virtual void Spew( int nIndexCount, const IndexDesc_t &desc );
	virtual void ValidateData( int nIndexCount, const IndexDesc_t &desc );
	virtual IMesh *GetMesh() { return NULL; }

	// Other public methods, accessible from the main thread
public:
	CMatQueuedIndexBuffer( CMatQueuedRenderContext *pOwner, IMatRenderContextInternal *pHardwareContext );
	virtual ~CMatQueuedIndexBuffer();
	const void *GetIndexData() const;

	// These methods are expected to only be accessed from the render thread
public:
	int RT_GetIndexStart() const;
	IIndexBuffer* RT_GetDynamicIndexBuffer();

private:
	// Queues a free of the current CPU-side index data and clears our state
	void ReleaseBuffer();
	// Returns CPU-side index memory to the owning context
	void FreeIndexData( int nIndexCount, MaterialIndexFormat_t fmt, void *pIndexData );

	// These methods run in the render thread
	void RT_CopyIndexData( int nIndexCount, MaterialIndexFormat_t fmt, void *pIndexData, bool bIsExternal );
	void RT_FreeIndexData( int nIndexCount, MaterialIndexFormat_t fmt, void *pIndexData );

	CMatQueuedRenderContext *m_pOwner;			// context that owns the CPU-side index memory
	CMatCallQueue *m_pCallQueue;				// queue the RT_* calls are pushed onto
	IMatRenderContextInternal *m_pHardwareContext;	// real context used on the render thread
	void *m_pIndexData;							// CPU-side copy of locked index data
	int m_nIndices;								// count of indices in m_pIndexData
	bool m_bExternalIB;							// data lives in externally-managed memory
	MaterialIndexFormat_t m_nIndexFormat;		// format set via BeginCastBuffer

	// This must only be accessed from the render thread
	int m_nRTStartIndex;						// first index within the real dynamic IB (-1 if invalid)
	IIndexBuffer *m_pRTDynamicIndexBuffer;		// real dynamic IB the data was copied into
};
  865. //----------------------------------------------------------------------------
  866. // Constructor, destructor
  867. //----------------------------------------------------------------------------
  868. CMatQueuedIndexBuffer::CMatQueuedIndexBuffer( CMatQueuedRenderContext *pOwner, IMatRenderContextInternal *pHardwareContext ) :
  869. m_pOwner( pOwner ),
  870. m_pCallQueue( pOwner->GetCallQueueInternal() ),
  871. m_pHardwareContext( pHardwareContext ),
  872. m_pIndexData( NULL ),
  873. m_nIndices( 0 ),
  874. m_nIndexFormat( MATERIAL_INDEX_FORMAT_UNKNOWN ),
  875. m_nRTStartIndex( -1 ),
  876. m_pRTDynamicIndexBuffer( 0 ),
  877. m_bExternalIB( false )
  878. {
  879. }
//-----------------------------------------------------------------------------
// Destructor. Returns any still-held CPU-side index data to the owner
// directly (not via the call queue, which may no longer run for us).
//-----------------------------------------------------------------------------
CMatQueuedIndexBuffer::~CMatQueuedIndexBuffer()
{
	if ( m_pIndexData )
	{
		FreeIndexData( m_nIndices, m_nIndexFormat, m_pIndexData );
		m_pIndexData = NULL;
		m_nIndices = 0;
	}
}
//-----------------------------------------------------------------------------
// Returns the CPU-side copy of the locked index data (NULL if none).
//-----------------------------------------------------------------------------
const void *CMatQueuedIndexBuffer::GetIndexData() const
{
	return m_pIndexData;
}
//-----------------------------------------------------------------------------
// Number of indices currently held in the CPU-side buffer.
//-----------------------------------------------------------------------------
int CMatQueuedIndexBuffer::IndexCount() const
{
	return m_nIndices;
}
//-----------------------------------------------------------------------------
// Index format established by the last BeginCastBuffer call.
//-----------------------------------------------------------------------------
MaterialIndexFormat_t CMatQueuedIndexBuffer::IndexFormat() const
{
	return m_nIndexFormat;
}
//-----------------------------------------------------------------------------
// Always dynamic: this queued wrapper only ever fronts the dynamic IB.
//-----------------------------------------------------------------------------
bool CMatQueuedIndexBuffer::IsDynamic() const
{
	// Queued index buffers are only interesting for dynamic index buffers
	// static ones need not use this
	return true;
}
//-----------------------------------------------------------------------------
// Re-types the buffer to the given index format, discarding held data.
//-----------------------------------------------------------------------------
void CMatQueuedIndexBuffer::BeginCastBuffer( MaterialIndexFormat_t format )
{
	// Recasting this buffer has the effect of causing us to not need this index data any more
	ReleaseBuffer();
	m_nIndexFormat = format;
}
//-----------------------------------------------------------------------------
// Ends a cast: discards held data and returns the format to UNKNOWN.
//-----------------------------------------------------------------------------
void CMatQueuedIndexBuffer::EndCastBuffer()
{
	ReleaseBuffer();
	m_nIndexFormat = MATERIAL_INDEX_FORMAT_UNKNOWN;
}
// Returns the number of indices that can still be written into the buffer,
// i.e. the owner's per-batch maximum minus what is already held.
int CMatQueuedIndexBuffer::GetRoomRemaining() const
{
	return m_pOwner->GetMaxIndicesToRender() - m_nIndices;
}
//-----------------------------------------------------------------------------
// Render thread only: first index of our data within the real dynamic IB
// (-1 if the copy failed or hasn't happened).
//-----------------------------------------------------------------------------
int CMatQueuedIndexBuffer::RT_GetIndexStart() const
{
	return m_nRTStartIndex;
}
//-----------------------------------------------------------------------------
// Render thread only: the real dynamic IB our data was copied into
// (NULL if the copy failed or hasn't happened).
//-----------------------------------------------------------------------------
IIndexBuffer* CMatQueuedIndexBuffer::RT_GetDynamicIndexBuffer()
{
	return m_pRTDynamicIndexBuffer;
}
  931. void CMatQueuedIndexBuffer::FreeIndexData( int nIndexCount, MaterialIndexFormat_t fmt, void *pIndexData )
  932. {
  933. if ( pIndexData )
  934. {
  935. Assert( fmt != MATERIAL_INDEX_FORMAT_UNKNOWN );
  936. int nIndexSize = ( fmt == MATERIAL_INDEX_FORMAT_16BIT ) ? sizeof(uint16) : sizeof(uint32);
  937. m_pOwner->FreeIndices( (byte*)pIndexData, nIndexCount, nIndexSize );
  938. }
  939. }
//-----------------------------------------------------------------------------
// Render thread: copies the queued CPU-side index data into the real dynamic
// index buffer and records where it landed (m_pRTDynamicIndexBuffer /
// m_nRTStartIndex) so draws can reference it. On lock failure both are
// invalidated (NULL / -1).
//-----------------------------------------------------------------------------
void CMatQueuedIndexBuffer::RT_CopyIndexData( int nIndexCount, MaterialIndexFormat_t fmt, void *pIndexData, bool bIsExternal )
{
#ifdef MS_NO_DYNAMIC_BUFFER_COPY
	// Console path: external data is already GPU-visible, so wrap it directly
	// instead of copying into the dynamic IB.
	if ( bIsExternal )
	{
		Assert( !m_pRTDynamicIndexBuffer );
		Assert( fmt == MATERIAL_INDEX_FORMAT_16BIT || fmt == MATERIAL_INDEX_FORMAT_UNKNOWN );
		m_pRTDynamicIndexBuffer = g_pShaderAPI->GetExternalIndexBuffer( nIndexCount, (uint16*)pIndexData );
		m_nRTStartIndex = 0;
		return;
	}
#endif
	IndexDesc_t desc;
	Assert( !m_pRTDynamicIndexBuffer );
	m_pRTDynamicIndexBuffer = m_pHardwareContext->GetDynamicIndexBuffer();
	if ( !m_pRTDynamicIndexBuffer->Lock( nIndexCount, false, desc ) )
	{
		// Lock failed; leave state invalid so dependent draws can detect it
		m_pRTDynamicIndexBuffer = NULL;
		m_nRTStartIndex = -1;
		return;
	}
	// desc.m_nIndexSize is in uint16 units; convert to bytes to turn the
	// byte offset into an index offset.
	int nIndexSize = sizeof( uint16 ) * desc.m_nIndexSize;
	m_nRTStartIndex = desc.m_nOffset / nIndexSize;
	// desc.m_nIndexSize can be 0 for a no-op lock; skip the copy then
	if ( pIndexData && desc.m_nIndexSize )
	{
		FastCopy( (byte *)desc.m_pIndices, (byte *)pIndexData, nIndexCount * nIndexSize );
	}
	m_pRTDynamicIndexBuffer->Unlock( nIndexCount, desc );
}
//-----------------------------------------------------------------------------
// Render thread: frees the CPU-side index data (queued by ReleaseBuffer) and
// invalidates the cached dynamic-IB location.
//-----------------------------------------------------------------------------
void CMatQueuedIndexBuffer::RT_FreeIndexData( int nIndexCount, MaterialIndexFormat_t fmt, void *pIndexData )
{
	FreeIndexData( nIndexCount, fmt, pIndexData );
	m_pRTDynamicIndexBuffer = NULL;
	m_nRTStartIndex = -1;
}
//-----------------------------------------------------------------------------
// Discards the held CPU-side data: the actual free must happen on the render
// thread (after any pending RT_CopyIndexData consumed it), so it is queued.
//-----------------------------------------------------------------------------
void CMatQueuedIndexBuffer::ReleaseBuffer()
{
	if ( m_pIndexData )
	{
		m_pCallQueue->QueueCall( this, &CMatQueuedIndexBuffer::RT_FreeIndexData, m_nIndices, m_nIndexFormat, m_pIndexData );
		m_pIndexData = 0;
		m_nIndices = 0;
		m_bExternalIB = false;
	}
}
//-----------------------------------------------------------------------------
// Lock: allocates CPU-side memory for up to nMaxIndexCount indices from the
// owning context and exposes it through desc. Append mode is not supported
// (returns false). Returns false too if the allocation failed.
//-----------------------------------------------------------------------------
bool CMatQueuedIndexBuffer::Lock( int nMaxIndexCount, bool bAppend, IndexDesc_t &desc )
{
	// Locking this buffer has the effect of causing us to not need this index data any more
	ReleaseBuffer();
	// Append mode is not supported. We need to kill it altogether.
	if ( bAppend )
		return false;
	m_nIndices = nMaxIndexCount;
	// NOTE(review): UNKNOWN format falls into the 32-bit branch here — confirm
	// callers always BeginCastBuffer() before locking.
	int nIndexSize = ( m_nIndexFormat == MATERIAL_INDEX_FORMAT_16BIT ) ? sizeof(uint16) : sizeof(uint32);
	m_pIndexData = m_pOwner->AllocIndices( nMaxIndexCount, nIndexSize, &m_bExternalIB );
	desc.m_pIndices = (uint16*)m_pIndexData;
	// m_nIndexSize is expressed in uint16 units (1 for 16-bit, 2 for 32-bit)
	desc.m_nIndexSize = nIndexSize >> 1;
	desc.m_nFirstIndex = 0;
	desc.m_nOffset = 0;
	return ( m_pIndexData != 0 );
}
//-----------------------------------------------------------------------------
// Unlock: shrinks the CPU-side allocation to what was actually written, then
// queues the copy of that data into the real dynamic index buffer on the
// render thread (which caches the base index it landed at).
//-----------------------------------------------------------------------------
void CMatQueuedIndexBuffer::Unlock( int nWrittenIndexCount, IndexDesc_t &desc )
{
	// Give back the unused tail of the allocation
	if ( m_pIndexData && nWrittenIndexCount < m_nIndices )
	{
		m_pIndexData = m_pOwner->ReallocIndices( (byte*)m_pIndexData, m_nIndices, nWrittenIndexCount, desc.m_nIndexSize * sizeof(uint16), m_bExternalIB );
	}
	m_nIndices = nWrittenIndexCount;
	// Unlocking this buffer has the effect of queuing a call to
	// write these indices into the dynamic indexbuffer, caching off the
	// base index it was written into
	if ( m_pIndexData )
	{
		m_pCallQueue->QueueCall( this, &CMatQueuedIndexBuffer::RT_CopyIndexData, m_nIndices, m_nIndexFormat, m_pIndexData, m_bExternalIB );
	}
}
//-----------------------------------------------------------------------------
// In-place modification is not supported in queued mode.
//-----------------------------------------------------------------------------
void CMatQueuedIndexBuffer::ModifyBegin( bool bReadOnly, int nFirstIndex, int nIndexCount, IndexDesc_t& desc )
{
	CannotSupport();
}
//-----------------------------------------------------------------------------
// In-place modification is not supported in queued mode.
//-----------------------------------------------------------------------------
void CMatQueuedIndexBuffer::ModifyEnd( IndexDesc_t& desc )
{
	CannotSupport();
}
// Debug spew — intentionally a no-op for the queued wrapper.
void CMatQueuedIndexBuffer::Spew( int nIndexCount, const IndexDesc_t &desc )
{
}
// Data validation — intentionally a no-op for the queued wrapper.
void CMatQueuedIndexBuffer::ValidateData( int nIndexCount, const IndexDesc_t &desc )
{
}
  1030. //-----------------------------------------------------------------------------
  1031. //
  1032. // MatQueuedRenderContext starts here
  1033. //
  1034. //-----------------------------------------------------------------------------
#ifdef MS_NO_DYNAMIC_BUFFER_COPY
// Per-frame scratch stacks for queued dynamic vertex/index data, cycled in
// Init/CycleDynamicBuffers so each in-flight frame gets its own stack.
CMemoryStack CMatQueuedRenderContext::s_Vertices[RENDER_CONTEXT_STACKS];
CMemoryStack CMatQueuedRenderContext::s_Indices[RENDER_CONTEXT_STACKS];
#ifdef _PS3
// RSX-visible local memory block backing the stacks above
// (allocated in AllocateScratchRSXMemory).
CPs3gcmLocalMemoryBlock s_RSXMemory;
#endif
int CMatQueuedRenderContext::s_nCurStack = 0;
bool CMatQueuedRenderContext::s_bInitializedStacks = false;
// Sizing/alignment for the scratch stacks above (bytes)
#define DYNAMIC_VERTEX_BUFFER_BLOCK_SIZE 128 * 1024
#define DYNAMIC_INDEX_BUFFER_BLOCK_SIZE 16 * 1024
#define DYNAMIC_VERTEX_BUFFER_TOTAL_SIZE (1536+128) * 1024
#define DYNAMIC_INDEX_BUFFER_TOTAL_SIZE (128+128) * 1024
#define DYNAMIC_VERTEX_BUFFER_ALIGNMENT 16
#define DYNAMIC_INDEX_BUFFER_ALIGNMENT 4
#endif
  1050. void AllocateScratchRSXMemory()
  1051. {
  1052. #if _PS3
  1053. s_RSXMemory.Alloc( kAllocPs3GcmDynamicBufferPool,
  1054. RENDER_CONTEXT_STACKS * ( ( DYNAMIC_VERTEX_BUFFER_TOTAL_SIZE + DYNAMIC_VERTEX_BUFFER_ALIGNMENT ) +
  1055. ( DYNAMIC_INDEX_BUFFER_TOTAL_SIZE + DYNAMIC_INDEX_BUFFER_ALIGNMENT ) ) );
  1056. #endif
  1057. }
  1058. void CMatQueuedRenderContext::Init( CMaterialSystem *pMaterialSystem, CMatRenderContextBase *pHardwareContext )
  1059. {
  1060. BaseClass::Init();
  1061. m_pMaterialSystem = pMaterialSystem;
  1062. m_pHardwareContext = pHardwareContext;
  1063. m_pQueuedMesh = new CMatQueuedMesh( this, pHardwareContext, false );
  1064. m_pQueuedFlexMesh = new CMatQueuedMesh( this, pHardwareContext, true );
  1065. m_pQueuedIndexBuffer = new CMatQueuedIndexBuffer( this, pHardwareContext );
  1066. MEM_ALLOC_CREDIT();
  1067. #ifdef MS_NO_DYNAMIC_BUFFER_COPY
  1068. if ( !s_bInitializedStacks )
  1069. {
  1070. #if _PS3
  1071. uint8 *pMem = (uint8*)s_RSXMemory.DataInLocalMemory();
  1072. #endif
  1073. // NOTE: Allocation size must be at least double DYNAMIC_VERTEX_BUFFER_BLOCK_SIZE
  1074. // or DYNAMIC_INDEX_BUFFER_BLOCK_SIZE to avoid massive overflow
  1075. for ( int i = 0; i < RENDER_CONTEXT_STACKS; ++i )
  1076. {
  1077. CFmtStr verticesName( "CMatQueuedRenderContext::s_Vertices[%d]", i );
  1078. CFmtStr indicesName( "CMatQueuedRenderContext::s_Vertices[%d]", i );
  1079. #ifdef _X360
  1080. s_Vertices[i].InitPhysical( (const char *)verticesName, DYNAMIC_VERTEX_BUFFER_TOTAL_SIZE, 0, DYNAMIC_VERTEX_BUFFER_ALIGNMENT, PAGE_WRITECOMBINE );
  1081. s_Indices[i].InitPhysical( (const char *)indicesName, DYNAMIC_INDEX_BUFFER_TOTAL_SIZE, 0, DYNAMIC_INDEX_BUFFER_ALIGNMENT, PAGE_WRITECOMBINE );
  1082. #elif defined( _PS3 )
  1083. s_Vertices[i].InitPhysical( (const char *)verticesName, DYNAMIC_VERTEX_BUFFER_TOTAL_SIZE, 0, DYNAMIC_VERTEX_BUFFER_ALIGNMENT, (uint32)pMem );
  1084. pMem += DYNAMIC_VERTEX_BUFFER_TOTAL_SIZE + DYNAMIC_VERTEX_BUFFER_ALIGNMENT;
  1085. s_Indices[i].InitPhysical( (const char *)indicesName, DYNAMIC_INDEX_BUFFER_TOTAL_SIZE, 0, DYNAMIC_INDEX_BUFFER_ALIGNMENT, (uint32)pMem );
  1086. pMem += DYNAMIC_INDEX_BUFFER_TOTAL_SIZE + DYNAMIC_INDEX_BUFFER_ALIGNMENT;
  1087. #else
  1088. #pragma error
  1089. #endif
  1090. }
  1091. s_bInitializedStacks = true;
  1092. }
  1093. s_nCurStack++;
  1094. if ( s_nCurStack > 2 )
  1095. {
  1096. s_nCurStack = 0;
  1097. }
  1098. m_pVertices = &s_Vertices[ s_nCurStack ];
  1099. m_pIndices = &s_Indices[ s_nCurStack ];
  1100. #endif
  1101. unsigned int vertSize = 16 * 1024 * 1024;
  1102. unsigned int indSize = 16 * 1024 * 1024;
  1103. #ifdef DX_TO_GL_ABSTRACTION
  1104. vertSize = 12 * 1024 * 1024;
  1105. indSize = 4 * 1024 * 1024;
  1106. #endif
  1107. m_Vertices.Init( "CMatQueuedRenderContext::m_Vertices", vertSize, 128 * 1024 );
  1108. m_Indices.Init( "CMatQueuedRenderContext::m_Indices", indSize, 128 * 1024 );
  1109. }
  1110. //-----------------------------------------------------------------------------
  1111. //
  1112. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Tears down the queued context: destroys the queued mesh/index-buffer
// proxies and releases the queued memory stacks. Safe to call more than once
// (early-out when the hardware context is already cleared).
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::Shutdown()
{
	if ( !m_pHardwareContext )
		return;
	// A material should never still be bound at shutdown
	Assert( !m_pCurrentMaterial );
	delete m_pQueuedMesh;
	delete m_pQueuedFlexMesh;
	delete m_pQueuedIndexBuffer;
	m_pMaterialSystem = NULL;
	m_pHardwareContext = NULL;
	m_pQueuedMesh = NULL;
	m_pQueuedFlexMesh = NULL;
	m_pQueuedIndexBuffer = NULL;
	m_Vertices.Term();
	m_Indices.Term();
	BaseClass::Shutdown();
	// Everything queued must have been executed or flushed by now
	Assert(m_queue.Count() == 0);
}
  1131. //-----------------------------------------------------------------------------
  1132. //
  1133. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Releases all memory held by the queued vertex/index stacks (in addition to
// whatever the base class compacts).
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::CompactMemory()
{
	BaseClass::CompactMemory();
	m_Vertices.FreeAll();
	m_Indices.FreeAll();
}
  1140. //-----------------------------------------------------------------------------
  1141. //
  1142. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Console path: rotates to the next shared scratch stack, flushes the GPU
// cache over the previous frame's data, and resets the new stack for reuse.
// No-op on platforms without MS_NO_DYNAMIC_BUFFER_COPY.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::CycleDynamicBuffers( )
{
#ifdef MS_NO_DYNAMIC_BUFFER_COPY
	// NOTE(review): wrap limit hard-coded to 2 — assumes RENDER_CONTEXT_STACKS == 3
	// (same pattern as in Init); confirm before changing the stack count.
	s_nCurStack++;
	if ( s_nCurStack > 2 )
	{
		s_nCurStack = 0;
	}
	m_pVertices = &s_Vertices[ s_nCurStack ];
	m_pIndices = &s_Indices[ s_nCurStack ];
	// Ensure the GPU no longer holds stale cache lines over this memory
	g_pShaderAPI->FlushGPUCache( m_pVertices->GetBase(), m_pVertices->GetUsed() );
	g_pShaderAPI->FlushGPUCache( m_pIndices->GetBase(), m_pIndices->GetUsed() );
	// Reset the stacks without decommitting their pages
	m_pVertices->FreeAll( false );
	m_pIndices->FreeAll( false );
#endif
}
//-----------------------------------------------------------------------------
// Starts a queuing frame: mirrors the given context's state (defaulting to
// the hardware context) so queued state queries answer correctly without
// touching the device.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::BeginQueue( CMatRenderContextBase *pInitialState )
{
	if ( !pInitialState )
	{
		pInitialState = m_pHardwareContext;
	}
	CycleDynamicBuffers();
	CMatRenderContextBase::InitializeFrom( pInitialState );
	// Cache state we must be able to report from the main thread
	g_pShaderAPI->GetBackBufferDimensions( m_WidthBackBuffer, m_HeightBackBuffer );
	m_FogMode = pInitialState->GetFogMode();
	m_nBoneCount = pInitialState->GetCurrentNumBones();
	pInitialState->GetFogDistances( &m_flFogStart, &m_flFogEnd, &m_flFogZ );
}
  1172. //-----------------------------------------------------------------------------
  1173. //
  1174. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Ends a queuing frame: optionally executes everything queued, then drops all
// cached object references (material, lightmap, cubemap, framebuffer copies,
// render-target stack) so nothing is kept alive across frames.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::EndQueue( bool bCallQueued )
{
	if ( bCallQueued )
	{
		CallQueued();
	}
	int i;
	if ( m_pCurrentMaterial )
	{
		m_pCurrentMaterial = NULL;
	}
	if ( m_pUserDefinedLightmap )
	{
		m_pUserDefinedLightmap = NULL;
	}
	if ( m_pLocalCubemapTexture )
	{
		m_pLocalCubemapTexture = NULL;
	}
	for ( i = 0; i < MAX_FB_TEXTURES; i++ )
	{
		if ( m_pCurrentFrameBufferCopyTexture[i] )
		{
			m_pCurrentFrameBufferCopyTexture[i] = NULL;
		}
	}
	// Clear every render target referenced anywhere in the RT stack
	for ( i = 0; i < m_RenderTargetStack.Count(); i++ )
	{
		for ( int j = 0; j < MAX_RENDER_TARGETS; j++ )
		{
			if ( m_RenderTargetStack[i].m_pRenderTargets[j] )
			{
				m_RenderTargetStack[i].m_pRenderTargets[j] = NULL;
			}
		}
	}
	m_RenderTargetStack.Clear();
	m_ScissorRectStack.Clear();
}
//-----------------------------------------------------------------------------
// Binds a material (queued). NULL falls back to the error material; otherwise
// the realtime version of the material is used. Material proxies are invoked
// immediately on this thread unless the material declares its proxies
// queue-friendly, in which case the queued hardware Bind runs them on the
// material system thread.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::Bind( IMaterial *iMaterial, void *proxyData )
{
	if ( !iMaterial )
	{
		if( !g_pErrorMaterial )
			return;
		iMaterial = static_cast<IMaterialInternal *>( g_pErrorMaterial );
	}
	else
	{
		iMaterial = static_cast< IMaterialInternal*>( iMaterial )->GetRealTimeVersion(); //always work with the real time versions of materials internally
	}
	m_pCurrentMaterial = static_cast< IMaterialInternal*>( iMaterial );
	m_pCurrentProxyData = proxyData;
	if ( !m_pCurrentMaterial->HasQueueFriendlyProxies() )
	{
		// We've always gotta call the bind proxy (assuming there is one)
		// so we can copy off the material vars at this point.
		// However in case the material must have proxies bound on QMS then we don't call bind proxies
		// now and rely on queued material bind to setup proxies on QMS.
		m_pCurrentMaterial->CallBindProxy( proxyData, &m_CallQueueExternal );
	}
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::Bind, iMaterial, proxyData );
}
  1238. //-----------------------------------------------------------------------------
  1239. //
  1240. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Begin/EndRender nest; only the outermost Begin is forwarded to hardware.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::BeginRender()
{
	if ( ++m_iRenderDepth == 1 )
	{
		m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::BeginRender );
	}
}
  1248. //-----------------------------------------------------------------------------
  1249. //
  1250. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Matching EndRender; only the outermost End is forwarded to hardware.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::EndRender()
{
	// This can fail if someone holds a render context pointer over a video mode change. Find it if you hit this.
	Assert(m_pHardwareContext);
	if ( --m_iRenderDepth == 0 )
	{
		m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::EndRender );
	}
}
  1260. //-----------------------------------------------------------------------------
  1261. //
  1262. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Executes every queued call against the hardware context, optionally
// reporting queue memory usage first (mat_report_queue_status). On consoles
// (non-CERT) it tracks how often the fallback heap stacks were used —
// i.e. the fixed scratch stacks overflowed — and nags when that exceeds 10%
// of frames. Afterwards the per-frame vertex/index stacks are reset, and the
// context is optionally shut down.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::CallQueued( bool bTermAfterCall )
{
	if ( mat_report_queue_status.GetBool() )
	{
#ifndef MS_NO_DYNAMIC_BUFFER_COPY
		Msg( "%d calls queued for %d bytes in parameters and overhead, %d bytes verts, %d bytes indices, %d bytes other\n",
			m_queue.Count(), m_queue.GetMemoryUsed(), Vertices().GetUsed(), Indices().GetUsed(), RenderDataSizeUsed() );
#else
		Msg( "%d calls queued for %d bytes in parameters and overhead, %d bytes verts, %d bytes indices, %d bytes other\n",
			m_queue.Count(), m_queue.GetMemoryUsed(), Vertices().GetUsed() + m_Vertices.GetUsed(), Indices().GetUsed() + m_Indices.GetUsed(), RenderDataSizeUsed() );
#endif
	}
	// Replay everything recorded since BeginQueue on the hardware context
	m_queue.CallQueued();
#if defined( MS_NO_DYNAMIC_BUFFER_COPY ) && !defined( _CERT )
	// Overflow telemetry: any use of m_Vertices/m_Indices here means the fixed
	// console scratch stacks were too small this frame.
	static int s_nFrameCount = 0;
	static int s_nVBOverflowCount = 0;
	static int s_nIBOverflowCount = 0;
	++s_nFrameCount;
	if ( m_Vertices.GetUsed() > 0 )
	{
		++s_nVBOverflowCount;
	}
	if ( m_Indices.GetUsed() > 0 )
	{
		++s_nIBOverflowCount;
	}
	if ( s_nFrameCount > 1024 )
	{
		static bool s_bVBWarned = false;
		static bool s_bIBWarned = false;
		if ( !s_bVBWarned )
		{
			if ( ( (float)s_nVBOverflowCount / (float)s_nFrameCount ) > 0.1f )
			{
				for ( int w = 0; w < 30; ++w )
				{
					Msg( "[Ignore this for splitscreen] Tell Brian to up the VB memory (and which map this occurred on)!\n" );
				}
				s_bVBWarned = true;
			}
		}
		if ( !s_bIBWarned )
		{
			if ( ( (float)s_nIBOverflowCount / (float)s_nFrameCount ) > 0.1f )
			{
				for ( int w = 0; w < 30; ++w )
				{
					Msg( "[Ignore this for splitscreen] Tell Brian to up the IB memory (and which map this occurred on)!\n" );
				}
				s_bIBWarned = true;
			}
		}
	}
#endif
#if 0
	// Disabled: histogram of per-frame vertex/index memory usage, dumped
	// every 1024 frames (64k vertex buckets / 32k index buckets).
	static int s_nVHisto[ 33 ];
	static int s_nIHisto[ 9 ];
	static int s_nHistoCount;
	int nMem = ( Vertices().GetUsed() + m_Vertices.GetUsed() + ( 64 * 1024 ) - 1 ) / ( 64 * 1024 );
	nMem = clamp( nMem, 0, 32 );
	s_nVHisto[ nMem ]++;
	nMem = ( Indices().GetUsed() + m_Indices.GetUsed() + ( 32 * 1024 ) - 1 ) / ( 32 * 1024 );
	nMem = clamp( nMem, 0, 8 );
	s_nIHisto[ nMem ]++;
	if ( ( ++s_nHistoCount % 1024 ) == 0 )
	{
		Msg( "Verts:" );
		bool bFound = false;
		for( int i = 32; i >= 0; --i )
		{
			if ( s_nVHisto[i] )
			{
				bFound = true;
			}
			if ( !bFound )
				continue;
			Msg( "[%dk %d] ", i * 64, s_nVHisto[i] );
		}
		Msg( "\n" );
		Msg( "Indices: " );
		bFound = false;
		for( int i = 8; i >= 0; --i )
		{
			if ( s_nIHisto[i] )
			{
				bFound = true;
			}
			if ( !bFound )
				continue;
			Msg( "[%dk %d] ", i * 32, s_nIHisto[i] );
		}
		Msg( "\n" );
	}
#endif
	// Reset per-frame stacks without decommitting their memory
	m_Vertices.FreeAll( false );
	m_Indices.FreeAll( false );
	if ( bTermAfterCall )
	{
		Shutdown();
	}
}
  1364. //-----------------------------------------------------------------------------
  1365. //
  1366. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Discards all queued calls without executing them.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::FlushQueued()
{
	m_queue.Flush();
}
  1371. //-----------------------------------------------------------------------------
  1372. //
  1373. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Returns the external (ICallQueue) wrapper around our internal call queue,
// for client code that wants to queue its own work with the render commands.
//-----------------------------------------------------------------------------
ICallQueue *CMatQueuedRenderContext::GetCallQueue()
{
	return &m_CallQueueExternal;
}
  1378. //-----------------------------------------------------------------------------
  1379. //
  1380. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Updates the locally-tracked render target state, then queues the real call.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::SetRenderTargetEx( int nRenderTargetID, ITexture *pNewTarget )
{
	CMatRenderContextBase::SetRenderTargetEx( nRenderTargetID, pNewTarget );
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetRenderTargetEx, nRenderTargetID, pNewTarget );
}
  1386. //-----------------------------------------------------------------------------
  1387. //
  1388. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Answers render target dimensions from locally-tracked state (texture on top
// of the RT stack, or the cached back buffer size) — no device query needed.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::GetRenderTargetDimensions( int &width, int &height) const
{
	// Target at top of stack
	ITexture *pTOS = m_RenderTargetStack.Top().m_pRenderTargets[0];
	// If top of stack isn't the back buffer, get dimensions from the texture
	if ( pTOS != NULL )
	{
		width = pTOS->GetActualWidth();
		height = pTOS->GetActualHeight();
	}
	else // otherwise, get them from the shader API
	{
		width = m_WidthBackBuffer;
		height = m_HeightBackBuffer;
	}
}
  1405. //-----------------------------------------------------------------------------
  1406. //
  1407. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Updates locally-tracked viewport state, then queues the real call.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::Viewport( int x, int y, int width, int height )
{
	CMatRenderContextBase::Viewport( x, y, width, height );
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::Viewport, x, y, width, height );
}
  1413. //-----------------------------------------------------------------------------
  1414. //
  1415. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Queues SetLights; the light array is copied into queue memory since the
// caller's array may not outlive the queue replay.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::SetLights( int nCount, const LightDesc_t *pLights )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetLights, nCount, m_queue.CopyArray( pLights, nCount ) );
}
//-----------------------------------------------------------------------------
// Queues SetLightingState; RefToVal copies the state by value into the queue.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::SetLightingState( const MaterialLightingState_t &state )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetLightingState, RefToVal( state ) );
}
  1424. //-----------------------------------------------------------------------------
  1425. //
  1426. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Queues SetLightingOrigin (vector passed by value, so no lifetime issues).
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::SetLightingOrigin( Vector vLightingOrigin )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetLightingOrigin, vLightingOrigin );
}
  1431. //-----------------------------------------------------------------------------
  1432. //
  1433. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Queues SetAmbientLightCube; the six cube faces are copied into queue memory.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::SetAmbientLightCube( LightCube_t cube )
{
	// FIXME: does compiler do the right thing, is envelope needed?
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetAmbientLightCube, m_queue.CopyArray( &cube[0], 6 ) );
}
  1439. //-----------------------------------------------------------------------------
  1440. // Bone count
  1441. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Bone count: tracked locally so GetCurrentNumBones can answer immediately,
// then forwarded to hardware via the queue.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::SetNumBoneWeights( int nBoneCount )
{
	m_nBoneCount = nBoneCount;
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetNumBoneWeights, nBoneCount );
}
// Returns the locally-tracked bone count (set by SetNumBoneWeights/BeginQueue).
int CMatQueuedRenderContext::GetCurrentNumBones( ) const
{
	return m_nBoneCount;
}
  1451. //-----------------------------------------------------------------------------
  1452. //
  1453. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Fog state setters: each caches the value locally (so the matching getter
// answers from this thread) and queues the hardware call.
//-----------------------------------------------------------------------------
void CMatQueuedRenderContext::FogMode( MaterialFogMode_t fogMode )
{
	m_FogMode = fogMode;
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::FogMode, fogMode );
}
// Caches the fog start distance locally, then queues the hardware call.
void CMatQueuedRenderContext::FogStart( float fStart )
{
	m_flFogStart = fStart;
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::FogStart, fStart );
}
// Caches the fog end distance locally, then queues the hardware call.
void CMatQueuedRenderContext::FogEnd( float fEnd )
{
	m_flFogEnd = fEnd;
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::FogEnd, fEnd );
}
// Caches the max fog density locally, then queues the hardware call.
void CMatQueuedRenderContext::FogMaxDensity( float flMaxDensity )
{
	m_flFogMaxDensity = flMaxDensity;
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::FogMaxDensity, flMaxDensity );
}
// Caches the fog Z plane locally, then queues the hardware call.
void CMatQueuedRenderContext::SetFogZ( float fogZ )
{
	m_flFogZ = fogZ;
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetFogZ, fogZ );
}
// Returns the locally-cached fog mode (no hardware query).
MaterialFogMode_t CMatQueuedRenderContext::GetFogMode( void )
{
	return m_FogMode;
}
  1483. void CMatQueuedRenderContext::FogColor3f( float r, float g, float b )
  1484. {
  1485. FogColor3ub( clamp( (int)(r * 255.0f), 0, 255 ), clamp( (int)(g * 255.0f), 0, 255 ), clamp( (int)(b * 255.0f), 0, 255 ) );
  1486. }
  1487. void CMatQueuedRenderContext::FogColor3fv( float const* rgb )
  1488. {
  1489. FogColor3ub( clamp( (int)(rgb[0] * 255.0f), 0, 255 ), clamp( (int)(rgb[1] * 255.0f), 0, 255 ), clamp( (int)(rgb[2] * 255.0f), 0, 255 ) );
  1490. }
// Caches the fog color locally (so GetFogColor can answer without a sync)
// and queues the actual state change for the render thread.
void CMatQueuedRenderContext::FogColor3ub( unsigned char r, unsigned char g, unsigned char b )
{
	m_FogColor.r = r;
	m_FogColor.g = g;
	m_FogColor.b = b;
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::FogColor3ub, r, g, b );
}
// Array form of FogColor3ub: rgb points at three 8-bit channels.
void CMatQueuedRenderContext::FogColor3ubv( unsigned char const* rgb )
{
	FogColor3ub( rgb[0], rgb[1], rgb[2] );
}
  1502. void CMatQueuedRenderContext::GetFogColor( unsigned char *rgb )
  1503. {
  1504. rgb[0] = m_FogColor.r;
  1505. rgb[1] = m_FogColor.g;
  1506. rgb[2] = m_FogColor.b;
  1507. }
  1508. void CMatQueuedRenderContext::GetFogDistances( float *fStart, float *fEnd, float *fFogZ )
  1509. {
  1510. if( fStart )
  1511. *fStart = m_flFogStart;
  1512. if( fEnd )
  1513. *fEnd = m_flFogEnd;
  1514. if( fFogZ )
  1515. *fFogZ = m_flFogZ;
  1516. }
  1517. //-----------------------------------------------------------------------------
  1518. //
  1519. //-----------------------------------------------------------------------------
  1520. void CMatQueuedRenderContext::GetViewport( int& x, int& y, int& width, int& height ) const
  1521. {
  1522. // Verify valid top of RT stack
  1523. Assert ( m_RenderTargetStack.Count() > 0 );
  1524. // Grab the top of stack
  1525. const RenderTargetStackElement_t& element = m_RenderTargetStack.Top();
  1526. // If either dimension is negative, set to full bounds of current target
  1527. if ( (element.m_nViewW < 0) || (element.m_nViewH < 0) )
  1528. {
  1529. // Viewport origin at target origin
  1530. x = y = 0;
  1531. // If target is back buffer
  1532. if ( element.m_pRenderTargets[0] == NULL )
  1533. {
  1534. width = m_WidthBackBuffer;
  1535. height = m_HeightBackBuffer;
  1536. }
  1537. else // if target is texture
  1538. {
  1539. width = element.m_pRenderTargets[0]->GetActualWidth();
  1540. height = element.m_pRenderTargets[0]->GetActualHeight();
  1541. }
  1542. }
  1543. else // use the bounds from the stack directly
  1544. {
  1545. x = element.m_nViewX;
  1546. y = element.m_nViewY;
  1547. width = element.m_nViewW;
  1548. height = element.m_nViewH;
  1549. }
  1550. }
  1551. //-----------------------------------------------------------------------------
  1552. //
  1553. //-----------------------------------------------------------------------------
// Queues a sync token; the string is copied into the queue's storage because
// the caller's pointer may not outlive the deferred dispatch.
void CMatQueuedRenderContext::SyncToken( const char *p )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SyncToken, m_queue.Copy( p ) );
}
  1558. //-----------------------------------------------------------------------------
  1559. //
  1560. //-----------------------------------------------------------------------------
// Hands out the queued stand-in index buffer; the real dynamic IB is
// resolved later on the render thread (see DeferredDrawInstances).
IIndexBuffer* CMatQueuedRenderContext::GetDynamicIndexBuffer()
{
	return m_pQueuedIndexBuffer;
}
  1565. //-----------------------------------------------------------------------------
  1566. //
  1567. //-----------------------------------------------------------------------------
// Convenience overload: a zero vertex format means "derive from the bound material".
IMesh* CMatQueuedRenderContext::GetDynamicMesh( bool buffered, IMesh* pVertexOverride, IMesh* pIndexOverride, IMaterial *pAutoBind )
{
	return GetDynamicMeshEx( 0, buffered, pVertexOverride, pIndexOverride, pAutoBind );
}
// Prepares and returns the queued stand-in dynamic mesh. Binds pAutoBind
// first (if given) so the queued mesh snapshots the right material. The
// unsupported combinations (both overrides, or a compressed vertex override)
// are trapped with DebuggerBreak and return NULL.
IMesh* CMatQueuedRenderContext::GetDynamicMeshEx( VertexFormat_t vertexFormat, bool bBuffered, IMesh* pVertexOverride, IMesh* pIndexOverride, IMaterial *pAutoBind )
{
	if( pAutoBind )
	{
		Bind( pAutoBind, NULL );
	}

	if ( pVertexOverride && pIndexOverride )
	{
		// Use the new batch API
		DebuggerBreak();
		return NULL;
	}

	if ( pVertexOverride )
	{
		if ( CompressionType( pVertexOverride->GetVertexFormat() ) != VERTEX_COMPRESSION_NONE )
		{
			// UNDONE: support compressed dynamic meshes if needed (pro: less VB memory, con: time spent compressing)
			DebuggerBreak();
			return NULL;
		}
	}

	// For anything more than 1 bone, imply the last weight from the 1 - the sum of the others.
	int nCurrentBoneCount = GetCurrentNumBones();
	Assert( nCurrentBoneCount <= 4 );
	if ( nCurrentBoneCount > 1 )
	{
		--nCurrentBoneCount;
	}

	m_pQueuedMesh->OnGetDynamicMesh( vertexFormat, ( bBuffered ) ? MQM_BUFFERED : 0, pVertexOverride, pIndexOverride, GetCurrentMaterialInternal(), nCurrentBoneCount );
	return m_pQueuedMesh;
}
  1603. //-----------------------------------------------------------------------------
  1604. //
  1605. //-----------------------------------------------------------------------------
  1606. int CMatQueuedRenderContext::GetMaxVerticesToRender( IMaterial *pMaterial )
  1607. {
  1608. pMaterial = ((IMaterialInternal *)pMaterial)->GetRealTimeVersion(); //always work with the real time version of materials internally.
  1609. // Be conservative, assume no compression (in here, we don't know if the caller will used a compressed VB or not)
  1610. // FIXME: allow the caller to specify which compression type should be used to compute size from the vertex format
  1611. // (this can vary between multiple VBs/Meshes using the same material)
  1612. VertexFormat_t materialFormat = pMaterial->GetVertexFormat() & ~VERTEX_FORMAT_COMPRESSED;
  1613. int nVertexFormatSize = g_pShaderAPI->VertexFormatSize( materialFormat );
  1614. if ( nVertexFormatSize == 0 )
  1615. {
  1616. Warning( "bad vertex size for material %s\n", pMaterial->GetName() );
  1617. return 65535;
  1618. }
  1619. #ifdef MS_NO_DYNAMIC_BUFFER_COPY
  1620. int nDynamicVBSize = DYNAMIC_VERTEX_BUFFER_BLOCK_SIZE;
  1621. #else
  1622. int nDynamicVBSize = g_pShaderAPI->GetCurrentDynamicVBSize();
  1623. #endif
  1624. int maxVerts = nDynamicVBSize / nVertexFormatSize;
  1625. if ( maxVerts > 65535 )
  1626. {
  1627. maxVerts = 65535;
  1628. }
  1629. return maxVerts;
  1630. }
// Returns the dynamic index budget; indices are 16-bit in both build flavors.
int CMatQueuedRenderContext::GetMaxIndicesToRender( )
{
#ifdef MS_NO_DYNAMIC_BUFFER_COPY
	return DYNAMIC_INDEX_BUFFER_BLOCK_SIZE / sizeof(uint16);
#else
	return INDEX_BUFFER_SIZE;
#endif
}
  1639. //-----------------------------------------------------------------------------
  1640. //
  1641. //-----------------------------------------------------------------------------
  1642. void CMatQueuedRenderContext::GetMaxToRender( IMesh *pMesh, bool bMaxUntilFlush, int *pMaxVerts, int *pMaxIndices )
  1643. {
  1644. Assert( !bMaxUntilFlush );
  1645. #ifdef MS_NO_DYNAMIC_BUFFER_COPY
  1646. int nDynamicVBSize = DYNAMIC_VERTEX_BUFFER_BLOCK_SIZE;
  1647. int nDynamicIBSize = DYNAMIC_INDEX_BUFFER_BLOCK_SIZE;
  1648. #else
  1649. int nDynamicVBSize = g_pShaderAPI->GetCurrentDynamicVBSize();
  1650. int nDynamicIBSize = INDEX_BUFFER_SIZE * sizeof(uint16);
  1651. #endif
  1652. *pMaxVerts = nDynamicVBSize / m_pQueuedMesh->GetVertexSize();
  1653. if ( *pMaxVerts > 65535 )
  1654. {
  1655. *pMaxVerts = 65535;
  1656. }
  1657. *pMaxIndices = nDynamicIBSize / sizeof(uint16);
  1658. }
  1659. //-----------------------------------------------------------------------------
  1660. //
  1661. //-----------------------------------------------------------------------------
// Hands out the queued stand-in flex mesh, reset with defaults (no format,
// no overrides, no material, zero bones).
IMesh *CMatQueuedRenderContext::GetFlexMesh()
{
	m_pQueuedFlexMesh->OnGetDynamicMesh( 0, 0, NULL, NULL, NULL, 0 );
	return m_pQueuedFlexMesh;
}
  1667. //-----------------------------------------------------------------------------
  1668. //
  1669. //-----------------------------------------------------------------------------
// Creates the handle immediately (so the caller gets a valid handle now) and
// queues the GPU-side object creation for the render thread.
OcclusionQueryObjectHandle_t CMatQueuedRenderContext::CreateOcclusionQueryObject()
{
	OcclusionQueryObjectHandle_t h = g_pOcclusionQueryMgr->CreateOcclusionQueryObject();
	m_queue.QueueCall( g_pOcclusionQueryMgr, &COcclusionQueryMgr::OnCreateOcclusionQueryObject, h );
	return h;
}
// Queues issuing the query, then returns the last available result without
// blocking (second arg false = don't wait for the new query to finish).
int CMatQueuedRenderContext::OcclusionQuery_GetNumPixelsRendered( OcclusionQueryObjectHandle_t h )
{
	m_queue.QueueCall( g_pOcclusionQueryMgr, &COcclusionQueryMgr::OcclusionQuery_IssueNumPixelsRenderedQuery, h );
	return g_pOcclusionQueryMgr->OcclusionQuery_GetNumPixelsRendered( h, false );
}
  1681. //-----------------------------------------------------------------------------
  1682. //
  1683. //-----------------------------------------------------------------------------
// Queues the flashlight state; RefToVal copies the structs by value into the
// queue so the caller's references need not stay alive.
void CMatQueuedRenderContext::SetFlashlightState( const FlashlightState_t &s, const VMatrix &m )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::SetFlashlightState, RefToVal( s ), RefToVal( m ) );
}
  1688. //-----------------------------------------------------------------------------
  1689. //
  1690. //-----------------------------------------------------------------------------
// Queues the hardware state change and updates the base-class shadow state;
// the return value comes from the local (base) bookkeeping, not the queue.
bool CMatQueuedRenderContext::EnableClipping( bool bEnable )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::EnableClipping, bEnable );
	return BaseClass::EnableClipping( bEnable );
}
  1696. //-----------------------------------------------------------------------------
  1697. //
  1698. //-----------------------------------------------------------------------------
// Queues the user clip transform; the matrix is copied by value via RefToVal.
void CMatQueuedRenderContext::UserClipTransform( const VMatrix &m )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::UserClipTransform, RefToVal( m ) );
}
  1703. //-----------------------------------------------------------------------------
  1704. //
  1705. //-----------------------------------------------------------------------------
// Reports the locally cached back-buffer dimensions; no render-thread sync.
void CMatQueuedRenderContext::GetWindowSize( int &width, int &height ) const
{
	width = m_WidthBackBuffer;
	height = m_HeightBackBuffer;
}
  1711. //-----------------------------------------------------------------------------
  1712. //
  1713. //-----------------------------------------------------------------------------
// Runs the material's bind proxies immediately (on the real-time material,
// routing any proxy-issued calls into m_CallQueueExternal), then queues the
// actual screen-space draw for the render thread.
void CMatQueuedRenderContext::DrawScreenSpaceRectangle(
	IMaterial *pMaterial,
	int destx, int desty,
	int width, int height,
	float src_texture_x0, float src_texture_y0, // which texel you want to appear at
	// destx/y
	float src_texture_x1, float src_texture_y1, // which texel you want to appear at
	// destx+width-1, desty+height-1
	int src_texture_width, int src_texture_height, // needed for fixup
	void *pClientRenderable,
	int nXDice, int nYDice ) // Amount to tessellate the quad
{
	IMaterial *pRealTimeVersionMaterial = ((IMaterialInternal *)pMaterial)->GetRealTimeVersion();
	pRealTimeVersionMaterial->CallBindProxy( pClientRenderable, &m_CallQueueExternal );
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::DrawScreenSpaceRectangle, pMaterial, destx, desty, width, height, src_texture_x0, src_texture_y0, src_texture_x1, src_texture_y1, src_texture_width, src_texture_height, pClientRenderable, nXDice, nYDice );
}
  1730. //-----------------------------------------------------------------------------
  1731. //
  1732. //-----------------------------------------------------------------------------
// Queues the bone matrix load; matrix copied by value via RefToVal.
void CMatQueuedRenderContext::LoadBoneMatrix( int i, const matrix3x4_t &m )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::LoadBoneMatrix, i, RefToVal( m ) );
}
  1737. //-----------------------------------------------------------------------------
  1738. //
  1739. //-----------------------------------------------------------------------------
// Queues the RT-to-texture copy. Optional rects are deep-copied into queue
// storage (the caller's pointers may be stack temporaries); NULL stays NULL.
void CMatQueuedRenderContext::CopyRenderTargetToTextureEx( ITexture *pTexture, int i, Rect_t *pSrc, Rect_t *pDst )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::CopyRenderTargetToTextureEx, pTexture, i, ( pSrc ) ? &m_queue.Copy(*pSrc) : NULL, ( pDst ) ? &m_queue.Copy(*pDst) : NULL );
}
  1744. //-----------------------------------------------------------------------------
  1745. //
  1746. //-----------------------------------------------------------------------------
// Queues the texture-to-RT copy; CUtlEnvelope snapshots the optional rects
// by value (and preserves NULL-ness) for deferred dispatch.
void CMatQueuedRenderContext::CopyTextureToRenderTargetEx( int i, ITexture *pTexture, Rect_t *pSrc, Rect_t *pDst )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::CopyTextureToRenderTargetEx, i, pTexture, CUtlEnvelope<Rect_t>(pSrc), CUtlEnvelope<Rect_t>(pDst) );
}
  1751. //-----------------------------------------------------------------------------
  1752. //
  1753. //-----------------------------------------------------------------------------
// Intercepts a mesh draw and queues it. Returns false to tell the caller
// the draw was deferred rather than executed now.
bool CMatQueuedRenderContext::OnDrawMesh( IMesh *pMesh, int firstIndex, int numIndices )
{
	Assert( pMesh != m_pQueuedMesh );
	void (IMesh::*pfnDraw)( int, int) = &IMesh::Draw; // need assignment to disambiguate overloaded function
	m_queue.QueueCall( pMesh, pfnDraw, firstIndex, numIndices );
	return false;
}
  1761. //-----------------------------------------------------------------------------
  1762. //
  1763. //-----------------------------------------------------------------------------
// Prim-list variant: copies the list array into render data (it must survive
// until dispatch) and queues a deferred draw. Returns false = deferred.
bool CMatQueuedRenderContext::OnDrawMesh( IMesh *pMesh, CPrimList *pLists, int nLists )
{
	CMatRenderData< CPrimList > rdPrimList( this, nLists, pLists );
	m_queue.QueueCall( this, &CMatQueuedRenderContext::DeferredDrawPrimList, pMesh, rdPrimList.Base(), nLists );
	return false;
}
  1770. //-----------------------------------------------------------------------------
  1771. //
  1772. //-----------------------------------------------------------------------------
// Queues a modulated mesh draw. Returns false = deferred, not executed now.
bool CMatQueuedRenderContext::OnDrawMeshModulated( IMesh *pMesh, const Vector4D& vecDiffuseModulation, int firstIndex, int numIndices )
{
	Assert( pMesh != m_pQueuedMesh );
	m_queue.QueueCall( pMesh, &IMesh::DrawModulated, vecDiffuseModulation, firstIndex, numIndices );
	return false;
}
// Runs on the render thread (queued by OnDrawMesh): issues the prim-list draw.
void CMatQueuedRenderContext::DeferredDrawPrimList( IMesh *pMesh, CPrimList *pLists, int nLists )
{
	Assert( pMesh != m_pQueuedMesh );
	pMesh->Draw( pLists, nLists );
}
  1784. //-----------------------------------------------------------------------------
  1785. //
  1786. //-----------------------------------------------------------------------------
// Runs on the render thread (queued by OnSetFlexMesh): binds the actual flex
// mesh. Note: nVertexOffsetInBytes is deliberately ignored here — the real
// offset is read from the queued flex mesh at dispatch time.
void CMatQueuedRenderContext::DeferredSetFlexMesh( IMesh *pStaticMesh, int nVertexOffsetInBytes )
{
	pStaticMesh->SetFlexMesh( m_pQueuedFlexMesh->MST_GetActualMesh(), m_pQueuedFlexMesh->MST_GetActualVertexOffsetInBytes() );
}
  1791. bool CMatQueuedRenderContext::OnSetFlexMesh( IMesh *pStaticMesh, IMesh *pMesh, int nVertexOffsetInBytes )
  1792. {
  1793. Assert( pMesh == m_pQueuedFlexMesh || !pMesh );
  1794. if ( pMesh )
  1795. {
  1796. m_queue.QueueCall( this, &CMatQueuedRenderContext::DeferredSetFlexMesh, pStaticMesh, nVertexOffsetInBytes );
  1797. }
  1798. else
  1799. {
  1800. m_queue.QueueCall( pStaticMesh, &IMesh::SetFlexMesh, (IMesh *)NULL, 0 );
  1801. }
  1802. return false;
  1803. }
  1804. //-----------------------------------------------------------------------------
  1805. //
  1806. //-----------------------------------------------------------------------------
// Intercepts SetColorMesh and queues it. Returns false = deferred.
bool CMatQueuedRenderContext::OnSetColorMesh( IMesh *pStaticMesh, IMesh *pMesh, int nVertexOffsetInBytes )
{
	Assert( pStaticMesh != m_pQueuedMesh && pStaticMesh != m_pQueuedFlexMesh );
	m_queue.QueueCall( pStaticMesh, &IMesh::SetColorMesh, pMesh, nVertexOffsetInBytes );
	return false;
}
  1813. //-----------------------------------------------------------------------------
  1814. //
  1815. //-----------------------------------------------------------------------------
// Intercepts SetPrimitiveType and queues it. Returns false = deferred.
bool CMatQueuedRenderContext::OnSetPrimitiveType( IMesh *pMesh, MaterialPrimitiveType_t type )
{
	Assert( pMesh != m_pQueuedMesh && pMesh != m_pQueuedFlexMesh );
	m_queue.QueueCall( pMesh, &IMesh::SetPrimitiveType, type );
	return false;
}
  1822. //-----------------------------------------------------------------------------
  1823. //
  1824. //-----------------------------------------------------------------------------
// Pushes the locally-maintained current matrix (base class state) to the
// hardware context as a LoadMatrix, keeping both sides in sync.
inline void CMatQueuedRenderContext::QueueMatrixSync()
{
	void (IMatRenderContext::*pfnLoadMatrix)( const VMatrix & ) = &IMatRenderContext::LoadMatrix; // need assignment to disambiguate overloaded function
	m_queue.QueueCall( m_pHardwareContext, pfnLoadMatrix, RefToVal( AccessCurrentMatrix() ) );
}
// Updates the local matrix-stack state, then queues the mode change.
void CMatQueuedRenderContext::MatrixMode( MaterialMatrixMode_t mode )
{
	CMatRenderContextBase::MatrixMode( mode );
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::MatrixMode, mode );
}
// Pushes the local matrix stack, then queues the same push for the render thread.
void CMatQueuedRenderContext::PushMatrix()
{
	CMatRenderContextBase::PushMatrix();
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::PushMatrix );
}
// Pops the local matrix stack, then queues the same pop for the render thread.
void CMatQueuedRenderContext::PopMatrix()
{
	CMatRenderContextBase::PopMatrix();
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::PopMatrix );
}
// Applies the load locally, then mirrors the resulting matrix to the render thread.
void CMatQueuedRenderContext::LoadMatrix( const VMatrix& matrix )
{
	CMatRenderContextBase::LoadMatrix( matrix );
	QueueMatrixSync();
}
// 3x4 overload: applies the load locally, then mirrors the result to the render thread.
void CMatQueuedRenderContext::LoadMatrix( const matrix3x4_t& matrix )
{
	CMatRenderContextBase::LoadMatrix( matrix );
	QueueMatrixSync();
}
// Applies the multiply locally, then mirrors the result to the render thread.
void CMatQueuedRenderContext::MultMatrix( const VMatrix& matrix )
{
	CMatRenderContextBase::MultMatrix( matrix );
	QueueMatrixSync();
}
// 3x4 overload: widens to VMatrix, multiplies locally, then mirrors the result.
void CMatQueuedRenderContext::MultMatrix( const matrix3x4_t& matrix )
{
	CMatRenderContextBase::MultMatrix( VMatrix( matrix ) );
	QueueMatrixSync();
}
// Applies the local-space multiply locally, then mirrors the result.
void CMatQueuedRenderContext::MultMatrixLocal( const VMatrix& matrix )
{
	CMatRenderContextBase::MultMatrixLocal( matrix );
	QueueMatrixSync();
}
// 3x4 overload: widens to VMatrix, multiplies locally, then mirrors the result.
void CMatQueuedRenderContext::MultMatrixLocal( const matrix3x4_t& matrix )
{
	CMatRenderContextBase::MultMatrixLocal( VMatrix( matrix ) );
	QueueMatrixSync();
}
// Identity is cheap to queue directly — no need for a full matrix sync.
void CMatQueuedRenderContext::LoadIdentity()
{
	CMatRenderContextBase::LoadIdentity();
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::LoadIdentity );
}
// Builds the ortho projection locally, then mirrors the resulting matrix.
void CMatQueuedRenderContext::Ortho( double left, double top, double right, double bottom, double zNear, double zFar )
{
	CMatRenderContextBase::Ortho( left, top, right, bottom, zNear, zFar );
	QueueMatrixSync();
}
// Builds the perspective projection locally, then mirrors the resulting matrix.
void CMatQueuedRenderContext::PerspectiveX( double flFovX, double flAspect, double flZNear, double flZFar )
{
	CMatRenderContextBase::PerspectiveX( flFovX, flAspect, flZNear, flZFar );
	QueueMatrixSync();
}
// Builds the off-center perspective projection locally, then mirrors the result.
void CMatQueuedRenderContext::PerspectiveOffCenterX( double flFovX, double flAspect, double flZNear, double flZFar, double bottom, double top, double left, double right )
{
	CMatRenderContextBase::PerspectiveOffCenterX( flFovX, flAspect, flZNear, flZFar, bottom, top, left, right );
	QueueMatrixSync();
}
// Builds the pick matrix locally, then mirrors the resulting matrix.
void CMatQueuedRenderContext::PickMatrix( int x, int y, int nWidth, int nHeight )
{
	CMatRenderContextBase::PickMatrix( x, y, nWidth, nHeight );
	QueueMatrixSync();
}
// Applies the rotation locally, then mirrors the resulting matrix.
void CMatQueuedRenderContext::Rotate( float flAngle, float x, float y, float z )
{
	CMatRenderContextBase::Rotate( flAngle, x, y, z );
	QueueMatrixSync();
}
// Applies the translation locally, then mirrors the resulting matrix.
void CMatQueuedRenderContext::Translate( float x, float y, float z )
{
	CMatRenderContextBase::Translate( x, y, z );
	QueueMatrixSync();
}
// Applies the scale locally, then mirrors the resulting matrix.
void CMatQueuedRenderContext::Scale( float x, float y, float z )
{
	CMatRenderContextBase::Scale( x, y, z );
	QueueMatrixSync();
}
// Begins a batch against the queued stand-in mesh; the actual mesh is
// detached and handed to the hardware context in DeferredBeginBatch.
void CMatQueuedRenderContext::BeginBatch( IMesh* pIndices )
{
	Assert( pIndices == (IMesh *)m_pQueuedMesh );
	m_pQueuedMesh->InvalidateAuxMeshSet();
	m_queue.QueueCall( this, &CMatQueuedRenderContext::DeferredBeginBatch );
}
// Queues binding a (non-queued) vertex mesh for the current batch.
void CMatQueuedRenderContext::BindBatch( IMesh* pVertices, IMaterial *pAutoBind )
{
	Assert( pVertices != (IMesh *)m_pQueuedMesh );
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::BindBatch, pVertices, pAutoBind );
}
// Queues a batch draw for the render thread.
void CMatQueuedRenderContext::DrawBatch(MaterialPrimitiveType_t primType, int firstIndex, int numIndices )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::DrawBatch, primType, firstIndex, numIndices );
}
// Queues the end of the current batch for the render thread.
void CMatQueuedRenderContext::EndBatch()
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::EndBatch );
}
// Runs on the render thread (queued by BeginBatch): detaches the actual mesh
// from the queued stand-in and starts the hardware batch with it.
void CMatQueuedRenderContext::DeferredBeginBatch( )
{
	m_pHardwareContext->BeginBatch( m_pQueuedMesh->MST_DetachActualMesh() );
}
  1938. //-----------------------------------------------------------------------------
  1939. // Memory allocation calls for queued mesh, et. al.
  1940. //-----------------------------------------------------------------------------
  1941. byte *CMatQueuedRenderContext::AllocVertices( int nVerts, int nVertexSize, bool *pExternalVB )
  1942. {
  1943. MEM_ALLOC_CREDIT();
  1944. size_t nSizeInBytes = nVerts * nVertexSize;
  1945. byte *pMemory = ( byte * )Vertices().Alloc( nSizeInBytes, false );
  1946. #ifdef MS_NO_DYNAMIC_BUFFER_COPY
  1947. if ( nSizeInBytes > DYNAMIC_VERTEX_BUFFER_BLOCK_SIZE )
  1948. {
  1949. Warning( "AllocVertices: Tried to allocate vertex buffer too large! (%d/%d)\n", nSizeInBytes, DYNAMIC_VERTEX_BUFFER_BLOCK_SIZE );
  1950. }
  1951. if ( pMemory )
  1952. {
  1953. *pExternalVB = true;
  1954. }
  1955. else
  1956. {
  1957. *pExternalVB = false;
  1958. pMemory = ( byte * )m_Vertices.Alloc( nSizeInBytes, false );
  1959. }
  1960. #else
  1961. *pExternalVB = false;
  1962. #endif
  1963. Assert( pMemory );
  1964. return pMemory;
  1965. }
  1966. byte *CMatQueuedRenderContext::AllocIndices( int nIndices, int nIndexSize, bool *pExternalIB )
  1967. {
  1968. MEM_ALLOC_CREDIT();
  1969. size_t nSizeInBytes = nIndices * nIndexSize;
  1970. byte *pMemory = ( byte * )Indices().Alloc( nSizeInBytes, false );
  1971. #ifdef MS_NO_DYNAMIC_BUFFER_COPY
  1972. if ( nSizeInBytes > DYNAMIC_INDEX_BUFFER_BLOCK_SIZE )
  1973. {
  1974. Warning( "AllocIndices: Tried to allocate index buffer too large! (%d/%d)\n", nSizeInBytes, DYNAMIC_INDEX_BUFFER_BLOCK_SIZE );
  1975. }
  1976. if ( pMemory )
  1977. {
  1978. *pExternalIB = true;
  1979. }
  1980. else
  1981. {
  1982. *pExternalIB = false;
  1983. pMemory = ( byte * )m_Indices.Alloc( nSizeInBytes, false );
  1984. }
  1985. #else
  1986. *pExternalIB = false;
  1987. #endif
  1988. Assert( pMemory );
  1989. return pMemory;
  1990. }
// Shrinks a vertex allocation in place. Only shrinking is supported
// (asserted); the freed tail is returned to the owning memstack. The pointer
// itself never moves.
byte *CMatQueuedRenderContext::ReallocVertices( byte *pVerts, int nVertsOld, int nVertsNew, int nVertexSize, bool bExternalMemory )
{
	Assert( nVertsNew <= nVertsOld );
	if ( nVertsNew < nVertsOld )
	{
		unsigned nBytes = ( ( nVertsOld - nVertsNew ) * nVertexSize );
#ifdef MS_NO_DYNAMIC_BUFFER_COPY
		// Must trim the same stack the allocation came from (see AllocVertices).
		CMemoryStack &stack = bExternalMemory ? Vertices() : m_Vertices;
#else
		CMemoryStack &stack = Vertices();
#endif
		stack.FreeToAllocPoint( stack.GetCurrentAllocPoint() - nBytes, false ); // memstacks 128 bit aligned
	}
	return pVerts;
}
// Shrinks an index allocation in place. Only shrinking is supported
// (asserted); the freed tail is returned to the owning memstack. The pointer
// itself never moves.
byte *CMatQueuedRenderContext::ReallocIndices( byte *pIndices, int nIndicesOld, int nIndicesNew, int nIndexSize, bool bExternalMemory )
{
	Assert( nIndicesNew <= nIndicesOld );
	if ( nIndicesNew < nIndicesOld )
	{
		unsigned nBytes = ( ( nIndicesOld - nIndicesNew ) * nIndexSize );
#ifdef MS_NO_DYNAMIC_BUFFER_COPY
		// Must trim the same stack the allocation came from (see AllocIndices).
		CMemoryStack &stack = bExternalMemory ? Indices() : m_Indices;
#else
		CMemoryStack &stack = Indices();
#endif
		stack.FreeToAllocPoint( stack.GetCurrentAllocPoint() - nBytes, false ); // memstacks 128 bit aligned
	}
	return pIndices;
}
// Intentionally a no-op: memstack allocations are released in bulk once the
// queued calls have been dispatched.
void CMatQueuedRenderContext::FreeVertices( byte *pVerts, int nVerts, int nVertexSize )
{
	// free at end of call dispatch
}
// Intentionally a no-op: memstack allocations are released in bulk once the
// queued calls have been dispatched.
void CMatQueuedRenderContext::FreeIndices( byte *pIndices, int nIndices, int nIndexSize )
{
	// free at end of call dispatch
}
  2029. //------------------------------------------------------------------------------
  2030. // Called from rendering thread, fixes up dynamic buffers
  2031. //------------------------------------------------------------------------------
// Runs on the render thread (queued by DrawInstances): rewrites any instance
// that references the queued stand-in index buffer to point at the real
// dynamic IB (with the proper start offset), then issues the draw.
// NOTE: if the dynamic IB isn't available the whole draw is skipped.
void CMatQueuedRenderContext::DeferredDrawInstances( int nInstanceCount, const MeshInstanceData_t *pConstInstance )
{
	// The instance array lives in render data we own, so mutating it here is safe.
	MeshInstanceData_t *pInstance = const_cast<MeshInstanceData_t*>( pConstInstance );
	// Adjust the instances pointing to the dynamic index buffer
	IIndexBuffer *pDynamicIndexBuffer = m_pQueuedIndexBuffer->RT_GetDynamicIndexBuffer();
	int nStartIndex = m_pQueuedIndexBuffer->RT_GetIndexStart();
	if ( ( nStartIndex < 0 ) || !pDynamicIndexBuffer )
		return;
	for ( int i = 0; i < nInstanceCount; ++i )
	{
		MeshInstanceData_t &instance = pInstance[i];
		// FIXME: Make dynamic vertex buffers work!
		Assert( !instance.m_pVertexBuffer->IsDynamic() );
		if ( !instance.m_pIndexBuffer->IsDynamic() )
			continue;
		instance.m_pIndexBuffer = pDynamicIndexBuffer;
		instance.m_nIndexOffset += nStartIndex;
	}
	m_pHardwareContext->DrawInstances( nInstanceCount, pConstInstance );
}
  2052. //------------------------------------------------------------------------------
  2053. // Draws instances with different meshes
  2054. //------------------------------------------------------------------------------
// Queues a multi-instance draw. The instance array must live in render data
// (so it survives until dispatch); if the caller passed ordinary memory it
// is copied in here. Debug builds verify every embedded pointer already
// lives in render data too.
void CMatQueuedRenderContext::DrawInstances( int nInstanceCount, const MeshInstanceData_t *pInstance )
{
	CMatRenderData< MeshInstanceData_t > renderData( this );
	if ( !IsRenderData( pInstance ) )
	{
		renderData.Lock( nInstanceCount );
		memcpy( renderData.Base(), pInstance, nInstanceCount * sizeof(MeshInstanceData_t) );
		pInstance = renderData.Base();
	}
#ifdef _DEBUG
	for ( int i = 0; i < nInstanceCount; ++i )
	{
		Assert( !pInstance[i].m_pPoseToWorld || IsRenderData( pInstance[i].m_pPoseToWorld ) );
		Assert( !pInstance[i].m_pLightingState || IsRenderData( pInstance[i].m_pLightingState ) );
		Assert( !pInstance[i].m_pBoneRemap || IsRenderData( pInstance[i].m_pBoneRemap ) );
		Assert( !pInstance[i].m_pStencilState || IsRenderData( pInstance[i].m_pStencilState ) );
	}
#endif
	m_queue.QueueCall( this, &CMatQueuedRenderContext::DeferredDrawInstances, nInstanceCount, pInstance );
}
  2075. //------------------------------------------------------------------------------
  2076. // Color correction related methods
  2077. //------------------------------------------------------------------------------
// Synchronous: takes the material system lock (stalling/serializing against
// the render thread) and adds the lookup immediately.
ColorCorrectionHandle_t CMatQueuedRenderContext::AddLookup( const char *pName )
{
	MaterialLock_t hLock = m_pMaterialSystem->Lock();
	ColorCorrectionHandle_t hCC = ColorCorrectionSystem()->AddLookup( pName );
	m_pMaterialSystem->Unlock( hLock );
	return hCC;
}
// Synchronous: removes the lookup under the material system lock.
bool CMatQueuedRenderContext::RemoveLookup( ColorCorrectionHandle_t handle )
{
	MaterialLock_t hLock = m_pMaterialSystem->Lock();
	bool bRemoved = ColorCorrectionSystem()->RemoveLookup( handle );
	m_pMaterialSystem->Unlock( hLock );
	return bRemoved;
}
// Synchronous: finds the lookup by name under the material system lock.
ColorCorrectionHandle_t CMatQueuedRenderContext::FindLookup( const char *pName )
{
	MaterialLock_t hLock = m_pMaterialSystem->Lock();
	ColorCorrectionHandle_t hCC = ColorCorrectionSystem()->FindLookup( pName );
	m_pMaterialSystem->Unlock( hLock );
	return hCC;
}
// Synchronous: locks the lookup for editing under the material system lock.
void CMatQueuedRenderContext::LockLookup( ColorCorrectionHandle_t handle )
{
	MaterialLock_t hLock = m_pMaterialSystem->Lock();
	ColorCorrectionSystem()->LockLookup( handle );
	m_pMaterialSystem->Unlock( hLock );
}
// Synchronous: loads lookup data from pLookupName under the material system lock.
void CMatQueuedRenderContext::LoadLookup( ColorCorrectionHandle_t handle, const char *pLookupName )
{
	MaterialLock_t hLock = m_pMaterialSystem->Lock();
	ColorCorrectionSystem()->LoadLookup( handle, pLookupName );
	m_pMaterialSystem->Unlock( hLock );
}
// Synchronous: unlocks the lookup under the material system lock.
void CMatQueuedRenderContext::UnlockLookup( ColorCorrectionHandle_t handle )
{
	MaterialLock_t hLock = m_pMaterialSystem->Lock();
	ColorCorrectionSystem()->UnlockLookup( handle );
	m_pMaterialSystem->Unlock( hLock );
}
  2117. // NOTE: These are synchronous calls! The rendering thread is stopped, the current queue is drained and the pixels are read
  2118. // NOTE: We should also have a queued read pixels in the API for doing mid frame reads (as opposed to screenshots)
// Synchronous readback: acquires the material system lock (which drains the
// queued work) and reads the pixels directly via the shader API.
void CMatQueuedRenderContext::ReadPixels( int x, int y, int width, int height, unsigned char *data, ImageFormat dstFormat, ITexture *pRenderTargetTexture )
{
	MaterialLock_t hLock = m_pMaterialSystem->Lock();
	g_pShaderAPI->ReadPixels( x, y, width, height, data, dstFormat, pRenderTargetTexture );
	m_pMaterialSystem->Unlock( hLock );
}
// Queues an async readback request; data/pPixelsReadEvent must stay valid
// until the render thread services the call.
void CMatQueuedRenderContext::ReadPixelsAsync( int x, int y, int width, int height, unsigned char *data, ImageFormat dstFormat, ITexture *pRenderTargetTexture, CThreadEvent *pPixelsReadEvent )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::ReadPixelsAsync, x, y, width, height, data, dstFormat, pRenderTargetTexture, pPixelsReadEvent );
}
// Queues retrieval of a previously requested async readback; data and
// pGetResultEvent must stay valid until the call is serviced.
void CMatQueuedRenderContext::ReadPixelsAsyncGetResult( int x, int y, int width, int height, unsigned char *data, ImageFormat dstFormat, CThreadEvent *pGetResultEvent )
{
	m_queue.QueueCall( m_pHardwareContext, &IMatRenderContext::ReadPixelsAsyncGetResult, x, y, width, height, data, dstFormat, pGetResultEvent );
}
// Synchronous readback with stretch: acquires the material system lock and
// reads/stretches via the shader API immediately.
void CMatQueuedRenderContext::ReadPixelsAndStretch( Rect_t *pSrcRect, Rect_t *pDstRect, unsigned char *pBuffer, ImageFormat dstFormat, int nDstStride )
{
	MaterialLock_t hLock = m_pMaterialSystem->Lock();
	g_pShaderAPI->ReadPixels( pSrcRect, pDstRect, pBuffer, dstFormat, nDstStride );
	m_pMaterialSystem->Unlock( hLock );
}