Counter-Strike: Global Offensive Source Code


//============ Copyright (c) Valve Corporation, All rights reserved. ============
//
// cglmbuffer.cpp
//
//===============================================================================
#include "togl/rendermechanism.h"

// memdbgon -must- be the last include file in a .cpp file.
#include "tier0/memdbgon.h"

// LINUXTODO : took out cmdline here
bool g_bUsePseudoBufs = false; //( Plat_GetCommandLineA() ) ? ( strstr( Plat_GetCommandLineA(), "-gl_enable_pseudobufs" ) != NULL ) : false;

#ifdef OSX
// Significant perf degradation on some OSX parts if static buffers not disabled
bool g_bDisableStaticBuffer = true;
#else
bool g_bDisableStaticBuffer = false; //( Plat_GetCommandLineA() ) ? ( strstr( Plat_GetCommandLineA(), "-gl_disable_static_buffer" ) != NULL ) : false;
#endif

// http://www.opengl.org/registry/specs/ARB/vertex_buffer_object.txt
// http://www.opengl.org/registry/specs/ARB/pixel_buffer_object.txt

// gl_bufmode: zero means we mark all vertex/index buffers static.
// Nonzero means buffers are initially marked static,
// but can shift to dynamic upon the first 'discard' (orphaning).

// #define REPORT_LOCK_TIME 0

ConVar gl_bufmode( "gl_bufmode", "1" );

char ALIGN16 CGLMBuffer::m_StaticBuffers[ GL_MAX_STATIC_BUFFERS ][ GL_STATIC_BUFFER_SIZE ] ALIGN16_POST;
bool CGLMBuffer::m_bStaticBufferUsed[ GL_MAX_STATIC_BUFFERS ];

extern bool g_bNullD3DDevice;

//===========================================================================//

static uint gMaxPersistentOffset[kGLMNumBufferTypes] =
{
	0,
	0,
	0,
	0
};

CON_COMMAND( gl_persistent_buffer_max_offset, "" )
{
	ConMsg( "OpenGL Persistent buffer max offset :\n" );
	ConMsg( " Vertex buffer : %d bytes (%f MB) \n", gMaxPersistentOffset[kGLMVertexBuffer], gMaxPersistentOffset[kGLMVertexBuffer] / (1024.0f*1024.0f) );
	ConMsg( " Index buffer : %d bytes (%f MB) \n", gMaxPersistentOffset[kGLMIndexBuffer], gMaxPersistentOffset[kGLMIndexBuffer] / (1024.0f*1024.0f) );
	ConMsg( " Uniform buffer : %d bytes (%f MB) \n", gMaxPersistentOffset[kGLMUniformBuffer], gMaxPersistentOffset[kGLMUniformBuffer] / (1024.0f*1024.0f) );
	ConMsg( " Pixel buffer : %d bytes (%f MB) \n", gMaxPersistentOffset[kGLMPixelBuffer], gMaxPersistentOffset[kGLMPixelBuffer] / (1024.0f*1024.0f) );
}

CPersistentBuffer::CPersistentBuffer()
	:
	m_nSize( 0 )
	, m_nHandle( 0 )
	, m_pImmutablePersistentBuf( NULL )
	, m_nOffset( 0 )
#ifdef HAVE_GL_ARB_SYNC
	, m_nSyncObj( 0 )
#endif
{}

CPersistentBuffer::~CPersistentBuffer()
{
	Deinit();
}

void CPersistentBuffer::Init( EGLMBufferType type, uint nSize )
{
	Assert( gGL->m_bHave_GL_ARB_buffer_storage );
	Assert( gGL->m_bHave_GL_ARB_map_buffer_range );

	m_nSize = nSize;
	m_nOffset = 0;
	m_type = type;

	switch ( type )
	{
		case kGLMVertexBuffer:	m_buffGLTarget = GL_ARRAY_BUFFER_ARB; break;
		case kGLMIndexBuffer:	m_buffGLTarget = GL_ELEMENT_ARRAY_BUFFER_ARB; break;
		default: Assert( nSize == 0 );
	}

	if ( m_nSize > 0 )
	{
		gGL->glGenBuffersARB( 1, &m_nHandle );
		gGL->glBindBufferARB( m_buffGLTarget, m_nHandle );

		// Create persistent immutable buffer that we will permanently map. This buffer can be written from any thread (not just
		// the renderthread)
		gGL->glBufferStorage( m_buffGLTarget, m_nSize, (const GLvoid *)NULL, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT ); // V_GL_REQ: GL_ARB_buffer_storage, GL_ARB_map_buffer_range, GL_VERSION_4_4

		// Map the buffer for all of eternity. Pointer can be used from multiple threads.
		m_pImmutablePersistentBuf = gGL->glMapBufferRange( m_buffGLTarget, 0, m_nSize, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT ); // V_GL_REQ: GL_ARB_map_buffer_range, GL_ARB_buffer_storage, GL_VERSION_4_4
		Assert( m_pImmutablePersistentBuf != NULL );
	}
}

void CPersistentBuffer::Deinit()
{
	if ( !m_pImmutablePersistentBuf )
	{
		return;
	}

	BlockUntilNotBusy();

	gGL->glBindBufferARB( m_buffGLTarget, m_nHandle );
	gGL->glUnmapBuffer( m_buffGLTarget );

	gGL->glBindBufferARB( m_buffGLTarget, 0 );
	gGL->glDeleteBuffersARB( 1, &m_nHandle );

	m_nSize = 0;
	m_nHandle = 0;
	m_nOffset = 0;
	m_pImmutablePersistentBuf = NULL;
}

void CPersistentBuffer::InsertFence()
{
#ifdef HAVE_GL_ARB_SYNC
	if ( m_nSyncObj )
	{
		gGL->glDeleteSync( m_nSyncObj );
	}

	m_nSyncObj = gGL->glFenceSync( GL_SYNC_GPU_COMMANDS_COMPLETE, 0 );
#endif
}

void CPersistentBuffer::BlockUntilNotBusy()
{
#ifdef HAVE_GL_ARB_SYNC
	if ( m_nSyncObj )
	{
		gGL->glClientWaitSync( m_nSyncObj, GL_SYNC_FLUSH_COMMANDS_BIT, 3000000000000ULL );

		gGL->glDeleteSync( m_nSyncObj );
		m_nSyncObj = 0;
	}
#endif
	m_nOffset = 0;
}

void CPersistentBuffer::Append( uint nSize )
{
	m_nOffset += nSize;
	Assert( m_nOffset <= m_nSize );

	gMaxPersistentOffset[m_type] = Max( m_nOffset, gMaxPersistentOffset[m_type] );
}
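
// Usage sketch (illustrative, not part of the original file): CGLMBuffer::Lock()/Unlock() below drive
// CPersistentBuffer as a simple linear allocator over a permanently mapped, coherent buffer, roughly:
//
//     pBuf->Init( kGLMVertexBuffer, nSize );                        // create + map once
//     ...
//     void *pDest = (char *)pBuf->GetPtr() + pBuf->GetOffset();     // write through the persistent mapping
//     memcpy( pDest, pVertexData, nBytes );
//     pBuf->Append( nBytes );                                       // advance the linear offset
//     ...
//     pBuf->InsertFence();                                          // after submitting draws that read the buffer
//     pBuf->BlockUntilNotBusy();                                    // before wrapping back to offset 0
//
// GetPtr()/GetOffset()/GetBytesRemaining() are the accessors used by Lock() further down; the fence calls are
// presumably issued by the owning GLMContext at frame boundaries (not in this file). pDest/pVertexData/nBytes
// are placeholder names for this sketch.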
//===========================================================================//

#if GL_ENABLE_INDEX_VERIFICATION

CGLMBufferSpanManager::CGLMBufferSpanManager() :
	m_pCtx( NULL ),
	m_nBufType( kGLMVertexBuffer ),
	m_nBufSize( 0 ),
	m_bDynamic( false ),
	m_nSpanEndMax( -1 ),
	m_nNumAllocatedBufs( 0 ),
	m_nTotalBytesAllocated( 0 )
{
}

CGLMBufferSpanManager::~CGLMBufferSpanManager()
{
	Deinit();
}

void CGLMBufferSpanManager::Init( GLMContext *pContext, EGLMBufferType nBufType, uint nInitialCapacity, uint nBufSize, bool bDynamic )
{
	Assert( ( nBufType == kGLMIndexBuffer ) || ( nBufType == kGLMVertexBuffer ) );

	m_pCtx = pContext;
	m_nBufType = nBufType;
	m_nBufSize = nBufSize;
	m_bDynamic = bDynamic;

	m_ActiveSpans.EnsureCapacity( nInitialCapacity );
	m_DeletedSpans.EnsureCapacity( nInitialCapacity );

	m_nSpanEndMax = -1;
	m_nNumAllocatedBufs = 0;
	m_nTotalBytesAllocated = 0;
}

bool CGLMBufferSpanManager::AllocDynamicBuf( uint nSize, GLDynamicBuf_t &buf )
{
	buf.m_nGLType = GetGLBufType();
	buf.m_nActualBufSize = nSize;
	buf.m_nHandle = 0;
	buf.m_nSize = nSize;

	m_nNumAllocatedBufs++;
	m_nTotalBytesAllocated += buf.m_nActualBufSize;

	return true;
}

void CGLMBufferSpanManager::ReleaseDynamicBuf( GLDynamicBuf_t &buf )
{
	Assert( m_nNumAllocatedBufs > 0 );
	m_nNumAllocatedBufs--;

	Assert( m_nTotalBytesAllocated >= (int)buf.m_nActualBufSize );
	m_nTotalBytesAllocated -= buf.m_nActualBufSize;
}

void CGLMBufferSpanManager::Deinit()
{
	if ( !m_pCtx )
		return;

	for ( int i = 0; i < m_ActiveSpans.Count(); i++ )
	{
		if ( m_ActiveSpans[i].m_bOriginalAlloc )
			ReleaseDynamicBuf( m_ActiveSpans[i].m_buf );
	}
	m_ActiveSpans.SetCountNonDestructively( 0 );

	for ( int i = 0; i < m_DeletedSpans.Count(); i++ )
		ReleaseDynamicBuf( m_DeletedSpans[i].m_buf );
	m_DeletedSpans.SetCountNonDestructively( 0 );

	m_pCtx->BindGLBufferToCtx( GetGLBufType(), NULL, true );

	m_nSpanEndMax = -1;
	m_pCtx = NULL;

	Assert( !m_nNumAllocatedBufs );
	Assert( !m_nTotalBytesAllocated );
}

void CGLMBufferSpanManager::DiscardAllSpans()
{
	for ( int i = 0; i < m_ActiveSpans.Count(); i++ )
	{
		if ( m_ActiveSpans[i].m_bOriginalAlloc )
			ReleaseDynamicBuf( m_ActiveSpans[i].m_buf );
	}
	m_ActiveSpans.SetCountNonDestructively( 0 );

	for ( int i = 0; i < m_DeletedSpans.Count(); i++ )
		ReleaseDynamicBuf( m_DeletedSpans[i].m_buf );
	m_DeletedSpans.SetCountNonDestructively( 0 );

	m_nSpanEndMax = -1;

	Assert( !m_nNumAllocatedBufs );
	Assert( !m_nTotalBytesAllocated );
}

// TODO: Add logic to detect incorrect usage of bNoOverwrite.
CGLMBufferSpanManager::ActiveSpan_t *CGLMBufferSpanManager::AddSpan( uint nOffset, uint nMaxSize, uint nActualSize, bool bDiscard, bool bNoOverwrite )
{
	(void)bDiscard;
	(void)bNoOverwrite;

	const uint nStart = nOffset;
	const uint nSize = nActualSize;
	const uint nEnd = nStart + nSize;

	GLDynamicBuf_t newDynamicBuf;
	if ( !AllocDynamicBuf( nSize, newDynamicBuf ) )
	{
		DXABSTRACT_BREAK_ON_ERROR();
		return NULL;
	}

	if ( (int)nStart < m_nSpanEndMax )
	{
		// Lock region potentially overlaps another previously locked region (since the last discard) - this is a very rarely (if ever) taken path in Source1 games.
		int i = 0;
		while ( i < m_ActiveSpans.Count() )
		{
			ActiveSpan_t &existingSpan = m_ActiveSpans[i];
			if ( ( nEnd <= existingSpan.m_nStart ) || ( nStart >= existingSpan.m_nEnd ) )
			{
				i++;
				continue;
			}

			Warning( "GL performance warning: AddSpan() at offset %u max size %u actual size %u, on a %s %s buffer of total size %u, overwrites an existing active lock span at offset %u size %u!\n",
				nOffset, nMaxSize, nActualSize,
				m_bDynamic ? "dynamic" : "static", ( m_nBufType == kGLMVertexBuffer ) ? "vertex" : "index", m_nBufSize,
				existingSpan.m_nStart, existingSpan.m_nEnd - existingSpan.m_nStart );

			if ( ( nStart <= existingSpan.m_nStart ) && ( nEnd >= existingSpan.m_nEnd ) )
			{
				if ( existingSpan.m_bOriginalAlloc )
				{
					// New span totally covers existing span
					// Can't immediately delete the span's buffer because it could be referred to by another (child) span.
					m_DeletedSpans.AddToTail( existingSpan );
				}

				// Delete span
				m_ActiveSpans[i] = m_ActiveSpans[ m_ActiveSpans.Count() - 1 ];
				m_ActiveSpans.SetCountNonDestructively( m_ActiveSpans.Count() - 1 );
				continue;
			}

			// New span does NOT fully cover the existing span (partial overlap)
			if ( nStart < existingSpan.m_nStart )
			{
				// New span starts before existing span, but ends somewhere inside, so shrink it (start moves "right")
				existingSpan.m_nStart = nEnd;
			}
			else if ( nEnd > existingSpan.m_nEnd )
			{
				// New span ends after existing span, but starts somewhere inside (end moves "left")
				existingSpan.m_nEnd = nStart;
			}
			else //if ( ( nStart >= existingSpan.m_nStart ) && ( nEnd <= existingSpan.m_nEnd ) )
			{
				// New span lies inside of existing span
				if ( nStart == existingSpan.m_nStart )
				{
					// New span begins inside the existing span (start moves "right")
					existingSpan.m_nStart = nEnd;
				}
				else
				{
					if ( nEnd < existingSpan.m_nEnd )
					{
						// New span is completely inside existing span
						m_ActiveSpans.AddToTail( ActiveSpan_t( nEnd, existingSpan.m_nEnd, existingSpan.m_buf, false ) );
					}

					existingSpan.m_nEnd = nStart;
				}
			}

			Assert( existingSpan.m_nStart < existingSpan.m_nEnd );

			i++;
		}
	}

	newDynamicBuf.m_nLockOffset = nStart;
	newDynamicBuf.m_nLockSize = nSize;

	m_ActiveSpans.AddToTail( ActiveSpan_t( nStart, nEnd, newDynamicBuf, true ) );

	m_nSpanEndMax = MAX( m_nSpanEndMax, (int)nEnd );

	return &m_ActiveSpans.Tail();
}

bool CGLMBufferSpanManager::IsValid( uint nOffset, uint nSize ) const
{
	const uint nEnd = nOffset + nSize;

	int nTotalBytesRemaining = nSize;

	for ( int i = m_ActiveSpans.Count() - 1; i >= 0; --i )
	{
		const ActiveSpan_t &span = m_ActiveSpans[i];

		if ( span.m_nEnd <= nOffset )
			continue;
		if ( span.m_nStart >= nEnd )
			continue;

		uint nIntersectStart = MAX( span.m_nStart, nOffset );
		uint nIntersectEnd = MIN( span.m_nEnd, nEnd );
		Assert( nIntersectStart <= nIntersectEnd );

		nTotalBytesRemaining -= ( nIntersectEnd - nIntersectStart );
		Assert( nTotalBytesRemaining >= 0 );
		if ( nTotalBytesRemaining <= 0 )
			break;
	}

	return nTotalBytesRemaining == 0;
}

#endif // GL_ENABLE_INDEX_VERIFICATION

// glBufferSubData() with a max size limit, to work around NVidia's threaded driver limits (anything larger than roughly 256KB triggers a sync with the server thread).
void glBufferSubDataMaxSize( GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid *data, uint nMaxSizePerCall )
{
#if TOGL_SUPPORT_NULL_DEVICE
	if ( g_bNullD3DDevice ) return;
#endif

	uint nBytesLeft = size;
	uint nOfs = 0;
	while ( nBytesLeft )
	{
		uint nBytesToCopy = MIN( nMaxSizePerCall, nBytesLeft );

		gGL->glBufferSubData( target, offset + nOfs, nBytesToCopy, static_cast<const unsigned char *>( data ) + nOfs );

		nBytesLeft -= nBytesToCopy;
		nOfs += nBytesToCopy;
	}
}
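
// Usage sketch (illustrative): splitting a 1 MB upload into 256 KB pieces so that each individual
// glBufferSubData() call stays below the size that, per the comment above, makes NVidia's threaded driver
// synchronize with its server thread. pSourceData is a placeholder, and the 256 KB threshold is approximate
// and driver dependent.
//
//     glBufferSubDataMaxSize( GL_ARRAY_BUFFER_ARB, 0, 1024 * 1024, pSourceData, 256 * 1024 );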

CGLMBuffer::CGLMBuffer( GLMContext *pCtx, EGLMBufferType type, uint size, uint options )
{
	m_pCtx = pCtx;
	m_type = type;

	m_bDynamic = ( options & GLMBufferOptionDynamic ) != 0;

	switch ( m_type )
	{
		case kGLMVertexBuffer:	m_buffGLTarget = GL_ARRAY_BUFFER_ARB; break;
		case kGLMIndexBuffer:	m_buffGLTarget = GL_ELEMENT_ARRAY_BUFFER_ARB; break;
		case kGLMUniformBuffer:	m_buffGLTarget = GL_UNIFORM_BUFFER_EXT; break;
		case kGLMPixelBuffer:	m_buffGLTarget = GL_PIXEL_UNPACK_BUFFER_ARB; break;
		default: Assert( !"Unknown buffer type" ); DXABSTRACT_BREAK_ON_ERROR();
	}

	m_nSize = size;
	m_nActualSize = size;
	m_bMapped = false;
	m_pLastMappedAddress = NULL;

	m_pStaticBuffer = NULL;
	m_nPinnedMemoryOfs = -1;
	m_nPersistentBufferStartOffset = 0;
	m_bUsingPersistentBuffer = false;

	m_bEnableAsyncMap = false;
	m_bEnableExplicitFlush = false;
	m_dirtyMinOffset = m_dirtyMaxOffset = 0;	// adjust/grow on lock, clear on unlock

	m_pCtx->CheckCurrent();
	m_nRevision = rand();

	m_pPseudoBuf = NULL;
	m_pActualPseudoBuf = NULL;

	m_bPseudo = false;

#if GL_ENABLE_UNLOCK_BUFFER_OVERWRITE_DETECTION
	m_bPseudo = true;
#endif

#if GL_ENABLE_INDEX_VERIFICATION
	m_BufferSpanManager.Init( m_pCtx, m_type, 512, m_nSize, m_bDynamic );

	if ( m_type == kGLMIndexBuffer )
		m_bPseudo = true;
#endif

	if ( g_bUsePseudoBufs && m_bDynamic )
	{
		m_bPseudo = true;
	}

	if ( m_bPseudo )
	{
		m_nHandle = 0;

#if GL_ENABLE_UNLOCK_BUFFER_OVERWRITE_DETECTION
		m_nDirtyRangeStart = 0xFFFFFFFF;
		m_nDirtyRangeEnd = 0;

		m_nActualSize = ALIGN_VALUE( ( m_nSize + sizeof( uint32 ) ), 4096 );
		m_pPseudoBuf = m_pActualPseudoBuf = (char *)VirtualAlloc( NULL, m_nActualSize, MEM_COMMIT, PAGE_READWRITE );
		if ( !m_pPseudoBuf )
		{
			Error( "VirtualAlloc() failed!\n" );
		}

		for ( uint i = 0; i < m_nActualSize / sizeof( uint32 ); i++ )
		{
			reinterpret_cast< uint32 * >( m_pPseudoBuf )[i] = 0xDEADBEEF;
		}

		DWORD nOldProtect;
		BOOL bResult = VirtualProtect( m_pActualPseudoBuf, m_nActualSize, PAGE_READONLY, &nOldProtect );
		if ( !bResult )
		{
			Error( "VirtualProtect() failed!\n" );
		}
#else
		m_nActualSize = size + 15;
		m_pActualPseudoBuf = (char*)malloc( m_nActualSize );
		m_pPseudoBuf = (char*)(((intp)m_pActualPseudoBuf + 15) & ~15);
#endif

		m_pCtx->BindBufferToCtx( m_type, NULL );	// exit with no buffer bound
	}
	else
	{
		gGL->glGenBuffersARB( 1, &m_nHandle );

		m_pCtx->BindBufferToCtx( m_type, this );	// causes glBindBufferARB

		// buffers start out static, but if they get orphaned and gl_bufmode is nonzero,
		// then they will get flipped to dynamic.

		GLenum hint = GL_STATIC_DRAW_ARB;
		switch ( m_type )
		{
			case kGLMVertexBuffer:	hint = m_bDynamic ? GL_DYNAMIC_DRAW_ARB : GL_STATIC_DRAW_ARB; break;
			case kGLMIndexBuffer:	hint = m_bDynamic ? GL_DYNAMIC_DRAW_ARB : GL_STATIC_DRAW_ARB; break;
			case kGLMUniformBuffer:	hint = GL_DYNAMIC_DRAW_ARB; break;
			case kGLMPixelBuffer:	hint = m_bDynamic ? GL_DYNAMIC_DRAW_ARB : GL_STATIC_DRAW_ARB; break;
			default: Assert( !"Unknown buffer type" ); DXABSTRACT_BREAK_ON_ERROR();
		}

		gGL->glBufferDataARB( m_buffGLTarget, m_nSize, (const GLvoid*)NULL, hint );	// may ultimately need more hints to set the usage correctly (esp for streaming)

		SetModes( false, true, true );

		m_pCtx->BindBufferToCtx( m_type, NULL );	// unbind me
	}
}

CGLMBuffer::~CGLMBuffer( )
{
	m_pCtx->CheckCurrent();

	if ( m_bPseudo )
	{
#if GL_ENABLE_UNLOCK_BUFFER_OVERWRITE_DETECTION
		BOOL bResult = VirtualFree( m_pActualPseudoBuf, 0, MEM_RELEASE );
		if ( !bResult )
		{
			Error( "VirtualFree() failed!\n" );
		}
#else
		free( m_pActualPseudoBuf );
#endif
		m_pActualPseudoBuf = NULL;
		m_pPseudoBuf = NULL;
	}
	else
	{
		gGL->glDeleteBuffersARB( 1, &m_nHandle );
	}

	m_pCtx = NULL;
	m_nHandle = 0;
	m_pLastMappedAddress = NULL;

#if GL_ENABLE_INDEX_VERIFICATION
	m_BufferSpanManager.Deinit();
#endif
}

void CGLMBuffer::SetModes( bool bAsyncMap, bool bExplicitFlush, bool bForce )
{
	// assumes buffer is bound. called by constructor and by Lock.

	if ( m_bPseudo )
	{
		// ignore it...
	}
	else
	{
		if ( bForce || ( m_bEnableAsyncMap != bAsyncMap ) )
		{
			// note the sense of the parameter, it's TRUE if you *want* serialization, so for async you turn it to false.
			if ( ( gGL->m_bHave_GL_APPLE_flush_buffer_range ) && ( !gGL->m_bHave_GL_ARB_map_buffer_range ) )
			{
				gGL->glBufferParameteriAPPLE( m_buffGLTarget, GL_BUFFER_SERIALIZED_MODIFY_APPLE, bAsyncMap == false );
			}
			m_bEnableAsyncMap = bAsyncMap;
		}

		if ( bForce || ( m_bEnableExplicitFlush != bExplicitFlush ) )
		{
			// Note that the GL_ARB_map_buffer_range path handles this in the glMapBufferRange() call in Lock().
			// note the sense of the parameter, it's TRUE if you *want* auto-flush-on-unmap, so for explicit-flush, you turn it to false.
			if ( ( gGL->m_bHave_GL_APPLE_flush_buffer_range ) && ( !gGL->m_bHave_GL_ARB_map_buffer_range ) )
			{
				gGL->glBufferParameteriAPPLE( m_buffGLTarget, GL_BUFFER_FLUSHING_UNMAP_APPLE, bExplicitFlush == false );
			}
			m_bEnableExplicitFlush = bExplicitFlush;
		}
	}
}

#if GL_ENABLE_INDEX_VERIFICATION
bool CGLMBuffer::IsSpanValid( uint nOffset, uint nSize ) const
{
	return m_BufferSpanManager.IsValid( nOffset, nSize );
}
#endif

void CGLMBuffer::FlushRange( uint offset, uint size )
{
	if ( m_pStaticBuffer )
	{
	}
	else if ( m_bPseudo )
	{
		// nothing to do
	}
	else
	{
#ifdef REPORT_LOCK_TIME
		double flStart = Plat_FloatTime();
#endif
		// assumes buffer is bound.
		if ( gGL->m_bHave_GL_ARB_map_buffer_range )
		{
			gGL->glFlushMappedBufferRange( m_buffGLTarget, (GLintptr)( offset - m_dirtyMinOffset ), (GLsizeiptr)size );
		}
		else if ( gGL->m_bHave_GL_APPLE_flush_buffer_range )
		{
			gGL->glFlushMappedBufferRangeAPPLE( m_buffGLTarget, (GLintptr)offset, (GLsizeiptr)size );
		}
#ifdef REPORT_LOCK_TIME
		double flEnd = Plat_FloatTime();
		if ( flEnd - flStart > 5.0 / 1000.0 )
		{
			int nDelta = ( int )( ( flEnd - flStart ) * 1000 );
			if ( nDelta > 2 )
			{
				Msg( "**** " );
			}
			Msg( "glFlushMappedBufferRange Time %d: ( Name=%d BufSize=%d ) Target=%p Offset=%d FlushSize=%d\n", nDelta, m_nHandle, m_nSize, m_buffGLTarget, offset - m_dirtyMinOffset, size );
		}
#endif
		// If you don't have any extension support here, you'll flush the whole buffer on unmap. Performance loss, but it's still safe and correct.
	}
}

void CGLMBuffer::Lock( GLMBuffLockParams *pParams, char **pAddressOut )
{
#if GL_TELEMETRY_GPU_ZONES
	CScopedGLMPIXEvent glmPIXEvent( "CGLMBuffer::Lock" );
	g_TelemetryGPUStats.m_nTotalBufferLocksAndUnlocks++;
#endif

	char *resultPtr = NULL;

	if ( m_bMapped )
	{
		DXABSTRACT_BREAK_ON_ERROR();
		return;
	}

	m_pCtx->CheckCurrent();

	Assert( pParams->m_nSize );

	m_LockParams = *pParams;

	if ( pParams->m_nOffset >= m_nSize )
	{
		DXABSTRACT_BREAK_ON_ERROR();
		return;
	}

	if ( ( pParams->m_nOffset + pParams->m_nSize ) > m_nSize )
	{
		DXABSTRACT_BREAK_ON_ERROR();
		return;
	}

#if GL_ENABLE_INDEX_VERIFICATION
	if ( pParams->m_bDiscard )
	{
		m_BufferSpanManager.DiscardAllSpans();
	}
#endif

	m_pStaticBuffer = NULL;
	bool bUsingPersistentBuffer = false;

	uint padding = 0;
	if ( m_bDynamic && gGL->m_bHave_GL_ARB_buffer_storage )
	{
		// Compute padding to add to make sure the start offset is valid
		CPersistentBuffer *pTempBuffer = m_pCtx->GetCurPersistentBuffer( m_type );
		uint persistentBufferOffset = pTempBuffer->GetOffset();

		if ( pParams->m_nOffset > persistentBufferOffset )
		{
			// Make sure the start offset is valid (adding padding to the persistent buffer)
			padding = pParams->m_nOffset - persistentBufferOffset;
		}
	}
	if ( m_bPseudo )
	{
		if ( pParams->m_bDiscard )
		{
			m_nRevision++;
		}

		// async map modes are a no-op

		// calc lock address
		resultPtr = m_pPseudoBuf + pParams->m_nOffset;

#if GL_ENABLE_UNLOCK_BUFFER_OVERWRITE_DETECTION
		BOOL bResult;
		DWORD nOldProtect;

		if ( pParams->m_bDiscard )
		{
			bResult = VirtualProtect( m_pActualPseudoBuf, m_nSize, PAGE_READWRITE, &nOldProtect );
			if ( !bResult )
			{
				Error( "VirtualProtect() failed!\n" );
			}

			m_nDirtyRangeStart = 0xFFFFFFFF;
			m_nDirtyRangeEnd = 0;

			for ( uint i = 0; i < m_nSize / sizeof( uint32 ); i++ )
			{
				reinterpret_cast< uint32 * >( m_pPseudoBuf )[i] = 0xDEADBEEF;
			}

			bResult = VirtualProtect( m_pActualPseudoBuf, m_nSize, PAGE_READONLY, &nOldProtect );
			if ( !bResult )
			{
				Error( "VirtualProtect() failed!\n" );
			}
		}

		uint nProtectOfs = m_LockParams.m_nOffset & 4095;
		uint nProtectEnd = ( m_LockParams.m_nOffset + m_LockParams.m_nSize + 4095 ) & ~4095;
		uint nProtectSize = nProtectEnd - nProtectOfs;

		bResult = VirtualProtect( m_pActualPseudoBuf + nProtectOfs, nProtectSize, PAGE_READWRITE, &nOldProtect );
		if ( !bResult )
		{
			Error( "VirtualProtect() failed!\n" );
		}
#endif
	}
	else if ( m_bDynamic && gGL->m_bHave_GL_ARB_buffer_storage && ( m_pCtx->GetCurPersistentBuffer( m_type )->GetBytesRemaining() >= ( pParams->m_nSize + padding ) ) )
	{
		CPersistentBuffer *pTempBuffer = m_pCtx->GetCurPersistentBuffer( m_type );

		// Make sure the start offset is valid (adding padding to the persistent buffer)
		pTempBuffer->Append( padding );

		uint persistentBufferOffset = pTempBuffer->GetOffset();

		uint startOffset = persistentBufferOffset - pParams->m_nOffset;
		if ( pParams->m_bDiscard || ( startOffset != m_nPersistentBufferStartOffset ) )
		{
			m_nRevision++;

			// Offset to be added to the vertex and index buffer when setting the vertex and index buffer (before drawing)
			// Since we are using an immutable buffer storage, the persistent buffer is actually bigger than the
			// buffer size requested upon creation. We keep appending to the end of the persistent buffer
			// and therefore need to keep track of the start of the actual buffer (in the persistent one)
			m_nPersistentBufferStartOffset = startOffset;

			//DevMsg( "Discard (%s): startOffset = %d\n", pParams->m_bDiscard ? "true" : "false", m_nPersistentBufferStartOffset );
		}

		resultPtr = static_cast<char*>( pTempBuffer->GetPtr() ) + persistentBufferOffset;
		bUsingPersistentBuffer = true;

		//DevMsg( " --> buff=%x, startOffset=%d, paramsOffset=%d, persistOffset = %d\n", this, m_nPersistentBufferStartOffset, pParams->m_nOffset, persistentBufferOffset );
	}
#ifndef OSX
	else if ( m_bDynamic && gGL->m_bHave_GL_AMD_pinned_memory && ( m_pCtx->GetCurPinnedMemoryBuffer()->GetBytesRemaining() >= pParams->m_nSize ) )
	{
		if ( pParams->m_bDiscard )
		{
			m_nRevision++;
		}

		m_dirtyMinOffset = pParams->m_nOffset;
		m_dirtyMaxOffset = pParams->m_nOffset + pParams->m_nSize;

		CPinnedMemoryBuffer *pTempBuffer = m_pCtx->GetCurPinnedMemoryBuffer();

		m_nPinnedMemoryOfs = pTempBuffer->GetOfs();

		resultPtr = static_cast<char*>( pTempBuffer->GetPtr() ) + m_nPinnedMemoryOfs;

		pTempBuffer->Append( pParams->m_nSize );
	}
#endif // OSX
	else if ( !g_bDisableStaticBuffer && ( pParams->m_bDiscard || pParams->m_bNoOverwrite ) && ( pParams->m_nSize <= GL_STATIC_BUFFER_SIZE ) )
	{
#if TOGL_SUPPORT_NULL_DEVICE
		if ( !g_bNullD3DDevice )
#endif
		{
			if ( pParams->m_bDiscard )
			{
				m_pCtx->BindBufferToCtx( m_type, this );

				// observe gl_bufmode on any orphan event.
				// if orphaned and bufmode is nonzero, flip it to dynamic.
				GLenum hint = gl_bufmode.GetInt() ? GL_DYNAMIC_DRAW_ARB : GL_STATIC_DRAW_ARB;
				gGL->glBufferDataARB( m_buffGLTarget, m_nSize, (const GLvoid*)NULL, hint );

				m_nRevision++;	// revision grows on orphan event
			}
		}

		m_dirtyMinOffset = pParams->m_nOffset;
		m_dirtyMaxOffset = pParams->m_nOffset + pParams->m_nSize;

		switch ( m_type )
		{
			case kGLMVertexBuffer:
			{
				m_pStaticBuffer = m_StaticBuffers[ 0 ];
				break;
			}
			case kGLMIndexBuffer:
			{
				m_pStaticBuffer = m_StaticBuffers[ 1 ];
				break;
			}
			default:
			{
				DXABSTRACT_BREAK_ON_ERROR();
				return;
			}
		}

		resultPtr = m_pStaticBuffer;
	}
	else
	{
		// bind (yes, even for pseudo - this binds name 0)
		m_pCtx->BindBufferToCtx( m_type, this );

		// perform discard if requested
		if ( pParams->m_bDiscard )
		{
			// observe gl_bufmode on any orphan event.
			// if orphaned and bufmode is nonzero, flip it to dynamic.
			// We always want to call glBufferData( ..., NULL ) on discards, even though we're using the GL_MAP_INVALIDATE_BUFFER_BIT flag, because this flag is actually only a hint according to AMD.
			GLenum hint = gl_bufmode.GetInt() ? GL_DYNAMIC_DRAW_ARB : GL_STATIC_DRAW_ARB;
			gGL->glBufferDataARB( m_buffGLTarget, m_nSize, (const GLvoid*)NULL, hint );

			m_nRevision++;	// revision grows on orphan event
		}

		// adjust async map option appropriately, leave explicit flush unchanged
		SetModes( pParams->m_bNoOverwrite, m_bEnableExplicitFlush );

		// map
		char *mapPtr;
		if ( gGL->m_bHave_GL_ARB_map_buffer_range )
		{
			// m_bEnableAsyncMap is actually pParams->m_bNoOverwrite
			GLbitfield parms = GL_MAP_WRITE_BIT | ( m_bEnableAsyncMap ? GL_MAP_UNSYNCHRONIZED_BIT : 0 ) | ( pParams->m_bDiscard ? GL_MAP_INVALIDATE_BUFFER_BIT : 0 ) | ( m_bEnableExplicitFlush ? GL_MAP_FLUSH_EXPLICIT_BIT : 0 );

#ifdef REPORT_LOCK_TIME
			double flStart = Plat_FloatTime();
#endif
			mapPtr = (char*)gGL->glMapBufferRange( m_buffGLTarget, pParams->m_nOffset, pParams->m_nSize, parms );

#ifdef REPORT_LOCK_TIME
			double flEnd = Plat_FloatTime();
			if ( flEnd - flStart > 5.0 / 1000.0 )
			{
				int nDelta = ( int )( ( flEnd - flStart ) * 1000 );
				if ( nDelta > 2 )
				{
					Msg( "**** " );
				}
				Msg( "glMapBufferRange Time=%d: ( Name=%d BufSize=%d ) Target=%p Offset=%d LockSize=%d ", nDelta, m_nHandle, m_nSize, m_buffGLTarget, pParams->m_nOffset, pParams->m_nSize );
				if ( parms & GL_MAP_WRITE_BIT )
				{
					Msg( "GL_MAP_WRITE_BIT " );
				}
				if ( parms & GL_MAP_UNSYNCHRONIZED_BIT )
				{
					Msg( "GL_MAP_UNSYNCHRONIZED_BIT " );
				}
				if ( parms & GL_MAP_INVALIDATE_BUFFER_BIT )
				{
					Msg( "GL_MAP_INVALIDATE_BUFFER_BIT " );
				}
				if ( parms & GL_MAP_INVALIDATE_RANGE_BIT )
				{
					Msg( "GL_MAP_INVALIDATE_RANGE_BIT " );
				}
				if ( parms & GL_MAP_FLUSH_EXPLICIT_BIT )
				{
					Msg( "GL_MAP_FLUSH_EXPLICIT_BIT " );
				}
				Msg( "\n" );
			}
#endif
		}
		else
		{
			mapPtr = (char*)gGL->glMapBufferARB( m_buffGLTarget, GL_WRITE_ONLY_ARB );
		}

		Assert( mapPtr );

		// calculate offset location
		resultPtr = mapPtr;
		if ( !gGL->m_bHave_GL_ARB_map_buffer_range )
		{
			resultPtr += pParams->m_nOffset;
		}

		// set range
		m_dirtyMinOffset = pParams->m_nOffset;
		m_dirtyMaxOffset = pParams->m_nOffset + pParams->m_nSize;
	}

	if ( m_bUsingPersistentBuffer != bUsingPersistentBuffer )
	{
		// Up the revision number when switching from a persistent to a non persistent buffer (or vice versa)
		// Ensure the right GL buffer is bound before drawing (and vertex attribs properly set)
		m_nRevision++;
		m_bUsingPersistentBuffer = bUsingPersistentBuffer;
	}

	m_bMapped = true;

	m_pLastMappedAddress = (float*)resultPtr;
	*pAddressOut = resultPtr;
}
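
// For reference (summary added for readability, based on the code above): Lock() picks one of five paths, in this order:
//   1. pseudo buffer      - plain system memory (overwrite-detection/index-verification builds, or g_bUsePseudoBufs dynamic buffers); no GL buffer is mapped.
//   2. persistent buffer  - GL_ARB_buffer_storage: returns a pointer into the permanently mapped linear buffer, padding/advancing its offset.
//   3. pinned memory      - GL_AMD_pinned_memory (non-OSX): returns a pointer into the pinned staging buffer; Unlock() copies it with glCopyBufferSubData().
//   4. static scratch     - small discard/no-overwrite locks write into m_StaticBuffers[] and are uploaded via glBufferSubDataMaxSize() in Unlock().
//   5. map                - glMapBufferRange()/glMapBufferARB() fallback, with unsynchronized/invalidate/flush-explicit bits when supported.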

void CGLMBuffer::Unlock( int nActualSize, const void *pActualData )
{
#if GL_TELEMETRY_GPU_ZONES
	CScopedGLMPIXEvent glmPIXEvent( "CGLMBuffer::Unlock" );
	g_TelemetryGPUStats.m_nTotalBufferLocksAndUnlocks++;
#endif

	m_pCtx->CheckCurrent();

	if ( !m_bMapped )
	{
		DXABSTRACT_BREAK_ON_ERROR();
		return;
	}

	if ( nActualSize < 0 )
	{
		nActualSize = m_LockParams.m_nSize;
	}

	if ( nActualSize > (int)m_LockParams.m_nSize )
	{
		DXABSTRACT_BREAK_ON_ERROR();
		return;
	}

#if GL_ENABLE_UNLOCK_BUFFER_OVERWRITE_DETECTION
	if ( m_bPseudo )
	{
		// Check the guard DWORD to detect buffer overruns that are still within the last 4KB page (so they don't get caught via page faults)
		if ( *reinterpret_cast< const uint32 * >( m_pPseudoBuf + m_nSize ) != 0xDEADBEEF )
		{
			// If this fires the client app has overwritten the guard DWORD beyond the end of the buffer.
			DXABSTRACT_BREAK_ON_ERROR();
		}

		static const uint s_nInitialValues[4] = { 0xEF, 0xBE, 0xAD, 0xDE };

		int nActualModifiedStart, nActualModifiedEnd;

		for ( nActualModifiedStart = 0; nActualModifiedStart < (int)m_LockParams.m_nSize; ++nActualModifiedStart )
			if ( reinterpret_cast< const uint8 * >( m_pLastMappedAddress )[nActualModifiedStart] != s_nInitialValues[ ( m_LockParams.m_nOffset + nActualModifiedStart ) & 3 ] )
				break;

		for ( nActualModifiedEnd = m_LockParams.m_nSize - 1; nActualModifiedEnd > nActualModifiedStart; --nActualModifiedEnd )
			if ( reinterpret_cast< const uint8 * >( m_pLastMappedAddress )[nActualModifiedEnd] != s_nInitialValues[ ( m_LockParams.m_nOffset + nActualModifiedEnd ) & 3 ] )
				break;

		int nNumActualBytesModified = 0;

		if ( nActualModifiedEnd >= nActualModifiedStart )
		{
			// The modified check is conservative (i.e. it should always err on the side of detecting <= the bytes that were actually modified, never more).
			// We primarily care about the case where the user lies about the actual # of modified bytes, which can lead to difficult to debug/inconsistent problems with some drivers.
			// Round up/down the modified range, because the user's data may alias with the initial buffer values (0xDEADBEEF) so we may miss some bytes that were written.
			if ( m_type == kGLMIndexBuffer )
			{
				nActualModifiedStart &= ~1;
				nActualModifiedEnd = MIN( (int)m_LockParams.m_nSize, ( ( nActualModifiedEnd + 1 ) + 1 ) & ~1 ) - 1;
			}
			else
			{
				nActualModifiedStart &= ~3;
				nActualModifiedEnd = MIN( (int)m_LockParams.m_nSize, ( ( nActualModifiedEnd + 1 ) + 3 ) & ~3 ) - 1;
			}

			nNumActualBytesModified = nActualModifiedEnd + 1;

			if ( nActualSize < nNumActualBytesModified )
			{
				// The caller may be lying about the # of actually modified bytes in this lock.
				// Has this lock region been previously locked? If so, it may have been overwritten before. Otherwise, the region had to be the 0xDEADBEEF fill DWORD at lock time.
				if ( ( m_nDirtyRangeStart > m_nDirtyRangeEnd ) ||
					( m_LockParams.m_nOffset > m_nDirtyRangeEnd ) || ( ( m_LockParams.m_nOffset + m_LockParams.m_nSize ) <= m_nDirtyRangeStart ) )
				{
					// If this fires the client has lied about the actual # of bytes they've modified in the buffer - this will cause unreliable rendering on AMD drivers (because AMD actually pays attention to the actual # of flushed bytes).
					DXABSTRACT_BREAK_ON_ERROR();
				}
			}

			m_nDirtyRangeStart = MIN( m_nDirtyRangeStart, m_LockParams.m_nOffset + nActualModifiedStart );
			m_nDirtyRangeEnd = MAX( m_nDirtyRangeEnd, m_LockParams.m_nOffset + nActualModifiedEnd );
		}

#if GL_ENABLE_INDEX_VERIFICATION
		if ( nActualModifiedEnd >= nActualModifiedStart )
		{
			int n = nActualModifiedEnd + 1;
			if ( n != nActualSize )
			{
				// The actual detected modified size is < the reported size, which is common because the last few DWORD's of the vertex format may not actually be used/written (or read by the vertex shader). So just fudge it so the batch consumption checks work.
				if ( ( (int)nActualSize - n ) <= 32 )
				{
					n = nActualSize;
				}
			}

			m_BufferSpanManager.AddSpan( m_LockParams.m_nOffset + nActualModifiedStart, m_LockParams.m_nSize, n - nActualModifiedStart, m_LockParams.m_bDiscard, m_LockParams.m_bNoOverwrite );
		}
#endif
	}
#elif GL_ENABLE_INDEX_VERIFICATION
	if ( nActualSize > 0 )
	{
		m_BufferSpanManager.AddSpan( m_LockParams.m_nOffset, m_LockParams.m_nSize, nActualSize, m_LockParams.m_bDiscard, m_LockParams.m_bNoOverwrite );
	}
#endif

#if GL_BATCH_PERF_ANALYSIS
	if ( m_type == kGLMIndexBuffer )
		g_nTotalIBLockBytes += nActualSize;
	else if ( m_type == kGLMVertexBuffer )
		g_nTotalVBLockBytes += nActualSize;
#endif

#ifndef OSX
	if ( m_nPinnedMemoryOfs >= 0 )
	{
#if TOGL_SUPPORT_NULL_DEVICE
		if ( !g_bNullD3DDevice )
		{
#endif
			if ( nActualSize )
			{
				m_pCtx->BindBufferToCtx( m_type, this );

				gGL->glCopyBufferSubData(
					GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD,
					m_buffGLTarget,
					m_nPinnedMemoryOfs,
					m_dirtyMinOffset,
					nActualSize );
			}
#if TOGL_SUPPORT_NULL_DEVICE
		}
#endif

		m_nPinnedMemoryOfs = -1;
	}
	else
#endif // OSX
	if ( m_bUsingPersistentBuffer )
	{
		if ( nActualSize )
		{
			CPersistentBuffer *pTempBuffer = m_pCtx->GetCurPersistentBuffer( m_type );
			pTempBuffer->Append( nActualSize );

			//DevMsg( " <-- actualSize=%d, persistOffset = %d\n", nActualSize, pTempBuffer->GetOffset() );
		}
	}
	else if ( m_pStaticBuffer )
	{
#if TOGL_SUPPORT_NULL_DEVICE
		if ( !g_bNullD3DDevice )
#endif
		{
			if ( nActualSize )
			{
				tmZone( TELEMETRY_LEVEL2, TMZF_NONE, "UnlockSubData" );

#ifdef REPORT_LOCK_TIME
				double flStart = Plat_FloatTime();
#endif
				m_pCtx->BindBufferToCtx( m_type, this );

				Assert( nActualSize <= (int)( m_dirtyMaxOffset - m_dirtyMinOffset ) );

				glBufferSubDataMaxSize( m_buffGLTarget, m_dirtyMinOffset, nActualSize, pActualData ? pActualData : m_pStaticBuffer );

#ifdef REPORT_LOCK_TIME
				double flEnd = Plat_FloatTime();
				if ( flEnd - flStart > 5.0 / 1000.0 )
				{
					int nDelta = ( int )( ( flEnd - flStart ) * 1000 );
					if ( nDelta > 2 )
					{
						Msg( "**** " );
					}
					// Msg( "glBufferSubData Time=%d: ( Name=%d BufSize=%d ) Target=%p Offset=%d Size=%d\n", nDelta, m_nHandle, m_nSize, m_buffGLTarget, m_dirtyMinOffset, m_dirtyMaxOffset - m_dirtyMinOffset );
				}
#endif
			}
		}

		m_pStaticBuffer = NULL;
	}
	else if ( m_bPseudo )
	{
		if ( pActualData )
		{
			memcpy( m_pLastMappedAddress, pActualData, nActualSize );
		}

#if GL_ENABLE_UNLOCK_BUFFER_OVERWRITE_DETECTION
		uint nProtectOfs = m_LockParams.m_nOffset & 4095;
		uint nProtectEnd = ( m_LockParams.m_nOffset + m_LockParams.m_nSize + 4095 ) & ~4095;
		uint nProtectSize = nProtectEnd - nProtectOfs;

		DWORD nOldProtect;
		BOOL bResult = VirtualProtect( m_pActualPseudoBuf + nProtectOfs, nProtectSize, PAGE_READONLY, &nOldProtect );
		if ( !bResult )
		{
			Error( "VirtualProtect() failed!\n" );
		}
#endif
	}
	else
	{
		tmZone( TELEMETRY_LEVEL2, TMZF_NONE, "UnlockUnmap" );

		if ( pActualData )
		{
			memcpy( m_pLastMappedAddress, pActualData, nActualSize );
		}

		m_pCtx->BindBufferToCtx( m_type, this );

		Assert( nActualSize <= (int)( m_dirtyMaxOffset - m_dirtyMinOffset ) );

		// time to do explicit flush (currently m_bEnableExplicitFlush is always true)
		if ( m_bEnableExplicitFlush )
		{
			FlushRange( m_dirtyMinOffset, nActualSize );
		}

		// clear dirty range no matter what
		m_dirtyMinOffset = m_dirtyMaxOffset = 0;	// adjust/grow on lock, clear on unlock

#ifdef REPORT_LOCK_TIME
		double flStart = Plat_FloatTime();
#endif
		gGL->glUnmapBuffer( m_buffGLTarget );

#ifdef REPORT_LOCK_TIME
		double flEnd = Plat_FloatTime();
		if ( flEnd - flStart > 5.0 / 1000.0 )
		{
			int nDelta = ( int )( ( flEnd - flStart ) * 1000 );
			if ( nDelta > 2 )
			{
				Msg( "**** " );
			}
			Msg( "glUnmapBuffer Time=%d: ( Name=%d BufSize=%d ) Target=%p\n", nDelta, m_nHandle, m_nSize, m_buffGLTarget );
		}
#endif
	}

	m_bMapped = false;
}
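
// Note on Unlock( nActualSize, pActualData ) above: nActualSize < 0 means "the full locked size"; on the
// static-buffer, pseudo, and plain-map paths a non-NULL pActualData is used as the source for the locked
// region instead of what was written through the lock pointer. Only the first nActualSize bytes get
// flushed/uploaded/copied, which is why the overwrite-detection code above checks that callers do not
// under-report how many bytes they actually modified.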

GLuint CGLMBuffer::GetHandle() const
{
	return ( m_bUsingPersistentBuffer ? m_pCtx->GetCurPersistentBuffer( m_type )->GetHandle() : m_nHandle );
}