Counter-Strike: Global Offensive Source Code

2931 lines
80 KiB

  1. //========= Copyright (c) 1996-2005, Valve Corporation, All rights reserved. ============//
  2. //
  3. // Purpose: Memory allocation!
  4. //
  5. // $NoKeywords: $
  6. //=============================================================================//
  7. #include "tier0/platform.h"
  8. #if !defined(STEAM) && !defined(NO_MALLOC_OVERRIDE)
  9. //#include <malloc.h>
  10. #include <algorithm>
  11. #include "tier0/dbg.h"
  12. #include "tier0/memalloc.h"
  13. #include "tier0/threadtools.h"
  14. #include "mem_helpers.h"
  15. #include "memstd.h"
  16. #include "tier0/stacktools.h"
  17. #include "tier0/minidump.h"
  18. #ifdef _X360
  19. #include "xbox/xbox_console.h"
  20. #endif
  21. #ifdef _PS3
  22. #include "memoverride_ps3.h"
  23. #endif
  24. #ifndef _WIN32
  25. #define IsDebuggerPresent() false
  26. #endif
  27. #ifdef USE_LIGHT_MEM_DEBUG
  28. #undef USE_MEM_DEBUG
  29. #pragma message("*** USE_LIGHT_MEM_DEBUG is ON ***")
  30. #endif
  31. #define DEF_REGION 0
  32. #if defined( _WIN32 ) || defined( _PS3 )
  33. #define USE_DLMALLOC
  34. #ifdef PLATFORM_WINDOWS_PC64
  35. #define MEMALLOC_REGIONS
  36. #else
  37. #define MEMALLOC_SEGMENT_MIXED
  38. #define MBH_SIZE_MB ( 32 + MBYTES_STEAM_MBH_USAGE )
  39. //#define MEMALLOC_REGIONS
  40. #endif
  41. #endif // _WIN32 || _PS3
  42. // Record a list of memory callbacks for printing information
  43. // about non-heap memory.
  44. // Allow a fixed maximum number of memory callbacks. We can't use
  45. // CUtlVector or other classes so a fixed maximum is necessary.
  46. IMemoryInfo* s_MemoryInfoCallbacks[100];
  47. static size_t s_nMemoryInfoCallbacks;
  48. // Don't modify the MemoryInfoCallbacks without acquiring this mutex
  49. static CThreadMutex s_callbackMutex;
  50. void AddMemoryInfoCallback( IMemoryInfo* pMemoryInfo )
  51. {
  52. CAutoLock locker( s_callbackMutex );
  53. // The duplicate check is O(n) per add (O(n^2) across all registrations), but that's okay because n is just 10-20
  54. for ( size_t i = 0; i < s_nMemoryInfoCallbacks; ++i )
  55. {
  56. if ( s_MemoryInfoCallbacks[ i ] == pMemoryInfo )
  57. {
  58. Assert( !"This pointer has already been added!" );
  59. }
  60. }
  61. if ( s_nMemoryInfoCallbacks < ARRAYSIZE( s_MemoryInfoCallbacks ) )
  62. {
  63. s_MemoryInfoCallbacks[ s_nMemoryInfoCallbacks ] = pMemoryInfo;
  64. ++s_nMemoryInfoCallbacks;
  65. }
  66. }
  67. void RemoveMemoryInfoCallback( IMemoryInfo* pMemoryInfo )
  68. {
  69. CAutoLock locker( s_callbackMutex );
  70. for ( size_t i = 0; i < s_nMemoryInfoCallbacks; ++i )
  71. {
  72. if ( s_MemoryInfoCallbacks[ i ] == pMemoryInfo )
  73. {
  74. // Copy the last pointer into this slot and then decrement
  75. // the count of how many callbacks we have.
  76. s_MemoryInfoCallbacks[ i ] = s_MemoryInfoCallbacks[ s_nMemoryInfoCallbacks - 1 ];
  77. --s_nMemoryInfoCallbacks;
  78. return;
  79. }
  80. }
  81. Assert( !"Tried removing a callback that wasn't there!" );
  82. }
  83. // Dump a summary of all of the non-heap memory blocks that have been
  84. // registered with AddMemoryInfoCallback.
  85. void DumpMemoryInfoStats()
  86. {
  87. CAutoLock locker( s_callbackMutex );
  88. size_t nTotalAllocatedBytes = 0;
  89. size_t nTotalPeakBytes = 0;
  90. size_t nTotalCommittedBytes = 0;
  91. size_t nTotalReservedBytes = 0;
  92. const double MB = 1024.0 * 1024.0;
  93. for ( size_t i = 0; i < s_nMemoryInfoCallbacks; ++i )
  94. {
  95. IMemoryInfo* pMemoryInfo = s_MemoryInfoCallbacks[ i ];
  96. nTotalAllocatedBytes += pMemoryInfo->GetAllocatedBytes();
  97. nTotalPeakBytes += pMemoryInfo->GetHighestBytes();
  98. nTotalCommittedBytes += pMemoryInfo->GetCommittedBytes();
  99. nTotalReservedBytes += pMemoryInfo->GetReservedBytes();
  100. const char* name = pMemoryInfo->GetMemoryName();
  101. if ( !name )
  102. {
  103. name = "Unknown memory";
  104. }
  105. if ( pMemoryInfo->GetReservedBytes() != 0 )
  106. {
  107. Msg( "%-40s: %4.1f MB allocated (%4.1f MB peak), %4.1f MB committed, %4.1f MB reserved\n",
  108. name,
  109. pMemoryInfo->GetAllocatedBytes() / MB,
  110. pMemoryInfo->GetHighestBytes() / MB,
  111. pMemoryInfo->GetCommittedBytes() / MB,
  112. pMemoryInfo->GetReservedBytes() / MB );
  113. }
  114. }
  115. Msg( "%-40s: %4.1f MB allocated (%4.1f MB peak), %4.1f MB committed, %4.1f MB reserved\n",
  116. "Extra memory totals",
  117. nTotalAllocatedBytes / MB, nTotalPeakBytes / MB,
  118. nTotalCommittedBytes / MB, nTotalReservedBytes / MB );
  119. }
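// Illustrative sketch (hypothetical class; the method names follow the calls
// in DumpMemoryInfoStats() above, but the exact IMemoryInfo signatures and
// const-ness are assumed): a subsystem that manages its own non-heap arena
// would register itself for its lifetime like this:
//
//   class CArenaMemoryInfo : public IMemoryInfo
//   {
//   public:
//       CArenaMemoryInfo()  { AddMemoryInfoCallback( this ); }
//       ~CArenaMemoryInfo() { RemoveMemoryInfoCallback( this ); }
//       virtual const char *GetMemoryName()  { return "Texture arena"; } // label shown in the dump
//       virtual size_t GetAllocatedBytes()   { return m_nUsed; }
//       virtual size_t GetHighestBytes()     { return m_nPeak; }
//       virtual size_t GetCommittedBytes()   { return m_nCommitted; }
//       virtual size_t GetReservedBytes()    { return m_nReserved; }
//   private:
//       size_t m_nUsed, m_nPeak, m_nCommitted, m_nReserved;
//   };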
  120. #ifndef USE_DLMALLOC
  121. #ifdef _PS3
  122. #define malloc_internal( region, bytes ) (g_pMemOverrideRawCrtFns->pfn_malloc)(bytes)
  123. #define malloc_aligned_internal( region, bytes, align ) (g_pMemOverrideRawCrtFns->pfn_memalign)(align, bytes)
  124. #define realloc_internal (g_pMemOverrideRawCrtFns->pfn_realloc)
  125. #define realloc_aligned_internal (g_pMemOverrideRawCrtFns->pfn_reallocalign)
  126. #define free_internal (g_pMemOverrideRawCrtFns->pfn_free)
  127. #define msize_internal (g_pMemOverrideRawCrtFns->pfn_malloc_usable_size)
  128. #define compact_internal() (0)
  129. #define heapstats_internal(p) (void)(0)
  130. #else // _PS3
  131. #define malloc_internal( region, bytes) malloc(bytes)
  132. #define malloc_aligned_internal( region, bytes, align ) memalign(align, bytes)
  133. #define realloc_internal realloc
  134. #define realloc_aligned_internal realloc
  135. #define free_internal free
  136. #ifdef POSIX
  137. #define msize_internal malloc_usable_size
  138. #else // POSIX
  139. #define msize_internal _msize
  140. #endif // POSIX
  141. #define compact_internal() (0)
  142. #define heapstats_internal(p) (void)(0)
  143. #endif // _PS3
  144. #else // USE_DLMALLOC
  145. #define MSPACES 1
  146. #include "dlmalloc/malloc-2.8.3.h"
  147. // Track whether we are using the process heap (-processheap) so that we don't
  148. // unnecessarily reserve tons of memory for the standard heap.
  149. static bool s_bUsingProcessHeap = false;
  150. #ifdef MEMALLOC_REGIONS
  151. static size_t s_nMemSpaceSize = 2ULL * 1024 * 1024 * 1024ULL;
  152. #endif
  153. void *g_AllocRegions[] =
  154. {
  155. #ifndef MEMALLOC_REGIONS
  156. #ifdef MEMALLOC_SEGMENT_MIXED
  157. s_bUsingProcessHeap ? NULL : create_mspace( 0, 1 ), // unified
  158. s_bUsingProcessHeap ? NULL : create_mspace( MBH_SIZE_MB * 1024 * 1024, 1 ),
  159. #else
  160. s_bUsingProcessHeap ? NULL : create_mspace( 100*1024*1024, 1 ),
  161. #endif
  162. #else // MEMALLOC_REGIONS
  163. // @TODO: per DLL regions didn't work out very well. flux of usage left too much overhead. need to try lifetime-based management [6/9/2009 tom]
  164. s_bUsingProcessHeap ? NULL : create_mspace( s_nMemSpaceSize, 1 ), // unified
  165. #endif // MEMALLOC_REGIONS
  166. };
  167. #ifndef MEMALLOC_REGIONS
  168. #ifndef MEMALLOC_SEGMENT_MIXED
  169. #define SelectRegion( region, bytes ) 0
  170. #else
  171. // NOTE: this split is designed to force the 'large block' heap to ONLY perform virtual allocs (see
  172. // DEFAULT_MMAP_THRESHOLD in malloc.cpp), to avoid ANY fragmentation or waste in an internal arena
  173. #define REGION_SPLIT (256*1024)
  174. #define SelectRegion( region, bytes ) g_AllocRegions[bytes < REGION_SPLIT]
  175. #endif
  176. #else // MEMALLOC_REGIONS
  177. #define SelectRegion( region, bytes ) g_AllocRegions[region]
  178. #endif // MEMALLOC_REGIONS
  179. #define malloc_internal( region, bytes ) mspace_malloc(SelectRegion(region,bytes), bytes)
  180. #define malloc_aligned_internal( region, bytes, align ) mspace_memalign(SelectRegion(region,bytes), align, bytes)
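// Worked example of the mixed-segment split (MEMALLOC_SEGMENT_MIXED case):
// SelectRegion() indexes g_AllocRegions with the boolean ( bytes < REGION_SPLIT ),
// so with REGION_SPLIT == 256 KiB:
//
//   void *pSmall = malloc_internal( DEF_REGION, 4 * 1024 );   // index 1: the fixed MBH_SIZE_MB arena
//   void *pLarge = malloc_internal( DEF_REGION, 512 * 1024 ); // index 0: the unified mspace, where blocks this large are pure virtual allocs (see the note above)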
  181. FORCEINLINE void *realloc_aligned_internal( void *mem, size_t bytes, size_t align )
  182. {
  183. // TODO: implement realloc_aligned inside dlmalloc (requires splitting realloc's existing
  184. // 'grow in-place' code into a new function, then call that w/ alloc_align/copy/free on failure)
  185. byte *newMem = (byte *)dlrealloc( mem, bytes );
  186. if ( ((size_t)newMem&(align-1)) == 0 )
  187. return newMem;
  188. // realloc broke alignment...
  189. byte *fallback = (byte *)malloc_aligned_internal( DEF_REGION, bytes, align );
  190. if ( !fallback )
  191. return NULL;
  192. memcpy( fallback, newMem, bytes );
  193. dlfree( newMem );
  194. return fallback;
  195. }
  196. inline size_t compact_internal()
  197. {
  198. size_t start = 0, end = 0;
  199. for ( int i = 0; i < ARRAYSIZE(g_AllocRegions); i++ )
  200. {
  201. start += mspace_footprint( g_AllocRegions[i] );
  202. mspace_trim( g_AllocRegions[i], 0 );
  203. end += mspace_footprint( g_AllocRegions[i] );
  204. }
  205. return ( start - end );
  206. }
  207. inline void heapstats_internal( FILE *pFile, IMemAlloc::DumpStatsFormat_t nFormat )
  208. {
  209. // @TODO: improve this presentation, as a table [6/1/2009 tom]
  210. char buf[1024];
  211. for ( int i = 0; i < ARRAYSIZE( g_AllocRegions ); i++ )
  212. {
  213. struct mallinfo info = mspace_mallinfo( g_AllocRegions[ i ] );
  214. size_t footPrint = mspace_footprint( g_AllocRegions[ i ] );
  215. size_t maxFootPrint = mspace_max_footprint( g_AllocRegions[ i ] );
  216. if ( nFormat == IMemAlloc::FORMAT_HTML )
  217. {
  218. _snprintf( buf, sizeof(buf),
  219. "\n<div class=\"sbhTable\"><legend>dlmalloc mspace #%d: %u MiB allocated of %u MiB footprint</legend><pre>\n"
  220. " %d:footprint ~ %5u MiB (total space used by the mspace)\n"
  221. " %d:footprint_max ~ %5u MiB (maximum total space used by the mspace)\n"
  222. " %d:arena ~ %5u MiB (non-mmapped space allocated from system)\n"
  223. " %d:ordblks ~ %5u MiB (number of free chunks)\n"
  224. " %d:hblkhd ~ %5u MiB (space in mmapped regions)\n"
  225. " %d:usmblks ~ %5u MiB (maximum total allocated space)\n"
  226. " %d:uordblks ~ %5u MiB (total allocated space)\n"
  227. " %d:fordblks ~ %5u MiB (total free space)\n"
  228. " %d:keepcost ~ %5u MiB (releasable (via malloc_trim) space)\n</pre></div>",
  229. i, uint( info.uordblks >> 20 ), uint( footPrint >> 20 ),
  230. i,uint( footPrint >> 20 ),
  231. i,uint( maxFootPrint >> 20 ),
  232. i,uint( info.arena >> 20 ),
  233. i,uint( info.ordblks >> 20 ),
  234. i,uint( info.hblkhd >> 20 ),
  235. i,uint( info.usmblks >> 20 ),
  236. i,uint( info.uordblks >> 20 ),
  237. i,uint( info.fordblks >> 20 ),
  238. i,uint( info.keepcost >> 20 )
  239. );
  240. }
  241. else
  242. {
  243. _snprintf( buf, sizeof(buf),
  244. "\ndlmalloc mspace #%d. 1 MiB=2^20 bytes\n"
  245. " %d:footprint ~ %5u MiB (total space used by the mspace)\n"
  246. " %d:footprint_max ~ %5u MiB (maximum total space used by the mspace)\n"
  247. " %d:arena ~ %5u MiB (non-mmapped space allocated from system)\n"
  248. " %d:ordblks ~ %5u MiB (number of free chunks)\n"
  249. " %d:hblkhd ~ %5u MiB (space in mmapped regions)\n"
  250. " %d:usmblks ~ %5u MiB (maximum total allocated space)\n"
  251. " %d:uordblks ~ %5u MiB (total allocated space)\n"
  252. " %d:fordblks ~ %5u MiB (total free space)\n"
  253. " %d:keepcost ~ %5u MiB (releasable (via malloc_trim) space)\n",
  254. i,
  255. i,uint( footPrint >> 20 ),
  256. i,uint( maxFootPrint >> 20 ),
  257. i,uint( info.arena >> 20 ),
  258. i,uint( info.ordblks >> 20 ),
  259. i,uint( info.hblkhd >> 20 ),
  260. i,uint( info.usmblks >> 20 ),
  261. i,uint( info.uordblks >> 20 ),
  262. i,uint( info.fordblks >> 20 ),
  263. i,uint( info.keepcost >> 20 )
  264. );
  265. }
  266. if ( pFile )
  267. fprintf( pFile, "%s", buf );
  268. else
  269. Msg( "%s", buf );
  270. }
  271. }
  272. #define realloc_internal dlrealloc
  273. #define free_internal dlfree
  274. #define msize_internal dlmalloc_usable_size
  275. #endif // USE_DLMALLOC
  276. #ifdef TIME_ALLOC
  277. CAverageCycleCounter g_MallocCounter;
  278. CAverageCycleCounter g_ReallocCounter;
  279. CAverageCycleCounter g_FreeCounter;
  280. #define PrintOne( name ) \
  281. Msg("%-48s: %6.4f avg (%8.1f total, %7.3f peak, %5d iters)\n", \
  282. #name, \
  283. g_##name##Counter.GetAverageMilliseconds(), \
  284. g_##name##Counter.GetTotalMilliseconds(), \
  285. g_##name##Counter.GetPeakMilliseconds(), \
  286. g_##name##Counter.GetIters() ); \
  287. memset( &g_##name##Counter, 0, sizeof(g_##name##Counter) )
  288. void PrintAllocTimes()
  289. {
  290. PrintOne( Malloc );
  291. PrintOne( Realloc );
  292. PrintOne( Free );
  293. }
  294. #define PROFILE_ALLOC(name) CAverageTimeMarker name##_ATM( &g_##name##Counter )
  295. #else // TIME_ALLOC
  296. #define PROFILE_ALLOC( name ) ((void)0)
  297. #define PrintAllocTimes() ((void)0)
  298. #endif // TIME_ALLOC
  299. #if _MSC_VER < 1400 && defined( MSVC ) && !defined(_STATIC_LINKED) && (defined(_DEBUG) || defined(USE_MEM_DEBUG))
  300. void *operator new( unsigned int nSize, int nBlockUse, const char *pFileName, int nLine )
  301. {
  302. return ::operator new( nSize );
  303. }
  304. void *operator new[] ( unsigned int nSize, int nBlockUse, const char *pFileName, int nLine )
  305. {
  306. return ::operator new[]( nSize );
  307. }
  308. #endif
  309. #include "mem_impl_type.h"
  310. #if MEM_IMPL_TYPE_STD
  311. //-----------------------------------------------------------------------------
  312. // Singleton...
  313. //-----------------------------------------------------------------------------
  314. #pragma warning( disable:4074 ) // warning C4074: initializers put in compiler reserved initialization area
  315. #pragma init_seg( compiler )
  316. #if MEM_SBH_ENABLED
  317. CSmallBlockPool< CStdMemAlloc::CFixedAllocator< MBYTES_PRIMARY_SBH, true> >::SharedData_t CSmallBlockPool< CStdMemAlloc::CFixedAllocator< MBYTES_PRIMARY_SBH, true> >::gm_SharedData CONSTRUCT_EARLY;
  318. #ifdef MEMALLOC_USE_SECONDARY_SBH
  319. CSmallBlockPool< CStdMemAlloc::CFixedAllocator< MBYTES_SECONDARY_SBH, false> >::SharedData_t CSmallBlockPool< CStdMemAlloc::CFixedAllocator< MBYTES_SECONDARY_SBH, false> >::gm_SharedData CONSTRUCT_EARLY;
  320. #endif
  321. #ifndef MEMALLOC_NO_FALLBACK
  322. CSmallBlockPool< CStdMemAlloc::CVirtualAllocator >::SharedData_t CSmallBlockPool< CStdMemAlloc::CVirtualAllocator >::gm_SharedData CONSTRUCT_EARLY;
  323. #endif
  324. #endif // MEM_SBH_ENABLED
  325. static CStdMemAlloc s_StdMemAlloc CONSTRUCT_EARLY;
  326. #ifdef _PS3
  327. MemOverrideRawCrtFunctions_t *g_pMemOverrideRawCrtFns;
  328. IMemAlloc *g_pMemAllocInternalPS3 = &s_StdMemAlloc;
  329. PLATFORM_OVERRIDE_MEM_ALLOC_INTERNAL_PS3_IMPL
  330. #else // !_PS3
  331. #ifndef TIER0_VALIDATE_HEAP
  332. IMemAlloc *g_pMemAlloc = &s_StdMemAlloc;
  333. void SetAllocatorObject( IMemAlloc* pAllocator )
  334. {
  335. g_pMemAlloc = pAllocator;
  336. }
  337. #else
  338. IMemAlloc *g_pActualAlloc = &s_StdMemAlloc;
  339. void SetAllocatorObject( IMemAlloc* pAllocator )
  340. {
  341. g_pActualAlloc = pAllocator;
  342. }
  343. #endif
  344. #endif // _PS3
  345. CStdMemAlloc::CStdMemAlloc()
  346. : m_pfnFailHandler( DefaultFailHandler ),
  347. m_sMemoryAllocFailed( (size_t)0 ),
  348. m_bInCompact( false )
  349. {
  350. #ifdef _PS3
  351. g_pMemAllocInternalPS3 = &s_StdMemAlloc;
  352. PLATFORM_OVERRIDE_MEM_ALLOC_INTERNAL_PS3.m_pMemAllocCached = &s_StdMemAlloc;
  353. malloc_managed_size mms;
  354. mms.current_inuse_size = 0x12345678;
  355. mms.current_system_size = 0x09ABCDEF;
  356. mms.max_system_size = reinterpret_cast< size_t >( this );
  357. int iResult = malloc_stats( &mms );
  358. g_pMemOverrideRawCrtFns = reinterpret_cast< MemOverrideRawCrtFunctions_t * >( iResult );
  359. #elif IsPlatformWindowsPC()
  360. char *pStr = (char*)Plat_GetCommandLineA();
  361. if ( pStr )
  362. {
  363. char tempStr[512];
  364. strncpy( tempStr, pStr, sizeof( tempStr ) - 1 );
  365. tempStr[ sizeof( tempStr ) - 1 ] = 0;
  366. _strupr( tempStr );
  367. s_bUsingProcessHeap = CheckWindowsAllocSettings( tempStr );
  368. #ifdef MEMALLOC_REGIONS
  369. const char *pMemSpaceParam = "-memspacemb ";
  370. if ( const char *pMemSpace = strstr( pStr, pMemSpaceParam ) )
  371. {
  372. const char *pMemSpaceMb = pMemSpace + strlen( pMemSpaceParam );
  373. int nMb = atoi( pMemSpaceMb );
  374. s_nMemSpaceSize = size_t( nMb ) * ( 1024 * 1024ull );
  375. }
  376. #endif
  377. }
  378. #endif
  379. }
  380. #if MEM_SBH_ENABLED
  381. //-----------------------------------------------------------------------------
  382. // Small block heap (multi-pool)
  383. //-----------------------------------------------------------------------------
  384. //-----------------------------------------------------------------------------
  385. //
  386. //-----------------------------------------------------------------------------
  387. template <typename T>
  388. inline T MemAlign( T val, unsigned alignment )
  389. {
  390. return (T)( ( (uintp)val + alignment - 1 ) & ~( (uintp)alignment - 1 ) ); // pointer-sized casts so 64-bit values aren't truncated and the mask isn't zero-extended
  391. }
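// Example: MemAlign( 13, 16 ) == 16 and MemAlign( 32, 16 ) == 32. The
// ( val + align - 1 ) & ~( align - 1 ) trick only works for power-of-two
// alignments, which is what callers here pass.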
  392. template <typename CAllocator>
  393. void CSmallBlockHeap<CAllocator>::InitPools( const uint *pSizes )
  394. {
  395. if ( pSizes[ NUM_POOLS - 1 ] != MAX_SBH_BLOCK )
  396. {
  397. Error( "SBH Configuration failure: size[%d]=%u, expected %d", NUM_POOLS - 1, pSizes[ NUM_POOLS - 1 ], MAX_SBH_BLOCK );
  398. }
  399. for ( int iPool = 0; iPool < NUM_POOLS; ++iPool )
  400. {
  401. m_Pools[ iPool ].Init( pSizes[ iPool ] );
  402. }
  403. int iCurPool = 0;
  404. const int MAX_TABLE = MAX_SBH_BLOCK >> SBH_BLOCK_LOOKUP_GRANULARITY;
  405. for ( int i = 0; i < MAX_TABLE; i++ )
  406. {
  407. int nByteSize = ( i + 1 ) << SBH_BLOCK_LOOKUP_GRANULARITY;
  408. Assert( ( ( nByteSize - 1 ) >> SBH_BLOCK_LOOKUP_GRANULARITY ) == i ); // Like putting Assert( FindPool( nByteSize ) == m_PoolLookup[ i ] ) at the end of the loop body
  409. if ( m_Pools[ iCurPool ].GetBlockSize() < nByteSize )
  410. {
  411. ++iCurPool;// move on to the next pool
  412. Assert( iCurPool );
  413. }
  414. Assert( nByteSize <= m_Pools[ iCurPool ].GetBlockSize() );
  415. m_PoolLookup[ i ] = &m_Pools[ iCurPool ];
  416. }
  417. }
  418. //-----------------------------------------------------------------------------
  419. //
  420. //-----------------------------------------------------------------------------
  421. size_t g_nSBHOverride = 0;
  422. bool g_bSBHCompactDisabled = false;
  423. template <typename CAllocator>
  424. void CSmallBlockPool<CAllocator>::Init( unsigned nBlockSize )
  425. {
  426. SharedData_t *pSharedData = GetSharedData();
  427. if ( !pSharedData->m_pBase )
  428. {
  429. if ( !g_nSBHOverride )
  430. {
  431. //
  432. // Check command-line for settings and overrides
  433. //
  434. const char *pszPlatCommandLine = Plat_GetCommandLineA();
  435. //
  436. // SBH size in megabytes
  437. //
  438. char const *szSBH = pszPlatCommandLine ? strstr( pszPlatCommandLine, "-forcesbhsizemb " ) : NULL;
  439. if ( szSBH )
  440. {
  441. g_nSBHOverride = size_t( atoi( szSBH + strlen( "-forcesbhsizemb " ) ) ) * size_t( 1024 * 1024 );
  442. COMPILE_TIME_ASSERT( !( ( CAllocator::BYTES_PAGE - 1 ) & CAllocator::BYTES_PAGE ) ); // allocator page size must be power of 2
  443. g_nSBHOverride = ( g_nSBHOverride + CAllocator::BYTES_PAGE - 1 ) & ~( CAllocator::BYTES_PAGE - 1 ); // the size of arena must be integer number of pages. Otherwise, a) we waste space; b) some SBH code assumptions seem to be broken
  444. Msg( "SBH size forced override -forcesbhsizemb: %llu bytes (%u MB)\n", g_nSBHOverride, uint( g_nSBHOverride / (1024*1024) ) );
  445. }
  446. else
  447. {
  448. g_nSBHOverride = MBYTES_PRIMARY_SBH * 1024 * 1024;
  449. }
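// The mask above is the standard power-of-two round-up: with a 64 KiB page
// (value illustrative; BYTES_PAGE is whatever CAllocator defines),
// ( 0x12345 + 0xFFFF ) & ~0xFFFF == 0x20000, so any override is bumped to a
// whole number of allocator pages.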
  450. //
  451. // SBH compact control
  452. //
  453. char const *szSBHcompact = pszPlatCommandLine ? strstr( pszPlatCommandLine, "-sbhcompactdisabled " ) : NULL;
  454. if ( szSBHcompact )
  455. {
  456. g_bSBHCompactDisabled = true;
  457. Msg( "SBH compact disabled\n" );
  458. }
  459. }
  460. pSharedData->m_pBase = pSharedData->m_Allocator.AllocatePoolMemory();
  461. pSharedData->m_numPages = pSharedData->m_Allocator.GetNumPages();
  462. if ( Q_ARRAYSIZE( pSharedData->m_PageStatus ) < pSharedData->m_numPages )
  463. Error( "SBH Configuration Error! (%u < %u)", Q_ARRAYSIZE( pSharedData->m_PageStatus ), pSharedData->m_numPages );
  464. pSharedData->m_pLimit = pSharedData->m_pBase + pSharedData->m_Allocator.GetTotalBytes();
  465. pSharedData->m_pNextBlock = pSharedData->m_pBase;
  466. }
  467. if ( !( nBlockSize % MIN_SBH_ALIGN == 0 && nBlockSize >= MIN_SBH_BLOCK && nBlockSize >= sizeof(TSLNodeBase_t) ) )
  468. DebuggerBreak();
  469. m_nBlockSize = nBlockSize;
  470. m_pNextAlloc = NULL;
  471. m_nCommittedPages = 0;
  472. m_nIsCompact = 1;
  473. }
  474. template <typename CAllocator>
  475. size_t CSmallBlockPool<CAllocator>::GetBlockSize()
  476. {
  477. return m_nBlockSize;
  478. }
  479. // Define VALIDATE_SBH_FREE_LIST to a given block size to validate that pool's freelist (it'll crash on the next alloc/free after the list is corrupted)
  480. // NOTE: this may affect perf more than USE_LIGHT_MEM_DEBUG
  481. //#define VALIDATE_SBH_FREE_LIST 320
  482. template <typename CAllocator>
  483. void CSmallBlockPool<CAllocator>::ValidateFreelist( SharedData_t *pSharedData )
  484. {
  485. #ifdef VALIDATE_SBH_FREE_LIST
  486. if ( m_nBlockSize != VALIDATE_SBH_FREE_LIST )
  487. return;
  488. static int count = 0;
  489. count++; // Track when the corruption occurs, if repeatable
  490. pSharedData->m_Lock.LockForWrite();
  491. #ifdef USE_NATIVE_SLIST
  492. TSLNodeBase_t *pNode = (TSLNodeBase_t *)(m_FreeList.AccessUnprotected()->Next.Next);
  493. #else
  494. TSLNodeBase_t *pNode = (TSLNodeBase_t *)(m_FreeList.AccessUnprotected()->value.Next);
  495. #endif
  496. while( pNode )
  497. pNode = pNode->Next;
  498. pSharedData->m_Lock.UnlockWrite();
  499. #endif // VALIDATE_SBH_FREE_LIST
  500. }
  501. //CThreadFastMutex g_SergiyTest;
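// Alloc() below tries three paths, all under the shared read lock:
//   1) pop a free block off this pool's lock-free m_FreeList;
//   2) bump m_pNextAlloc within the pool's current page (lock-free via
//      AssignIf), stopping at logical page boundaries;
//   3) take m_CommitMutex and claim a fresh page, either from the shared
//      free-page list or by advancing the shared m_pNextBlock cursor,
//      committing it if it isn't committed yet.
// A thread that loses the commit race spins (yielding with ThreadSleep(0) if
// its priority is above 0) until a block appears or commit fails (OOM -> NULL).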
  502. template <typename CAllocator>
  503. void *CSmallBlockPool<CAllocator>::Alloc()
  504. {
  505. SharedData_t *pSharedData = GetSharedData();
  506. ValidateFreelist( pSharedData );
  507. CThreadSpinRWLock &sharedLock = pSharedData->m_Lock;
  508. if ( !sharedLock.TryLockForRead() )
  509. {
  510. sharedLock.LockForRead();
  511. }
  512. byte *pResult;
  513. intp iPage = -1;
  514. int iThreadPriority = INT_MAX;
  515. while (1)
  516. {
  517. pResult = m_FreeList.Pop();
  518. if ( !pResult )
  519. {
  520. int nBlockSize = m_nBlockSize;
  521. byte *pNextAlloc;
  522. while (1)
  523. {
  524. pResult = m_pNextAlloc;
  525. if ( pResult )
  526. {
  527. pNextAlloc = pResult + nBlockSize;
  528. if ( ( ( uintp( pNextAlloc - pSharedData->m_pBase ) - 1 ) % BYTES_PAGE ) + nBlockSize > BYTES_PAGE )
  529. {
  530. // Crossed a logical page boundary; note that logical pages may be larger than physical pages, and VirtualAlloc may return memory that is not aligned to a logical page boundary
  531. pNextAlloc = 0;
  532. }
  533. if ( m_pNextAlloc.AssignIf( pResult, pNextAlloc ) )
  534. {
  535. iPage = (size_t)((byte *)pResult - pSharedData->m_pBase) / BYTES_PAGE;
  536. break;
  537. }
  538. }
  539. else if ( m_CommitMutex.TryLock() )
  540. {
  541. if ( !m_pNextAlloc )
  542. {
  543. PageStatus_t *pAllocatedPageStatus = (PageStatus_t *)pSharedData->m_FreePages.Pop();
  544. if ( pAllocatedPageStatus )
  545. {
  546. iPage = pAllocatedPageStatus - &pSharedData->m_PageStatus[0];
  547. }
  548. else
  549. {
  550. while (1)
  551. {
  552. byte *pBlock = pSharedData->m_pNextBlock;
  553. if ( pBlock >= pSharedData->m_pLimit )
  554. {
  555. break;
  556. }
  557. if ( ThreadInterlockedAssignPointerIf( (void **)&pSharedData->m_pNextBlock, (void *)( pBlock + BYTES_PAGE ), (void *)pBlock ) )
  558. {
  559. iPage = (size_t)((byte *)pBlock - pSharedData->m_pBase) / BYTES_PAGE;
  560. pAllocatedPageStatus = &pSharedData->m_PageStatus[iPage];
  561. break;
  562. }
  563. }
  564. }
  565. if ( pAllocatedPageStatus )
  566. {
  567. byte *pBlock = pSharedData->m_pBase + ( iPage * BYTES_PAGE );
  568. if ( pAllocatedPageStatus->m_nAllocated == NOT_COMMITTED )
  569. {
  570. pSharedData->m_Allocator.Commit( pBlock );
  571. }
  572. pAllocatedPageStatus->m_pPool = this;
  573. pAllocatedPageStatus->m_nAllocated = 0;
  574. pAllocatedPageStatus->m_pNextPageInPool = m_pFirstPage;
  575. m_pFirstPage = pAllocatedPageStatus;
  576. #ifdef TRACK_SBH_COUNTS
  577. m_nFreeBlocks += ( BYTES_PAGE / m_nBlockSize );
  578. #endif
  579. m_nCommittedPages++;
  580. m_pNextAlloc = pBlock;
  581. }
  582. else
  583. {
  584. m_pNextAlloc = NULL;
  585. m_CommitMutex.Unlock();
  586. sharedLock.UnlockRead();
  587. return NULL;
  588. }
  589. }
  590. m_CommitMutex.Unlock();
  591. }
  592. else
  593. {
  594. if ( iThreadPriority == INT_MAX)
  595. {
  596. iThreadPriority = ThreadGetPriority();
  597. }
  598. if ( iThreadPriority > 0 )
  599. {
  600. ThreadSleep( 0 );
  601. }
  602. }
  603. }
  604. if ( pResult )
  605. {
  606. break;
  607. }
  608. }
  609. else
  610. {
  611. iPage = (size_t)((byte *)pResult - pSharedData->m_pBase) / BYTES_PAGE;
  612. break;
  613. }
  614. }
  615. #ifdef TRACK_SBH_COUNTS
  616. --m_nFreeBlocks;
  617. #endif
  618. ++pSharedData->m_PageStatus[iPage].m_nAllocated;
  619. sharedLock.UnlockRead();
  620. return pResult;
  621. }
  622. template <typename CAllocator>
  623. void CSmallBlockPool<CAllocator>::Free( void *p )
  624. {
  625. SharedData_t *pSharedData = GetSharedData();
  626. size_t iPage = (size_t)((byte *)p - pSharedData->m_pBase) / BYTES_PAGE;
  627. CThreadSpinRWLock &sharedLock = pSharedData->m_Lock;
  628. if ( !sharedLock.TryLockForRead() )
  629. {
  630. sharedLock.LockForRead();
  631. }
  632. --pSharedData->m_PageStatus[iPage].m_nAllocated;
  633. // Once the last allocation is removed from any page in a pool, the pool will no longer be considered compact
  634. // and if the compact process is run on the heap all of the pages of this pool will have to be examined to
  635. // determine if they can be returned to the free page list. Note, it is possible that a pool will be marked
  636. // as not compact when all allocations are removed from a page, but then a new allocation may be put in the
  637. // same page, meaning the pool will not actually have any empty pages but will still be flagged as not compact.
  638. if ( pSharedData->m_PageStatus[iPage].m_nAllocated == 0 )
  639. {
  640. m_nIsCompact = 0;
  641. }
  642. #ifdef TRACK_SBH_COUNTS
  643. ++m_nFreeBlocks;
  644. #endif
  645. m_FreeList.Push( p );
  646. pSharedData->m_Lock.UnlockRead();
  647. ValidateFreelist( pSharedData );
  648. }
  649. // Count the free blocks.
  650. template <typename CAllocator>
  651. int CSmallBlockPool<CAllocator>::CountFreeBlocks()
  652. {
  653. #ifdef TRACK_SBH_COUNTS
  654. return m_nFreeBlocks;
  655. #else
  656. return 0;
  657. #endif
  658. }
  659. // Size of committed memory managed by this heap:
  660. template <typename CAllocator>
  661. size_t CSmallBlockPool<CAllocator>::GetCommittedSize()
  662. {
  663. return size_t( m_nCommittedPages ) * size_t( BYTES_PAGE );
  664. }
  665. // Return the total blocks memory is committed for in the heap
  666. template <typename CAllocator>
  667. int CSmallBlockPool<CAllocator>::CountCommittedBlocks()
  668. {
  669. return m_nCommittedPages * ( BYTES_PAGE / m_nBlockSize );
  670. }
  671. // Count the number of allocated blocks in the heap:
  672. template <typename CAllocator>
  673. int CSmallBlockPool<CAllocator>::CountAllocatedBlocks()
  674. {
  675. #ifdef TRACK_SBH_COUNTS
  676. return CountCommittedBlocks() - CountFreeBlocks();
  677. #else
  678. return 0;
  679. #endif
  680. }
  681. template <typename CAllocator>
  682. int CSmallBlockPool<CAllocator>::PageSort( const void *p1, const void *p2 )
  683. {
  684. SharedData_t *pSharedData = GetSharedData();
  685. return pSharedData->m_PageStatus[*((int *)p1)].m_SortList.Count() - pSharedData->m_PageStatus[*((int *)p2)].m_SortList.Count();
  686. }
  687. template <typename CAllocator>
  688. bool CSmallBlockPool<CAllocator>::RemovePagesFromFreeList( byte **pPages, int nPages, bool bSortList )
  689. {
  690. // Since we don't use the depth of the tslist, and sequence is only used for push, we can remove in-place
  691. int i;
  692. byte **pLimits = (byte **)stackalloc( nPages * sizeof(byte *) );
  693. int nBlocksNotInFreeList = 0;
  694. for ( i = 0; i < nPages; i++ )
  695. {
  696. pLimits[i] = pPages[i] + BYTES_PAGE;
  697. if ( m_pNextAlloc >= pPages[i] && m_pNextAlloc < pLimits[i] )
  698. {
  699. nBlocksNotInFreeList = ( pLimits[i] - m_pNextAlloc ) / m_nBlockSize;
  700. m_pNextAlloc = NULL;
  701. }
  702. }
  703. int iTarget = ( ( BYTES_PAGE/m_nBlockSize ) * nPages ) - nBlocksNotInFreeList;
  704. int iCount = 0;
  705. TSLHead_t *pRawFreeList = m_FreeList.AccessUnprotected();
  706. bool bRemove;
  707. if ( !bSortList || m_nCommittedPages - nPages == 1 )
  708. {
  709. #ifdef USE_NATIVE_SLIST
  710. TSLNodeBase_t **ppPrevNext = (TSLNodeBase_t **)&(pRawFreeList->Next);
  711. #else
  712. TSLNodeBase_t **ppPrevNext = (TSLNodeBase_t **)&(pRawFreeList->value.Next);
  713. #endif
  714. TSLNodeBase_t *pNode = *ppPrevNext;
  715. while ( pNode && iCount != iTarget )
  716. {
  717. bRemove = false;
  718. for ( i = 0; i < nPages; i++ )
  719. {
  720. if ( (byte *)pNode >= pPages[i] && (byte *)pNode < pLimits[i] )
  721. {
  722. bRemove = true;
  723. break;
  724. }
  725. }
  726. if ( bRemove )
  727. {
  728. iCount++;
  729. *ppPrevNext = pNode->Next;
  730. }
  731. else
  732. {
  733. *ppPrevNext = pNode;
  734. ppPrevNext = &pNode->Next;
  735. }
  736. pNode = pNode->Next;
  737. }
  738. }
  739. else
  740. {
  741. SharedData_t *pSharedData = GetSharedData();
  742. byte *pSharedBase = pSharedData->m_pBase;
  743. TSLNodeBase_t *pNode = m_FreeList.Detach();
  744. TSLNodeBase_t *pNext;
  745. int iSortPage;
  746. int nSortPages = 0;
  747. int *sortPages = (int *)stackalloc( m_nCommittedPages * sizeof(int) );
  748. while ( pNode )
  749. {
  750. pNext = pNode->Next;
  751. bRemove = false;
  752. for ( i = 0; i < nPages; i++ )
  753. {
  754. if ( (byte *)pNode >= pPages[i] && (byte *)pNode < pLimits[i] )
  755. {
  756. iCount++;
  757. bRemove = true;
  758. break;
  759. }
  760. }
  761. if ( !bRemove )
  762. {
  763. iSortPage = ( (byte *)pNode - pSharedBase ) / BYTES_PAGE;
  764. if ( !pSharedData->m_PageStatus[iSortPage].m_SortList.Count() )
  765. {
  766. sortPages[nSortPages++] = iSortPage;
  767. }
  768. pSharedData->m_PageStatus[iSortPage].m_SortList.Push( pNode );
  769. }
  770. pNode = pNext;
  771. }
  772. if ( nSortPages > 1 )
  773. {
  774. qsort( sortPages, nSortPages, sizeof(int), &PageSort );
  775. }
  776. for ( i = 0; i < nSortPages; i++ )
  777. {
  778. while ( ( pNode = pSharedData->m_PageStatus[sortPages[i]].m_SortList.Pop() ) != NULL )
  779. {
  780. m_FreeList.Push( pNode );
  781. }
  782. }
  783. }
  784. if ( iTarget != iCount )
  785. {
  786. DebuggerBreakIfDebugging();
  787. }
  788. return ( iTarget == iCount );
  789. }
  790. template <typename CAllocator>
  791. size_t CSmallBlockPool<CAllocator>::Compact( bool bIncremental )
  792. {
  793. // If the pool is flagged as being compact there have been no free operations which resulted
  794. // in a page in the pool becoming empty, as a result there is no need to try to compact this pool.
  795. if ( m_nIsCompact || g_bSBHCompactDisabled )
  796. return 0;
  797. static bool bWarnedCorruption;
  798. bool bIsCorrupt = false;
  799. int i;
  800. size_t nFreed = 0;
  801. SharedData_t *pSharedData = GetSharedData();
  802. pSharedData->m_Lock.LockForWrite();
  803. if ( m_pFirstPage )
  804. {
  805. PageStatus_t **pReleasedPages = (PageStatus_t **)stackalloc( m_nCommittedPages * sizeof(PageStatus_t *) );
  806. PageStatus_t **pReleasedPagesPrevs = (PageStatus_t **)stackalloc( m_nCommittedPages * sizeof(PageStatus_t *) );
  807. byte **pPageBases = (byte **)stackalloc( m_nCommittedPages * sizeof(byte *) );
  808. int nPages = 0;
  809. // Gather the pages to return to the backing pool
  810. PageStatus_t *pPage = m_pFirstPage;
  811. PageStatus_t *pPagePrev = NULL;
  812. while ( pPage )
  813. {
  814. if ( pPage->m_nAllocated == 0 )
  815. {
  816. pReleasedPages[nPages] = pPage;
  817. pPageBases[nPages] = pSharedData->m_pBase + ( pPage - &pSharedData->m_PageStatus[0] ) * BYTES_PAGE;
  818. pReleasedPagesPrevs[nPages] = pPagePrev;
  819. nPages++;
  820. if ( bIncremental )
  821. {
  822. break;
  823. }
  824. }
  825. pPagePrev = pPage;
  826. pPage = pPage->m_pNextPageInPool;
  827. }
  828. if ( nPages )
  829. {
  830. // Remove the pages from the pool's free list
  831. if ( !RemovePagesFromFreeList( pPageBases, nPages, !bIncremental ) && !bWarnedCorruption )
  832. {
  833. // We don't know which of the pages encountered an incomplete free list
  834. // so we'll just push them all back in and hope for the best. This isn't
  835. // ventilator control software!
  836. bWarnedCorruption = true;
  837. bIsCorrupt = true;
  838. }
  839. nFreed = nPages * BYTES_PAGE;
  840. m_nCommittedPages -= nPages;
  841. #ifdef TRACK_SBH_COUNTS
  842. m_nFreeBlocks -= nPages * ( BYTES_PAGE / m_nBlockSize );
  843. #endif
  844. // Unlink the pages
  845. for ( i = nPages - 1; i >= 0; --i )
  846. {
  847. if ( pReleasedPagesPrevs[i] )
  848. {
  849. pReleasedPagesPrevs[i]->m_pNextPageInPool = pReleasedPages[i]->m_pNextPageInPool;
  850. }
  851. else
  852. {
  853. m_pFirstPage = pReleasedPages[i]->m_pNextPageInPool;
  854. }
  855. pReleasedPages[i]->m_pNextPageInPool = NULL;
  856. pReleasedPages[i]->m_pPool = NULL;
  857. }
  858. // Push them onto the backing free lists
  859. if ( !pSharedData->m_Allocator.IsVirtual() )
  860. {
  861. for ( i = 0; i < nPages; i++ )
  862. {
  863. pSharedData->m_FreePages.Push( pReleasedPages[i] );
  864. }
  865. }
  866. else
  867. {
  868. size_t nMinReserve = ( bIncremental ) ? pSharedData->m_Allocator.GetMinReservePages() * 8 : pSharedData->m_Allocator.GetMinReservePages();
  869. ptrdiff_t nReserveNeeded = nMinReserve - pSharedData->m_FreePages.Count();
  870. if ( nReserveNeeded > 0 )
  871. {
  872. int nToKeepCommitted = MIN( nReserveNeeded, nPages );
  873. while ( nToKeepCommitted-- )
  874. {
  875. nPages--;
  876. pSharedData->m_FreePages.Push( pReleasedPages[nPages] );
  877. }
  878. }
  879. if ( nPages )
  880. {
  881. // Detach the list, push the newly decommitted pages on, iterate up to any
  882. // previously decommitted pages, put them on, then push the committed pages on
  883. TSLNodeBase_t *pNodes = pSharedData->m_FreePages.Detach();
  884. for ( i = 0; i < nPages; i++ )
  885. {
  886. pReleasedPages[i]->m_nAllocated = NOT_COMMITTED;
  887. pSharedData->m_Allocator.Decommit( pPageBases[i] );
  888. pSharedData->m_FreePages.Push( pReleasedPages[i] );
  889. }
  890. TSLNodeBase_t *pCur, *pTemp = NULL;
  891. pCur = pNodes;
  892. while ( pCur )
  893. {
  894. if ( ((PageStatus_t *)pCur)->m_nAllocated == NOT_COMMITTED )
  895. {
  896. if ( pTemp )
  897. {
  898. pTemp->Next = NULL;
  899. }
  900. else
  901. {
  902. pNodes = NULL; // The list only has decommitted pages, don't go circular
  903. }
  904. while ( pCur )
  905. {
  906. pTemp = pCur->Next;
  907. pSharedData->m_FreePages.Push( pCur );
  908. pCur = pTemp;
  909. }
  910. break;
  911. }
  912. pTemp = pCur;
  913. pCur = pCur->Next;
  914. }
  915. while ( pNodes )
  916. {
  917. pTemp = pNodes->Next;
  918. pSharedData->m_FreePages.Push( pNodes );
  919. pNodes = pTemp;
  920. }
  921. }
  922. }
  923. }
  924. }
  925. if ( !bIncremental )
  926. {
  927. m_nIsCompact = 1;
  928. }
  929. pSharedData->m_Lock.UnlockWrite();
  930. if ( bIsCorrupt )
  931. {
  932. Warning( "***** HEAP IS CORRUPT (free compromised for block size %d, in %s heap, possible write after free) *****\n", m_nBlockSize, ( pSharedData->m_Allocator.IsVirtual() ) ? "virtual" : "physical" );
  933. }
  934. return nFreed;
  935. }
  936. template <typename CAllocator>
  937. bool CSmallBlockPool<CAllocator>::Validate()
  938. {
  939. #ifdef NO_SBH
  940. return true;
  941. #else
  942. int invalid = 0;
  943. SharedData_t *pSharedData = GetSharedData();
  944. pSharedData->m_Lock.LockForWrite();
  945. byte **pPageBases = (byte **)stackalloc( m_nCommittedPages * sizeof(byte *) );
  946. unsigned *pageCounts = (unsigned *)stackalloc( m_nCommittedPages * sizeof(unsigned) );
  947. memset( pageCounts, 0, m_nCommittedPages * sizeof(int) );
  948. unsigned nPages = 0;
  949. unsigned nEmptyPages = 0;
  950. unsigned sumAllocated = 0;
  951. unsigned freeNotInFreeList = 0;
  952. // Validate page list is consistent
  953. if ( !m_pFirstPage )
  954. {
  955. if ( m_nCommittedPages != 0 )
  956. {
  957. invalid = __LINE__;
  958. goto notValid;
  959. }
  960. }
  961. else
  962. {
  963. PageStatus_t *pPage = m_pFirstPage;
  964. while ( pPage )
  965. {
  966. pPageBases[nPages] = pSharedData->m_pBase + ( pPage - &pSharedData->m_PageStatus[0] ) * BYTES_PAGE;
  967. if ( pPage->m_pPool != this )
  968. {
  969. invalid = __LINE__;
  970. goto notValid;
  971. }
  972. if ( nPages > m_nCommittedPages )
  973. {
  974. invalid = __LINE__;
  975. goto notValid;
  976. }
  977. sumAllocated += pPage->m_nAllocated;
  978. if ( m_pNextAlloc >= pPageBases[nPages] && m_pNextAlloc < pPageBases[nPages] + BYTES_PAGE )
  979. {
  980. freeNotInFreeList = pageCounts[nPages] = ( ( pPageBases[nPages] + BYTES_PAGE ) - m_pNextAlloc ) / m_nBlockSize;
  981. }
  982. if ( pPage->m_nAllocated == 0 )
  983. {
  984. nEmptyPages++;
  985. }
  986. nPages++;
  987. pPage = pPage->m_pNextPageInPool;
  988. };
  989. if ( nPages != m_nCommittedPages )
  990. {
  991. invalid = __LINE__;
  992. goto notValid;
  993. }
  994. // If there are empty pages then the pool should always be marked as not compact; however,
  995. // it's fine for the pool to be marked as not compact even if there are no empty pages.
  996. if ( ( nEmptyPages > 0 ) && m_nIsCompact )
  997. {
  998. invalid = __LINE__;
  999. goto notValid;
  1000. }
  1001. }
  1002. // Validate block counts
  1003. {
  1004. unsigned blocksPerPage = ( BYTES_PAGE / m_nBlockSize );
  1005. #ifdef USE_NATIVE_SLIST
  1006. TSLNodeBase_t *pNode = (TSLNodeBase_t *)(m_FreeList.AccessUnprotected()->Next.Next);
  1007. #else
  1008. TSLNodeBase_t *pNode = (TSLNodeBase_t *)(m_FreeList.AccessUnprotected()->value.Next);
  1009. #endif
  1010. unsigned i;
  1011. while ( pNode )
  1012. {
  1013. for ( i = 0; i < nPages; i++ )
  1014. {
  1015. if ( (byte *)pNode >= pPageBases[i] && (byte *)pNode < pPageBases[i] + BYTES_PAGE )
  1016. {
  1017. pageCounts[i]++;
  1018. break;
  1019. }
  1020. }
  1021. if ( i == nPages )
  1022. {
  1023. invalid = __LINE__;
  1024. goto notValid;
  1025. }
  1026. pNode = pNode->Next;
  1027. }
  1028. PageStatus_t *pPage = m_pFirstPage;
  1029. i = 0;
  1030. while ( pPage )
  1031. {
  1032. unsigned nFreeOnPage = blocksPerPage - pPage->m_nAllocated;
  1033. if ( nFreeOnPage != pageCounts[i++] )
  1034. {
  1035. invalid = __LINE__;
  1036. goto notValid;
  1037. }
  1038. pPage = pPage->m_pNextPageInPool;
  1039. }
  1040. }
  1041. notValid:
  1042. pSharedData->m_Lock.UnlockWrite();
  1043. if ( invalid != 0 )
  1044. {
  1045. return false;
  1046. }
  1047. return true;
  1048. #endif
  1049. }
  1050. static const uint s_nPoolSizesServer64[] = { 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 576, 640, 704, 768, 896, 1024, 1280, 1536, 1792, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2 * 1048576, 4 * 1048576, 8 * 1048576, 16 * 1048576 };
  1051. //-----------------------------------------------------------------------------
  1052. //
  1053. //-----------------------------------------------------------------------------
  1054. template <typename CAllocator>
  1055. CSmallBlockHeap<CAllocator>::CSmallBlockHeap()
  1056. {
  1057. m_pSharedData = CPool::GetSharedData();
  1058. // Build a lookup table used to find the correct pool based on size
  1059. #ifdef _M_X64
  1060. COMPILE_TIME_ASSERT( sizeof( s_nPoolSizesServer64 ) / sizeof( s_nPoolSizesServer64[ 0 ] ) == NUM_POOLS );
  1061. InitPools( s_nPoolSizesServer64 );
  1062. #else
  1063. const int MAX_TABLE = MAX_SBH_BLOCK >> SBH_BLOCK_LOOKUP_GRANULARITY;
  1064. int i = 0;
  1065. int nBytesElement = 0;
  1066. CPool *pCurPool = NULL;
  1067. int iCurPool = 0;
  1068. // Blocks sized 0 - 128 are in pools in increments of 8
  1069. for ( ; i < 32; i++ )
  1070. {
  1071. if ( (i + 1) % 2 == 1)
  1072. {
  1073. nBytesElement += 8;
  1074. pCurPool = &m_Pools[iCurPool];
  1075. pCurPool->Init( nBytesElement );
  1076. iCurPool++;
  1077. m_PoolLookup[i] = pCurPool;
  1078. }
  1079. else
  1080. {
  1081. m_PoolLookup[i] = pCurPool;
  1082. }
  1083. }
  1084. // Blocks sized 129 - 256 are in pools in increments of 16
  1085. for ( ; i < 64; i++ )
  1086. {
  1087. if ( (i + 1) % 4 == 1)
  1088. {
  1089. nBytesElement += 16;
  1090. pCurPool = &m_Pools[iCurPool];
  1091. pCurPool->Init( nBytesElement );
  1092. iCurPool++;
  1093. m_PoolLookup[i] = pCurPool;
  1094. }
  1095. else
  1096. {
  1097. m_PoolLookup[i] = pCurPool;
  1098. }
  1099. }
  1100. // Blocks sized 257 - 512 are in pools in increments of 32
  1101. for ( ; i < 128; i++ )
  1102. {
  1103. if ( (i + 1) % 8 == 1)
  1104. {
  1105. nBytesElement += 32;
  1106. pCurPool = &m_Pools[iCurPool];
  1107. pCurPool->Init( nBytesElement );
  1108. iCurPool++;
  1109. m_PoolLookup[i] = pCurPool;
  1110. }
  1111. else
  1112. {
  1113. m_PoolLookup[i] = pCurPool;
  1114. }
  1115. }
  1116. // Blocks sized 513 - 768 are in pools in increments of 64
  1117. for ( ; i < 192; i++ )
  1118. {
  1119. if ( (i + 1) % 16 == 1)
  1120. {
  1121. nBytesElement += 64;
  1122. pCurPool = &m_Pools[iCurPool];
  1123. pCurPool->Init( nBytesElement );
  1124. iCurPool++;
  1125. m_PoolLookup[i] = pCurPool;
  1126. }
  1127. else
  1128. {
  1129. m_PoolLookup[i] = pCurPool;
  1130. }
  1131. }
  1132. // Blocks sized 769 - 1024 are in pools in increments of 128
  1133. for ( ; i < 256; i++ )
  1134. {
  1135. if ( (i + 1) % 32 == 1)
  1136. {
  1137. nBytesElement += 128;
  1138. pCurPool = &m_Pools[iCurPool];
  1139. pCurPool->Init( nBytesElement );
  1140. iCurPool++;
  1141. m_PoolLookup[i] = pCurPool;
  1142. }
  1143. else
  1144. {
  1145. m_PoolLookup[i] = pCurPool;
  1146. }
  1147. }
  1148. // Blocks sized 1025 - 2048 are in pools in increments of 256
  1149. for ( ; i < MAX_TABLE; i++ )
  1150. {
  1151. if ( (i + 1) % 64 == 1)
  1152. {
  1153. nBytesElement += 256;
  1154. pCurPool = &m_Pools[iCurPool];
  1155. pCurPool->Init( nBytesElement );
  1156. iCurPool++;
  1157. m_PoolLookup[i] = pCurPool;
  1158. }
  1159. else
  1160. {
  1161. m_PoolLookup[i] = pCurPool;
  1162. }
  1163. }
  1164. if ( iCurPool != NUM_POOLS )
  1165. {
  1166. Error( "SBH configuration error: %d/%d pools initialized\n", iCurPool, NUM_POOLS );
  1167. }
  1168. #endif
  1169. }
  1170. template <typename CAllocator>
  1171. bool CSmallBlockHeap<CAllocator>::ShouldUse( size_t nBytes )
  1172. {
  1173. return ( nBytes <= MAX_SBH_BLOCK );
  1174. }
  1175. template <typename CAllocator>
  1176. bool CSmallBlockHeap<CAllocator>::IsOwner( void * p )
  1177. {
  1178. if ( uintp(p) >= uintp(m_pSharedData->m_pBase) )
  1179. {
  1180. size_t index = (size_t)((byte *)p - m_pSharedData->m_pBase) / BYTES_PAGE;
  1181. return ( index < m_pSharedData->m_numPages );
  1182. }
  1183. return false;
  1184. }
  1185. template <typename CAllocator>
  1186. void *CSmallBlockHeap<CAllocator>::Alloc( size_t nBytes )
  1187. {
  1188. if ( nBytes == 0)
  1189. {
  1190. nBytes = 1;
  1191. }
  1192. Assert( ShouldUse( nBytes ) );
  1193. CPool *pPool = FindPool( nBytes );
  1194. void *p = pPool->Alloc();
  1195. return p;
  1196. }
  1197. template <typename CAllocator>
  1198. void *CSmallBlockHeap<CAllocator>::Realloc( void *p, size_t nBytes )
  1199. {
  1200. if ( nBytes == 0)
  1201. {
  1202. nBytes = 1;
  1203. }
  1204. CPool *pOldPool = FindPool( p );
  1205. CPool *pNewPool = ( ShouldUse( nBytes ) ) ? FindPool( nBytes ) : NULL;
  1206. if ( pOldPool == pNewPool )
  1207. {
  1208. return p;
  1209. }
  1210. void *pNewBlock = NULL;
  1211. if ( !pNewBlock )
  1212. {
  1213. pNewBlock = MemAlloc_Alloc( nBytes ); // Call back out so blocks can move from the secondary to the primary pools
  1214. }
  1215. if ( !pNewBlock )
  1216. {
  1217. pNewBlock = malloc_internal( DEF_REGION, nBytes );
  1218. }
  1219. if ( pNewBlock )
  1220. {
  1221. size_t nBytesCopy = MIN( nBytes, pOldPool->GetBlockSize() );
  1222. memcpy( pNewBlock, p, nBytesCopy );
  1223. }
  1224. else if ( nBytes < pOldPool->GetBlockSize() )
  1225. {
  1226. return p;
  1227. }
  1228. pOldPool->Free( p );
  1229. return pNewBlock;
  1230. }
  1231. template <typename CAllocator>
  1232. void CSmallBlockHeap<CAllocator>::Free( void *p )
  1233. {
  1234. CPool *pPool = FindPool( p );
  1235. if ( pPool )
  1236. {
  1237. pPool->Free( p );
  1238. }
  1239. else
  1240. {
  1241. // we probably didn't hook some allocation and now we're freeing it or the heap has been trashed!
  1242. DebuggerBreakIfDebugging();
  1243. }
  1244. }
  1245. template <typename CAllocator>
  1246. size_t CSmallBlockHeap<CAllocator>::GetSize( void *p )
  1247. {
  1248. CPool *pPool = FindPool( p );
  1249. return pPool->GetBlockSize();
  1250. }
  1251. template <typename CAllocator>
  1252. void CSmallBlockHeap<CAllocator>::Usage( size_t &bytesCommitted, size_t &bytesAllocated )
  1253. {
  1254. bytesCommitted = 0;
  1255. bytesAllocated = 0;
  1256. for ( int i = 0; i < NUM_POOLS; i++ )
  1257. {
  1258. bytesCommitted += m_Pools[i].GetCommittedSize();
  1259. bytesAllocated += ( size_t( m_Pools[i].CountAllocatedBlocks() ) * size_t( m_Pools[i].GetBlockSize() ) );
  1260. }
  1261. }
  1262. const char *Tier0_Prettynum( int64 num )
  1263. {
  1264. static char s_Buffer[ 16 * 64 ], *s_pNext = s_Buffer;
  1265. int nDigits = 1, nSymbols = num < 0 ? 1 : 0; // how many digits there are so far in the string; at least one digit is always there
  1266. for ( int64 remaining = num / 10; remaining; remaining /= 10 )
  1267. {
  1268. if ( ( nDigits % 3 ) == 0 ) // we already have 3n digits in the string, and we're about to put the 3n+1st digit. Add a comma there
  1269. nSymbols++; // comma
  1270. nDigits++;
  1271. }
  1272. if ( s_pNext + nDigits + nSymbols + 1 >= s_Buffer + sizeof( s_Buffer ) )
  1273. {
  1274. s_pNext = s_Buffer ;
  1275. }
  1276. char *pEndOfString = s_pNext + nDigits + nSymbols, *pRunning = pEndOfString;
  1277. *pRunning = '\0';
  1278. int nDigitsWritten = 1;
  1279. *--pRunning = char( '0' + ( num < 0 ? -( num % 10 ) : ( num % 10 ) ) ); // use the digit's magnitude: for negative num, num % 10 is negative and adding it to '0' would emit garbage
  1280. for ( int64 remaining = num / 10; remaining; remaining /= 10 )
  1281. {
  1282. if ( ( nDigitsWritten % 3 ) == 0 ) // we already have 3n digits in the string, and we're about to put the 3n+1st digit. Add a comma there
  1283. *--pRunning = ',';
  1284. *--pRunning = char( '0' + ( remaining < 0 ? -( remaining % 10 ) : ( remaining % 10 ) ) );
  1285. ++nDigitsWritten;
  1286. }
  1287. if ( num < 0 )
  1288. *--pRunning = '-';
  1289. if ( pRunning != s_pNext )
  1290. DebuggerBreakIfDebugging();
  1291. s_pNext = pEndOfString+1;
  1292. return pRunning;
  1293. }
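// Usage note: Tier0_Prettynum( 1234567 ) returns "1,234,567". Results live in
// a shared 1 KiB ring buffer, so each returned pointer is only valid until
// the buffer wraps; that is fine for the immediate printf-style consumption
// below, but the string must not be cached.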
  1294. template <typename CAllocator>
  1295. void CSmallBlockHeap<CAllocator>::DumpStats( const char *pszTag, FILE *pFile, IMemAlloc::DumpStatsFormat_t nFormat )
  1296. {
  1297. size_t bytesCommitted, bytesAllocated;
  1298. Usage( bytesCommitted, bytesAllocated );
  1299. if ( pFile )
  1300. {
  1301. if ( bytesCommitted || bytesAllocated )
  1302. {
  1303. if ( nFormat == IMemAlloc::FORMAT_HTML )
  1304. {
  1305. fprintf( pFile, "<div class=\"sbhTable\" data-role=\"collapsible\">" );
  1306. const char *pExtraAttrib = CAllocator::IsVirtual() ? " style=\"color:red\"" : ""; // we shouldn't have any bytes committed on a virtual allocator
  1307. fprintf( pFile, "<legend%s>Committed:<b>%16s</b> Allocated:<b>%16s</b></legend>\n", pExtraAttrib, Tier0_Prettynum( bytesCommitted ), Tier0_Prettynum( bytesAllocated ) );
  1308. fprintf( pFile, "<table class=\"dataTable\" style=\"border:1px solid grey;text-align:right;margin:1px;width:auto;\">"
  1309. "<tbody><tr style=\"color:white;border:1px solid grey;margin:2px\"><th>Pool&nbsp;</th><th>Size&nbsp;</th><th>Used#&nbsp;</th><th>(%%)&nbsp;</th><th>Free#&nbsp;</th><th>(%%)&nbsp;</th><th>Commit#&nbsp;</th><th>Commit mem&nbsp;</th></tr>"
  1310. );
  1311. for ( int i = 0; i < NUM_POOLS; i++ )
  1312. {
  1313. uint nBlockSize = uint( m_Pools[ i ].GetBlockSize() );
  1314. uint nAllocatedBlocks = m_Pools[ i ].CountAllocatedBlocks();
  1315. uint nFreeBlocks = m_Pools[ i ].CountFreeBlocks();
  1316. uint nCommittedBlocks = m_Pools[ i ].CountCommittedBlocks();
  1317. uint64 nCommittedSize = m_Pools[ i ].GetCommittedSize();
  1318. if ( nCommittedBlocks )
  1319. {
  1320. // output for vxconsole parsing
  1321. fprintf( pFile, "<tr><td>%d</td><td>%d</td><td>%s</td><td><i>%d%%</i></td><td>%s</td><td><i>%d%%</i></td><td>%s</td><td>%s</td></tr>\n",
  1322. i,
  1323. nBlockSize,
  1324. Tier0_Prettynum( nAllocatedBlocks ),
  1325. nCommittedBlocks ? int( nAllocatedBlocks * 100.0 / nCommittedBlocks ) : 0,
  1326. Tier0_Prettynum( nFreeBlocks ),
  1327. nCommittedBlocks ? int( nFreeBlocks * 100.0 / nCommittedBlocks ) : 0,
  1328. Tier0_Prettynum( nCommittedBlocks ),
  1329. Tier0_Prettynum( nCommittedSize )
  1330. );
  1331. }
  1332. else
  1333. {
  1334. fprintf( pFile, "<tr style=\"color:#444444\"><td>%d</td><td>%d</td><td colspan=6>Not Used</td></tr>\n", i, nBlockSize );
  1335. }
  1336. }
  1337. fprintf( pFile, "</tbody></table></div><script>$(document).ready(function(){"
  1338. "$(\".sbhTable\").accordion( { collapsible:true, active:false } );"
  1339. "});</script>" );
  1340. }
  1341. else
  1342. {
  1343. for ( int i = 0; i < NUM_POOLS; i++ )
  1344. {
  1345. uint nBlockSize = uint( m_Pools[ i ].GetBlockSize() );
  1346. uint nAllocatedBlocks = m_Pools[ i ].CountAllocatedBlocks();
  1347. uint nFreeBlocks = m_Pools[ i ].CountFreeBlocks();
  1348. uint nCommittedBlocks = m_Pools[ i ].CountCommittedBlocks();
  1349. uint64 nCommittedSize = m_Pools[ i ].GetCommittedSize();
  1350. if ( nCommittedBlocks )
  1351. {
  1352. // output for vxconsole parsing
  1353. fprintf( pFile, "Pool %3i: (%5u-byte) blocks used:%16s (%2d%%) free:%16s (%2d%%) commit:%16s (bytes:%16s)\n",
  1354. i,
  1355. nBlockSize,
  1356. Tier0_Prettynum( nAllocatedBlocks ),
  1357. nCommittedBlocks ? int( nAllocatedBlocks * 100.0 / nCommittedBlocks ) : 0,
  1358. Tier0_Prettynum( nFreeBlocks ),
  1359. nCommittedBlocks ? int( nFreeBlocks * 100.0 / nCommittedBlocks ) : 0,
  1360. Tier0_Prettynum( nCommittedBlocks ),
  1361. Tier0_Prettynum( nCommittedSize )
  1362. );
  1363. }
  1364. else
  1365. {
  1366. fprintf( pFile, "Pool %3i: (%5u-byte) not used\n", i, nBlockSize );
  1367. }
  1368. }
  1369. fprintf( pFile, "Totals (%s): Committed:%16s Allocated:%16s\n", pszTag, Tier0_Prettynum( bytesCommitted ), Tier0_Prettynum( bytesAllocated ) );
  1370. }
  1371. }
  1372. else
  1373. {
  1374. fprintf( pFile, "%s is Empty\n", pszTag );
  1375. }
  1376. }
  1377. else
  1378. {
  1379. if ( bytesCommitted || bytesAllocated )
  1380. for ( int i = 0; i < NUM_POOLS; i++ )
  1381. {
  1382. uint nBlockSize = uint( m_Pools[ i ].GetBlockSize() );
  1383. uint nAllocatedBlocks = m_Pools[ i ].CountAllocatedBlocks();
  1384. uint nFreeBlocks = m_Pools[ i ].CountFreeBlocks();
  1385. uint nCommittedBlocks = m_Pools[ i ].CountCommittedBlocks();
  1386. if ( nCommittedBlocks )
  1387. {
  1388. uint64 nCommittedSize = m_Pools[ i ].GetCommittedSize();
  1389. Msg( "Pool %3i: (size: %5u) blocks: allocated:%16s (%2d%%) free:%16s (%2d%%) committed:%16s (committed size:%16s)\n",
  1390. i,
  1391. nBlockSize,
  1392. Tier0_Prettynum( nAllocatedBlocks ),
  1393. nCommittedBlocks ? int( nAllocatedBlocks * 100.0 / nCommittedBlocks ) : 0,
  1394. Tier0_Prettynum( nFreeBlocks ),
  1395. nCommittedBlocks ? int( nFreeBlocks * 100.0 / nCommittedBlocks ) : 0,
  1396. Tier0_Prettynum( nCommittedBlocks ),
  1397. Tier0_Prettynum( nCommittedSize )
  1398. );
  1399. }
  1400. else
  1401. {
  1402. Msg( "Pool %3i: (%5u-byte) not used\n", i, nBlockSize );
  1403. }
  1404. }
  1405. Msg( "Totals (%s): Committed:%16s Allocated:%16s\n", pszTag, Tier0_Prettynum( bytesCommitted ), Tier0_Prettynum( bytesAllocated ) );
  1406. }
  1407. }
  1408. template <typename CAllocator>
  1409. CSmallBlockPool<CAllocator> *CSmallBlockHeap<CAllocator>::FindPool( size_t nBytes )
  1410. {
  1411. return m_PoolLookup[ ( nBytes - 1 ) >> SBH_BLOCK_LOOKUP_GRANULARITY ];
  1412. }
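// Example (assuming SBH_BLOCK_LOOKUP_GRANULARITY == 3, i.e. 8-byte slots;
// value illustrative): FindPool( 17 ) computes ( 17 - 1 ) >> 3 == 2, and
// slot 2 was initialized by InitPools()/the constructor to reference the
// smallest pool whose block size is >= 24 (the slot's largest mapped size),
// so the returned pool always fits the request.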
  1413. template <typename CAllocator>
  1414. CSmallBlockPool<CAllocator> *CSmallBlockHeap<CAllocator>::FindPool( void *p )
  1415. {
  1416. // NOTE: If p < m_pBase, the cast to unsigned size_t makes the index huge, so the range check below safely rejects it
  1417. size_t index = (size_t)((byte *)p - m_pSharedData->m_pBase) / BYTES_PAGE;
  1418. if ( index < m_pSharedData->m_numPages )
  1419. return m_pSharedData->m_PageStatus[index].m_pPool;
  1420. return NULL;
  1421. }
  1422. template <typename CAllocator>
  1423. size_t CSmallBlockHeap<CAllocator>::Compact( bool bIncremental )
  1424. {
  1425. if ( g_bSBHCompactDisabled )
  1426. return 0;
  1427. size_t nRecovered = 0;
  1428. if ( bIncremental )
  1429. {
  1430. static int iLastIncremental;
  1431. iLastIncremental++;
  1432. for ( int i = 0; i < NUM_POOLS; i++ )
  1433. {
  1434. int idx = ( i + iLastIncremental ) % NUM_POOLS;
  1435. nRecovered = m_Pools[idx].Compact( bIncremental );
  1436. if ( nRecovered )
  1437. {
  1438. iLastIncremental = idx;
  1439. break;
  1440. }
  1441. }
  1442. }
  1443. else
  1444. {
  1445. for ( int i = 0; i < NUM_POOLS; i++ )
  1446. {
  1447. nRecovered += m_Pools[i].Compact( bIncremental );
  1448. }
  1449. }
  1450. return nRecovered;
  1451. }
  1452. template <typename CAllocator>
  1453. bool CSmallBlockHeap<CAllocator>::Validate()
  1454. {
  1455. bool valid = true;
  1456. for ( int i = 0; i < NUM_POOLS; i++ )
  1457. {
  1458. valid = m_Pools[i].Validate() && valid;
  1459. }
  1460. return valid;
  1461. }
  1462. #endif // MEM_SBH_ENABLED
  1463. //-----------------------------------------------------------------------------
  1464. // Lightweight memory tracking
  1465. //-----------------------------------------------------------------------------
  1466. #ifdef USE_LIGHT_MEM_DEBUG
  1467. #ifndef LIGHT_MEM_DEBUG_REQUIRES_CMD_LINE_SWITCH
  1468. #define UsingLMD() true
  1469. #else // LIGHT_MEM_DEBUG_REQUIRES_CMD_LINE_SWITCH
  1470. bool g_bUsingLMD = ( Plat_GetCommandLineA() ) ? ( strstr( Plat_GetCommandLineA(), "-uselmd" ) != NULL ) : false;
  1471. #define UsingLMD() g_bUsingLMD
  1472. #if defined( _PS3 )
  1473. #error "Plat_GetCommandLineA() not implemented on PS3"
  1474. #endif
  1475. #endif // LIGHT_MEM_DEBUG_REQUIRES_CMD_LINE_SWITCH
  1476. const char *g_pszUnknown = "unknown";
  1477. struct Sentinal_t
  1478. {
  1479. DWORD value[4];
  1480. };
  1481. Sentinal_t g_HeadSentinel =
  1482. {
  1483. 0xdeadbeef,
  1484. 0xbaadf00d,
  1485. 0xbd122969,
  1486. 0xdeadbeef,
  1487. };
  1488. Sentinal_t g_TailSentinel =
  1489. {
  1490. 0xbaadf00d,
  1491. 0xbd122969,
  1492. 0xdeadbeef,
  1493. 0xbaadf00d,
  1494. };
  1495. const byte g_FreeFill = 0xdd;
  1496. static const uint LWD_FREE = 0;
  1497. static const uint LWD_ALLOCATED = 1;
  1498. #define LMD_STATUS_BITS ( 1 )
  1499. #define LMD_ALIGN_BITS ( 32 - LMD_STATUS_BITS )
  1500. #define LMD_MAX_ALIGN ( 1 << ( LMD_ALIGN_BITS - 1) )
  1501. struct AllocHeader_t
  1502. {
  1503. const char *pszModule;
  1504. int line;
  1505. size_t nBytes;
  1506. uint status : LMD_STATUS_BITS;
  1507. uint align : LMD_ALIGN_BITS;
  1508. Sentinal_t sentinal;
  1509. };
  1510. const int g_nRecentFrees = ( IsPC() ) ? 8192 : 512;
  1511. AllocHeader_t **g_pRecentFrees = (AllocHeader_t **)calloc( g_nRecentFrees, sizeof(AllocHeader_t *) );
  1512. int g_iNextFreeSlot;
  1513. #define INTERNAL_INLINE
  1514. #define LMDToHeader( pUserPtr ) ( ((AllocHeader_t *)(pUserPtr)) - 1 )
  1515. #define LMDFromHeader( pHeader ) ( (byte *)((pHeader) + 1) )
  1516. CThreadFastMutex g_LMDMutex;
  1517. const char *g_pLMDFileName = NULL;
  1518. int g_nLMDLine;
  1519. int g_iLMDDepth;
  1520. void LMDPushAllocDbgInfo( const char *pFileName, int nLine )
  1521. {
  1522. if ( ThreadInMainThread() )
  1523. {
  1524. if ( !g_iLMDDepth )
  1525. {
  1526. g_pLMDFileName = pFileName;
  1527. g_nLMDLine = nLine;
  1528. }
  1529. g_iLMDDepth++;
  1530. }
  1531. }
  1532. void LMDPopAllocDbgInfo()
  1533. {
  1534. if ( ThreadInMainThread() && g_iLMDDepth > 0 )
  1535. {
  1536. g_iLMDDepth--;
  1537. if ( g_iLMDDepth == 0 )
  1538. {
  1539. g_pLMDFileName = NULL;
  1540. g_nLMDLine = 0;
  1541. }
  1542. }
  1543. }
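// Illustrative usage (hypothetical call site): a push/pop pair attributes any
// allocations made in between to the given file/line, but only on the main
// thread and only for the outermost pair:
//
//     g_pMemAlloc->PushAllocDbgInfo( __FILE__, __LINE__ );
//     byte *pBuffer = new byte[ 4096 ];   // recorded against this file/line
//     g_pMemAlloc->PopAllocDbgInfo();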
  1544. void LMDReportInvalidBlock( AllocHeader_t *pHeader, const char *pszMessage )
  1545. {
  1546. char szMsg[256];
  1547. if ( pHeader )
  1548. {
sprintf( szMsg, "HEAP IS CORRUPT: %s (block 0x%p, size %u, alignment %u)\n", pszMessage, LMDFromHeader( pHeader ), (uint)pHeader->nBytes, (uint)pHeader->align );
  1550. }
  1551. else
  1552. {
  1553. sprintf( szMsg, "HEAP IS CORRUPT: %s\n", pszMessage );
  1554. }
  1555. if ( Plat_IsInDebugSession() )
  1556. {
  1557. DebuggerBreak();
  1558. }
  1559. else
  1560. {
  1561. WriteMiniDump();
  1562. }
  1563. #ifdef IS_WINDOWS_PC
  1564. ::MessageBox( NULL, szMsg, "Error", MB_SYSTEMMODAL | MB_OK );
  1565. #else
Warning( "%s", szMsg );
  1567. #endif
  1568. }
  1569. void LMDValidateBlock( AllocHeader_t *pHeader, bool bFreeList )
  1570. {
  1571. if ( !pHeader )
  1572. return;
  1573. if ( memcmp( &pHeader->sentinal, &g_HeadSentinel, sizeof(Sentinal_t) ) != 0 )
  1574. {
  1575. LMDReportInvalidBlock( pHeader, "Head sentinel corrupt" );
  1576. }
  1577. if ( memcmp( ((Sentinal_t *)(LMDFromHeader( pHeader ) + pHeader->nBytes)), &g_TailSentinel, sizeof(Sentinal_t) ) != 0 )
  1578. {
  1579. LMDReportInvalidBlock( pHeader, "Tail sentinel corrupt" );
  1580. }
  1581. if ( bFreeList )
  1582. {
  1583. byte *pStart = (byte *)pHeader + sizeof(AllocHeader_t);
  1584. byte *pCur = pStart;
  1585. byte *pLimit = pCur + pHeader->nBytes;
  1586. while ( pCur != pLimit )
  1587. {
  1588. if ( *pCur++ != g_FreeFill )
  1589. {
  1590. char szMsg[128];
sprintf( szMsg, "Write after free, %d bytes after start of allocation", (int)( (pCur-1) - pStart ) );
  1592. LMDReportInvalidBlock( pHeader, szMsg );
  1593. }
  1594. }
  1595. }
  1596. }
  1597. size_t LMDComputeHeaderSize( size_t align = 0 )
  1598. {
  1599. if ( !align )
  1600. return sizeof(AllocHeader_t);
  1601. // For aligned allocs, the header is preceded by padding which maintains alignment
  1602. if ( align > LMD_MAX_ALIGN )
  1603. s_StdMemAlloc.SetCRTAllocFailed( align ); // TODO: could convert alignment to exponent to get around this, or use a flag for alignments over 1KB or 1MB...
  1604. return ( ( sizeof( AllocHeader_t ) + (align-1) ) & ~(align-1) );
  1605. }
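// Worked example (a header size of 32 is assumed purely for illustration; the
// real size depends on pointer width and packing): with align == 16 the
// rounding above yields ( 32 + 15 ) & ~15 == 32, i.e. no padding; with
// align == 64 it yields ( 32 + 63 ) & ~63 == 64, so 32 bytes of padding
// precede the header.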
  1606. size_t LMDAdjustSize( size_t &nBytes, size_t align = 0 )
  1607. {
  1608. if ( !UsingLMD() )
  1609. return nBytes;
  1610. // Add data before+after each alloc
  1611. return ( nBytes + LMDComputeHeaderSize( align ) + sizeof(Sentinal_t) );
  1612. }
  1613. void *LMDNoteAlloc( void *p, size_t nBytes, size_t align = 0, const char *pszModule = g_pszUnknown, int line = 0 )
  1614. {
  1615. if ( !UsingLMD() )
  1616. {
  1617. return p;
  1618. }
  1619. if ( g_pLMDFileName )
  1620. {
  1621. pszModule = g_pLMDFileName;
  1622. line = g_nLMDLine;
  1623. }
  1624. if ( p )
  1625. {
  1626. byte *pUserPtr = ((byte*)p) + LMDComputeHeaderSize( align );
  1627. AllocHeader_t *pHeader = LMDToHeader( pUserPtr );
  1628. pHeader->pszModule = pszModule;
  1629. pHeader->line = line;
  1630. pHeader->status = LWD_ALLOCATED;
  1631. pHeader->nBytes = nBytes;
  1632. pHeader->align = (uint)align;
  1633. pHeader->sentinal = g_HeadSentinel;
  1634. *((Sentinal_t *)(pUserPtr + pHeader->nBytes)) = g_TailSentinel;
  1635. LMDValidateBlock( pHeader, false );
  1636. return pUserPtr;
  1637. }
// Some SBH clients rely on allocations > 16 bytes being 16-byte aligned, so we mustn't break that assumption:
MEMSTD_COMPILE_TIME_ASSERT( sizeof( AllocHeader_t ) % 16 == 0 );
return NULL;
}
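// Illustrative sketch of the whole LMD allocation path, exactly as the
// release-mode wrappers later in this file drive it (sizes are examples):
//
//     size_t nBytes = 100;
//     size_t nAdjusted = LMDAdjustSize( nBytes );                         // 100 + header + tail sentinel
//     void *pRaw  = s_StdMemAlloc.InternalAlloc( DEF_REGION, nAdjusted );
//     void *pUser = LMDNoteAlloc( pRaw, nBytes );                         // fills the header, writes both sentinels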
  1642. void *LMDNoteFree( void *p )
  1643. {
  1644. if ( !UsingLMD() )
  1645. {
  1646. return p;
  1647. }
  1648. AUTO_LOCK( g_LMDMutex );
  1649. if ( !p )
  1650. {
  1651. return NULL;
  1652. }
  1653. AllocHeader_t *pHeader = LMDToHeader( p );
  1654. if ( pHeader->status == LWD_FREE )
  1655. {
  1656. LMDReportInvalidBlock( pHeader, "Double free" );
  1657. }
  1658. LMDValidateBlock( pHeader, false );
  1659. AllocHeader_t *pToReturn;
  1660. if ( pHeader->nBytes < 16*1024 )
  1661. {
  1662. pToReturn = g_pRecentFrees[g_iNextFreeSlot];
  1663. LMDValidateBlock( pToReturn, true );
  1664. g_pRecentFrees[g_iNextFreeSlot] = pHeader;
  1665. g_iNextFreeSlot = (g_iNextFreeSlot + 1 ) % g_nRecentFrees;
  1666. }
  1667. else
  1668. {
  1669. pToReturn = pHeader;
  1670. LMDValidateBlock( g_pRecentFrees[rand() % g_nRecentFrees], true );
  1671. }
  1672. pHeader->status = LWD_FREE;
  1673. memset( pHeader + 1, g_FreeFill, pHeader->nBytes );
  1674. if ( pToReturn && ( pToReturn->align ) )
  1675. {
  1676. // For aligned allocations, the actual system allocation starts *before* the LMD header:
  1677. size_t headerPadding = LMDComputeHeaderSize( pToReturn->align ) - sizeof( AllocHeader_t );
  1678. return ( ((byte*)pToReturn) - headerPadding );
  1679. }
  1680. return pToReturn;
  1681. }
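// A note on the delayed-free scheme above: small blocks (< 16KB) are filled
// with g_FreeFill and parked in the g_pRecentFrees ring rather than being
// returned to the heap immediately. The pointer actually handed back to the
// underlying allocator is the entry evicted from the ring, which is validated
// first - so a write into a recently freed block is caught when that block
// finally cycles out (large frees instead spot-check a random ring entry).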
  1682. size_t LMDGetSize( void *p )
  1683. {
  1684. if ( !UsingLMD() )
  1685. {
  1686. return (size_t)(-1);
  1687. }
  1688. AllocHeader_t *pHeader = LMDToHeader( p );
  1689. return pHeader->nBytes;
  1690. }
  1691. bool LMDValidateHeap()
  1692. {
  1693. if ( !UsingLMD() )
  1694. {
  1695. return true;
  1696. }
  1697. AUTO_LOCK( g_LMDMutex );
  1698. for ( int i = 0; i < g_nRecentFrees && g_pRecentFrees[i]; i++ )
  1699. {
  1700. LMDValidateBlock( g_pRecentFrees[i], true );
  1701. }
  1702. return true;
  1703. }
  1704. void *LMDRealloc( void *pMem, size_t nSize, size_t align = 0, const char *pszModule = g_pszUnknown, int line = 0 )
  1705. {
  1706. if ( nSize == 0 )
  1707. {
  1708. s_StdMemAlloc.Free( pMem );
  1709. return NULL;
  1710. }
  1711. void *pNew;
  1712. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1713. if ( align )
  1714. pNew = s_StdMemAlloc.AllocAlign( nSize, align, pszModule, line );
  1715. else
  1716. #endif // MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1717. pNew = s_StdMemAlloc.Alloc( nSize, pszModule, line );
  1718. if ( !pMem )
  1719. {
  1720. return pNew;
  1721. }
  1722. AllocHeader_t *pHeader = LMDToHeader( pMem );
  1723. if ( align != pHeader->align )
  1724. {
  1725. LMDReportInvalidBlock( pHeader, "Realloc changed alignment!" );
  1726. }
if ( !pNew )
{
// Alloc failed: per realloc semantics, leave the original block intact and return NULL.
return NULL;
}
size_t nCopySize = MIN( nSize, pHeader->nBytes );
memcpy( pNew, pMem, nCopySize );
  1729. s_StdMemAlloc.Free( pMem, pszModule, line );
  1730. return pNew;
  1731. }
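// Note: under LMD, realloc is always alloc + copy + free, so the returned
// pointer never aliases the old block and even a shrinking realloc moves the
// allocation; callers must not assume the address is preserved.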
  1732. #else // USE_LIGHT_MEM_DEBUG
  1733. #define INTERNAL_INLINE FORCEINLINE
  1734. #define UsingLMD() false
  1735. FORCEINLINE size_t LMDAdjustSize( size_t &nBytes, size_t align = 0 ) { return nBytes; }
  1736. #define LMDNoteAlloc( pHeader, ... ) (pHeader)
  1737. #define LMDNoteFree( pHeader, ... ) (pHeader)
  1738. #define LMDGetSize( pHeader ) (size_t)(-1)
  1739. #define LMDToHeader( pHeader ) (pHeader)
  1740. #define LMDFromHeader( pHeader ) (pHeader)
  1741. #define LMDValidateHeap() (true)
  1742. #define LMDPushAllocDbgInfo( pFileName, nLine ) ((void)0)
  1743. #define LMDPopAllocDbgInfo() ((void)0)
  1744. FORCEINLINE void *LMDRealloc( void *pMem, size_t nSize, size_t align = 0, const char *pszModule = NULL, int line = 0 ) { return NULL; }
  1745. #endif // USE_LIGHT_MEM_DEBUG
  1746. //-----------------------------------------------------------------------------
  1747. // Internal versions
  1748. //-----------------------------------------------------------------------------
  1749. INTERNAL_INLINE void *CStdMemAlloc::InternalAllocFromPools( size_t nSize )
  1750. {
  1751. #if MEM_SBH_ENABLED
  1752. void *pMem;
  1753. pMem = m_PrimarySBH.Alloc( nSize );
  1754. if ( pMem )
  1755. {
  1756. return pMem;
  1757. }
  1758. #ifdef MEMALLOC_USE_SECONDARY_SBH
  1759. pMem = m_SecondarySBH.Alloc( nSize );
  1760. if ( pMem )
  1761. {
  1762. return pMem;
  1763. }
  1764. #endif // MEMALLOC_USE_SECONDARY_SBH
  1765. #ifndef MEMALLOC_NO_FALLBACK
  1766. pMem = m_FallbackSBH.Alloc( nSize );
  1767. if ( pMem )
  1768. {
  1769. return pMem;
  1770. }
  1771. #endif // MEMALLOC_NO_FALLBACK
  1772. CallAllocFailHandler( nSize );
  1773. #endif // MEM_SBH_ENABLED
  1774. return NULL;
  1775. }
  1776. INTERNAL_INLINE void *CStdMemAlloc::InternalAlloc( int region, size_t nSize )
  1777. {
  1778. PROFILE_ALLOC(Malloc);
  1779. void *pMem;
  1780. #if MEM_SBH_ENABLED
  1781. if ( m_PrimarySBH.ShouldUse( nSize ) ) // test valid for either pool
  1782. {
  1783. pMem = InternalAllocFromPools( nSize );
  1784. if ( !pMem )
  1785. {
  1786. // Only compact the small block heaps and only try
  1787. // the allocation again if memory is recovered.
  1788. if ( InternalCompact( true ) )
  1789. {
  1790. pMem = InternalAllocFromPools( nSize );
  1791. }
  1792. }
  1793. if ( pMem )
  1794. {
  1795. ApplyMemoryInitializations( pMem, nSize );
  1796. return pMem;
  1797. }
  1798. ExecuteOnce( DevWarning( "\n\nDRASTIC MEMORY OVERFLOW: Fell out of small block heap!\n\n\n") );
  1799. }
  1800. #endif // MEM_SBH_ENABLED
  1801. pMem = malloc_internal( region, nSize );
  1802. if ( !pMem )
  1803. {
  1804. CompactOnFail();
  1805. pMem = malloc_internal( region, nSize );
  1806. if ( !pMem )
  1807. {
  1808. SetCRTAllocFailed( nSize );
  1809. return NULL;
  1810. }
  1811. }
  1812. ApplyMemoryInitializations( pMem, nSize );
  1813. return pMem;
  1814. }
  1815. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1816. INTERNAL_INLINE void *CStdMemAlloc::InternalAllocAligned( int region, size_t nSize, size_t align )
  1817. {
  1818. PROFILE_ALLOC(MallocAligned);
  1819. void *pMem;
  1820. #if MEM_SBH_ENABLED
  1821. size_t nSizeAligned = ( nSize + align - 1 ) & ~( align - 1 );
  1822. if ( m_PrimarySBH.ShouldUse( nSizeAligned ) ) // test valid for either pool
  1823. {
  1824. pMem = InternalAllocFromPools( nSizeAligned );
  1825. if ( !pMem )
  1826. {
  1827. CompactOnFail();
  1828. pMem = InternalAllocFromPools( nSizeAligned );
  1829. }
  1830. if ( pMem )
  1831. {
  1832. ApplyMemoryInitializations( pMem, nSizeAligned );
  1833. return pMem;
  1834. }
  1835. ExecuteOnce( DevWarning( "Warning: Fell out of small block heap!\n") );
  1836. }
  1837. #endif // MEM_SBH_ENABLED
  1838. pMem = malloc_aligned_internal( region, nSize, align );
  1839. if ( !pMem )
  1840. {
  1841. CompactOnFail();
  1842. pMem = malloc_aligned_internal( region, nSize, align );
  1843. if ( !pMem )
  1844. {
  1845. SetCRTAllocFailed( nSize );
  1846. return NULL;
  1847. }
  1848. }
  1849. ApplyMemoryInitializations( pMem, nSize );
  1850. return pMem;
  1851. }
  1852. #endif // MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1853. INTERNAL_INLINE void *CStdMemAlloc::InternalRealloc( void *pMem, size_t nSize )
  1854. {
  1855. if ( !pMem )
  1856. {
  1857. return RegionAlloc( DEF_REGION, nSize );
  1858. }
  1859. PROFILE_ALLOC(Realloc);
  1860. #if MEM_SBH_ENABLED
  1861. if ( m_PrimarySBH.IsOwner( pMem ) )
  1862. {
  1863. return m_PrimarySBH.Realloc( pMem, nSize );
  1864. }
  1865. #ifdef MEMALLOC_USE_SECONDARY_SBH
  1866. if ( m_SecondarySBH.IsOwner( pMem ) )
  1867. {
  1868. return m_SecondarySBH.Realloc( pMem, nSize );
  1869. }
  1870. #endif // MEMALLOC_USE_SECONDARY_SBH
  1871. #ifndef MEMALLOC_NO_FALLBACK
  1872. if ( m_FallbackSBH.IsOwner( pMem ) )
  1873. {
  1874. return m_FallbackSBH.Realloc( pMem, nSize );
  1875. }
  1876. #endif // MEMALLOC_NO_FALLBACK
  1877. #endif // MEM_SBH_ENABLED
  1878. void *pRet = realloc_internal( pMem, nSize );
  1879. if ( !pRet )
  1880. {
  1881. CompactOnFail();
  1882. pRet = realloc_internal( pMem, nSize );
  1883. if ( !pRet )
  1884. {
  1885. SetCRTAllocFailed( nSize );
  1886. }
  1887. }
  1888. return pRet;
  1889. }
  1890. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1891. INTERNAL_INLINE void *CStdMemAlloc::InternalReallocAligned( void *pMem, size_t nSize, size_t align )
  1892. {
  1893. if ( !pMem )
  1894. {
  1895. return InternalAllocAligned( DEF_REGION, nSize, align );
  1896. }
  1897. PROFILE_ALLOC(ReallocAligned);
  1898. #if MEM_SBH_ENABLED
  1899. if ( m_PrimarySBH.IsOwner( pMem ) )
  1900. {
  1901. return m_PrimarySBH.Realloc( pMem, nSize );
  1902. }
  1903. #ifdef MEMALLOC_USE_SECONDARY_SBH
  1904. if ( m_SecondarySBH.IsOwner( pMem ) )
  1905. {
  1906. return m_SecondarySBH.Realloc( pMem, nSize );
  1907. }
  1908. #endif // MEMALLOC_USE_SECONDARY_SBH
  1909. #ifndef MEMALLOC_NO_FALLBACK
  1910. if ( m_FallbackSBH.IsOwner( pMem ) )
  1911. {
  1912. return m_FallbackSBH.Realloc( pMem, nSize );
  1913. }
  1914. #endif // MEMALLOC_NO_FALLBACK
  1915. #endif // MEM_SBH_ENABLED
  1916. void *pRet = realloc_aligned_internal( pMem, nSize, align );
  1917. if ( !pRet )
  1918. {
  1919. CompactOnFail();
  1920. pRet = realloc_aligned_internal( pMem, nSize, align );
  1921. if ( !pRet )
  1922. {
  1923. SetCRTAllocFailed( nSize );
  1924. }
  1925. }
  1926. return pRet;
  1927. }
  1928. #endif
  1929. INTERNAL_INLINE void CStdMemAlloc::InternalFree( void *pMem )
  1930. {
  1931. if ( !pMem )
  1932. {
  1933. return;
  1934. }
  1935. PROFILE_ALLOC(Free);
  1936. #if MEM_SBH_ENABLED
  1937. if ( m_PrimarySBH.IsOwner( pMem ) )
  1938. {
  1939. m_PrimarySBH.Free( pMem );
  1940. return;
  1941. }
  1942. #ifdef MEMALLOC_USE_SECONDARY_SBH
  1943. if ( m_SecondarySBH.IsOwner( pMem ) )
  1944. {
  1945. return m_SecondarySBH.Free( pMem );
  1946. }
  1947. #endif // MEMALLOC_USE_SECONDARY_SBH
  1948. #ifndef MEMALLOC_NO_FALLBACK
  1949. if ( m_FallbackSBH.IsOwner( pMem ) )
  1950. {
  1951. m_FallbackSBH.Free( pMem );
  1952. return;
  1953. }
  1954. #endif // MEMALLOC_NO_FALLBACK
  1955. #endif // MEM_SBH_ENABLED
  1956. free_internal( pMem );
  1957. }
  1958. void CStdMemAlloc::CompactOnFail()
  1959. {
  1960. CompactHeap();
  1961. }
  1962. //-----------------------------------------------------------------------------
  1963. // Release versions
  1964. //-----------------------------------------------------------------------------
  1965. void *CStdMemAlloc::Alloc( size_t nSize )
  1966. {
  1967. size_t nAdjustedSize = LMDAdjustSize( nSize );
  1968. return LMDNoteAlloc( CStdMemAlloc::InternalAlloc( DEF_REGION, nAdjustedSize ), nSize );
  1969. }
  1970. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1971. void * CStdMemAlloc::AllocAlign( size_t nSize, size_t align )
  1972. {
  1973. size_t nAdjustedSize = LMDAdjustSize( nSize, align );
  1974. return LMDNoteAlloc( CStdMemAlloc::InternalAllocAligned( DEF_REGION, nAdjustedSize, align ), nSize, align );
  1975. }
  1976. #endif // MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1977. void *CStdMemAlloc::Realloc( void *pMem, size_t nSize )
  1978. {
  1979. if ( UsingLMD() )
  1980. return LMDRealloc( pMem, nSize );
  1981. return CStdMemAlloc::InternalRealloc( pMem, nSize );
  1982. }
  1983. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1984. void * CStdMemAlloc::ReallocAlign( void *pMem, size_t nSize, size_t align )
  1985. {
  1986. if ( UsingLMD() )
  1987. return LMDRealloc( pMem, nSize, align );
  1988. return CStdMemAlloc::InternalReallocAligned( pMem, nSize, align );
  1989. }
  1990. #endif // MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1991. void CStdMemAlloc::Free( void *pMem )
  1992. {
  1993. pMem = LMDNoteFree( pMem );
  1994. CStdMemAlloc::InternalFree( pMem );
  1995. }
  1996. void *CStdMemAlloc::Expand_NoLongerSupported( void *pMem, size_t nSize )
  1997. {
  1998. return NULL;
  1999. }
  2000. //-----------------------------------------------------------------------------
  2001. // Debug versions
  2002. //-----------------------------------------------------------------------------
  2003. void *CStdMemAlloc::Alloc( size_t nSize, const char *pFileName, int nLine )
  2004. {
  2005. size_t nAdjustedSize = LMDAdjustSize( nSize );
  2006. return LMDNoteAlloc( CStdMemAlloc::InternalAlloc( DEF_REGION, nAdjustedSize ), nSize, 0, pFileName, nLine );
  2007. }
  2008. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  2009. void *CStdMemAlloc::AllocAlign( size_t nSize, size_t align, const char *pFileName, int nLine )
  2010. {
  2011. size_t nAdjustedSize = LMDAdjustSize( nSize, align );
  2012. return LMDNoteAlloc( CStdMemAlloc::InternalAllocAligned( DEF_REGION, nAdjustedSize, align ), nSize, align, pFileName, nLine );
  2013. }
  2014. #endif // MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  2015. void *CStdMemAlloc::Realloc( void *pMem, size_t nSize, const char *pFileName, int nLine )
  2016. {
  2017. if ( UsingLMD() )
  2018. return LMDRealloc( pMem, nSize, 0, pFileName, nLine );
  2019. return CStdMemAlloc::InternalRealloc( pMem, nSize );
  2020. }
  2021. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  2022. void * CStdMemAlloc::ReallocAlign( void *pMem, size_t nSize, size_t align, const char *pFileName, int nLine )
  2023. {
  2024. if ( UsingLMD() )
  2025. return LMDRealloc( pMem, nSize, align, pFileName, nLine );
  2026. return CStdMemAlloc::InternalReallocAligned( pMem, nSize, align );
  2027. }
  2028. #endif // MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  2029. void CStdMemAlloc::Free( void *pMem, const char *pFileName, int nLine )
  2030. {
  2031. pMem = LMDNoteFree( pMem );
  2032. CStdMemAlloc::InternalFree( pMem );
  2033. }
  2034. void *CStdMemAlloc::Expand_NoLongerSupported( void *pMem, size_t nSize, const char *pFileName, int nLine )
  2035. {
  2036. return NULL;
  2037. }
  2038. //-----------------------------------------------------------------------------
  2039. // Region support
  2040. //-----------------------------------------------------------------------------
  2041. void *CStdMemAlloc::RegionAlloc( int region, size_t nSize )
  2042. {
  2043. size_t nAdjustedSize = LMDAdjustSize( nSize );
  2044. return LMDNoteAlloc( CStdMemAlloc::InternalAlloc( region, nAdjustedSize ), nSize );
  2045. }
  2046. void *CStdMemAlloc::RegionAlloc( int region, size_t nSize, const char *pFileName, int nLine )
  2047. {
  2048. size_t nAdjustedSize = LMDAdjustSize( nSize );
  2049. return LMDNoteAlloc( CStdMemAlloc::InternalAlloc( region, nAdjustedSize ), nSize, 0, pFileName, nLine );
  2050. }
  2051. #if defined (LINUX)
  2052. #include <malloc.h>
  2053. #elif defined (OSX)
  2054. #define malloc_usable_size( ptr ) malloc_size( ptr )
  2055. extern "C" {
  2056. extern size_t malloc_size( const void *ptr );
  2057. }
  2058. #endif // LINUX/OSX
  2059. //-----------------------------------------------------------------------------
  2060. // Returns the size of a particular allocation (NOTE: may be larger than the size requested!)
  2061. //-----------------------------------------------------------------------------
  2062. size_t CStdMemAlloc::GetSize( void *pMem )
  2063. {
  2064. if ( !pMem )
  2065. return CalcHeapUsed();
  2066. if ( UsingLMD() )
  2067. {
  2068. return LMDGetSize( pMem );
  2069. }
  2070. #if MEM_SBH_ENABLED
  2071. if ( m_PrimarySBH.IsOwner( pMem ) )
  2072. {
  2073. return m_PrimarySBH.GetSize( pMem );
  2074. }
  2075. #ifdef MEMALLOC_USE_SECONDARY_SBH
  2076. if ( m_SecondarySBH.IsOwner( pMem ) )
  2077. {
  2078. return m_SecondarySBH.GetSize( pMem );
  2079. }
  2080. #endif // MEMALLOC_USE_SECONDARY_SBH
  2081. #ifndef MEMALLOC_NO_FALLBACK
  2082. if ( m_FallbackSBH.IsOwner( pMem ) )
  2083. {
  2084. return m_FallbackSBH.GetSize( pMem );
  2085. }
  2086. #endif // MEMALLOC_NO_FALLBACK
  2087. #endif // MEM_SBH_ENABLED
  2088. return msize_internal( pMem );
  2089. }
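// Illustrative usage (hypothetical call site): GetSize() reports the usable
// size of the block, which may exceed what was requested, and NULL is
// special-cased to return the total heap usage:
//
//     void *p = g_pMemAlloc->Alloc( 100 );
//     size_t nUsable = g_pMemAlloc->GetSize( p );     // >= 100 (SBH/dlmalloc rounding)
//     size_t nHeap   = g_pMemAlloc->GetSize( NULL );  // CalcHeapUsed()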
  2090. //-----------------------------------------------------------------------------
  2091. // Force file + line information for an allocation
  2092. //-----------------------------------------------------------------------------
  2093. void CStdMemAlloc::PushAllocDbgInfo( const char *pFileName, int nLine )
  2094. {
  2095. LMDPushAllocDbgInfo( pFileName, nLine );
  2096. }
  2097. void CStdMemAlloc::PopAllocDbgInfo()
  2098. {
  2099. LMDPopAllocDbgInfo();
  2100. }
  2101. //-----------------------------------------------------------------------------
  2102. // FIXME: Remove when we make our own heap! Crt stuff we're currently using
  2103. //-----------------------------------------------------------------------------
  2104. int32 CStdMemAlloc::CrtSetBreakAlloc( int32 lNewBreakAlloc )
  2105. {
  2106. return 0;
  2107. }
  2108. int CStdMemAlloc::CrtSetReportMode( int nReportType, int nReportMode )
  2109. {
  2110. return 0;
  2111. }
  2112. int CStdMemAlloc::CrtIsValidHeapPointer( const void *pMem )
  2113. {
  2114. return 1;
  2115. }
  2116. int CStdMemAlloc::CrtIsValidPointer( const void *pMem, unsigned int size, int access )
  2117. {
  2118. return 1;
  2119. }
  2120. int CStdMemAlloc::CrtCheckMemory( void )
  2121. {
  2122. #ifndef _CERT
  2123. LMDValidateHeap();
  2124. #if MEM_SBH_ENABLED
  2125. if ( !m_PrimarySBH.Validate() )
  2126. {
  2127. ExecuteOnce( Msg( "Small block heap is corrupt (primary)\n " ) );
  2128. }
  2129. #ifdef MEMALLOC_USE_SECONDARY_SBH
  2130. if ( !m_SecondarySBH.Validate() )
  2131. {
  2132. ExecuteOnce( Msg( "Small block heap is corrupt (secondary)\n " ) );
  2133. }
  2134. #endif // MEMALLOC_USE_SECONDARY_SBH
  2135. #ifndef MEMALLOC_NO_FALLBACK
  2136. if ( !m_FallbackSBH.Validate() )
  2137. {
  2138. ExecuteOnce( Msg( "Small block heap is corrupt (fallback)\n " ) );
  2139. }
  2140. #endif // MEMALLOC_NO_FALLBACK
  2141. #endif // MEM_SBH_ENABLED
  2142. #endif // _CERT
  2143. return 1;
  2144. }
  2145. int CStdMemAlloc::CrtSetDbgFlag( int nNewFlag )
  2146. {
  2147. return 0;
  2148. }
  2149. void CStdMemAlloc::CrtMemCheckpoint( _CrtMemState *pState )
  2150. {
  2151. }
  2152. // FIXME: Remove when we have our own allocator
  2153. void* CStdMemAlloc::CrtSetReportFile( int nRptType, void* hFile )
  2154. {
  2155. return 0;
  2156. }
  2157. void* CStdMemAlloc::CrtSetReportHook( void* pfnNewHook )
  2158. {
  2159. return 0;
  2160. }
  2161. int CStdMemAlloc::CrtDbgReport( int nRptType, const char * szFile,
  2162. int nLine, const char * szModule, const char * pMsg )
  2163. {
  2164. return 0;
  2165. }
  2166. int CStdMemAlloc::heapchk()
  2167. {
  2168. #ifdef _WIN32
  2169. CrtCheckMemory();
  2170. return _HEAPOK;
  2171. #else
  2172. return 1;
  2173. #endif
  2174. }
  2175. void CStdMemAlloc::DumpStats()
  2176. {
  2177. DumpStatsFileBase( "memstats" );
  2178. }
  2179. void CStdMemAlloc::DumpStatsFileBase( char const *pchFileBase, DumpStatsFormat_t nFormat )
  2180. {
  2181. #if defined( _WIN32 ) || defined( _GAMECONSOLE )
  2182. char filename[ 512 ];
  2183. _snprintf( filename, sizeof( filename ) - 1,
  2184. #ifdef _X360
  2185. "game:\\%s.txt",
  2186. #elif defined( _PS3 )
  2187. "/app_home/%s.txt",
  2188. #else
  2189. "%s.txt",
  2190. #endif
  2191. pchFileBase );
  2192. filename[ sizeof( filename ) - 1 ] = 0;
  2193. FILE *pFile = ( IsGameConsole() ) ? NULL : fopen( filename, "wt" );
  2194. #if MEM_SBH_ENABLED
  2195. if ( pFile )
  2196. fprintf( pFile, "Fixed Page SBH:\n" );
  2197. else
  2198. Msg( "Fixed Page SBH:\n" );
  2199. m_PrimarySBH.DumpStats("Fixed Page SBH", pFile, nFormat);
  2200. #ifdef MEMALLOC_USE_SECONDARY_SBH
  2201. if ( pFile )
  2202. fprintf( pFile, "Secondary Fixed Page SBH:\n" );
  2203. else
  2204. Msg( "Secondary Page SBH:\n" );
  2205. m_SecondarySBH.DumpStats("Secondary Page SBH", pFile);
  2206. #endif // MEMALLOC_USE_SECONDARY_SBH
  2207. #ifndef MEMALLOC_NO_FALLBACK
  2208. if ( pFile )
  2209. fprintf( pFile, "\nFallback SBH:\n" );
  2210. else
  2211. Msg( "\nFallback SBH:\n" );
m_FallbackSBH.DumpStats("Fallback SBH", pFile, nFormat); // Dump fallback small block heap statistics
  2213. #endif // MEMALLOC_NO_FALLBACK
  2214. #endif // MEM_SBH_ENABLED
  2215. #ifdef _PS3
  2216. malloc_managed_size mms;
  2217. (g_pMemOverrideRawCrtFns->pfn_malloc_stats)( &mms );
  2218. Msg( "PS3 malloc_stats: %u / %u / %u \n", mms.current_inuse_size, mms.current_system_size, mms.max_system_size );
  2219. #endif // _PS3
  2220. heapstats_internal( pFile, nFormat );
  2221. #if defined( _X360 )
  2222. XBX_rMemDump( filename );
  2223. #endif
  2224. if ( pFile )
  2225. fclose( pFile );
  2226. #endif // _WIN32 || _GAMECONSOLE
  2227. }
  2228. IVirtualMemorySection * CStdMemAlloc::AllocateVirtualMemorySection( size_t numMaxBytes )
  2229. {
  2230. #if defined( _GAMECONSOLE ) || defined( _WIN32 )
  2231. extern IVirtualMemorySection * VirtualMemoryManager_AllocateVirtualMemorySection( size_t numMaxBytes );
  2232. return VirtualMemoryManager_AllocateVirtualMemorySection( numMaxBytes );
  2233. #else
  2234. return NULL;
  2235. #endif
  2236. }
  2237. size_t CStdMemAlloc::ComputeMemoryUsedBy( char const *pchSubStr )
  2238. {
return 0; // dbg heap only.
  2240. }
  2241. static inline size_t ExtraDevkitMemory( void )
  2242. {
  2243. #if defined( _PS3 )
  2244. // 213MB are available in retail mode, so adjust free mem to reflect that even if we're in devkit mode
  2245. const size_t RETAIL_SIZE = 213*1024*1024;
  2246. static sys_memory_info stat;
  2247. sys_memory_get_user_memory_size( &stat );
  2248. if ( stat.total_user_memory > RETAIL_SIZE )
  2249. return ( stat.total_user_memory - RETAIL_SIZE );
  2250. #elif defined( _X360 )
  2251. // TODO: detect the new 1GB devkit...
  2252. #endif // _PS3/_X360
  2253. return 0;
  2254. }
  2255. void CStdMemAlloc::GlobalMemoryStatus( size_t *pUsedMemory, size_t *pFreeMemory )
  2256. {
  2257. if ( !pUsedMemory || !pFreeMemory )
  2258. return;
  2259. size_t dlMallocFree = 0;
  2260. #if defined( USE_DLMALLOC )
  2261. // Account for free memory contained within DLMalloc's FIRST region. The rationale is as follows:
  2262. // - the first region is supposed to service large allocations via virtual allocation, and to grow as
  2263. // needed (until all physical pages are used), so true 'out of memory' failures should occur there.
  2264. // - other regions (the 2-256kb 'medium block heap', or per-DLL heaps, and the Small Block Heap)
  2265. // are sized to a pre-determined high watermark, and not intended to grow. Free memory within
  2266. // those regions is not available for large allocations, so adding that to the 'free memory'
  2267. // yields confusing data which does not correspond well with out-of-memory failures.
  2268. mallinfo info = mspace_mallinfo( g_AllocRegions[ 0 ] );
  2269. dlMallocFree += info.fordblks;
  2270. #endif // USE_DLMALLOC
  2271. #if defined ( _X360 )
  2272. // GlobalMemoryStatus tells us how much physical memory is free
  2273. MEMORYSTATUS stat;
  2274. ::GlobalMemoryStatus( &stat );
  2275. *pFreeMemory = stat.dwAvailPhys;
  2276. *pFreeMemory += dlMallocFree;
  2277. // Adjust free mem to reflect a retail box, even if we're using a devkit with extra memory
  2278. *pFreeMemory -= ExtraDevkitMemory();
  2279. // Used is total minus free (discount the 32MB system reservation)
  2280. *pUsedMemory = ( stat.dwTotalPhys - 32*1024*1024 ) - *pFreeMemory;
  2281. #elif defined( _PS3 )
  2282. // NOTE: we use dlmalloc instead of the system heap, so we do NOT count the system heap's free space!
  2283. //static malloc_managed_size mms;
  2284. //(g_pMemOverrideRawCrtFns->pfn_malloc_stats)( &mms );
  2285. //int heapFree = mms.current_system_size - mms.current_inuse_size;
  2286. // sys_memory_get_user_memory_size tells us how much PPU memory is used/free
  2287. static sys_memory_info stat;
  2288. sys_memory_get_user_memory_size( &stat );
  2289. *pFreeMemory = stat.available_user_memory;
  2290. *pFreeMemory += dlMallocFree;
  2291. *pUsedMemory = stat.total_user_memory - *pFreeMemory;
  2292. // Adjust free mem to reflect a retail box, even if we're using a devkit with extra memory
  2293. *pFreeMemory -= ExtraDevkitMemory();
  2294. #else // _X360/_PS3/other
  2295. // no data
  2296. *pFreeMemory = 0;
  2297. *pUsedMemory = 0;
  2298. #endif // _X360/_PS3//other
  2299. }
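// Illustrative usage (hypothetical call site and threshold): on the consoles
// the numbers are normalized to a retail unit, so devkit-only memory never
// hides an impending out-of-memory condition:
//
//     size_t nUsed = 0, nFree = 0;
//     g_pMemAlloc->GlobalMemoryStatus( &nUsed, &nFree );
//     if ( nFree < LOW_MEM_WARN_THRESHOLD )
//         Warning( "Low on memory: %u bytes free\n", (uint)nFree );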
  2300. #define MAX_GENERIC_MEMORY_STATS 64
  2301. GenericMemoryStat_t g_MemStats[MAX_GENERIC_MEMORY_STATS];
  2302. int g_nMemStats = 0;
  2303. static inline int AddGenericMemoryStat( const char *name, int value )
  2304. {
  2305. Assert( g_nMemStats < MAX_GENERIC_MEMORY_STATS );
  2306. if ( g_nMemStats < MAX_GENERIC_MEMORY_STATS )
  2307. {
  2308. g_MemStats[ g_nMemStats ].name = name;
  2309. g_MemStats[ g_nMemStats ].value = value;
  2310. g_nMemStats++;
  2311. }
  2312. return g_nMemStats;
  2313. }
  2314. int CStdMemAlloc::GetGenericMemoryStats( GenericMemoryStat_t **ppMemoryStats )
  2315. {
  2316. if ( !ppMemoryStats )
  2317. return 0;
  2318. g_nMemStats = 0;
  2319. #if MEM_SBH_ENABLED
  2320. {
  2321. // Small block heap
  2322. size_t SBHCommitted = 0, SBHAllocated = 0;
  2323. size_t commitTmp, allocTmp;
  2325. m_PrimarySBH.Usage( commitTmp, allocTmp );
  2326. SBHCommitted += commitTmp; SBHAllocated += allocTmp;
  2327. #ifdef MEMALLOC_USE_SECONDARY_SBH
  2328. m_SecondarySBH.Usage( commitTmp, allocTmp );
  2329. SBHCommitted += commitTmp; SBHAllocated += allocTmp;
  2330. #endif // MEMALLOC_USE_SECONDARY_SBH
  2331. #ifndef MEMALLOC_NO_FALLBACK
  2332. m_FallbackSBH.Usage( commitTmp, allocTmp );
  2333. SBHCommitted += commitTmp; SBHAllocated += allocTmp;
  2334. #endif // MEMALLOC_NO_FALLBACK
  2336. static size_t SBHMaxCommitted = 0; SBHMaxCommitted = MAX( SBHMaxCommitted, SBHCommitted );
  2337. AddGenericMemoryStat( "SBH_cur", (int)SBHCommitted );
  2338. AddGenericMemoryStat( "SBH_max", (int)SBHMaxCommitted );
  2339. }
  2340. #endif // MEM_SBH_ENABLED
  2341. #if defined( USE_DLMALLOC )
  2342. #if !defined( MEMALLOC_REGIONS ) && defined( MEMALLOC_SEGMENT_MIXED )
  2343. {
  2344. // Medium block heap
  2345. mallinfo infoMBH = mspace_mallinfo( g_AllocRegions[ 1 ] );
size_t nMBHCurUsed = infoMBH.uordblks; // TODO: figure out why dlmalloc mis-reports MBH max usage via usmblks (it just returns the footprint); we track the max ourselves below
  2347. static size_t nMBHMaxUsed = 0; nMBHMaxUsed = MAX( nMBHMaxUsed, nMBHCurUsed );
  2348. AddGenericMemoryStat( "MBH_cur", (int)nMBHCurUsed );
  2349. AddGenericMemoryStat( "MBH_max", (int)nMBHMaxUsed );
  2350. // Large block heap
  2351. mallinfo infoLBH = mspace_mallinfo( g_AllocRegions[ 0 ] );
  2352. size_t nLBHCurUsed = mspace_footprint( g_AllocRegions[ 0 ] ), nLBHMaxUsed = mspace_max_footprint( g_AllocRegions[ 0 ] ), nLBHArenaSize = infoLBH.arena, nLBHFree = infoLBH.fordblks;
  2353. AddGenericMemoryStat( "LBH_cur", (int)nLBHCurUsed );
  2354. AddGenericMemoryStat( "LBH_max", (int)nLBHMaxUsed );
  2355. // LBH arena used+free (these are non-virtual allocations - there should be none, since we only allocate 256KB+ items in the LBH)
  2356. // TODO: I currently see the arena grow to 320KB due to a larger allocation being realloced down... if this gets worse, add an 'ALWAYS use VMM' flag to the mspace.
  2357. AddGenericMemoryStat( "LBH_arena", (int)nLBHArenaSize );
  2358. AddGenericMemoryStat( "LBH_free", (int)nLBHFree );
  2359. }
  2360. #else // (!MEMALLOC_REGIONS && MEMALLOC_SEGMENT_MIXED)
  2361. {
  2362. // Single dlmalloc heap (TODO: per-DLL heap stats, if we resurrect that)
  2363. mallinfo info = mspace_mallinfo( g_AllocRegions[ 0 ] );
  2364. AddGenericMemoryStat( "mspace_cur", (int)info.uordblks );
  2365. AddGenericMemoryStat( "mspace_max", (int)info.usmblks );
  2366. AddGenericMemoryStat( "mspace_size", (int)mspace_footprint( g_AllocRegions[ 0 ] ) );
  2367. }
  2368. #endif // (!MEMALLOC_REGIONS && MEMALLOC_SEGMENT_MIXED)
  2369. #endif // USE_DLMALLOC
size_t nMaxPhysMemUsed_Delta = 0;
  2372. #ifdef _PS3
  2373. {
  2374. // System heap (should not exist!)
  2375. static malloc_managed_size mms;
  2376. (g_pMemOverrideRawCrtFns->pfn_malloc_stats)( &mms );
  2377. if ( mms.current_system_size )
  2378. AddGenericMemoryStat( "sys_heap", (int)mms.current_system_size );
  2379. // Virtual Memory Manager
  2380. size_t nReserved = 0, nReservedMax = 0, nCommitted = 0, nCommittedMax = 0;
  2381. extern void VirtualMemoryManager_GetStats( size_t &nReserved, size_t &nReservedMax, size_t &nCommitted, size_t &nCommittedMax );
  2382. VirtualMemoryManager_GetStats( nReserved, nReservedMax, nCommitted, nCommittedMax );
  2383. AddGenericMemoryStat( "VMM_reserved", (int)nReserved );
  2384. AddGenericMemoryStat( "VMM_reserved_max", (int)nReservedMax );
  2385. AddGenericMemoryStat( "VMM_committed", (int)nCommitted );
  2386. AddGenericMemoryStat( "VMM_committed_max", (int)nCommittedMax );
  2387. // Estimate memory committed by memory stacks (these account for all VMM allocations other than the SBH/MBH/LBH)
  2388. size_t nHeapTotal = 1024*1024*MBYTES_PRIMARY_SBH;
  2389. #if defined( USE_DLMALLOC )
  2390. for ( int i = 0; i < ARRAYSIZE(g_AllocRegions); i++ )
  2391. {
  2392. nHeapTotal += mspace_footprint( g_AllocRegions[i] );
  2393. }
  2394. #endif // USE_DLMALLOC
  2395. size_t nMemStackTotal = nCommitted - nHeapTotal;
  2396. AddGenericMemoryStat( "MemStacks", (int)nMemStackTotal );
  2397. // On PS3, we can more accurately determine 'phys_free_min', since we know nCommittedMax
  2398. // (otherwise nPhysFreeMin is only updated intermittently; when this function is called):
  2399. nMaxPhysMemUsed_Delta = nCommittedMax - nCommitted;
  2400. }
  2401. #endif // _PS3
  2402. #if defined( _GAMECONSOLE )
  2403. // Total/free/min-free physical pages
  2404. {
  2405. #if defined( _X360 )
  2406. MEMORYSTATUS stat;
  2407. ::GlobalMemoryStatus( &stat );
  2408. size_t nPhysTotal = stat.dwTotalPhys, nPhysFree = stat.dwAvailPhys - ExtraDevkitMemory();
  2409. #elif defined( _PS3 )
  2410. static sys_memory_info stat;
  2411. sys_memory_get_user_memory_size( &stat );
  2412. size_t nPhysTotal = stat.total_user_memory, nPhysFree = stat.available_user_memory - ExtraDevkitMemory();
  2413. #endif // _X360/_PS3
  2414. static size_t nPhysFreeMin = nPhysTotal;
  2415. nPhysFreeMin = MIN( nPhysFreeMin, ( nPhysFree - nMaxPhysMemUsed_Delta ) );
  2416. AddGenericMemoryStat( "phys_total", (int)nPhysTotal );
  2417. AddGenericMemoryStat( "phys_free", (int)nPhysFree );
  2418. AddGenericMemoryStat( "phys_free_min", (int)nPhysFreeMin );
  2419. }
  2420. #endif // _GAMECONSOLE
  2421. *ppMemoryStats = &g_MemStats[0];
  2422. return g_nMemStats;
  2423. }
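// Illustrative consumer (hypothetical call site): the returned array points at
// a static snapshot that is rebuilt on every call, so read it out right away:
//
//     GenericMemoryStat_t *pStats = NULL;
//     int nStats = g_pMemAlloc->GetGenericMemoryStats( &pStats );
//     for ( int i = 0; i < nStats; i++ )
//         Msg( "%s: %d\n", pStats[ i ].name, pStats[ i ].value );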
  2424. void CStdMemAlloc::CompactHeap()
  2425. {
  2426. InternalCompact( false );
  2427. }
  2428. size_t CStdMemAlloc::InternalCompact( bool bSmallBlockOnly )
  2429. {
  2430. size_t nTotalBytesRecovered = 0;
  2431. #if MEM_SBH_ENABLED
  2432. if ( !m_CompactMutex.TryLock() )
  2433. {
  2434. return 0;
  2435. }
  2436. if ( m_bInCompact )
  2437. {
  2438. m_CompactMutex.Unlock();
  2439. return 0;
  2440. }
  2441. m_bInCompact = true;
  2442. size_t nBytesRecovered = 0;
  2443. #ifndef MEMALLOC_NO_FALLBACK
  2444. nBytesRecovered = m_FallbackSBH.Compact( false );
  2445. nTotalBytesRecovered += nBytesRecovered;
  2446. if ( nBytesRecovered && IsGameConsole() )
  2447. {
  2448. Msg( "Compact freed %d bytes from virtual heap (up to 256k still committed)\n", nBytesRecovered );
  2449. }
  2450. #endif // MEMALLOC_NO_FALLBACK
  2451. nBytesRecovered = m_PrimarySBH.Compact( false );
  2452. nTotalBytesRecovered += nBytesRecovered;
  2453. #ifdef MEMALLOC_USE_SECONDARY_SBH
nBytesRecovered = m_SecondarySBH.Compact( false );
  2455. nTotalBytesRecovered += nBytesRecovered;
  2456. #endif
// Skip compacting the main heap if the call requested
// only the small block heaps to be compacted.
if ( !bSmallBlockOnly )
{
nBytesRecovered = compact_internal();
nTotalBytesRecovered += nBytesRecovered;
if ( nBytesRecovered && IsGameConsole() )
{
Msg( "Compact released %d bytes from the mixed block heap\n", nBytesRecovered );
}
}
  2473. m_bInCompact = false;
  2474. m_CompactMutex.Unlock();
  2475. #endif // MEM_SBH_ENABLED
  2476. return nTotalBytesRecovered;
  2477. }
  2478. void CStdMemAlloc::CompactIncremental()
  2479. {
  2480. #if MEM_SBH_ENABLED
  2481. if ( !m_CompactMutex.TryLock() )
  2482. {
  2483. return;
  2484. }
  2485. if ( m_bInCompact )
  2486. {
  2487. m_CompactMutex.Unlock();
  2488. return;
  2489. }
  2490. m_bInCompact = true;
  2491. #ifndef MEMALLOC_NO_FALLBACK
  2492. m_FallbackSBH.Compact( true );
  2493. #endif
  2494. m_PrimarySBH.Compact( true );
  2495. #ifdef MEMALLOC_USE_SECONDARY_SBH
  2496. m_SecondarySBH.Compact( true );
  2497. #endif
  2498. m_bInCompact = false;
  2499. m_CompactMutex.Unlock();
  2500. #endif // MEM_SBH_ENABLED
  2501. }
  2502. MemAllocFailHandler_t CStdMemAlloc::SetAllocFailHandler( MemAllocFailHandler_t pfnMemAllocFailHandler )
  2503. {
  2504. MemAllocFailHandler_t pfnPrevious = m_pfnFailHandler;
  2505. m_pfnFailHandler = pfnMemAllocFailHandler;
  2506. return pfnPrevious;
  2507. }
  2508. size_t CStdMemAlloc::DefaultFailHandler( size_t nBytes )
  2509. {
  2510. if ( IsX360() )
  2511. {
  2512. #ifdef _X360
  2513. ExecuteOnce(
  2514. {
  2515. char buffer[256];
  2516. _snprintf( buffer, sizeof( buffer ), "***** Memory pool overflow, attempted allocation size: %u (not a critical error)\n", nBytes );
  2517. XBX_OutputDebugString( buffer );
  2518. }
  2519. );
  2520. #endif // _X360
  2521. }
  2522. return 0;
  2523. }
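// Illustrative sketch (hypothetical handler and cache): a game system can
// register a fail handler that purges discardable caches and returns the
// number of bytes it freed; the default handler above just logs on 360 and
// returns 0:
//
//     static size_t PurgeCachesOnOOM( size_t nBytesWanted )
//     {
//         size_t nFreed = 0;
//         // nFreed += g_SomeDiscardableCache.Purge();  // hypothetical cache
//         return nFreed;
//     }
//     ...
//     s_StdMemAlloc.SetAllocFailHandler( PurgeCachesOnOOM );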
  2524. void CStdMemAlloc::SetStatsExtraInfo( const char *pMapName, const char *pComment )
  2525. {
  2526. }
  2527. void CStdMemAlloc::SetCRTAllocFailed( size_t nSize )
  2528. {
  2529. m_sMemoryAllocFailed = nSize;
  2530. DebuggerBreakIfDebugging();
  2531. #if defined( _PS3 ) && defined( _DEBUG )
  2532. DebuggerBreak();
  2533. #endif // _PS3
char buffer[256];
_snprintf( buffer, sizeof( buffer ), "***** OUT OF MEMORY! attempted allocation size: %u ****\n", (uint)nSize );
  2540. #ifdef _X360
  2541. XBX_OutputDebugString( buffer );
  2542. if ( !Plat_IsInDebugSession() )
  2543. {
  2544. XBX_CrashDump( true );
  2545. #if defined( _DEMO )
  2546. XLaunchNewImage( XLAUNCH_KEYWORD_DEFAULT_APP, 0 );
  2547. #else
  2548. XLaunchNewImage( "default.xex", 0 );
  2549. #endif // _DEMO
  2550. }
  2551. #elif defined(_WIN32 )
  2552. OutputDebugString( buffer );
  2553. if ( !Plat_IsInDebugSession() )
  2554. {
  2555. WriteMiniDump();
  2556. abort();
  2557. }
  2558. #else // _X360/_WIN32/other
  2559. printf( "%s\n", buffer );
  2560. if ( !Plat_IsInDebugSession() )
  2561. {
  2562. WriteMiniDump();
  2563. #if defined( _PS3 )
  2564. DumpStats();
  2565. #endif
  2566. Plat_ExitProcess( EXIT_FAILURE );
  2567. }
  2568. #endif // _X360/_WIN32/other
  2569. }
  2570. size_t CStdMemAlloc::MemoryAllocFailed()
  2571. {
  2572. return m_sMemoryAllocFailed;
  2573. }
  2574. #endif // MEM_IMPL_TYPE_STD
  2575. #endif // STEAM