Team Fortress 2 Source Code as on 22/4/2020
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2493 lines
64 KiB

  1. //========= Copyright (c) 1996-2005, Valve Corporation, All rights reserved. ============//
  2. //
  3. // Purpose: Memory allocation!
  4. //
  5. // $NoKeywords: $
  6. //=============================================================================//
  7. #include "tier0/platform.h"
  8. #if !defined(STEAM) && !defined(NO_MALLOC_OVERRIDE)
  9. //#include <malloc.h>
  10. #include <algorithm>
  11. #include "tier0/dbg.h"
  12. #include "tier0/memalloc.h"
  13. #include "tier0/threadtools.h"
  14. #include "mem_helpers.h"
  15. #include "memstd.h"
  16. #include "tier0/stacktools.h"
  17. #include "tier0/minidump.h"
  18. #ifdef _X360
  19. #include "xbox/xbox_console.h"
  20. #endif
  21. #ifdef _PS3
  22. #include "memoverride_ps3.h"
  23. #endif
  24. #ifndef _WIN32
  25. #define IsDebuggerPresent() false
  26. #endif
  27. #ifdef USE_LIGHT_MEM_DEBUG
  28. #undef USE_MEM_DEBUG
  29. #pragma message("*** USE_LIGHT_MEM_DEBUG is ON ***")
  30. #pragma optimize( "", off )
  31. #endif
  32. #define DEF_REGION 0
  33. #if defined( _WIN32 ) || defined( _PS3 )
  34. #define USE_DLMALLOC
  35. #define MEMALLOC_SEGMENT_MIXED
  36. #define MBH_SIZE_MB ( 45 + MBYTES_STEAM_MBH_USAGE )
  37. //#define MEMALLOC_REGIONS
  38. #endif // _WIN32 || _PS3
  39. #ifndef USE_DLMALLOC
  40. #ifdef _PS3
  41. #define malloc_internal( region, bytes ) (g_pMemOverrideRawCrtFns->pfn_malloc)(bytes)
  42. #define malloc_aligned_internal( region, bytes, align ) (g_pMemOverrideRawCrtFns->pfn_memalign)(align, bytes)
  43. #define realloc_internal (g_pMemOverrideRawCrtFns->pfn_realloc)
  44. #define realloc_aligned_internal (g_pMemOverrideRawCrtFns->pfn_reallocalign)
  45. #define free_internal (g_pMemOverrideRawCrtFns->pfn_free)
  46. #define msize_internal (g_pMemOverrideRawCrtFns->pfn_malloc_usable_size)
  47. #define compact_internal() (0)
  48. #define heapstats_internal(p) (void)(0)
  49. #else // _PS3
  50. #define malloc_internal( region, bytes) malloc(bytes)
  51. #define malloc_aligned_internal( region, bytes, align ) memalign(align, bytes)
  52. #define realloc_internal realloc
  53. #define realloc_aligned_internal realloc
  54. #define free_internal free
  55. #ifdef POSIX
  56. #define msize_internal malloc_usable_size
  57. #else // POSIX
  58. #define msize_internal _msize
  59. #endif // POSIX
  60. #define compact_internal() (0)
  61. #define heapstats_internal(p) (void)(0)
  62. #endif // _PS3
  63. #else // USE_DLMALLOC
  64. #define MSPACES 1
  65. #include "dlmalloc/malloc-2.8.3.h"
// Backing dlmalloc mspaces. Which regions exist depends on configuration:
//  - MEMALLOC_SEGMENT_MIXED: two spaces; index 0 starts empty (grows on
//    demand) and index 1 is a fixed MBH_SIZE_MB arena. SelectRegion() below
//    routes requests < REGION_SPLIT to index 1, larger ones to index 0.
//  - otherwise: a single unified arena.
void *g_AllocRegions[] =
{
#ifndef MEMALLOC_REGIONS
#ifdef MEMALLOC_SEGMENT_MIXED
	create_mspace( 0, 1 ), // unified
	create_mspace( MBH_SIZE_MB*1024*1024, 1 ),
#else
	create_mspace( 100*1024*1024, 1 ),
#endif
#else // MEMALLOC_REGIONS
	// @TODO: per DLL regions didn't work out very well. flux of usage left too much overhead. need to try lifetime-based management [6/9/2009 tom]
	create_mspace( 82*1024*1024, 1 ), // unified
#endif // MEMALLOC_REGIONS
};
  80. #ifndef MEMALLOC_REGIONS
  81. #ifndef MEMALLOC_SEGMENT_MIXED
  82. #define SelectRegion( region, bytes ) 0
  83. #else
  84. // NOTE: this split is designed to force the 'large block' heap to ONLY perform virtual allocs (see
  85. // DEFAULT_MMAP_THRESHOLD in malloc.cpp), to avoid ANY fragmentation or waste in an internal arena
  86. #define REGION_SPLIT (256*1024)
  87. #define SelectRegion( region, bytes ) g_AllocRegions[bytes < REGION_SPLIT]
  88. #endif
  89. #else // MEMALLOC_REGIONS
  90. #define SelectRegion( region, bytes ) g_AllocRegions[region]
  91. #endif // MEMALLOC_REGIONS
  92. #define malloc_internal( region, bytes ) mspace_malloc(SelectRegion(region,bytes), bytes)
  93. #define malloc_aligned_internal( region, bytes, align ) mspace_memalign(SelectRegion(region,bytes), align, bytes)
// Aligned realloc built on plain dlrealloc: dlrealloc makes no alignment
// guarantee, so if the returned block no longer satisfies 'align' we fall
// back to allocate-aligned / copy / free.
// TODO: implement realloc_aligned inside dlmalloc (requires splitting realloc's existing
// 'grow in-place' code into a new function, then call that w/ alloc_align/copy/free on failure)
FORCEINLINE void *realloc_aligned_internal( void *mem, size_t bytes, size_t align )
{
	byte *newMem = (byte *)dlrealloc( mem, bytes );
	// NULL (dlrealloc failure) also passes this test, correctly reporting failure.
	if ( ((size_t)newMem&(align-1)) == 0 )
		return newMem;
	// realloc broke alignment...
	byte *fallback = (byte *)malloc_aligned_internal( DEF_REGION, bytes, align );
	if ( !fallback )
		// NOTE(review): newMem is neither freed nor returned here, so the data is
		// lost on fallback failure — presumably acceptable because allocation
		// failure is fatal in this allocator; confirm.
		return NULL;
	// newMem was just (re)sized to 'bytes', so copying 'bytes' reads only valid
	// memory (the tail past the old size may be uninitialized).
	memcpy( fallback, newMem, bytes );
	dlfree( newMem );
	return fallback;
}
  109. inline size_t compact_internal()
  110. {
  111. size_t start = 0, end = 0;
  112. for ( int i = 0; i < ARRAYSIZE(g_AllocRegions); i++ )
  113. {
  114. start += mspace_footprint( g_AllocRegions[i] );
  115. mspace_trim( g_AllocRegions[i], 0 );
  116. end += mspace_footprint( g_AllocRegions[i] );
  117. }
  118. return ( start - end );
  119. }
  120. inline void heapstats_internal( FILE *pFile )
  121. {
  122. // @TODO: improve this presentation, as a table [6/1/2009 tom]
  123. char buf[1024];
  124. for ( int i = 0; i < ARRAYSIZE( g_AllocRegions ); i++ )
  125. {
  126. struct mallinfo info = mspace_mallinfo( g_AllocRegions[ i ] );
  127. size_t footPrint = mspace_footprint( g_AllocRegions[ i ] );
  128. size_t maxFootPrint = mspace_max_footprint( g_AllocRegions[ i ] );
  129. _snprintf( buf, sizeof(buf),
  130. "\ndlmalloc mspace %d (%s)\n"
  131. " %d:footprint -%10d (total space used by the mspace)\n"
  132. " %d:footprint_max -%10d (maximum total space used by the mspace)\n"
  133. " %d:arena -%10d (non-mmapped space allocated from system)\n"
  134. " %d:ordblks -%10d (number of free chunks)\n"
  135. " %d:hblkhd -%10d (space in mmapped regions)\n"
  136. " %d:usmblks -%10d (maximum total allocated space)\n"
  137. " %d:uordblks -%10d (total allocated space)\n"
  138. " %d:fordblks -%10d (total free space)\n"
  139. " %d:keepcost -%10d (releasable (via malloc_trim) space)\n",
  140. i, i?"medium-block":"large-block", i,footPrint, i,maxFootPrint, i,info.arena, i,info.ordblks, i,info.hblkhd, i,info.usmblks, i,info.uordblks, i,info.fordblks, i,info.keepcost );
  141. if ( pFile )
  142. fprintf( pFile, "%s", buf );
  143. else
  144. Msg( "%s", buf );
  145. }
  146. }
  147. #define realloc_internal dlrealloc
  148. #define free_internal dlfree
  149. #define msize_internal dlmalloc_usable_size
  150. #endif // USE_DLMALLOC
  151. #ifdef TIME_ALLOC
  152. CAverageCycleCounter g_MallocCounter;
  153. CAverageCycleCounter g_ReallocCounter;
  154. CAverageCycleCounter g_FreeCounter;
  155. #define PrintOne( name ) \
  156. Msg("%-48s: %6.4f avg (%8.1f total, %7.3f peak, %5d iters)\n", \
  157. #name, \
  158. g_##name##Counter.GetAverageMilliseconds(), \
  159. g_##name##Counter.GetTotalMilliseconds(), \
  160. g_##name##Counter.GetPeakMilliseconds(), \
  161. g_##name##Counter.GetIters() ); \
  162. memset( &g_##name##Counter, 0, sizeof(g_##name##Counter) )
// Dump the accumulated malloc/realloc/free timing counters (the PrintOne
// macro also resets each counter after printing). TIME_ALLOC builds only.
void PrintAllocTimes()
{
	PrintOne( Malloc );
	PrintOne( Realloc );
	PrintOne( Free );
}
  169. #define PROFILE_ALLOC(name) CAverageTimeMarker name##_ATM( &g_##name##Counter )
  170. #else // TIME_ALLOC
  171. #define PROFILE_ALLOC( name ) ((void)0)
  172. #define PrintAllocTimes() ((void)0)
  173. #endif // TIME_ALLOC
  174. #if _MSC_VER < 1400 && defined( MSVC ) && !defined(_STATIC_LINKED) && (defined(_DEBUG) || defined(USE_MEM_DEBUG))
// Debug-CRT-style operator new (block use / file / line). The extra debug
// parameters are intentionally ignored; just forward to the plain overload.
void *operator new( unsigned int nSize, int nBlockUse, const char *pFileName, int nLine )
{
	return ::operator new( nSize );
}
// Debug-CRT-style array operator new; debug parameters ignored, forwards to
// the plain array overload.
void *operator new[] ( unsigned int nSize, int nBlockUse, const char *pFileName, int nLine )
{
	return ::operator new[]( nSize );
}
  183. #endif
  184. #include "mem_impl_type.h"
  185. #if MEM_IMPL_TYPE_STD
  186. //-----------------------------------------------------------------------------
  187. // Singleton...
  188. //-----------------------------------------------------------------------------
  189. #pragma warning( disable:4074 ) // warning C4074: initializers put in compiler reserved initialization area
  190. #pragma init_seg( compiler )
  191. #if MEM_SBH_ENABLED
  192. CSmallBlockPool< CStdMemAlloc::CFixedAllocator< MBYTES_PRIMARY_SBH, true> >::SharedData_t CSmallBlockPool< CStdMemAlloc::CFixedAllocator< MBYTES_PRIMARY_SBH, true> >::gm_SharedData CONSTRUCT_EARLY;
  193. #ifdef MEMALLOC_USE_SECONDARY_SBH
  194. CSmallBlockPool< CStdMemAlloc::CFixedAllocator< MBYTES_SECONDARY_SBH, false> >::SharedData_t CSmallBlockPool< CStdMemAlloc::CFixedAllocator< MBYTES_SECONDARY_SBH, false> >::gm_SharedData CONSTRUCT_EARLY;
  195. #endif
  196. #ifndef MEMALLOC_NO_FALLBACK
  197. CSmallBlockPool< CStdMemAlloc::CVirtualAllocator >::SharedData_t CSmallBlockPool< CStdMemAlloc::CVirtualAllocator >::gm_SharedData CONSTRUCT_EARLY;
  198. #endif
  199. #endif // MEM_SBH_ENABLED
  200. static CStdMemAlloc s_StdMemAlloc CONSTRUCT_EARLY;
  201. #ifdef _PS3
  202. MemOverrideRawCrtFunctions_t *g_pMemOverrideRawCrtFns;
  203. IMemAlloc *g_pMemAllocInternalPS3 = &s_StdMemAlloc;
  204. PLATFORM_OVERRIDE_MEM_ALLOC_INTERNAL_PS3_IMPL
  205. #else // !_PS3
  206. #ifndef TIER0_VALIDATE_HEAP
  207. IMemAlloc *g_pMemAlloc = &s_StdMemAlloc;
  208. #else
  209. IMemAlloc *g_pActualAlloc = &s_StdMemAlloc;
  210. #endif
  211. #endif // _PS3
// Construct the standard allocator singleton. On PS3 this also performs the
// handshake that retrieves the raw (un-overridden) CRT allocation functions.
CStdMemAlloc::CStdMemAlloc()
  : m_pfnFailHandler( DefaultFailHandler ),
	m_sMemoryAllocFailed( (size_t)0 ),
	m_bInCompact( false )
{
#ifdef _PS3
	g_pMemAllocInternalPS3 = &s_StdMemAlloc;
	PLATFORM_OVERRIDE_MEM_ALLOC_INTERNAL_PS3.m_pMemAllocCached = &s_StdMemAlloc;
	// Back-channel with the PS3 CRT override (see memoverride_ps3.h): the
	// hooked malloc_stats() presumably recognizes these magic field values and
	// returns a pointer to the raw CRT function table instead of real stats —
	// TODO confirm against the memoverride_ps3 implementation.
	malloc_managed_size mms;
	mms.current_inuse_size = 0x12345678;
	mms.current_system_size = 0x09ABCDEF;
	mms.max_system_size = reinterpret_cast< size_t >( this );
	int iResult = malloc_stats( &mms );
	g_pMemOverrideRawCrtFns = reinterpret_cast< MemOverrideRawCrtFunctions_t * >( iResult );
#endif
}
  228. #if MEM_SBH_ENABLED
  229. //-----------------------------------------------------------------------------
  230. // Small block heap (multi-pool)
  231. //-----------------------------------------------------------------------------
  232. //-----------------------------------------------------------------------------
  233. //
  234. //-----------------------------------------------------------------------------
  235. template <typename T>
  236. inline T MemAlign( T val, unsigned alignment )
  237. {
  238. return (T)( ( (unsigned)val + alignment - 1 ) & ~( alignment - 1 ) );
  239. }
  240. //-----------------------------------------------------------------------------
  241. //
  242. //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Prepare this pool to hand out blocks of nBlockSize bytes. The first pool to
// initialize also reserves the shared arena used by all pools of this
// allocator type.
//-----------------------------------------------------------------------------
template <typename CAllocator>
void CSmallBlockPool<CAllocator>::Init( unsigned nBlockSize )
{
	SharedData_t *pSharedData = GetSharedData();
	if ( !pSharedData->m_pBase )
	{
		// First pool in: reserve the whole arena; pages are handed out from its base
		pSharedData->m_pBase = pSharedData->m_Allocator.AllocatePoolMemory();
		pSharedData->m_pLimit = pSharedData->m_pBase + CAllocator::TOTAL_BYTES;
		pSharedData->m_pNextBlock = pSharedData->m_pBase;
	}
	// Block size must be aligned, at least the minimum, and large enough to
	// hold a free-list node while the block sits on the free list.
	if ( !( nBlockSize % MIN_SBH_ALIGN == 0 && nBlockSize >= MIN_SBH_BLOCK && nBlockSize >= sizeof(TSLNodeBase_t) ) )
		DebuggerBreak();
	m_nBlockSize = nBlockSize;
	m_pNextAlloc = NULL;
	m_nCommittedPages = 0;
}
// Size in bytes of the blocks this pool hands out.
template <typename CAllocator>
size_t CSmallBlockPool<CAllocator>::GetBlockSize()
{
	return m_nBlockSize;
}
  264. // Define VALIDATE_SBH_FREE_LIST to a given block size to validate that pool's freelist (it'll crash on the next alloc/free after the list is corrupted)
  265. // NOTE: this may affect perf more than USE_LIGHT_MEM_DEBUG
  266. //#define VALIDATE_SBH_FREE_LIST 320
//-----------------------------------------------------------------------------
// Debug aid: walk this pool's free list under the write lock so a corrupted
// next pointer crashes here (near the corruption) rather than later. Compiled
// out unless VALIDATE_SBH_FREE_LIST matches this pool's block size.
//-----------------------------------------------------------------------------
template <typename CAllocator>
void CSmallBlockPool<CAllocator>::ValidateFreelist( SharedData_t *pSharedData )
{
#ifdef VALIDATE_SBH_FREE_LIST
	if ( m_nBlockSize != VALIDATE_SBH_FREE_LIST )
		return;
	static int count = 0;
	count++; // Track when the corruption occurs, if repeatable
	// Write lock: no other thread may touch the list while we walk it
	pSharedData->m_Lock.LockForWrite();
#ifdef USE_NATIVE_SLIST
	TSLNodeBase_t *pNode = (TSLNodeBase_t *)(m_FreeList.AccessUnprotected()->Next.Next);
#else
	TSLNodeBase_t *pNode = (TSLNodeBase_t *)(m_FreeList.AccessUnprotected()->value.Next);
#endif
	while( pNode )
		pNode = pNode->Next;
	pSharedData->m_Lock.UnlockWrite();
#endif // VALIDATE_SBH_FREE_LIST
}
//-----------------------------------------------------------------------------
// Allocate one block from this pool. Fast path: pop a block off the lock-free
// free list. Otherwise bump-allocate from the pool's current page via CAS,
// committing a fresh page from the shared backing store when the page is
// exhausted. Runs under the shared read lock (Compact/Validate take it for
// write, so they exclude all allocs/frees). Returns NULL only when the whole
// arena is exhausted.
//-----------------------------------------------------------------------------
template <typename CAllocator>
void *CSmallBlockPool<CAllocator>::Alloc()
{
	SharedData_t *pSharedData = GetSharedData();
	ValidateFreelist( pSharedData );
	CThreadSpinRWLock &sharedLock = pSharedData->m_Lock;
	// Try the cheap acquire first; fall back to the full (spinning) acquire
	if ( !sharedLock.TryLockForRead() )
	{
		sharedLock.LockForRead();
	}
	byte *pResult;
	intp iPage = -1;              // arena page index of the returned block (for bookkeeping below)
	int iThreadPriority = INT_MAX; // lazily fetched; INT_MAX means "not queried yet"
	while (1)
	{
		pResult = m_FreeList.Pop();
		if ( !pResult )
		{
			// Free list empty: bump-allocate from the current page
			int nBlockSize = m_nBlockSize;
			byte *pNextAlloc;
			while (1)
			{
				pResult = m_pNextAlloc;
				if ( pResult )
				{
					pNextAlloc = pResult + nBlockSize;
					if ( ( ( (uintp)(pNextAlloc) - 1 ) % BYTES_PAGE ) + nBlockSize > BYTES_PAGE )
					{
						// Crossed a page boundary
						pNextAlloc = 0;
					}
					// CAS the cursor forward; on failure another thread won the race, retry
					if ( m_pNextAlloc.AssignIf( pResult, pNextAlloc ) )
					{
						iPage = (size_t)((byte *)pResult - pSharedData->m_pBase) / BYTES_PAGE;
						break;
					}
				}
				else if ( m_CommitMutex.TryLock() )
				{
					// Current page exhausted and we won the right to commit a new one
					if ( !m_pNextAlloc )
					{
						// Prefer a recycled page from the shared free-page list...
						PageStatus_t *pAllocatedPageStatus = (PageStatus_t *)pSharedData->m_FreePages.Pop();
						if ( pAllocatedPageStatus )
						{
							iPage = pAllocatedPageStatus - &pSharedData->m_PageStatus[0];
						}
						else
						{
							// ...otherwise claim the next never-used page of the arena (CAS loop)
							while (1)
							{
								byte *pBlock = pSharedData->m_pNextBlock;
								if ( pBlock >= pSharedData->m_pLimit )
								{
									break; // arena exhausted
								}
								if ( ThreadInterlockedAssignPointerIf( (void **)&pSharedData->m_pNextBlock, (void *)( pBlock + BYTES_PAGE ), (void *)pBlock ) )
								{
									iPage = (size_t)((byte *)pBlock - pSharedData->m_pBase) / BYTES_PAGE;
									pAllocatedPageStatus = &pSharedData->m_PageStatus[iPage];
									break;
								}
							}
						}
						if ( pAllocatedPageStatus )
						{
							// Commit (if needed) and link the page into this pool's page list
							byte *pBlock = pSharedData->m_pBase + ( iPage * BYTES_PAGE );
							if ( pAllocatedPageStatus->m_nAllocated == NOT_COMMITTED )
							{
								pSharedData->m_Allocator.Commit( pBlock );
							}
							pAllocatedPageStatus->m_pPool = this;
							pAllocatedPageStatus->m_nAllocated = 0;
							pAllocatedPageStatus->m_pNextPageInPool = m_pFirstPage;
							m_pFirstPage = pAllocatedPageStatus;
#ifdef TRACK_SBH_COUNTS
							m_nFreeBlocks += ( BYTES_PAGE / m_nBlockSize );
#endif
							m_nCommittedPages++;
							// Publishing the cursor makes the page visible to other allocating threads
							m_pNextAlloc = pBlock;
						}
						else
						{
							// No page available anywhere: out of memory for this heap
							m_pNextAlloc = NULL;
							m_CommitMutex.Unlock();
							sharedLock.UnlockRead();
							return NULL;
						}
					}
					m_CommitMutex.Unlock();
				}
				else
				{
					// Another thread is committing a page; yield if we could starve it
					if ( iThreadPriority == INT_MAX)
					{
						iThreadPriority = ThreadGetPriority();
					}
					if ( iThreadPriority > 0 )
					{
						ThreadSleep( 0 );
					}
				}
			}
			if ( pResult )
			{
				break;
			}
		}
		else
		{
			// Got a recycled block off the free list
			iPage = (size_t)((byte *)pResult - pSharedData->m_pBase) / BYTES_PAGE;
			break;
		}
	}
#ifdef TRACK_SBH_COUNTS
	--m_nFreeBlocks;
#endif
	++pSharedData->m_PageStatus[iPage].m_nAllocated;
	sharedLock.UnlockRead();
	return pResult;
}
//-----------------------------------------------------------------------------
// Return a block to this pool: push it on the lock-free free list and drop
// the owning page's allocated count. p MUST have come from this pool.
//-----------------------------------------------------------------------------
template <typename CAllocator>
void CSmallBlockPool<CAllocator>::Free( void *p )
{
	SharedData_t *pSharedData = GetSharedData();
	// Which page of the shared arena does this block live on?
	size_t iPage = (size_t)((byte *)p - pSharedData->m_pBase) / BYTES_PAGE;
	CThreadSpinRWLock &sharedLock = pSharedData->m_Lock;
	if ( !sharedLock.TryLockForRead() )
	{
		sharedLock.LockForRead();
	}
	--pSharedData->m_PageStatus[iPage].m_nAllocated;
#ifdef TRACK_SBH_COUNTS
	++m_nFreeBlocks;
#endif
	m_FreeList.Push( p );
	pSharedData->m_Lock.UnlockRead();
	ValidateFreelist( pSharedData );
}
// Count the free blocks. Only meaningful when TRACK_SBH_COUNTS is defined;
// otherwise counters are not maintained and 0 is returned.
template <typename CAllocator>
int CSmallBlockPool<CAllocator>::CountFreeBlocks()
{
#ifdef TRACK_SBH_COUNTS
	return m_nFreeBlocks;
#else
	return 0;
#endif
}
// Size of committed memory managed by this heap, in bytes (pages * page size).
template <typename CAllocator>
int CSmallBlockPool<CAllocator>::GetCommittedSize()
{
	return m_nCommittedPages * BYTES_PAGE;
}
// Return the total number of blocks memory is committed for in the heap
// (committed pages * blocks-per-page).
template <typename CAllocator>
int CSmallBlockPool<CAllocator>::CountCommittedBlocks()
{
	return m_nCommittedPages * ( BYTES_PAGE / m_nBlockSize );
}
// Count the number of allocated blocks in the heap (committed minus free).
// Returns 0 when TRACK_SBH_COUNTS is off, since free counts aren't tracked.
template <typename CAllocator>
int CSmallBlockPool<CAllocator>::CountAllocatedBlocks()
{
#ifdef TRACK_SBH_COUNTS
	return CountCommittedBlocks() - CountFreeBlocks();
#else
	return 0;
#endif
}
// qsort comparator used by RemovePagesFromFreeList: orders page indices by
// the number of free nodes gathered on each page's temporary sort list,
// ascending, which determines the order pages are pushed back onto the
// (LIFO) free list.
template <typename CAllocator>
int CSmallBlockPool<CAllocator>::PageSort( const void *p1, const void *p2 )
{
	SharedData_t *pSharedData = GetSharedData();
	return pSharedData->m_PageStatus[*((int *)p1)].m_SortList.Count() - pSharedData->m_PageStatus[*((int *)p2)].m_SortList.Count();
}
//-----------------------------------------------------------------------------
// Remove every free-list node that lives on one of the given (fully-free)
// pages. The caller (Compact) holds the shared write lock, so the TSList can
// be edited in place. Returns false when fewer nodes were found than
// expected — i.e. the free list is incomplete/corrupt.
//-----------------------------------------------------------------------------
template <typename CAllocator>
bool CSmallBlockPool<CAllocator>::RemovePagesFromFreeList( byte **pPages, int nPages, bool bSortList )
{
	// Since we don't use the depth of the tslist, and sequence is only used for push, we can remove in-place
	int i;
	byte **pLimits = (byte **)stackalloc( nPages * sizeof(byte *) );
	int nBlocksNotInFreeList = 0;
	for ( i = 0; i < nPages; i++ )
	{
		pLimits[i] = pPages[i] + BYTES_PAGE;
		// If the bump-allocation cursor sits on a released page, the blocks at
		// and past it were never handed out, so they are not on the free list.
		if ( m_pNextAlloc >= pPages[i] && m_pNextAlloc < pLimits[i] )
		{
			nBlocksNotInFreeList = ( pLimits[i] - m_pNextAlloc ) / m_nBlockSize;
			m_pNextAlloc = NULL;
		}
	}
	// Number of free-list nodes we expect to find on the released pages
	int iTarget = ( ( BYTES_PAGE/m_nBlockSize ) * nPages ) - nBlocksNotInFreeList;
	int iCount = 0;
	TSLHead_t *pRawFreeList = m_FreeList.AccessUnprotected();
	bool bRemove;
	if ( !bSortList || m_nCommittedPages - nPages == 1 )
	{
		// Simple pass: walk the raw list, unlinking nodes on released pages
#ifdef USE_NATIVE_SLIST
		TSLNodeBase_t **ppPrevNext = (TSLNodeBase_t **)&(pRawFreeList->Next);
#else
		TSLNodeBase_t **ppPrevNext = (TSLNodeBase_t **)&(pRawFreeList->value.Next);
#endif
		TSLNodeBase_t *pNode = *ppPrevNext;
		while ( pNode && iCount != iTarget )
		{
			bRemove = false;
			for ( i = 0; i < nPages; i++ )
			{
				if ( (byte *)pNode >= pPages[i] && (byte *)pNode < pLimits[i] )
				{
					bRemove = true;
					break;
				}
			}
			if ( bRemove )
			{
				iCount++;
				*ppPrevNext = pNode->Next; // unlink this node
			}
			else
			{
				// Keep node: re-store the link (predecessors may have been removed)
				*ppPrevNext = pNode;
				ppPrevNext = &pNode->Next;
			}
			pNode = pNode->Next;
		}
	}
	else
	{
		// Rebuild pass: detach the whole list, bucket surviving nodes onto
		// their page's temporary sort list, then push them back page by page
		// (page order decided by PageSort) so the rebuilt free list ends up
		// grouped by page.
		SharedData_t *pSharedData = GetSharedData();
		byte *pSharedBase = pSharedData->m_pBase;
		TSLNodeBase_t *pNode = m_FreeList.Detach();
		TSLNodeBase_t *pNext;
		int iSortPage;
		int nSortPages = 0;
		int *sortPages = (int *)stackalloc( m_nCommittedPages * sizeof(int) );
		while ( pNode )
		{
			pNext = pNode->Next;
			bRemove = false;
			for ( i = 0; i < nPages; i++ )
			{
				if ( (byte *)pNode >= pPages[i] && (byte *)pNode < pLimits[i] )
				{
					iCount++;
					bRemove = true;
					break;
				}
			}
			if ( !bRemove )
			{
				iSortPage = ( (byte *)pNode - pSharedBase ) / BYTES_PAGE;
				if ( !pSharedData->m_PageStatus[iSortPage].m_SortList.Count() )
				{
					// First node seen on this page: remember the page for the sort
					sortPages[nSortPages++] = iSortPage;
				}
				pSharedData->m_PageStatus[iSortPage].m_SortList.Push( pNode );
			}
			pNode = pNext;
		}
		if ( nSortPages > 1 )
		{
			qsort( sortPages, nSortPages, sizeof(int), &PageSort );
		}
		for ( i = 0; i < nSortPages; i++ )
		{
			while ( ( pNode = pSharedData->m_PageStatus[sortPages[i]].m_SortList.Pop() ) != NULL )
			{
				m_FreeList.Push( pNode );
			}
		}
	}
	if ( iTarget != iCount )
	{
		// Missing free blocks: heap corruption (likely a write after free)
		DebuggerBreakIfDebugging();
	}
	return ( iTarget == iCount );
}
//-----------------------------------------------------------------------------
// Release fully-free pages back to the shared backing store (and, for the
// virtual allocator, decommit them beyond a small reserve). bIncremental
// releases at most one page. Returns the number of bytes freed. Takes the
// shared write lock, excluding all concurrent Alloc/Free calls.
//-----------------------------------------------------------------------------
template <typename CAllocator>
size_t CSmallBlockPool<CAllocator>::Compact( bool bIncremental )
{
	static bool bWarnedCorruption; // warn only once per run
	bool bIsCorrupt = false;
	int i;
	size_t nFreed = 0;
	SharedData_t *pSharedData = GetSharedData();
	pSharedData->m_Lock.LockForWrite();
	if ( m_pFirstPage )
	{
		PageStatus_t **pReleasedPages = (PageStatus_t **)stackalloc( m_nCommittedPages * sizeof(PageStatus_t *) );
		PageStatus_t **pReleasedPagesPrevs = (PageStatus_t **)stackalloc( m_nCommittedPages * sizeof(PageStatus_t *) );
		byte **pPageBases = (byte **)stackalloc( m_nCommittedPages * sizeof(byte *) );
		int nPages = 0;
		// Gather the pages to return to the backing pool
		PageStatus_t *pPage = m_pFirstPage;
		PageStatus_t *pPagePrev = NULL;
		while ( pPage )
		{
			if ( pPage->m_nAllocated == 0 )
			{
				// Remember the page, its base address, and its predecessor (for unlinking)
				pReleasedPages[nPages] = pPage;
				pPageBases[nPages] = pSharedData->m_pBase + ( pPage - &pSharedData->m_PageStatus[0] ) * BYTES_PAGE;
				pReleasedPagesPrevs[nPages] = pPagePrev;
				nPages++;
				if ( bIncremental )
				{
					break;
				}
			}
			pPagePrev = pPage;
			pPage = pPage->m_pNextPageInPool;
		}
		if ( nPages )
		{
			// Remove the pages from the pool's free list
			if ( !RemovePagesFromFreeList( pPageBases, nPages, !bIncremental ) && !bWarnedCorruption )
			{
				// We don't know which of the pages encountered an incomplete free list
				// so we'll just push them all back in and hope for the best. This isn't
				// ventilator control software!
				bWarnedCorruption = true;
				bIsCorrupt = true;
			}
			nFreed = nPages * BYTES_PAGE;
			m_nCommittedPages -= nPages;
#ifdef TRACK_SBH_COUNTS
			m_nFreeBlocks -= nPages * ( BYTES_PAGE / m_nBlockSize );
#endif
			// Unlink the pages (reverse order so the saved 'prev' pointers stay valid)
			for ( i = nPages - 1; i >= 0; --i )
			{
				if ( pReleasedPagesPrevs[i] )
				{
					pReleasedPagesPrevs[i]->m_pNextPageInPool = pReleasedPages[i]->m_pNextPageInPool;
				}
				else
				{
					m_pFirstPage = pReleasedPages[i]->m_pNextPageInPool;
				}
				pReleasedPages[i]->m_pNextPageInPool = NULL;
				pReleasedPages[i]->m_pPool = NULL;
			}
			// Push them onto the backing free lists
			if ( !pSharedData->m_Allocator.IsVirtual() )
			{
				// Physical allocator: pages stay committed, just recycle them
				for ( i = 0; i < nPages; i++ )
				{
					pSharedData->m_FreePages.Push( pReleasedPages[i] );
				}
			}
			else
			{
				// Virtual allocator: keep a reserve committed, decommit the rest
				int nMinReserve = ( bIncremental ) ? CAllocator::MIN_RESERVE_PAGES * 8 : CAllocator::MIN_RESERVE_PAGES;
				int nReserveNeeded = nMinReserve - pSharedData->m_FreePages.Count();
				if ( nReserveNeeded > 0 )
				{
					int nToKeepCommitted = MIN( nReserveNeeded, nPages );
					while ( nToKeepCommitted-- )
					{
						nPages--;
						pSharedData->m_FreePages.Push( pReleasedPages[nPages] );
					}
				}
				if ( nPages )
				{
					// Detach the list, push the decommitted pages on, iterate up to
					// previously decommitted pages, put them on, then push the
					// committed pages on — keeping committed pages at the head so
					// they are reused before decommitted ones.
					TSLNodeBase_t *pNodes = pSharedData->m_FreePages.Detach();
					for ( i = 0; i < nPages; i++ )
					{
						pReleasedPages[i]->m_nAllocated = NOT_COMMITTED;
						pSharedData->m_Allocator.Decommit( pPageBases[i] );
						pSharedData->m_FreePages.Push( pReleasedPages[i] );
					}
					TSLNodeBase_t *pCur, *pTemp = NULL;
					pCur = pNodes;
					while ( pCur )
					{
						if ( ((PageStatus_t *)pCur)->m_nAllocated == NOT_COMMITTED )
						{
							// First decommitted page in the detached list: split here
							if ( pTemp )
							{
								pTemp->Next = NULL;
							}
							else
							{
								pNodes = NULL; // The list only has decommitted pages, don't go circular
							}
							// Push the decommitted tail back on
							while ( pCur )
							{
								pTemp = pCur->Next;
								pSharedData->m_FreePages.Push( pCur );
								pCur = pTemp;
							}
							break;
						}
						pTemp = pCur;
						pCur = pCur->Next;
					}
					// Finally push the still-committed pages (ending up at the head)
					while ( pNodes )
					{
						pTemp = pNodes->Next;
						pSharedData->m_FreePages.Push( pNodes );
						pNodes = pTemp;
					}
				}
			}
		}
	}
	pSharedData->m_Lock.UnlockWrite();
	if ( bIsCorrupt )
	{
		Warning( "***** HEAP IS CORRUPT (free compromised for block size %d,in %s heap, possible write after free *****)\n", m_nBlockSize, ( pSharedData->m_Allocator.IsVirtual() ) ? "virtual" : "physical" );
	}
	return nFreed;
}
//-----------------------------------------------------------------------------
// Consistency-check this pool under the write lock: the page list must match
// the committed-page count, every page must point back at this pool, and the
// number of free-list nodes found on each page must equal that page's
// blocks-per-page minus its allocated count. Returns false on the first
// inconsistency (invalid records the failing __LINE__ for debugging).
//-----------------------------------------------------------------------------
template <typename CAllocator>
bool CSmallBlockPool<CAllocator>::Validate()
{
#ifdef NO_SBH
	return true;
#else
	int invalid = 0;
	SharedData_t *pSharedData = GetSharedData();
	pSharedData->m_Lock.LockForWrite();
	byte **pPageBases = (byte **)stackalloc( m_nCommittedPages * sizeof(byte *) );
	unsigned *pageCounts = (unsigned *)stackalloc( m_nCommittedPages * sizeof(unsigned) );
	memset( pageCounts, 0, m_nCommittedPages * sizeof(int) );
	unsigned nPages = 0;
	unsigned sumAllocated = 0;
	unsigned freeNotInFreeList = 0;
	// Validate page list is consistent
	if ( !m_pFirstPage )
	{
		if ( m_nCommittedPages != 0 )
		{
			invalid = __LINE__;
			goto notValid;
		}
	}
	else
	{
		PageStatus_t *pPage = m_pFirstPage;
		while ( pPage )
		{
			pPageBases[nPages] = pSharedData->m_pBase + ( pPage - &pSharedData->m_PageStatus[0] ) * BYTES_PAGE;
			if ( pPage->m_pPool != this )
			{
				invalid = __LINE__;
				goto notValid;
			}
			if ( nPages > m_nCommittedPages )
			{
				invalid = __LINE__;
				goto notValid;
			}
			sumAllocated += pPage->m_nAllocated;
			// Blocks at/after the bump cursor were never handed out, so they
			// won't be found on the free list; pre-credit them to this page.
			if ( m_pNextAlloc >= pPageBases[nPages] && m_pNextAlloc < pPageBases[nPages] + BYTES_PAGE )
			{
				freeNotInFreeList = pageCounts[nPages] = ( ( pPageBases[nPages] + BYTES_PAGE ) - m_pNextAlloc ) / m_nBlockSize;
			}
			nPages++;
			pPage = pPage->m_pNextPageInPool;
		};
		if ( nPages != m_nCommittedPages )
		{
			invalid = __LINE__;
			goto notValid;
		}
	}
	// Validate block counts
	{
		unsigned blocksPerPage = ( BYTES_PAGE / m_nBlockSize );
#ifdef USE_NATIVE_SLIST
		TSLNodeBase_t *pNode = (TSLNodeBase_t *)(m_FreeList.AccessUnprotected()->Next.Next);
#else
		TSLNodeBase_t *pNode = (TSLNodeBase_t *)(m_FreeList.AccessUnprotected()->value.Next);
#endif
		unsigned i;
		// Attribute every free-list node to the page it lives on
		while ( pNode )
		{
			for ( i = 0; i < nPages; i++ )
			{
				if ( (byte *)pNode >= pPageBases[i] && (byte *)pNode < pPageBases[i] + BYTES_PAGE )
				{
					pageCounts[i]++;
					break;
				}
			}
			if ( i == nPages )
			{
				// Free-list node not on any of this pool's pages
				invalid = __LINE__;
				goto notValid;
			}
			pNode = pNode->Next;
		}
		// Each page's free count must match blocksPerPage minus its allocated count
		PageStatus_t *pPage = m_pFirstPage;
		i = 0;
		while ( pPage )
		{
			unsigned nFreeOnPage = blocksPerPage - pPage->m_nAllocated;
			if ( nFreeOnPage != pageCounts[i++] )
			{
				invalid = __LINE__;
				goto notValid;
			}
			pPage = pPage->m_pNextPageInPool;
		}
	}
notValid:
	pSharedData->m_Lock.UnlockWrite();
	if ( invalid != 0 )
	{
		return false;
	}
	return true;
#endif
}
  805. //-----------------------------------------------------------------------------
  806. //
  807. //-----------------------------------------------------------------------------
  808. template <typename CAllocator>
  809. CSmallBlockHeap<CAllocator>::CSmallBlockHeap()
  810. {
  811. m_pSharedData = CPool::GetSharedData();
  812. // Build a lookup table used to find the correct pool based on size
  813. const int MAX_TABLE = MAX_SBH_BLOCK >> 2;
  814. int i = 0;
  815. int nBytesElement = 0;
  816. CPool *pCurPool = NULL;
  817. int iCurPool = 0;
  818. // Blocks sized 0 - 128 are in pools in increments of 8
  819. for ( ; i < 32; i++ )
  820. {
  821. if ( (i + 1) % 2 == 1)
  822. {
  823. nBytesElement += 8;
  824. pCurPool = &m_Pools[iCurPool];
  825. pCurPool->Init( nBytesElement );
  826. iCurPool++;
  827. m_PoolLookup[i] = pCurPool;
  828. }
  829. else
  830. {
  831. m_PoolLookup[i] = pCurPool;
  832. }
  833. }
  834. // Blocks sized 129 - 256 are in pools in increments of 16
  835. for ( ; i < 64; i++ )
  836. {
  837. if ( (i + 1) % 4 == 1)
  838. {
  839. nBytesElement += 16;
  840. pCurPool = &m_Pools[iCurPool];
  841. pCurPool->Init( nBytesElement );
  842. iCurPool++;
  843. m_PoolLookup[i] = pCurPool;
  844. }
  845. else
  846. {
  847. m_PoolLookup[i] = pCurPool;
  848. }
  849. }
  850. // Blocks sized 257 - 512 are in pools in increments of 32
  851. for ( ; i < 128; i++ )
  852. {
  853. if ( (i + 1) % 8 == 1)
  854. {
  855. nBytesElement += 32;
  856. pCurPool = &m_Pools[iCurPool];
  857. pCurPool->Init( nBytesElement );
  858. iCurPool++;
  859. m_PoolLookup[i] = pCurPool;
  860. }
  861. else
  862. {
  863. m_PoolLookup[i] = pCurPool;
  864. }
  865. }
  866. // Blocks sized 513 - 768 are in pools in increments of 64
  867. for ( ; i < 192; i++ )
  868. {
  869. if ( (i + 1) % 16 == 1)
  870. {
  871. nBytesElement += 64;
  872. pCurPool = &m_Pools[iCurPool];
  873. pCurPool->Init( nBytesElement );
  874. iCurPool++;
  875. m_PoolLookup[i] = pCurPool;
  876. }
  877. else
  878. {
  879. m_PoolLookup[i] = pCurPool;
  880. }
  881. }
  882. // Blocks sized 769 - 1024 are in pools in increments of 128
  883. for ( ; i < 256; i++ )
  884. {
  885. if ( (i + 1) % 32 == 1)
  886. {
  887. nBytesElement += 128;
  888. pCurPool = &m_Pools[iCurPool];
  889. pCurPool->Init( nBytesElement );
  890. iCurPool++;
  891. m_PoolLookup[i] = pCurPool;
  892. }
  893. else
  894. {
  895. m_PoolLookup[i] = pCurPool;
  896. }
  897. }
  898. // Blocks sized 1025 - 2048 are in pools in increments of 256
  899. for ( ; i < MAX_TABLE; i++ )
  900. {
  901. if ( (i + 1) % 64 == 1)
  902. {
  903. nBytesElement += 256;
  904. pCurPool = &m_Pools[iCurPool];
  905. pCurPool->Init( nBytesElement );
  906. iCurPool++;
  907. m_PoolLookup[i] = pCurPool;
  908. }
  909. else
  910. {
  911. m_PoolLookup[i] = pCurPool;
  912. }
  913. }
  914. Assert( iCurPool == NUM_POOLS );
  915. }
// True if a request of nBytes should be served by this small-block heap.
template <typename CAllocator>
bool CSmallBlockHeap<CAllocator>::ShouldUse( size_t nBytes )
{
	return ( nBytes <= MAX_SBH_BLOCK );
}
// True if p lies within this heap's reserved arena (i.e. was allocated here).
template <typename CAllocator>
bool CSmallBlockHeap<CAllocator>::IsOwner( void * p )
{
	if ( uintp(p) >= uintp(m_pSharedData->m_pBase) )
	{
		intp index = (intp)((byte *)p - m_pSharedData->m_pBase) / BYTES_PAGE;
		// One PageStatus_t per arena page, so its count bounds the arena
		return ( index < ARRAYSIZE(m_pSharedData->m_PageStatus) );
	}
	return false;
}
  931. template <typename CAllocator>
  932. void *CSmallBlockHeap<CAllocator>::Alloc( size_t nBytes )
  933. {
  934. if ( nBytes == 0)
  935. {
  936. nBytes = 1;
  937. }
  938. Assert( ShouldUse( nBytes ) );
  939. CPool *pPool = FindPool( nBytes );
  940. void *p = pPool->Alloc();
  941. return p;
  942. }
  943. template <typename CAllocator>
  944. void *CSmallBlockHeap<CAllocator>::Realloc( void *p, size_t nBytes )
  945. {
  946. if ( nBytes == 0)
  947. {
  948. nBytes = 1;
  949. }
  950. CPool *pOldPool = FindPool( p );
  951. CPool *pNewPool = ( ShouldUse( nBytes ) ) ? FindPool( nBytes ) : NULL;
  952. if ( pOldPool == pNewPool )
  953. {
  954. return p;
  955. }
  956. void *pNewBlock = NULL;
  957. if ( !pNewBlock )
  958. {
  959. pNewBlock = MemAlloc_Alloc( nBytes ); // Call back out so blocks can move from the secondary to the primary pools
  960. }
  961. if ( !pNewBlock )
  962. {
  963. pNewBlock = malloc_internal( DEF_REGION, nBytes );
  964. }
  965. if ( pNewBlock )
  966. {
  967. size_t nBytesCopy = MIN( nBytes, pOldPool->GetBlockSize() );
  968. memcpy( pNewBlock, p, nBytesCopy );
  969. }
  970. else if ( nBytes < pOldPool->GetBlockSize() )
  971. {
  972. return p;
  973. }
  974. pOldPool->Free( p );
  975. return pNewBlock;
  976. }
  977. template <typename CAllocator>
  978. void CSmallBlockHeap<CAllocator>::Free( void *p )
  979. {
  980. CPool *pPool = FindPool( p );
  981. if ( pPool )
  982. {
  983. pPool->Free( p );
  984. }
  985. else
  986. {
  987. // we probably didn't hook some allocation and now we're freeing it or the heap has been trashed!
  988. DebuggerBreakIfDebugging();
  989. }
  990. }
// Returns the pool block size backing p (>= the size originally requested).
// PRECONDITION: p must belong to this heap — callers gate on IsOwner().
// FindPool() returns NULL for foreign pointers and this would crash on them.
template <typename CAllocator>
size_t CSmallBlockHeap<CAllocator>::GetSize( void *p )
{
	CPool *pPool = FindPool( p );
	return pPool->GetBlockSize();
}
  997. template <typename CAllocator>
  998. void CSmallBlockHeap<CAllocator>::Usage( size_t &bytesCommitted, size_t &bytesAllocated )
  999. {
  1000. bytesCommitted = 0;
  1001. bytesAllocated = 0;
  1002. for ( int i = 0; i < NUM_POOLS; i++ )
  1003. {
  1004. bytesCommitted += m_Pools[i].GetCommittedSize();
  1005. bytesAllocated += ( m_Pools[i].CountAllocatedBlocks() * m_Pools[i].GetBlockSize() );
  1006. }
  1007. }
  1008. template <typename CAllocator>
  1009. void CSmallBlockHeap<CAllocator>::DumpStats( const char *pszTag, FILE *pFile )
  1010. {
  1011. size_t bytesCommitted, bytesAllocated;
  1012. Usage( bytesCommitted, bytesAllocated );
  1013. if ( pFile )
  1014. {
  1015. for ( int i = 0; i < NUM_POOLS; i++ )
  1016. {
  1017. // output for vxconsole parsing
  1018. fprintf( pFile, "Pool %2i: (size: %4u) blocks: allocated:%5i free:%5i committed:%5i (committed size:%4u kb)\n",
  1019. i,
  1020. m_Pools[i].GetBlockSize(),
  1021. m_Pools[i].CountAllocatedBlocks(),
  1022. m_Pools[i].CountFreeBlocks(),
  1023. m_Pools[i].CountCommittedBlocks(),
  1024. m_Pools[i].GetCommittedSize() );
  1025. }
  1026. fprintf( pFile, "Totals (%s): Committed:%5u kb Allocated:%5u kb\n", pszTag, bytesCommitted / 1024, bytesAllocated / 1024 );
  1027. }
  1028. else
  1029. {
  1030. for ( int i = 0; i < NUM_POOLS; i++ )
  1031. {
  1032. Msg( "Pool %2i: (size: %4u) blocks: allocated:%5i free:%5i committed:%5i (committed size:%4u kb)\n",i, m_Pools[i].GetBlockSize(),m_Pools[i].CountAllocatedBlocks(), m_Pools[i].CountFreeBlocks(),m_Pools[i].CountCommittedBlocks(), m_Pools[i].GetCommittedSize() / 1024);
  1033. }
  1034. Msg( "Totals (%s): Committed:%5u kb Allocated:%5u kb\n", pszTag, bytesCommitted / 1024, bytesAllocated / 1024 );
  1035. }
  1036. }
// Map a request size to its pool. The lookup table has one entry per 4 bytes
// of request size (index = (nBytes-1)/4), so nBytes must be in
// (0, MAX_SBH_BLOCK] — callers guarantee this via ShouldUse().
template <typename CAllocator>
CSmallBlockPool<CAllocator> *CSmallBlockHeap<CAllocator>::FindPool( size_t nBytes )
{
	return m_PoolLookup[(nBytes - 1) >> 2];
}
// Map a pointer back to the pool that owns its page, or NULL if the pointer
// is outside this heap's page arena.
template <typename CAllocator>
CSmallBlockPool<CAllocator> *CSmallBlockHeap<CAllocator>::FindPool( void *p )
{
	// NOTE: If p < m_pBase, cast to unsigned size_t will cause it to be large
	size_t index = (size_t)((byte *)p - m_pSharedData->m_pBase) / BYTES_PAGE;
	if ( index < ARRAYSIZE(m_pSharedData->m_PageStatus) )
		return m_pSharedData->m_PageStatus[index].m_pPool;
	return NULL;
}
// Release unused committed pages back to the system. Incremental mode visits
// pools round-robin (resuming after the pool that last yielded memory) and
// stops at the first pool that recovers anything; full mode compacts all
// pools. Returns the number of bytes recovered.
template <typename CAllocator>
size_t CSmallBlockHeap<CAllocator>::Compact( bool bIncremental )
{
	size_t nRecovered = 0;
	if ( bIncremental )
	{
		// Persists the rotation point across calls so successive incremental
		// compactions spread work over all pools.
		// NOTE(review): function-local static shared across heap instances and
		// threads without synchronization — confirm callers serialize this.
		static int iLastIncremental;
		iLastIncremental++;
		for ( int i = 0; i < NUM_POOLS; i++ )
		{
			int idx = ( i + iLastIncremental ) % NUM_POOLS;
			// '=' (not '+=') is intentional: only one pool is compacted per
			// incremental call.
			nRecovered = m_Pools[idx].Compact( bIncremental );
			if ( nRecovered )
			{
				iLastIncremental = idx;
				break;
			}
		}
	}
	else
	{
		// Full compaction: accumulate recovery across every pool.
		for ( int i = 0; i < NUM_POOLS; i++ )
		{
			nRecovered += m_Pools[i].Compact( bIncremental );
		}
	}
	return nRecovered;
}
  1079. template <typename CAllocator>
  1080. bool CSmallBlockHeap<CAllocator>::Validate()
  1081. {
  1082. bool valid = true;
  1083. for ( int i = 0; i < NUM_POOLS; i++ )
  1084. {
  1085. valid = m_Pools[i].Validate() && valid;
  1086. }
  1087. return valid;
  1088. }
  1089. #endif // MEM_SBH_ENABLED
  1090. //-----------------------------------------------------------------------------
  1091. // Lightweight memory tracking
  1092. //-----------------------------------------------------------------------------
  1093. #ifdef USE_LIGHT_MEM_DEBUG
  1094. #ifndef LIGHT_MEM_DEBUG_REQUIRES_CMD_LINE_SWITCH
  1095. #define UsingLMD() true
  1096. #else // LIGHT_MEM_DEBUG_REQUIRES_CMD_LINE_SWITCH
  1097. bool g_bUsingLMD = ( Plat_GetCommandLineA() ) ? ( strstr( Plat_GetCommandLineA(), "-uselmd" ) != NULL ) : false;
  1098. #define UsingLMD() g_bUsingLMD
  1099. #if defined( _PS3 )
  1100. #error "Plat_GetCommandLineA() not implemented on PS3"
  1101. #endif
  1102. #endif // LIGHT_MEM_DEBUG_REQUIRES_CMD_LINE_SWITCH
// Module name used when no file/line information is available for an allocation.
const char *g_pszUnknown = "unknown";
// 16-byte guard pattern placed before (head) and after (tail) every tracked
// allocation; a mismatch on validation indicates an out-of-bounds write.
struct Sentinal_t
{
	DWORD value[4];
};
Sentinal_t g_HeadSentinel =
{
	0xdeadbeef,
	0xbaadf00d,
	0xbd122969,
	0xdeadbeef,
};
Sentinal_t g_TailSentinel =
{
	0xbaadf00d,
	0xbd122969,
	0xdeadbeef,
	0xbaadf00d,
};
// Byte pattern written over freed payloads; any deviation found later means a
// write-after-free occurred.
const byte g_FreeFill = 0xdd;
// Values for AllocHeader_t::status.
static const uint LWD_FREE = 0;
static const uint LWD_ALLOCATED = 1;
#define LMD_STATUS_BITS ( 1 )
#define LMD_ALIGN_BITS ( 32 - LMD_STATUS_BITS )
#define LMD_MAX_ALIGN ( 1 << ( LMD_ALIGN_BITS - 1) )
// Bookkeeping prepended to every allocation while light memory debug is active.
struct AllocHeader_t
{
	const char *pszModule; // source file, or g_pszUnknown
	int line; // source line
	size_t nBytes; // user-requested size
	uint status : LMD_STATUS_BITS; // LWD_FREE / LWD_ALLOCATED
	uint align : LMD_ALIGN_BITS; // requested alignment, 0 if none
	Sentinal_t sentinal; // head guard; the tail guard lives after the payload
};
// Ring buffer of deferred frees used to catch writes into freed memory.
const int g_nRecentFrees = ( IsPC() ) ? 8192 : 512;
AllocHeader_t **g_pRecentFrees = (AllocHeader_t **)calloc( g_nRecentFrees, sizeof(AllocHeader_t *) );
int g_iNextFreeSlot;
#define INTERNAL_INLINE
// Convert between the user-visible pointer and the header immediately before it.
#define LMDToHeader( pUserPtr ) ( ((AllocHeader_t *)(pUserPtr)) - 1 )
#define LMDFromHeader( pHeader ) ( (byte *)((pHeader) + 1) )
// Guards the recent-free ring.
CThreadFastMutex g_LMDMutex;
// File/line override installed by LMDPushAllocDbgInfo (main thread only).
const char *g_pLMDFileName = NULL;
int g_nLMDLine;
int g_iLMDDepth;
  1147. void LMDPushAllocDbgInfo( const char *pFileName, int nLine )
  1148. {
  1149. if ( ThreadInMainThread() )
  1150. {
  1151. if ( !g_iLMDDepth )
  1152. {
  1153. g_pLMDFileName = pFileName;
  1154. g_nLMDLine = nLine;
  1155. }
  1156. g_iLMDDepth++;
  1157. }
  1158. }
  1159. void LMDPopAllocDbgInfo()
  1160. {
  1161. if ( ThreadInMainThread() && g_iLMDDepth > 0 )
  1162. {
  1163. g_iLMDDepth--;
  1164. if ( g_iLMDDepth == 0 )
  1165. {
  1166. g_pLMDFileName = NULL;
  1167. g_nLMDLine = 0;
  1168. }
  1169. }
  1170. }
  1171. void LMDReportInvalidBlock( AllocHeader_t *pHeader, const char *pszMessage )
  1172. {
  1173. char szMsg[256];
  1174. if ( pHeader )
  1175. {
  1176. sprintf( szMsg, "HEAP IS CORRUPT: %s (block 0x%x, size %d, alignment %d)\n", pszMessage, (size_t)LMDFromHeader( pHeader ), pHeader->nBytes, pHeader->align );
  1177. }
  1178. else
  1179. {
  1180. sprintf( szMsg, "HEAP IS CORRUPT: %s\n", pszMessage );
  1181. }
  1182. if ( Plat_IsInDebugSession() )
  1183. {
  1184. DebuggerBreak();
  1185. }
  1186. else
  1187. {
  1188. WriteMiniDump();
  1189. }
  1190. #ifdef IS_WINDOWS_PC
  1191. ::MessageBox( NULL, szMsg, "Error", MB_SYSTEMMODAL | MB_OK );
  1192. #else
  1193. Warning( szMsg );
  1194. #endif
  1195. }
// Verify the head and tail sentinels around an allocation. When bFreeList is
// true the block is a deferred free, so its payload must still contain the
// g_FreeFill pattern — any other byte means freed memory was written to.
// NULL headers are silently ignored (empty ring slots).
void LMDValidateBlock( AllocHeader_t *pHeader, bool bFreeList )
{
	if ( !pHeader )
		return;
	if ( memcmp( &pHeader->sentinal, &g_HeadSentinel, sizeof(Sentinal_t) ) != 0 )
	{
		LMDReportInvalidBlock( pHeader, "Head sentinel corrupt" );
	}
	// The tail sentinel lives immediately after the user payload.
	if ( memcmp( ((Sentinal_t *)(LMDFromHeader( pHeader ) + pHeader->nBytes)), &g_TailSentinel, sizeof(Sentinal_t) ) != 0 )
	{
		LMDReportInvalidBlock( pHeader, "Tail sentinel corrupt" );
	}
	if ( bFreeList )
	{
		// Scan the whole payload for any byte differing from the free fill.
		byte *pCur = (byte *)pHeader + sizeof(AllocHeader_t);
		byte *pLimit = pCur + pHeader->nBytes;
		while ( pCur != pLimit )
		{
			if ( *pCur++ != g_FreeFill )
			{
				LMDReportInvalidBlock( pHeader, "Write after free" );
			}
		}
	}
}
// Size of the bookkeeping placed before the user pointer. For aligned
// allocations the header is rounded up to a multiple of 'align' so the user
// pointer stays aligned. The rounding math assumes align is a power of two.
size_t LMDComputeHeaderSize( size_t align = 0 )
{
	if ( !align )
		return sizeof(AllocHeader_t);
	// For aligned allocs, the header is preceded by padding which maintains alignment
	if ( align > LMD_MAX_ALIGN )
		s_StdMemAlloc.SetCRTAllocFailed( align ); // TODO: could convert alignment to exponent to get around this, or use a flag for alignments over 1KB or 1MB...
	return ( ( sizeof( AllocHeader_t ) + (align-1) ) & ~(align-1) );
}
// Grow a requested allocation size to make room for the LMD header (before
// the user data) and the tail sentinel (after it). Pass-through when LMD is
// disabled. (nBytes is taken by reference but not modified here.)
size_t LMDAdjustSize( size_t &nBytes, size_t align = 0 )
{
	if ( !UsingLMD() )
		return nBytes;
	// Add data before+after each alloc
	return ( nBytes + LMDComputeHeaderSize( align ) + sizeof(Sentinal_t) );
}
// Stamp LMD bookkeeping onto a raw allocation: fill in the header (module,
// line, size, alignment, status) and both sentinels, then return the adjusted
// user pointer. p is the raw block returned by the underlying allocator,
// already sized via LMDAdjustSize(). Returns NULL if p is NULL.
void *LMDNoteAlloc( void *p, size_t nBytes, size_t align = 0, const char *pszModule = g_pszUnknown, int line = 0 )
{
	if ( !UsingLMD() )
	{
		return p;
	}
	// A PushAllocDbgInfo override (main thread) wins over caller-supplied info.
	if ( g_pLMDFileName )
	{
		pszModule = g_pLMDFileName;
		line = g_nLMDLine;
	}
	if ( p )
	{
		// User data starts after the (possibly padded) header.
		byte *pUserPtr = ((byte*)p) + LMDComputeHeaderSize( align );
		AllocHeader_t *pHeader = LMDToHeader( pUserPtr );
		pHeader->pszModule = pszModule;
		pHeader->line = line;
		pHeader->status = LWD_ALLOCATED;
		pHeader->nBytes = nBytes;
		pHeader->align = (uint)align;
		pHeader->sentinal = g_HeadSentinel;
		// Tail sentinel sits immediately after the user payload.
		*((Sentinal_t *)(pUserPtr + pHeader->nBytes)) = g_TailSentinel;
		LMDValidateBlock( pHeader, false );
		return pUserPtr;
	}
	return NULL;
	// Some SBH clients rely on allocations > 16 bytes being 16-byte aligned, so we mustn't break that assumption:
	// (placed after the return, but static asserts are checked at compile time regardless)
	MEMSTD_COMPILE_TIME_ASSERT( sizeof( AllocHeader_t ) % 16 == 0 );
}
// Record a free. Small blocks (< 16KB) are parked in the recent-free ring,
// filled with g_FreeFill, so later writes to them can be detected; the entry
// they evict from the ring (if any) is returned as the block to actually
// release now. Large blocks are returned for immediate release (and a random
// ring entry is validated opportunistically). Returns the raw pointer the
// underlying allocator should free, or NULL when the free is fully deferred.
void *LMDNoteFree( void *p )
{
	if ( !UsingLMD() )
	{
		return p;
	}
	AUTO_LOCK( g_LMDMutex );
	if ( !p )
	{
		return NULL;
	}
	AllocHeader_t *pHeader = LMDToHeader( p );
	if ( pHeader->status == LWD_FREE )
	{
		LMDReportInvalidBlock( pHeader, "Double free" );
	}
	LMDValidateBlock( pHeader, false );
	AllocHeader_t *pToReturn;
	if ( pHeader->nBytes < 16*1024 )
	{
		// Defer this free: evict (and validate) the oldest ring entry, which
		// becomes the block that really gets released.
		pToReturn = g_pRecentFrees[g_iNextFreeSlot];
		LMDValidateBlock( pToReturn, true );
		g_pRecentFrees[g_iNextFreeSlot] = pHeader;
		g_iNextFreeSlot = (g_iNextFreeSlot + 1 ) % g_nRecentFrees;
	}
	else
	{
		// Too large to park: release immediately, but spot-check a random
		// deferred entry while we are here.
		pToReturn = pHeader;
		LMDValidateBlock( g_pRecentFrees[rand() % g_nRecentFrees], true );
	}
	// Mark freed and poison the payload for write-after-free detection.
	pHeader->status = LWD_FREE;
	memset( pHeader + 1, g_FreeFill, pHeader->nBytes );
	if ( pToReturn && ( pToReturn->align ) )
	{
		// For aligned allocations, the actual system allocation starts *before* the LMD header:
		size_t headerPadding = LMDComputeHeaderSize( pToReturn->align ) - sizeof( AllocHeader_t );
		return ( ((byte*)pToReturn) - headerPadding );
	}
	return pToReturn;
}
  1306. size_t LMDGetSize( void *p )
  1307. {
  1308. if ( !UsingLMD() )
  1309. {
  1310. return (size_t)(-1);
  1311. }
  1312. AllocHeader_t *pHeader = LMDToHeader( p );
  1313. return pHeader->nBytes;
  1314. }
  1315. bool LMDValidateHeap()
  1316. {
  1317. if ( !UsingLMD() )
  1318. {
  1319. return true;
  1320. }
  1321. AUTO_LOCK( g_LMDMutex );
  1322. for ( int i = 0; i < g_nRecentFrees && g_pRecentFrees[i]; i++ )
  1323. {
  1324. LMDValidateBlock( g_pRecentFrees[i], true );
  1325. }
  1326. return true;
  1327. }
  1328. void *LMDRealloc( void *pMem, size_t nSize, size_t align = 0, const char *pszModule = g_pszUnknown, int line = 0 )
  1329. {
  1330. if ( nSize == 0 )
  1331. {
  1332. s_StdMemAlloc.Free( pMem );
  1333. return NULL;
  1334. }
  1335. void *pNew;
  1336. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1337. if ( align )
  1338. pNew = s_StdMemAlloc.AllocAlign( nSize, align, pszModule, line );
  1339. else
  1340. #endif // MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1341. pNew = s_StdMemAlloc.Alloc( nSize, pszModule, line );
  1342. if ( !pMem )
  1343. {
  1344. return pNew;
  1345. }
  1346. AllocHeader_t *pHeader = LMDToHeader( pMem );
  1347. if ( align != pHeader->align )
  1348. {
  1349. LMDReportInvalidBlock( pHeader, "Realloc changed alignment!" );
  1350. }
  1351. size_t nCopySize = MIN( nSize, pHeader->nBytes );
  1352. memcpy( pNew, pMem, nCopySize );
  1353. s_StdMemAlloc.Free( pMem, pszModule, line );
  1354. return pNew;
  1355. }
#else // USE_LIGHT_MEM_DEBUG
// LMD disabled: every hook collapses to a pass-through, constant, or no-op so
// release builds pay no tracking overhead.
#define INTERNAL_INLINE FORCEINLINE
#define UsingLMD() false
FORCEINLINE size_t LMDAdjustSize( size_t &nBytes, size_t align = 0 ) { return nBytes; }
#define LMDNoteAlloc( pHeader, ... ) (pHeader)
#define LMDNoteFree( pHeader, ... ) (pHeader)
#define LMDGetSize( pHeader ) (size_t)(-1)
#define LMDToHeader( pHeader ) (pHeader)
#define LMDFromHeader( pHeader ) (pHeader)
#define LMDValidateHeap() (true)
#define LMDPushAllocDbgInfo( pFileName, nLine ) ((void)0)
#define LMDPopAllocDbgInfo() ((void)0)
// Never reached when UsingLMD() is false; exists only to satisfy references.
FORCEINLINE void *LMDRealloc( void *pMem, size_t nSize, size_t align = 0, const char *pszModule = NULL, int line = 0 ) { return NULL; }
#endif // USE_LIGHT_MEM_DEBUG
  1370. //-----------------------------------------------------------------------------
  1371. // Internal versions
  1372. //-----------------------------------------------------------------------------
// Try each configured small block heap in priority order: primary, optional
// secondary, then fallback. Returns NULL (after notifying the alloc-fail
// handler) when every pool is exhausted, or when SBH support is compiled out.
INTERNAL_INLINE void *CStdMemAlloc::InternalAllocFromPools( size_t nSize )
{
#if MEM_SBH_ENABLED
	void *pMem;
	pMem = m_PrimarySBH.Alloc( nSize );
	if ( pMem )
	{
		return pMem;
	}
#ifdef MEMALLOC_USE_SECONDARY_SBH
	pMem = m_SecondarySBH.Alloc( nSize );
	if ( pMem )
	{
		return pMem;
	}
#endif // MEMALLOC_USE_SECONDARY_SBH
#ifndef MEMALLOC_NO_FALLBACK
	pMem = m_FallbackSBH.Alloc( nSize );
	if ( pMem )
	{
		return pMem;
	}
#endif // MEMALLOC_NO_FALLBACK
	// All pools failed; give the fail handler a chance to record/react.
	CallAllocFailHandler( nSize );
#endif // MEM_SBH_ENABLED
	return NULL;
}
// Core allocation path: small requests try the small block heaps first (with
// one compact-and-retry), everything else — including SBH overflow — goes to
// the main heap for the given region, again with one compact-and-retry before
// reporting failure via SetCRTAllocFailed() and returning NULL.
INTERNAL_INLINE void *CStdMemAlloc::InternalAlloc( int region, size_t nSize )
{
	PROFILE_ALLOC(Malloc);
	void *pMem;
#if MEM_SBH_ENABLED
	if ( m_PrimarySBH.ShouldUse( nSize ) ) // test valid for either pool
	{
		pMem = InternalAllocFromPools( nSize );
		if ( !pMem )
		{
			// Pools exhausted: compact and retry once before leaving the SBH.
			CompactOnFail();
			pMem = InternalAllocFromPools( nSize );
		}
		if ( pMem )
		{
			ApplyMemoryInitializations( pMem, nSize );
			return pMem;
		}
		ExecuteOnce( DevWarning( "\n\nDRASTIC MEMORY OVERFLOW: Fell out of small block heap!\n\n\n") );
	}
#endif // MEM_SBH_ENABLED
	pMem = malloc_internal( region, nSize );
	if ( !pMem )
	{
		CompactOnFail();
		pMem = malloc_internal( region, nSize );
		if ( !pMem )
		{
			// Out of memory: record the failed size; caller sees NULL.
			SetCRTAllocFailed( nSize );
			return NULL;
		}
	}
	ApplyMemoryInitializations( pMem, nSize );
	return pMem;
}
  1435. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
// Core aligned allocation path. For SBH-sized requests, the size is rounded
// up to a multiple of 'align' (the math assumes a power-of-two align) —
// presumably SBH blocks of that rounded size satisfy the alignment; confirm
// against the SBH's documented 16-byte alignment guarantee. Larger requests
// go to the main heap's aligned allocator with one compact-and-retry.
INTERNAL_INLINE void *CStdMemAlloc::InternalAllocAligned( int region, size_t nSize, size_t align )
{
	PROFILE_ALLOC(MallocAligned);
	void *pMem;
#if MEM_SBH_ENABLED
	size_t nSizeAligned = ( nSize + align - 1 ) & ~( align - 1 );
	if ( m_PrimarySBH.ShouldUse( nSizeAligned ) ) // test valid for either pool
	{
		pMem = InternalAllocFromPools( nSizeAligned );
		if ( !pMem )
		{
			CompactOnFail();
			pMem = InternalAllocFromPools( nSizeAligned );
		}
		if ( pMem )
		{
			ApplyMemoryInitializations( pMem, nSizeAligned );
			return pMem;
		}
		ExecuteOnce( DevWarning( "Warning: Fell out of small block heap!\n") );
	}
#endif // MEM_SBH_ENABLED
	pMem = malloc_aligned_internal( region, nSize, align );
	if ( !pMem )
	{
		CompactOnFail();
		pMem = malloc_aligned_internal( region, nSize, align );
		if ( !pMem )
		{
			SetCRTAllocFailed( nSize );
			return NULL;
		}
	}
	ApplyMemoryInitializations( pMem, nSize );
	return pMem;
}
  1472. #endif // MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
// Core realloc path: NULL input behaves like an alloc; blocks owned by one of
// the small block heaps are resized there; everything else goes to the main
// heap's realloc, with one compact-and-retry before reporting failure.
INTERNAL_INLINE void *CStdMemAlloc::InternalRealloc( void *pMem, size_t nSize )
{
	if ( !pMem )
	{
		return RegionAlloc( DEF_REGION, nSize );
	}
	PROFILE_ALLOC(Realloc);
#if MEM_SBH_ENABLED
	if ( m_PrimarySBH.IsOwner( pMem ) )
	{
		return m_PrimarySBH.Realloc( pMem, nSize );
	}
#ifdef MEMALLOC_USE_SECONDARY_SBH
	if ( m_SecondarySBH.IsOwner( pMem ) )
	{
		return m_SecondarySBH.Realloc( pMem, nSize );
	}
#endif // MEMALLOC_USE_SECONDARY_SBH
#ifndef MEMALLOC_NO_FALLBACK
	if ( m_FallbackSBH.IsOwner( pMem ) )
	{
		return m_FallbackSBH.Realloc( pMem, nSize );
	}
#endif // MEMALLOC_NO_FALLBACK
#endif // MEM_SBH_ENABLED
	void *pRet = realloc_internal( pMem, nSize );
	if ( !pRet )
	{
		CompactOnFail();
		pRet = realloc_internal( pMem, nSize );
		if ( !pRet )
		{
			SetCRTAllocFailed( nSize );
		}
	}
	return pRet;
}
  1510. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
// Core aligned realloc path. Mirrors InternalRealloc but uses the aligned
// main-heap realloc for non-SBH blocks.
// NOTE(review): the SBH paths call Realloc() without passing 'align' —
// presumably SBH block alignment suffices for any align that fit the SBH in
// the first place; confirm against the SBH alignment guarantees.
INTERNAL_INLINE void *CStdMemAlloc::InternalReallocAligned( void *pMem, size_t nSize, size_t align )
{
	if ( !pMem )
	{
		return InternalAllocAligned( DEF_REGION, nSize, align );
	}
	PROFILE_ALLOC(ReallocAligned);
#if MEM_SBH_ENABLED
	if ( m_PrimarySBH.IsOwner( pMem ) )
	{
		return m_PrimarySBH.Realloc( pMem, nSize );
	}
#ifdef MEMALLOC_USE_SECONDARY_SBH
	if ( m_SecondarySBH.IsOwner( pMem ) )
	{
		return m_SecondarySBH.Realloc( pMem, nSize );
	}
#endif // MEMALLOC_USE_SECONDARY_SBH
#ifndef MEMALLOC_NO_FALLBACK
	if ( m_FallbackSBH.IsOwner( pMem ) )
	{
		return m_FallbackSBH.Realloc( pMem, nSize );
	}
#endif // MEMALLOC_NO_FALLBACK
#endif // MEM_SBH_ENABLED
	void *pRet = realloc_aligned_internal( pMem, nSize, align );
	if ( !pRet )
	{
		CompactOnFail();
		pRet = realloc_aligned_internal( pMem, nSize, align );
		if ( !pRet )
		{
			SetCRTAllocFailed( nSize );
		}
	}
	return pRet;
}
  1548. #endif
// Core free path: NULL is a no-op; pointers owned by a small block heap are
// returned to that heap; everything else goes to the main heap.
INTERNAL_INLINE void CStdMemAlloc::InternalFree( void *pMem )
{
	if ( !pMem )
	{
		return;
	}
	PROFILE_ALLOC(Free);
#if MEM_SBH_ENABLED
	if ( m_PrimarySBH.IsOwner( pMem ) )
	{
		m_PrimarySBH.Free( pMem );
		return;
	}
#ifdef MEMALLOC_USE_SECONDARY_SBH
	if ( m_SecondarySBH.IsOwner( pMem ) )
	{
		// Returning a void expression is legal; equivalent to Free-then-return.
		return m_SecondarySBH.Free( pMem );
	}
#endif // MEMALLOC_USE_SECONDARY_SBH
#ifndef MEMALLOC_NO_FALLBACK
	if ( m_FallbackSBH.IsOwner( pMem ) )
	{
		m_FallbackSBH.Free( pMem );
		return;
	}
#endif // MEMALLOC_NO_FALLBACK
#endif // MEM_SBH_ENABLED
	free_internal( pMem );
}
// Called when an allocation attempt fails: compact the heaps to try to
// recover space before the caller retries the allocation once.
void CStdMemAlloc::CompactOnFail()
{
	CompactHeap();
}
  1582. //-----------------------------------------------------------------------------
  1583. // Release versions
  1584. //-----------------------------------------------------------------------------
  1585. void *CStdMemAlloc::Alloc( size_t nSize )
  1586. {
  1587. size_t nAdjustedSize = LMDAdjustSize( nSize );
  1588. return LMDNoteAlloc( CStdMemAlloc::InternalAlloc( DEF_REGION, nAdjustedSize ), nSize );
  1589. }
  1590. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1591. void * CStdMemAlloc::AllocAlign( size_t nSize, size_t align )
  1592. {
  1593. size_t nAdjustedSize = LMDAdjustSize( nSize, align );
  1594. return LMDNoteAlloc( CStdMemAlloc::InternalAllocAligned( DEF_REGION, nAdjustedSize, align ), nSize, align );
  1595. }
  1596. #endif // MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1597. void *CStdMemAlloc::Realloc( void *pMem, size_t nSize )
  1598. {
  1599. if ( UsingLMD() )
  1600. return LMDRealloc( pMem, nSize );
  1601. return CStdMemAlloc::InternalRealloc( pMem, nSize );
  1602. }
  1603. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1604. void * CStdMemAlloc::ReallocAlign( void *pMem, size_t nSize, size_t align )
  1605. {
  1606. if ( UsingLMD() )
  1607. return LMDRealloc( pMem, nSize, align );
  1608. return CStdMemAlloc::InternalReallocAligned( pMem, nSize, align );
  1609. }
  1610. #endif // MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1611. void CStdMemAlloc::Free( void *pMem )
  1612. {
  1613. pMem = LMDNoteFree( pMem );
  1614. CStdMemAlloc::InternalFree( pMem );
  1615. }
// _expand()-style in-place growth is not supported by this allocator; callers
// must use Realloc instead. Always returns NULL.
void *CStdMemAlloc::Expand_NoLongerSupported( void *pMem, size_t nSize )
{
	return NULL;
}
  1620. //-----------------------------------------------------------------------------
  1621. // Debug versions
  1622. //-----------------------------------------------------------------------------
  1623. void *CStdMemAlloc::Alloc( size_t nSize, const char *pFileName, int nLine )
  1624. {
  1625. size_t nAdjustedSize = LMDAdjustSize( nSize );
  1626. return LMDNoteAlloc( CStdMemAlloc::InternalAlloc( DEF_REGION, nAdjustedSize ), nSize, 0, pFileName, nLine );
  1627. }
  1628. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1629. void *CStdMemAlloc::AllocAlign( size_t nSize, size_t align, const char *pFileName, int nLine )
  1630. {
  1631. size_t nAdjustedSize = LMDAdjustSize( nSize, align );
  1632. return LMDNoteAlloc( CStdMemAlloc::InternalAllocAligned( DEF_REGION, nAdjustedSize, align ), nSize, align, pFileName, nLine );
  1633. }
  1634. #endif // MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1635. void *CStdMemAlloc::Realloc( void *pMem, size_t nSize, const char *pFileName, int nLine )
  1636. {
  1637. if ( UsingLMD() )
  1638. return LMDRealloc( pMem, nSize, 0, pFileName, nLine );
  1639. return CStdMemAlloc::InternalRealloc( pMem, nSize );
  1640. }
  1641. #ifdef MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1642. void * CStdMemAlloc::ReallocAlign( void *pMem, size_t nSize, size_t align, const char *pFileName, int nLine )
  1643. {
  1644. if ( UsingLMD() )
  1645. return LMDRealloc( pMem, nSize, align, pFileName, nLine );
  1646. return CStdMemAlloc::InternalReallocAligned( pMem, nSize, align );
  1647. }
  1648. #endif // MEMALLOC_SUPPORTS_ALIGNED_ALLOCATIONS
  1649. void CStdMemAlloc::Free( void *pMem, const char *pFileName, int nLine )
  1650. {
  1651. pMem = LMDNoteFree( pMem );
  1652. CStdMemAlloc::InternalFree( pMem );
  1653. }
// Debug flavor of the unsupported _expand()-style API. Always returns NULL.
void *CStdMemAlloc::Expand_NoLongerSupported( void *pMem, size_t nSize, const char *pFileName, int nLine )
{
	return NULL;
}
  1658. //-----------------------------------------------------------------------------
  1659. // Region support
  1660. //-----------------------------------------------------------------------------
  1661. void *CStdMemAlloc::RegionAlloc( int region, size_t nSize )
  1662. {
  1663. size_t nAdjustedSize = LMDAdjustSize( nSize );
  1664. return LMDNoteAlloc( CStdMemAlloc::InternalAlloc( region, nAdjustedSize ), nSize );
  1665. }
  1666. void *CStdMemAlloc::RegionAlloc( int region, size_t nSize, const char *pFileName, int nLine )
  1667. {
  1668. size_t nAdjustedSize = LMDAdjustSize( nSize );
  1669. return LMDNoteAlloc( CStdMemAlloc::InternalAlloc( region, nAdjustedSize ), nSize, 0, pFileName, nLine );
  1670. }
  1671. #if defined (LINUX)
  1672. #include <malloc.h>
  1673. #elif defined (OSX)
  1674. #define malloc_usable_size( ptr ) malloc_size( ptr )
  1675. extern "C" {
  1676. extern size_t malloc_size( const void *ptr );
  1677. }
  1678. #endif // LINUX/OSX
  1679. //-----------------------------------------------------------------------------
  1680. // Returns the size of a particular allocation (NOTE: may be larger than the size requested!)
  1681. //-----------------------------------------------------------------------------
// Returns the usable size of pMem (which may exceed the requested size).
// NULL queries total heap usage instead of a block size. Routes through LMD,
// the owning small block heap, or the main heap, in that order.
size_t CStdMemAlloc::GetSize( void *pMem )
{
	if ( !pMem )
		return CalcHeapUsed();
	if ( UsingLMD() )
	{
		return LMDGetSize( pMem );
	}
#if MEM_SBH_ENABLED
	if ( m_PrimarySBH.IsOwner( pMem ) )
	{
		return m_PrimarySBH.GetSize( pMem );
	}
#ifdef MEMALLOC_USE_SECONDARY_SBH
	if ( m_SecondarySBH.IsOwner( pMem ) )
	{
		return m_SecondarySBH.GetSize( pMem );
	}
#endif // MEMALLOC_USE_SECONDARY_SBH
#ifndef MEMALLOC_NO_FALLBACK
	if ( m_FallbackSBH.IsOwner( pMem ) )
	{
		return m_FallbackSBH.GetSize( pMem );
	}
#endif // MEMALLOC_NO_FALLBACK
#endif // MEM_SBH_ENABLED
	return msize_internal( pMem );
}
  1710. //-----------------------------------------------------------------------------
  1711. // Force file + line information for an allocation
  1712. //-----------------------------------------------------------------------------
// Forward a file/line override to the light-memory-debug tracker (no-op when
// LMD is disabled).
void CStdMemAlloc::PushAllocDbgInfo( const char *pFileName, int nLine )
{
	LMDPushAllocDbgInfo( pFileName, nLine );
}
// Undo the most recent PushAllocDbgInfo (no-op when LMD is disabled).
void CStdMemAlloc::PopAllocDbgInfo()
{
	LMDPopAllocDbgInfo();
}
  1721. //-----------------------------------------------------------------------------
  1722. // FIXME: Remove when we make our own heap! Crt stuff we're currently using
  1723. //-----------------------------------------------------------------------------
// CRT-debug compatibility shims: this allocator does not implement the MSVC
// debug-heap features, so these report "nothing set" / "pointer valid".
int32 CStdMemAlloc::CrtSetBreakAlloc( int32 lNewBreakAlloc )
{
	return 0;
}
int CStdMemAlloc::CrtSetReportMode( int nReportType, int nReportMode )
{
	return 0;
}
// Always claims validity; no per-pointer verification is performed here.
int CStdMemAlloc::CrtIsValidHeapPointer( const void *pMem )
{
	return 1;
}
int CStdMemAlloc::CrtIsValidPointer( const void *pMem, unsigned int size, int access )
{
	return 1;
}
// Validate the LMD deferred-free ring and every small block heap (skipped in
// _CERT builds). Always returns 1: corruption is reported via messages, not
// via the return value.
int CStdMemAlloc::CrtCheckMemory( void )
{
#ifndef _CERT
	LMDValidateHeap();
#if MEM_SBH_ENABLED
	if ( !m_PrimarySBH.Validate() )
	{
		ExecuteOnce( Msg( "Small block heap is corrupt (primary)\n " ) );
	}
#ifdef MEMALLOC_USE_SECONDARY_SBH
	if ( !m_SecondarySBH.Validate() )
	{
		ExecuteOnce( Msg( "Small block heap is corrupt (secondary)\n " ) );
	}
#endif // MEMALLOC_USE_SECONDARY_SBH
#ifndef MEMALLOC_NO_FALLBACK
	if ( !m_FallbackSBH.Validate() )
	{
		ExecuteOnce( Msg( "Small block heap is corrupt (fallback)\n " ) );
	}
#endif // MEMALLOC_NO_FALLBACK
#endif // MEM_SBH_ENABLED
#endif // _CERT
	return 1;
}
// More CRT-debug compatibility shims; none of these features are implemented
// by this allocator.
int CStdMemAlloc::CrtSetDbgFlag( int nNewFlag )
{
	return 0;
}
void CStdMemAlloc::CrtMemCheckpoint( _CrtMemState *pState )
{
}
// FIXME: Remove when we have our own allocator
void* CStdMemAlloc::CrtSetReportFile( int nRptType, void* hFile )
{
	return 0;
}
void* CStdMemAlloc::CrtSetReportHook( void* pfnNewHook )
{
	return 0;
}
int CStdMemAlloc::CrtDbgReport( int nRptType, const char * szFile,
int nLine, const char * szModule, const char * pMsg )
{
	return 0;
}
// CRT _heapchk() equivalent. On Windows this runs CrtCheckMemory (which
// reports corruption via messages) and then returns _HEAPOK unconditionally;
// elsewhere it simply returns 1.
int CStdMemAlloc::heapchk()
{
#ifdef _WIN32
	CrtCheckMemory();
	return _HEAPOK;
#else
	return 1;
#endif
}
// Dump allocator statistics using the default "memstats" file base name.
void CStdMemAlloc::DumpStats()
{
	DumpStatsFileBase( "memstats" );
}
// Dump statistics for every small block heap plus the underlying heap to
// "<pchFileBase>.txt" (platform-specific path prefix). On game consoles the
// file is skipped and output goes to the console via Msg() instead.
void CStdMemAlloc::DumpStatsFileBase( char const *pchFileBase )
{
#if defined( _WIN32 ) || defined( _GAMECONSOLE )
	char filename[ 512 ];
	_snprintf( filename, sizeof( filename ) - 1,
#ifdef _X360
	"D:\\%s.txt",
#elif defined( _PS3 )
	"/app_home/%s.txt",
#else
	"%s.txt",
#endif
	pchFileBase );
	// _snprintf does not guarantee termination on truncation.
	filename[ sizeof( filename ) - 1 ] = 0;
	FILE *pFile = ( IsGameConsole() ) ? NULL : fopen( filename, "wt" );
#if MEM_SBH_ENABLED
	if ( pFile )
	fprintf( pFile, "Fixed Page SBH:\n" );
	else
	Msg( "Fixed Page SBH:\n" );
	m_PrimarySBH.DumpStats("Fixed Page SBH", pFile);
#ifdef MEMALLOC_USE_SECONDARY_SBH
	if ( pFile )
	fprintf( pFile, "Secondary Fixed Page SBH:\n" );
	else
	Msg( "Secondary Page SBH:\n" );
	m_SecondarySBH.DumpStats("Secondary Page SBH", pFile);
#endif // MEMALLOC_USE_SECONDARY_SBH
#ifndef MEMALLOC_NO_FALLBACK
	if ( pFile )
	fprintf( pFile, "\nFallback SBH:\n" );
	else
	Msg( "\nFallback SBH:\n" );
	m_FallbackSBH.DumpStats("Fallback SBH", pFile); // Dump statistics to small block heap
#endif // MEMALLOC_NO_FALLBACK
#endif // MEM_SBH_ENABLED
#ifdef _PS3
	// PS3 also reports the CRT allocator's own usage counters.
	malloc_managed_size mms;
	(g_pMemOverrideRawCrtFns->pfn_malloc_stats)( &mms );
	Msg( "PS3 malloc_stats: %u / %u / %u \n", mms.current_inuse_size, mms.current_system_size, mms.max_system_size );
#endif // _PS3
	heapstats_internal( pFile );
#if defined( _X360 )
	XBX_rMemDump( filename );
#endif
	if ( pFile )
	fclose( pFile );
#endif // _WIN32 || _GAMECONSOLE
}
// Reserve a virtual address range of up to numMaxBytes via the shared virtual
// memory manager. Returns NULL on platforms without one.
IVirtualMemorySection * CStdMemAlloc::AllocateVirtualMemorySection( size_t numMaxBytes )
{
#if defined( _GAMECONSOLE ) || defined( _WIN32 )
	extern IVirtualMemorySection * VirtualMemoryManager_AllocateVirtualMemorySection( size_t numMaxBytes );
	return VirtualMemoryManager_AllocateVirtualMemorySection( numMaxBytes );
#else
	return NULL;
#endif
}
//-----------------------------------------------------------------------------
// Sum memory used by allocations whose tag matches pchSubStr. The standard
// allocator keeps no per-allocation names, so this always reports zero;
// only the debug heap implements it.
//-----------------------------------------------------------------------------
size_t CStdMemAlloc::ComputeMemoryUsedBy( char const *pchSubStr )
{
	return 0;//dbg heap only.
}
  1861. static inline size_t ExtraDevkitMemory( void )
  1862. {
  1863. #if defined( _PS3 )
  1864. // 213MB are available in retail mode, so adjust free mem to reflect that even if we're in devkit mode
  1865. const size_t RETAIL_SIZE = 213*1024*1024;
  1866. static sys_memory_info stat;
  1867. sys_memory_get_user_memory_size( &stat );
  1868. if ( stat.total_user_memory > RETAIL_SIZE )
  1869. return ( stat.total_user_memory - RETAIL_SIZE );
  1870. #elif defined( _X360 )
  1871. // TODO: detect the new 1GB devkit...
  1872. #endif // _PS3/_X360
  1873. return 0;
  1874. }
//-----------------------------------------------------------------------------
// Report overall physical memory usage. On consoles this queries the OS for
// free physical pages (adjusted down to retail-kit figures) and adds the free
// space inside the primary allocator region; on other platforms both outputs
// are zeroed. No-op if either output pointer is NULL.
//-----------------------------------------------------------------------------
void CStdMemAlloc::GlobalMemoryStatus( size_t *pUsedMemory, size_t *pFreeMemory )
{
	if ( !pUsedMemory || !pFreeMemory )
		return;
	size_t dlMallocFree = 0;
#if defined( USE_DLMALLOC )
	// Account for free memory contained within DLMalloc's FIRST region. The rationale is as follows:
	//  - the first region is supposed to service large allocations via virtual allocation, and to grow as
	//    needed (until all physical pages are used), so true 'out of memory' failures should occur there.
	//  - other regions (the 2-256kb 'medium block heap', or per-DLL heaps, and the Small Block Heap)
	//    are sized to a pre-determined high watermark, and not intended to grow. Free memory within
	//    those regions is not available for large allocations, so adding that to the 'free memory'
	//    yields confusing data which does not correspond well with out-of-memory failures.
	mallinfo info = mspace_mallinfo( g_AllocRegions[ 0 ] );
	dlMallocFree += info.fordblks;
#endif // USE_DLMALLOC
#if defined ( _X360 )
	// GlobalMemoryStatus tells us how much physical memory is free
	MEMORYSTATUS stat;
	::GlobalMemoryStatus( &stat );
	*pFreeMemory = stat.dwAvailPhys;
	*pFreeMemory += dlMallocFree;
	// Adjust free mem to reflect a retail box, even if we're using a devkit with extra memory
	*pFreeMemory -= ExtraDevkitMemory();
	// Used is total minus free (discount the 32MB system reservation)
	*pUsedMemory = ( stat.dwTotalPhys - 32*1024*1024 ) - *pFreeMemory;
#elif defined( _PS3 )
	// NOTE: we use dlmalloc instead of the system heap, so we do NOT count the system heap's free space!
	//static malloc_managed_size mms;
	//(g_pMemOverrideRawCrtFns->pfn_malloc_stats)( &mms );
	//int heapFree = mms.current_system_size - mms.current_inuse_size;
	// sys_memory_get_user_memory_size tells us how much PPU memory is used/free
	static sys_memory_info stat;
	sys_memory_get_user_memory_size( &stat );
	*pFreeMemory = stat.available_user_memory;
	*pFreeMemory += dlMallocFree;
	// NOTE(review): used is computed BEFORE the devkit adjustment here
	// (unlike the X360 path above) - presumably intentional; confirm.
	*pUsedMemory = stat.total_user_memory - *pFreeMemory;
	// Adjust free mem to reflect a retail box, even if we're using a devkit with extra memory
	*pFreeMemory -= ExtraDevkitMemory();
#else // _X360/_PS3/other
	// no data available on other platforms
	*pFreeMemory = 0;
	*pUsedMemory = 0;
#endif // _X360/_PS3//other
}
// Fixed-size table of name/value pairs reported by GetGenericMemoryStats().
// g_nMemStats counts the currently-valid entries; the table is rebuilt from
// scratch on every GetGenericMemoryStats() call (shared state, not thread-safe).
#define MAX_GENERIC_MEMORY_STATS 64
GenericMemoryStat_t g_MemStats[MAX_GENERIC_MEMORY_STATS];
int g_nMemStats = 0;
  1923. static inline int AddGenericMemoryStat( const char *name, int value )
  1924. {
  1925. Assert( g_nMemStats < MAX_GENERIC_MEMORY_STATS );
  1926. if ( g_nMemStats < MAX_GENERIC_MEMORY_STATS )
  1927. {
  1928. g_MemStats[ g_nMemStats ].name = name;
  1929. g_MemStats[ g_nMemStats ].value = value;
  1930. g_nMemStats++;
  1931. }
  1932. return g_nMemStats;
  1933. }
//-----------------------------------------------------------------------------
// Rebuild and return the global table of name/value memory statistics.
// *ppMemoryStats is pointed at a shared static table (not thread-safe);
// returns the entry count, or 0 if ppMemoryStats is NULL.
//-----------------------------------------------------------------------------
int CStdMemAlloc::GetGenericMemoryStats( GenericMemoryStat_t **ppMemoryStats )
{
	if ( !ppMemoryStats )
		return 0;
	// Restart the shared table from scratch on every call.
	g_nMemStats = 0;
#if MEM_SBH_ENABLED
	{
		// Small block heap
		size_t SBHCommitted = 0, SBHAllocated = 0;
		size_t commitTmp, allocTmp;
#if MEM_SBH_ENABLED
		// NOTE(review): this nested #if is redundant (already inside the same guard).
		m_PrimarySBH.Usage( commitTmp, allocTmp );
		SBHCommitted += commitTmp; SBHAllocated += allocTmp;
#ifdef MEMALLOC_USE_SECONDARY_SBH
		m_SecondarySBH.Usage( commitTmp, allocTmp );
		SBHCommitted += commitTmp; SBHAllocated += allocTmp;
#endif // MEMALLOC_USE_SECONDARY_SBH
#ifndef MEMALLOC_NO_FALLBACK
		m_FallbackSBH.Usage( commitTmp, allocTmp );
		SBHCommitted += commitTmp; SBHAllocated += allocTmp;
#endif // MEMALLOC_NO_FALLBACK
#endif // MEM_SBH_ENABLED
		// High watermark persists across calls via a function-static.
		static size_t SBHMaxCommitted = 0; SBHMaxCommitted = MAX( SBHMaxCommitted, SBHCommitted );
		AddGenericMemoryStat( "SBH_cur", (int)SBHCommitted );
		AddGenericMemoryStat( "SBH_max", (int)SBHMaxCommitted );
	}
#endif // MEM_SBH_ENABLED
#if defined( USE_DLMALLOC )
#if !defined( MEMALLOC_REGIONS ) && defined( MEMALLOC_SEGMENT_MIXED )
	{
		// Medium block heap
		mallinfo infoMBH = mspace_mallinfo( g_AllocRegions[ 1 ] );
		size_t nMBHCurUsed = infoMBH.uordblks;// nMBH_WRONG_MaxUsed = infoMBH.usmblks; // TODO: figure out why dlmalloc mis-reports MBH max usage (it just returns the footprint)
		static size_t nMBHMaxUsed = 0; nMBHMaxUsed = MAX( nMBHMaxUsed, nMBHCurUsed );
		AddGenericMemoryStat( "MBH_cur", (int)nMBHCurUsed );
		AddGenericMemoryStat( "MBH_max", (int)nMBHMaxUsed );
		// Large block heap
		mallinfo infoLBH = mspace_mallinfo( g_AllocRegions[ 0 ] );
		size_t nLBHCurUsed = mspace_footprint( g_AllocRegions[ 0 ] ), nLBHMaxUsed = mspace_max_footprint( g_AllocRegions[ 0 ] ), nLBHArenaSize = infoLBH.arena, nLBHFree = infoLBH.fordblks;
		AddGenericMemoryStat( "LBH_cur", (int)nLBHCurUsed );
		AddGenericMemoryStat( "LBH_max", (int)nLBHMaxUsed );
		// LBH arena used+free (these are non-virtual allocations - there should be none, since we only allocate 256KB+ items in the LBH)
		// TODO: I currently see the arena grow to 320KB due to a larger allocation being realloced down... if this gets worse, add an 'ALWAYS use VMM' flag to the mspace.
		AddGenericMemoryStat( "LBH_arena", (int)nLBHArenaSize );
		AddGenericMemoryStat( "LBH_free", (int)nLBHFree );
	}
#else // (!MEMALLOC_REGIONS && MEMALLOC_SEGMENT_MIXED)
	{
		// Single dlmalloc heap (TODO: per-DLL heap stats, if we resurrect that)
		mallinfo info = mspace_mallinfo( g_AllocRegions[ 0 ] );
		AddGenericMemoryStat( "mspace_cur", (int)info.uordblks );
		AddGenericMemoryStat( "mspace_max", (int)info.usmblks );
		AddGenericMemoryStat( "mspace_size", (int)mspace_footprint( g_AllocRegions[ 0 ] ) );
	}
#endif // (!MEMALLOC_REGIONS && MEMALLOC_SEGMENT_MIXED)
#endif // USE_DLMALLOC
	size_t nMaxPhysMemUsed_Delta;
	nMaxPhysMemUsed_Delta = 0;
#ifdef _PS3
	{
		// System heap (should not exist!)
		static malloc_managed_size mms;
		(g_pMemOverrideRawCrtFns->pfn_malloc_stats)( &mms );
		if ( mms.current_system_size )
			AddGenericMemoryStat( "sys_heap", (int)mms.current_system_size );
		// Virtual Memory Manager
		size_t nReserved = 0, nReservedMax = 0, nCommitted = 0, nCommittedMax = 0;
		extern void VirtualMemoryManager_GetStats( size_t &nReserved, size_t &nReservedMax, size_t &nCommitted, size_t &nCommittedMax );
		VirtualMemoryManager_GetStats( nReserved, nReservedMax, nCommitted, nCommittedMax );
		AddGenericMemoryStat( "VMM_reserved", (int)nReserved );
		AddGenericMemoryStat( "VMM_reserved_max", (int)nReservedMax );
		AddGenericMemoryStat( "VMM_committed", (int)nCommitted );
		AddGenericMemoryStat( "VMM_committed_max", (int)nCommittedMax );
		// Estimate memory committed by memory stacks (these account for all VMM allocations other than the SBH/MBH/LBH)
		size_t nHeapTotal = 1024*1024*MBYTES_PRIMARY_SBH;
#if defined( USE_DLMALLOC )
		for ( int i = 0; i < ARRAYSIZE(g_AllocRegions); i++ )
		{
			nHeapTotal += mspace_footprint( g_AllocRegions[i] );
		}
#endif // USE_DLMALLOC
		size_t nMemStackTotal = nCommitted - nHeapTotal;
		AddGenericMemoryStat( "MemStacks", (int)nMemStackTotal );
		// On PS3, we can more accurately determine 'phys_free_min', since we know nCommittedMax
		// (otherwise nPhysFreeMin is only updated intermittently; when this function is called):
		nMaxPhysMemUsed_Delta = nCommittedMax - nCommitted;
	}
#endif // _PS3
#if defined( _GAMECONSOLE )
	// Total/free/min-free physical pages
	{
#if defined( _X360 )
		MEMORYSTATUS stat;
		::GlobalMemoryStatus( &stat );
		size_t nPhysTotal = stat.dwTotalPhys, nPhysFree = stat.dwAvailPhys - ExtraDevkitMemory();
#elif defined( _PS3 )
		static sys_memory_info stat;
		sys_memory_get_user_memory_size( &stat );
		size_t nPhysTotal = stat.total_user_memory, nPhysFree = stat.available_user_memory - ExtraDevkitMemory();
#endif // _X360/_PS3
		// Min-free watermark persists across calls via a function-static.
		static size_t nPhysFreeMin = nPhysTotal;
		nPhysFreeMin = MIN( nPhysFreeMin, ( nPhysFree - nMaxPhysMemUsed_Delta ) );
		AddGenericMemoryStat( "phys_total", (int)nPhysTotal );
		AddGenericMemoryStat( "phys_free", (int)nPhysFree );
		AddGenericMemoryStat( "phys_free_min", (int)nPhysFreeMin );
	}
#endif // _GAMECONSOLE
	*ppMemoryStats = &g_MemStats[0];
	return g_nMemStats;
}
  2044. void CStdMemAlloc::CompactHeap()
  2045. {
  2046. #if MEM_SBH_ENABLED
  2047. if ( !m_CompactMutex.TryLock() )
  2048. {
  2049. return;
  2050. }
  2051. if ( m_bInCompact )
  2052. {
  2053. m_CompactMutex.Unlock();
  2054. return;
  2055. }
  2056. m_bInCompact = true;
  2057. size_t nBytesRecovered;
  2058. #ifndef MEMALLOC_NO_FALLBACK
  2059. nBytesRecovered = m_FallbackSBH.Compact( false );
  2060. if ( nBytesRecovered && IsGameConsole() )
  2061. {
  2062. Msg( "Compact freed %d bytes from virtual heap (up to 256k still committed)\n", nBytesRecovered );
  2063. }
  2064. #endif // MEMALLOC_NO_FALLBACK
  2065. nBytesRecovered = m_PrimarySBH.Compact( false );
  2066. #ifdef MEMALLOC_USE_SECONDARY_SBH
  2067. nBytesRecovered += m_SecondarySBH.Compact( false );
  2068. #endif
  2069. if ( nBytesRecovered && IsGameConsole() )
  2070. {
  2071. Msg( "Compact released %d bytes from the SBH\n", nBytesRecovered );
  2072. }
  2073. nBytesRecovered = compact_internal();
  2074. if ( nBytesRecovered && IsGameConsole() )
  2075. {
  2076. Msg( "Compact released %d bytes from the mixed block heap\n", nBytesRecovered );
  2077. }
  2078. m_bInCompact = false;
  2079. m_CompactMutex.Unlock();
  2080. #endif // MEM_SBH_ENABLED
  2081. }
  2082. void CStdMemAlloc::CompactIncremental()
  2083. {
  2084. #if MEM_SBH_ENABLED
  2085. if ( !m_CompactMutex.TryLock() )
  2086. {
  2087. return;
  2088. }
  2089. if ( m_bInCompact )
  2090. {
  2091. m_CompactMutex.Unlock();
  2092. return;
  2093. }
  2094. m_bInCompact = true;
  2095. #ifndef MEMALLOC_NO_FALLBACK
  2096. m_FallbackSBH.Compact( true );
  2097. #endif
  2098. m_PrimarySBH.Compact( true );
  2099. #ifdef MEMALLOC_USE_SECONDARY_SBH
  2100. m_SecondarySBH.Compact( true );
  2101. #endif
  2102. m_bInCompact = false;
  2103. m_CompactMutex.Unlock();
  2104. #endif // MEM_SBH_ENABLED
  2105. }
  2106. MemAllocFailHandler_t CStdMemAlloc::SetAllocFailHandler( MemAllocFailHandler_t pfnMemAllocFailHandler )
  2107. {
  2108. MemAllocFailHandler_t pfnPrevious = m_pfnFailHandler;
  2109. m_pfnFailHandler = pfnMemAllocFailHandler;
  2110. return pfnPrevious;
  2111. }
  2112. size_t CStdMemAlloc::DefaultFailHandler( size_t nBytes )
  2113. {
  2114. if ( IsX360() )
  2115. {
  2116. #ifdef _X360
  2117. ExecuteOnce(
  2118. {
  2119. char buffer[256];
  2120. _snprintf( buffer, sizeof( buffer ), "***** Memory pool overflow, attempted allocation size: %u (not a critical error)\n", nBytes );
  2121. XBX_OutputDebugString( buffer );
  2122. }
  2123. );
  2124. #endif // _X360
  2125. }
  2126. return 0;
  2127. }
//-----------------------------------------------------------------------------
// Attach map-name/comment context to subsequent stat dumps. Intentionally a
// no-op for the standard allocator - it keeps no such metadata (presumably
// implemented by the debug allocator; confirm against CDbgMemAlloc).
//-----------------------------------------------------------------------------
void CStdMemAlloc::SetStatsExtraInfo( const char *pMapName, const char *pComment )
{
}
  2131. void CStdMemAlloc::SetCRTAllocFailed( size_t nSize )
  2132. {
  2133. m_sMemoryAllocFailed = nSize;
  2134. DebuggerBreakIfDebugging();
  2135. #if defined( _PS3 ) && defined( _DEBUG )
  2136. DebuggerBreak();
  2137. #endif // _PS3
  2138. char buffer[256];
  2139. #ifdef COMPILER_GCC
  2140. _snprintf( buffer, sizeof( buffer ), "***** OUT OF MEMORY! attempted allocation size: %u ****\n", nSize );
  2141. #else
  2142. _snprintf( buffer, sizeof( buffer ), "***** OUT OF MEMORY! attempted allocation size: %u ****\n", nSize );
  2143. #endif // COMPILER_GCC
  2144. #ifdef _X360
  2145. XBX_OutputDebugString( buffer );
  2146. if ( !Plat_IsInDebugSession() )
  2147. {
  2148. XBX_CrashDump( true );
  2149. #if defined( _DEMO )
  2150. XLaunchNewImage( XLAUNCH_KEYWORD_DEFAULT_APP, 0 );
  2151. #else
  2152. XLaunchNewImage( "default.xex", 0 );
  2153. #endif // _DEMO
  2154. }
  2155. #elif defined(_WIN32 )
  2156. OutputDebugString( buffer );
  2157. if ( !Plat_IsInDebugSession() )
  2158. {
  2159. WriteMiniDump();
  2160. abort();
  2161. }
  2162. #else // _X360/_WIN32/other
  2163. printf( "%s\n", buffer );
  2164. if ( !Plat_IsInDebugSession() )
  2165. {
  2166. WriteMiniDump();
  2167. #if defined( _PS3 )
  2168. DumpStats();
  2169. #endif
  2170. Plat_ExitProcess( 0 );
  2171. }
  2172. #endif // _X360/_WIN32/other
  2173. }
//-----------------------------------------------------------------------------
// Size of the most recently failed allocation request (0 if none); the value
// is recorded by SetCRTAllocFailed().
//-----------------------------------------------------------------------------
size_t CStdMemAlloc::MemoryAllocFailed()
{
	return m_sMemoryAllocFailed;
}
  2178. #endif // MEM_IMPL_TYPE_STD
  2179. #endif // STEAM