Source code of Windows XP (NT5)

/*
 * This file implements an arena that tracks memory allocations and frees.
 * Isaache
 */
#include <ADs.hxx>

#if DBG && !defined(MSVC)       // we don't have access to nt hdrs with MSVC

// #include <except.hxx>
#include <caiheap.h>
#include <symtrans.h>

#pragma optimize( "y", off )

DECLARE_INFOLEVEL( heap );
DECLARE_DEBUG( heap );
#define heapDebugOut(x) heapInlineDebugOut x

/*
 * The maximum number of AllocArenaCreate's we expect
 */
static const MAXARENAS = 5;

/*
 * When printing leak dumps, the max number we will print out.  Note, we keep
 * track of all of them, we just don't want to take forever to terminate a
 * process
 */
static const MAXDUMP = 50;

/*
 * The maximum size we'll let any single debug arena get
 */
static const ULONG ARENASIZE = 1024*1024;

/*
 * The unit of growth for the arena holding the AllocArena data.
 * Must be a power of 2
 */
static const ALLOCRECINCR = 128;

static AllocArena *AllocArenas[ MAXARENAS + 1 ];

//+---------------------------------------------------------------------------
//
//  Function:   RecordStack function(s) below...per processor type
//
//  Synopsis:   Record a stack backtrace into fTrace
//
//  Arguments:  [cFrameSkipped] -- How many stack frames to skip over and
//                                 not record
//              [fTrace]        -- The recorded frames are put in here
//
//  Returns:    A checksum of the stack frames for fast initial lookups
//
//  Notes:      If we can do stack backtracing for whatever processor we're
//              compiling for, CANDOSTACK is #define'd
//
//----------------------------------------------------------------------------
#if defined (i386) && !defined(WIN95)

static inline DWORD
RecordStack( int cFrameSkipped, void *fTrace[ DEPTHTRACE ] )
{
#define CANDOSTACK
    ULONG sum;
    USHORT cStack;

    // This routine is found in src/ntos/rtl/i386
    // extern "C" USHORT NTAPI
    // RtlCaptureStackBackTrace(ULONG, ULONG, PVOID *, PULONG);

    cStack = RtlCaptureStackBackTrace(cFrameSkipped + 1,
                                      DEPTHTRACE, fTrace, &sum );
    return sum;
}

#else  // ! i386

static inline DWORD
RecordStack( int cFrameSkipped, void *fTrace[ DEPTHTRACE ] )
{
#if defined(CANDOSTACK)
#undef CANDOSTACK
#endif
    return 0;
}

#endif // ! i386
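
//
// (Note: CANDOSTACK only ends up defined on x86 NT builds -- the
// #if above excludes other processors and Win95 -- so on every other
// target the "#if defined( CANDOSTACK )" bookkeeping throughout this
// file compiles out and RecordStack simply returns a zero checksum.)
//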
//
// This allows external monitoring of heap activity by caiheap.exe
//
STDAPI_( AllocArena ** )
AllocArenaAddr( void )
{
    return AllocArenas;
}

//
// Create an arena for recording allocation statistics.  Return the arena
// pointer to the caller
//
STDAPI_( AllocArena * )
AllocArenaCreate( DWORD memctx, char FAR *comment )
{
    struct AllocArena *paa = NULL;

    if( memctx == MEMCTX_TASK ) {
#if defined( CANDOSTACK )
        if( heapInfoLevel & DEB_WARN ) {
            paa = (struct AllocArena *)VirtualAlloc(
                        NULL, ARENASIZE, MEM_RESERVE, PAGE_NOACCESS );
            if( paa == NULL )
                return NULL;
            paa = (AllocArena *)VirtualAlloc( paa,
                        sizeof(*paa)+(ALLOCRECINCR-1)*sizeof(HeapAllocRec),
                        MEM_COMMIT, PAGE_READWRITE );
        }
        else
#endif
        {
            paa = (struct AllocArena *)calloc( 1, sizeof(*paa) );
        }
    }
    if( paa == NULL )
        return NULL;

    memcpy( paa->Signature, HEAPSIG, sizeof(HEAPSIG));
    if( comment )
        strncpy(paa->comment, comment, sizeof(paa->comment) );
    InitializeCriticalSection( &paa->csExclusive );

    for( int i=0; i < MAXARENAS; i++ )
        if( AllocArenas[i] == 0 ) {
            AllocArenas[i] = paa;
            break;
        }

#if defined( CANDOSTACK )
    if( (heapInfoLevel & DEB_WARN) == 0 )
#endif
    {
        paa->flags.KeepStackTrace = 0;
        paa->AllocRec[0].paa = paa;
        return paa;
    }

#if defined( CANDOSTACK )
    paa->cRecords = ALLOCRECINCR;
    paa->cTotalRecords = ALLOCRECINCR;
    paa->flags.KeepStackTrace = 1;
    return paa;
#endif
}
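
//
// A minimal usage sketch (illustrative only, not part of this file): a
// debug allocator would typically create one arena per heap at startup,
// for example
//
//      static AllocArena *g_paa;
//      ...
//      g_paa = AllocArenaCreate( MEMCTX_TASK, "task allocator" );
//
// The name g_paa and the comment string are hypothetical; the string is
// just a label copied into paa->comment for the leak report.
//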
//+---------------------------------------------------------------------------
//
//  Function:   AllocArenaRecordAlloc
//
//  Synopsis:   Keep a hash table of the stack backtraces of the allocations
//              we've done.
//
//  Arguments:  [paa]   -- Return value from AllocArenaCreate() above
//              [bytes] -- the number of bytes being allocated by the caller.
//                         This value is recorded in the stack backtrace entry.
//
//  Algorithm:  The arena for the AllocArena is created with VirtualAlloc.
//              pAllocArena->cRecords is the index of the next
//              free record.  The first ALLOCRECINCR records are heads
//              of separate lists of the records.
//
//  Returns:    A pointer to the AllocRec structure recording the entry.
//              Can return NULL if we can't record the allocation.
//
//----------------------------------------------------------------------------
STDAPI_( HeapAllocRec FAR * )
AllocArenaRecordAlloc( AllocArena *paa, size_t bytes )
{
    if( paa == NULL )
        return NULL;

    EnterCriticalSection( &paa->csExclusive );
    if( bytes ) {
        paa->cAllocs++;
        paa->cBytesNow += bytes;
        paa->cBytesTotal += bytes;
    } else {
        paa->czAllocs++;
    }

    //
    // Record 'size' in the histogram of requests
    //
    for( int i=31; i>=0; i-- )
        if( bytes & (1<<i) ) {
            ++(paa->Histogram.total[i]);
            if( paa->Histogram.simul[i] < ++(paa->Histogram.now[i]))
                paa->Histogram.simul[i] = paa->Histogram.now[i];
            break;
        }
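
    //
    // (The loop above finds the highest set bit of 'bytes', so bucket i
    // counts requests in the range 2^i .. 2^(i+1)-1; a 40-byte request,
    // for example, lands in the 32..63 bucket.  Histogram.simul keeps the
    // high-water mark of simultaneous allocations per bucket.)
    //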
    LeaveCriticalSection( &paa->csExclusive );

#if defined( CANDOSTACK )
    if( paa->flags.KeepStackTrace == 0 )
#endif
        return &paa->AllocRec[0];

#if defined( CANDOSTACK )
    DWORD sum;
    struct HeapAllocRec *phar, *hp;
    void *fTrace[ DEPTHTRACE ];

    //
    // See if we find an existing record of this stack backtrace
    //
    memset( fTrace, '\0', sizeof( fTrace ) );
    sum = RecordStack( 2, fTrace );

    hp = &paa->AllocRec[ sum & (ALLOCRECINCR-1) ];
    EnterCriticalSection( &paa->csExclusive );
    for( phar = hp; phar != NULL; phar = phar->u.next )
        if( phar->sum == sum &&
            !memcmp(phar->fTrace, fTrace, sizeof(fTrace)))
        {
            phar->count++;
            phar->bytes += bytes;
            phar->total.bytes += bytes;
            phar->total.count++;
            phar->paa = paa;
            LeaveCriticalSection( &paa->csExclusive );
            return phar;
        }

    //
    // We have no record of this allocation.  Make one!
    //
    if( hp->total.count && paa->cRecords == paa->cTotalRecords ) {
        //
        // The arena is currently full.  Grow it by ALLOCRECINCR
        //
        AllocArena *npHeap;

        npHeap = (AllocArena *)VirtualAlloc(
                    paa,
                    sizeof(AllocArena)+
                        ((paa->cTotalRecords + ALLOCRECINCR) *
                         sizeof(HeapAllocRec) ),
                    MEM_COMMIT, PAGE_READWRITE );
        if( npHeap != paa ) {
            paa->cMissed++;
            LeaveCriticalSection( &paa->csExclusive );
            return NULL;
        }
        paa->cTotalRecords += ALLOCRECINCR;
    }

    if( hp->total.count == 0 ) {
        phar = hp;
    } else {
        phar = &paa->AllocRec[ paa->cRecords++ ];
        phar->u.next = hp->u.next;
        hp->u.next = phar;
    }

    paa->cPaths++;
    memcpy( phar->fTrace, fTrace, sizeof( fTrace ) );
    phar->count = phar->total.count = 1;
    phar->bytes = phar->total.bytes = bytes;
    phar->sum = sum;
    phar->paa = paa;
    LeaveCriticalSection( &paa->csExclusive );
    return phar;
#endif
}
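
//
// How the returned record is meant to be paired with the free path (a
// sketch under assumed conventions, not code from this file): a debug
// heap wrapper would stash the HeapAllocRec pointer next to the user
// block so the matching free can find it.  Something like:
//
//      struct DbgHeader { HeapAllocRec FAR *rec; size_t cb; };
//
//      void *DbgAlloc( AllocArena *paa, size_t cb )
//      {
//          DbgHeader *h = (DbgHeader *)malloc( sizeof(DbgHeader) + cb );
//          if( h == NULL )
//              return NULL;
//          h->rec = AllocArenaRecordAlloc( paa, cb );   // may be NULL
//          h->cb  = cb;
//          return h + 1;
//      }
//
//      void DbgFree( void *pv )
//      {
//          DbgHeader *h = (DbgHeader *)pv - 1;
//          AllocArenaRecordFree( h->rec, h->cb );       // NULL rec is a no-op
//          free( h );
//      }
//
// DbgHeader, DbgAlloc and DbgFree are hypothetical names; the real callers
// live elsewhere in the tree.
//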
//+---------------------------------------------------------------------------
//
//  Function:   AllocArenaRecordReAlloc
//
//  Synopsis:   Update the record to reflect the fact that we've ReAlloc'd
//              the memory chunk.
//
//  Arguments:  [vp]       -- Return value from AllocArenaRecordAlloc() above
//              [oldbytes] -- size of the memory before ReAllocation
//              [newbytes] -- new size of the memory
//
//----------------------------------------------------------------------------
STDAPI_( void )
AllocArenaRecordReAlloc( HeapAllocRec FAR *vp, size_t oldbytes, size_t newbytes)
{
    if( vp == NULL )
        return;

    struct AllocArena *paa = vp->paa;

    EnterCriticalSection( &paa->csExclusive );
    paa->cReAllocs++;
    paa->cBytesNow -= oldbytes;
    paa->cBytesNow += newbytes;
    if( newbytes > oldbytes )
        paa->cBytesTotal += newbytes - oldbytes;

    //
    // Take 'oldbytes' out of the histogram of requests
    //
    for( int i=31; i>=0; i-- )
        if( oldbytes & (1<<i) ) {
            --(paa->Histogram.now[i]);
            break;
        }

    //
    // Record 'newbytes' in the histogram of requests
    //
    for( i=31; i>=0; i-- )
        if( newbytes & (1<<i) ) {
            ++(paa->Histogram.total[i]);
            if( paa->Histogram.simul[i] < ++(paa->Histogram.now[i]))
                paa->Histogram.simul[i] = paa->Histogram.now[i];
            break;
        }

#if defined( CANDOSTACK )
    if( paa->flags.KeepStackTrace ) {
        vp->bytes -= oldbytes;
        vp->bytes += newbytes;
        vp->total.count++;
        if( newbytes > oldbytes )
            vp->total.bytes += newbytes;
    }
#endif
    LeaveCriticalSection( &paa->csExclusive );
}
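
//
// (Observation: on a growing realloc the per-path total above,
// vp->total.bytes, is charged the full new size, while the arena-wide
// cBytesTotal only picks up the difference, so the two totals are not
// directly comparable.)
//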
//+---------------------------------------------------------------------------
//
//  Function:   AllocArenaRecordFree
//
//  Synopsis:   Caller has freed memory -- keep accounting up to date
//
//  Arguments:  [vp]    -- Value returned by AllocArenaRecordAlloc() above
//              [bytes] -- The number of bytes being freed
//
//  Algorithm:  AllocRec structures, once allocated, are never actually
//              freed back to the Hash memory arena.  This helps us
//              understand historical use of the heap.
//
//----------------------------------------------------------------------------
STDAPI_( void )
AllocArenaRecordFree( HeapAllocRec FAR *vp, size_t bytes )
{
    if( vp == NULL )
        return;

    struct AllocArena *paa = vp->paa;

    EnterCriticalSection( &paa->csExclusive );

    //
    // Record this free in the histogram
    //
    for( int i=31; i>=0; i-- )
        if( bytes & (1<<i) ) {
            --(paa->Histogram.now[i]);
            break;
        }

    paa->cFrees++;
    paa->cBytesNow -= bytes;

#if defined( CANDOSTACK )
    if( paa->flags.KeepStackTrace ) {
        vp->count--;
        vp->bytes -= bytes;
    }
#endif
    LeaveCriticalSection( &paa->csExclusive );
}

STDAPI_( void )
AllocArenaDumpRecord( HeapAllocRec FAR *bp )
{
#if defined( CANDOSTACK )
    char achBuffer[ MAX_TRANSLATED_LEN ], *p;

    heapDebugOut((DEB_WARN, "*** %d allocs, %u bytes:\n",
                  bp->count, bp->bytes ));

    for( int j=0; j<DEPTHTRACE && bp->fTrace[j]; j++ )
    {
        TranslateAddress(bp->fTrace[j], achBuffer );
        if( p = strchr( achBuffer, '\n' ) )
            *p = '\0';
        heapDebugOut((DEB_WARN, " %s\n", achBuffer));
    }
#endif
}

extern "C" ULONG DbgPrint( PCH Format, ... );

STDAPI_( void )
AllocArenaDump( AllocArena *paa )
{
    if( paa == NULL ) {
        for( int i = 0; i < MAXARENAS && AllocArenas[i]; i++ )
            AllocArenaDump( AllocArenas[i] );
        return;
    }

    char *cmdline = GetCommandLineA();
    if( cmdline == NULL )
        cmdline = "???";

    HeapAllocRec *bp = paa->AllocRec;
    HeapAllocRec *ep = bp + paa->cRecords;

    if( paa->cBytesNow )
        heapDebugOut((DEB_WARN,
                "***** %u bytes leaked mem for %s in '%s'\n",
                paa->cBytesNow,
                paa->comment,
                cmdline ));

#if defined( CANDOSTACK )
    if( paa->cBytesNow && paa->flags.KeepStackTrace )
    {
        int cleaks = 0;

        for( ; bp < ep; bp++) {
            if( bp->count )
                ++cleaks;
        }

        if( cleaks ) {
            heapDebugOut((DEB_WARN, "***** %s %u MEM LEAKS\n",
                          paa->comment, cleaks ));
            if( heapInfoLevel & DEB_TRACE ) {
                HeapAllocRec *bp;
                UINT maxdump = MAXDUMP;

                for( bp = paa->AllocRec; maxdump && bp<ep; bp++)
                    if( bp->count ) {
                        heapDebugOut((DEB_TRACE, "\n"));
                        AllocArenaDumpRecord( bp );
                        maxdump--;
                    }
            } else if( cleaks )
                heapDebugOut((DEB_WARN, "** Set formidbl!heapInfoLevel to x707 for leak backtrace\n"));
        }
    }
#endif

    if( (heapInfoLevel & DEB_TRACE) && paa->cBytesTotal )
    {
        heapDebugOut((DEB_TRACE,"\n"));
        heapDebugOut((DEB_TRACE,
            "'%s' Memory Stats: %u allocations, %u frees\n",
            cmdline, paa->cAllocs, paa->cFrees ));
        if( paa->czAllocs )
            heapDebugOut((DEB_TRACE,
                "\t%u zero allocs\n", paa->czAllocs ));
        heapDebugOut((DEB_TRACE,
            "\t%u bytes allocated\n", paa->cBytesTotal ));
        heapDebugOut((DEB_TRACE,
            "*** Histogram of Allocated Mem Sizes ***\n"));
        heapDebugOut((DEB_TRACE, " Min Max\t Tot\t Simul\n" ));
        for( int i=0; i < 32; i++ )
            if( paa->Histogram.total[i] )
            {
                heapDebugOut((DEB_TRACE,
                    "%6u -> %6u\t%6u\t%6u\n",
                    1<<i, (1<<(i+1))-1,
                    paa->Histogram.total[i],
                    paa->Histogram.simul[i]
                    ));
            }
    }
}
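
//
// Typical shutdown usage (a sketch, not taken from this file): calling
// AllocArenaDump with a NULL arena walks the AllocArenas[] table built up
// by AllocArenaCreate() and reports every arena's leaks and histogram.
//
//      AllocArenaDump( NULL );     // dump all arenas created so far
//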
#endif // DBG && !defined(MSVC)