Leaked source code of Windows Server 2003


//+-------------------------------------------------------------------------
//
//  Microsoft Windows
//  Copyright (C) Microsoft Corporation, 1992 - 1999.
//
//  File:       alocdbg.cxx
//
//  Contents:   This file implements an arena that tracks memory allocations
//              and frees.
//
//  History:    28-Oct-92   IsaacHe   Created
//
//--------------------------------------------------------------------------

#include <pch.cxx>
#pragma hdrstop

#include <alocdbg.hxx>
#include <basetyps.h>
#include <tracheap.h>
#include <wtypes.h>

extern "C"
{
#include <imagehlp.h>
#define MAX_TRANSLATED_LEN 80
}

#include "snapimg.hxx"

//+---------------------------------------------------------------------------
//
//  Function:   RecordStack function(s) below... per processor type
//
//  Synopsis:   Record a stack backtrace into fTrace
//
//  Arguments:  [cFrameSkipped] -- How many stack frames to skip over and
//                                 not record
//              [fTrace]        -- The recorded frames are put in here
//
//  Returns:    A checksum of the stack frames for fast initial lookups
//
//  Notes:      If we can do stack backtracing for whatever processor we're
//              compiling for, CANDOSTACK is #defined.
//
//----------------------------------------------------------------------------

#if defined (_X86_)

static inline DWORD
RecordStack( int cFrameSkipped, void *fTrace[ DEPTHTRACE ] )
{
#define CANDOSTACK

    ULONG sum;
    USHORT cStack;

    // NOTE: RtlCaptureStackBackTrace does not understand FPOs, so routines
    //       that have FPOs will be skipped in the backtrace.  Also, there is
    //       a chance of an access violation for inter-module calls from an
    //       FPO routine, so enclose the call to RtlCaptureStackBackTrace
    //       in a TRY/CATCH.

    __try
    {
        sum = 0;
        cStack = RtlCaptureStackBackTrace( cFrameSkipped + 1,
                                           DEPTHTRACE, fTrace, &sum );
    }
    __except ( EXCEPTION_EXECUTE_HANDLER )
    {
        //
        // Checksum any addresses that may have been collected in the buffer
        //
        for ( cStack = 0, sum = 0; cStack < DEPTHTRACE; cStack++ )
        {
            sum += (ULONG) (fTrace[cStack]);
        }
    }

    return sum;
}

#elif defined( _AMD64_ )

DWORD
RecordStack(
    int cFrameSkipped,
    void *fTrace[DEPTHTRACE]
    )
{
#define CANDOSTACK

    ULONG sum;
    USHORT cStack;

    __try
    {
        sum = 0;
        cStack = RtlCaptureStackBackTrace( cFrameSkipped + 1,
                                           DEPTHTRACE,
                                           &fTrace[0],
                                           &sum );
    } __except ( EXCEPTION_EXECUTE_HANDLER ) {
        //
        // Checksum any addresses that may have been collected in the buffer.
        //
        for (cStack = 0, sum = 0; cStack < DEPTHTRACE; cStack++) {
            sum += (ULONG)((ULONG64)(fTrace[cStack]));
        }
    }

    return sum;
}

#endif  // machine-specific RecordStack implementations
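
// Editor's sketch (not part of the original file): how the callers below
// consume RecordStack -- the returned checksum is a cheap first-pass key, and
// the raw frame array is compared only when the checksums match.  DEPTHTRACE
// comes from alocdbg.hxx; the helper name here is hypothetical.
#if defined( CANDOSTACK )
static inline BOOL IsSameStack( DWORD sumA, void *aTrace[ DEPTHTRACE ],
                                DWORD sumB, void *bTrace[ DEPTHTRACE ] )
{
    // Reject on the checksum first; compare the raw frames only on a match.
    return sumA == sumB &&
           RtlEqualMemory( aTrace, bTrace, DEPTHTRACE * sizeof(void *) );
}
#endif // CANDOSTACK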
#if CIDBG

#pragma optimize( "y", off )

DECLARE_INFOLEVEL( heap );
DECLARE_DEBUG( heap );

#define heapDebugOut(x) heapInlineDebugOut x

DECLARE_INFOLEVEL(Cn);

/*
 * The maximum number of AllocArenaCreate's we expect
 */
static const MAXARENAS = 5;

/*
 * When printing leak dumps, the max number we will print out.  Note, we keep
 * track of all of them, we just don't want to take forever to terminate a
 * process
 */
static const MAXDUMP = 500;

/*
 * The maximum size we'll let any single debug arena get
 */
static const ULONG ARENASIZE = 2048*1024;

/*
 * The unit of growth for the arena holding the AllocArena data.
 * Must be a power of 2
 */
static const ALLOCRECINCR = 128;

AllocArena *AllocArenas[ MAXARENAS + 1 ];

//
// Create an arena for recording allocation statistics.  Return the arena
// pointer to the caller
//
STDAPI_( AllocArena * )
AllocArenaCreate( DWORD memctx, char FAR *comment )
{
    // the first time through, we set up the symbol handler
    static int FirstTime = TRUE;

    if ( FirstTime )
    {
        //
        // Snap to imagehlp dll
        //
        if (!SnapToImageHlp( ))
        {
            heapDebugOut(( DEB_WARN, "ci heap unable to load imagehlp!\n" ));
            return FALSE;
        }

        LocalSymSetOptions( SYMOPT_DEFERRED_LOADS );
        LocalSymInitialize( GetCurrentProcess(), NULL, TRUE );

        FirstTime = FALSE;
    }

    struct AllocArena *paa = NULL;

    if( memctx == MEMCTX_TASK ) {
#if defined( CANDOSTACK )
        if( heapInfoLevel & DEB_WARN ) {
            paa = (struct AllocArena *)VirtualAlloc(
                        NULL, ARENASIZE, MEM_RESERVE, PAGE_NOACCESS );
            if( paa == NULL )
                return NULL;

            paa = (AllocArena *)VirtualAlloc( paa,
                        sizeof(*paa)+(ALLOCRECINCR-1)*sizeof(HeapAllocRec),
                        MEM_COMMIT, PAGE_READWRITE );
        }
        else
#endif
        {
            paa = (struct AllocArena *)calloc( 1, sizeof(*paa) );
        }
    }

    if( paa == NULL )
        return NULL;

    memcpy( paa->Signature,HEAPSIG,sizeof(HEAPSIG));
    if( comment )
        strncpy(paa->comment, comment, sizeof(paa->comment) );

    InitializeCriticalSection( &paa->csExclusive );

    for( int i=0; i < MAXARENAS; i++ )
        if( AllocArenas[i] == 0 ) {
            AllocArenas[i] = paa;
            break;
        }

#if defined( CANDOSTACK )
    if( (heapInfoLevel & DEB_WARN) == 0 )
#endif
    {
        paa->flags.KeepStackTrace = 0;
        paa->AllocRec[0].paa = paa;
        return paa;
    }

#if defined( CANDOSTACK )
    paa->cRecords = ALLOCRECINCR;
    paa->cTotalRecords = ALLOCRECINCR;
    paa->flags.KeepStackTrace = 1;
    return paa;
#endif
}
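
// Editor's sketch (not part of the original file): how a debug heap typically
// obtains its arena.  The comment string shows up again in the leak dump, and
// MEMCTX_TASK is the only memory context this implementation records; the
// function name here is hypothetical.
static inline AllocArena * SketchCreateTrackingArena()
{
    return AllocArenaCreate( MEMCTX_TASK, "CI debug heap (sketch)" );
}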
//+---------------------------------------------------------------------------
//
//  Function:   AllocArenaRecordAlloc
//
//  Synopsis:   Keep a hash table of the stack backtraces of the allocations
//              we've done.
//
//  Arguments:  [paa]   -- Return value from AllocArenaCreate() above
//              [bytes] -- the number of bytes being allocated by the caller.
//                         This value is recorded in the stack backtrace entry.
//
//  Algorithm:  The arena for the AllocArena is created with VirtualAlloc.
//              pAllocArena->cRecords is the index of the next
//              free record.  The first ALLOCRECINCR records are heads
//              of separate lists of the records.
//
//  Returns:    A pointer to the AllocRec structure recording the entry.
//              Can return NULL if we can't record the allocation.
//
//----------------------------------------------------------------------------
STDAPI_( HeapAllocRec FAR * )
AllocArenaRecordAlloc( AllocArena *paa, size_t bytes )
{
    if( paa == NULL )
        return NULL;

    EnterCriticalSection( &paa->csExclusive );

    if( bytes ) {
        paa->cAllocs++;
        paa->cBytesNow += bytes;
        paa->cBytesTotal += bytes;
    } else {
        paa->czAllocs++;
    }

    //
    // Record 'size' in the histogram of requests
    //
    for( int i=31; i>=0; i-- )
        if( bytes & (1<<i) ) {
            ++(paa->Histogram.total[i]);
            if( paa->Histogram.simul[i] < ++(paa->Histogram.now[i]))
                paa->Histogram.simul[i] = paa->Histogram.now[i];
            break;
        }
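
    // Editor's note: the loop above files the request under its highest set
    // bit, so bucket i covers sizes in the range [2^i, 2^(i+1) - 1].  For
    // example, a 24-byte request (binary 11000) has its top bit at i == 4 and
    // lands in the 16..31 bucket.  'total' counts every request ever made in
    // the bucket, while 'simul' tracks the high-water mark of simultaneously
    // live requests, which AllocArenaDump prints at the end of this file.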

    LeaveCriticalSection( &paa->csExclusive );

#if defined( CANDOSTACK )
    if( paa->flags.KeepStackTrace == 0 )
#endif
        return &paa->AllocRec[0];

#if defined( CANDOSTACK )
    DWORD sum;
    struct HeapAllocRec *phar,*hp;
    void *fTrace[ DEPTHTRACE ];

    //
    // See if we find an existing record of this stack backtrace
    //
    memset( fTrace, '\0', sizeof( fTrace ) );
    sum = RecordStack( 2, fTrace );

    hp = &paa->AllocRec[ sum & (ALLOCRECINCR-1) ];

    EnterCriticalSection( &paa->csExclusive );

    for( phar = hp; phar != NULL; phar = phar->u.next )
        if( phar->sum == sum && RtlEqualMemory(phar->fTrace,fTrace,sizeof(fTrace)))
        {
            phar->count++;
            phar->bytes += bytes;
            phar->total.bytes += bytes;
            phar->total.count++;
            phar->paa = paa;
            LeaveCriticalSection( &paa->csExclusive );
            return phar;
        }

    //
    // We have no record of this allocation.  Make one!
    //
    if( hp->total.count && paa->cRecords == paa->cTotalRecords ) {
        //
        // The arena is currently full.  Grow it by ALLOCRECINCR
        //
        AllocArena *npHeap;

        npHeap = (AllocArena *)VirtualAlloc(
                        paa,
                        sizeof(AllocArena)+
                            ((paa->cTotalRecords + ALLOCRECINCR) *
                             sizeof(HeapAllocRec) ),
                        MEM_COMMIT, PAGE_READWRITE );

        if( npHeap != paa )
        {
            if ( 0 == (paa->cMissed % 1000) )
            {
                heapDebugOut(( DEB_WARN,
                               "ci: Missed recording alloc -- couldn't grow arena 0x%x to %u bytes. Error %d\n",
                               paa,
                               ((paa->cTotalRecords + ALLOCRECINCR) * sizeof(HeapAllocRec)),
                               GetLastError() ));
            }
            paa->cMissed++;
            LeaveCriticalSection( &paa->csExclusive );
            return NULL;
        }
        paa->cTotalRecords += ALLOCRECINCR;
    }

    if( hp->total.count == 0 ) {
        phar = hp;
    } else {
        phar = &paa->AllocRec[ paa->cRecords++ ];
        phar->u.next = hp->u.next;
        hp->u.next = phar;
    }

    paa->cPaths++;

    memcpy( phar->fTrace, fTrace, sizeof( fTrace ) );
    phar->count = phar->total.count = 1;
    phar->bytes = phar->total.bytes = bytes;
    phar->sum = sum;
    phar->paa = paa;

    LeaveCriticalSection( &paa->csExclusive );
    return phar;
#endif
}
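
// Editor's sketch (not part of the original file): one way a debug allocator
// can pair AllocArenaRecordAlloc with AllocArenaRecordFree further below.  The
// HeapAllocRec pointer returned here is stashed in a small header in front of
// the user block so the matching free can find it again.  The header layout
// and names are hypothetical, not what the CI debug heap actually uses.
struct SketchBlockHeader
{
    HeapAllocRec * pRec;    // record returned by AllocArenaRecordAlloc
    size_t         cb;      // user-visible size of the block
};

static inline void * SketchTrackedAlloc( AllocArena * paa, size_t cb )
{
    SketchBlockHeader * ph =
        (SketchBlockHeader *) malloc( sizeof(SketchBlockHeader) + cb );

    if ( ph == NULL )
        return NULL;

    ph->pRec = AllocArenaRecordAlloc( paa, cb );    // may be NULL; that's fine
    ph->cb   = cb;
    return ph + 1;                                  // hand back the user block
}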
//+---------------------------------------------------------------------------
//
//  Function:   AllocArenaRecordReAlloc
//
//  Synopsis:   Update the record to reflect the fact that we've ReAlloc'd
//              the memory chunk.
//
//  Arguments:  [vp]       -- Return value from AllocArenaRecordAlloc() above
//              [oldbytes] -- size of the memory before ReAllocation
//              [newbytes] -- new size of the memory
//
//----------------------------------------------------------------------------
STDAPI_( void )
AllocArenaRecordReAlloc( HeapAllocRec FAR *vp, size_t oldbytes, size_t newbytes)
{
    if( vp == NULL )
        return;

    struct AllocArena *paa = vp->paa;

    EnterCriticalSection( &paa->csExclusive );

    paa->cReAllocs++;
    paa->cBytesNow -= oldbytes;
    paa->cBytesNow += newbytes;

    if( newbytes > oldbytes )
        paa->cBytesTotal += newbytes - oldbytes;

    //
    // Take 'oldbytes' out of the histogram of requests
    //
    for( int i=31; i>=0; i-- )
        if( oldbytes & (1<<i) ) {
            --(paa->Histogram.now[i]);
            break;
        }

    //
    // Record 'newbytes' in the histogram of requests
    //
    for( i=31; i>=0; i-- )
        if( newbytes & (1<<i) ) {
            ++(paa->Histogram.total[i]);
            if( paa->Histogram.simul[i] < ++(paa->Histogram.now[i]))
                paa->Histogram.simul[i] = paa->Histogram.now[i];
            break;
        }

#if defined( CANDOSTACK )
    if( paa->flags.KeepStackTrace ) {
        vp->bytes -= oldbytes;
        vp->bytes += newbytes;
        vp->total.count++;
        if( newbytes > oldbytes )
            vp->total.bytes += newbytes;
    }
#endif

    LeaveCriticalSection( &paa->csExclusive );
}
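
// Editor's sketch (not part of the original file), continuing the hypothetical
// SketchTrackedAlloc wrapper above: on realloc the stored record pointer is
// reused, and AllocArenaRecordReAlloc is told both the old and the new size so
// cBytesNow and the histogram stay accurate.
static inline void * SketchTrackedReAlloc( void * pv, size_t cbNew )
{
    if ( pv == NULL )
        return NULL;

    SketchBlockHeader * ph = ((SketchBlockHeader *) pv) - 1;
    size_t cbOld = ph->cb;

    SketchBlockHeader * phNew = (SketchBlockHeader *)
        realloc( ph, sizeof(SketchBlockHeader) + cbNew );

    if ( phNew == NULL )
        return NULL;

    AllocArenaRecordReAlloc( phNew->pRec, cbOld, cbNew );
    phNew->cb = cbNew;
    return phNew + 1;
}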
//+---------------------------------------------------------------------------
//
//  Function:   AllocArenaRecordFree
//
//  Synopsis:   Caller has freed memory -- keep accounting up to date
//
//  Arguments:  [vp]    -- Value returned by AllocArenaRecordAlloc() above
//              [bytes] -- The number of bytes being freed
//
//  Algorithm:  AllocRec structures, once allocated, are never actually
//              freed back to the Hash memory arena.  This helps us
//              understand historical use of the heap.
//
//----------------------------------------------------------------------------
STDAPI_( void )
AllocArenaRecordFree( HeapAllocRec FAR *vp, size_t bytes )
{
    if( vp == NULL )
        return;

    struct AllocArena *paa = vp->paa;

    EnterCriticalSection( &paa->csExclusive );

    //
    // Record this free in the histogram
    //
    for( int i=31; i>=0; i-- )
        if( bytes & (1<<i) ) {
            --(paa->Histogram.now[i]);
            break;
        }

    paa->cFrees++;
    paa->cBytesNow -= bytes;

#if defined( CANDOSTACK )
    if( paa->flags.KeepStackTrace ) {
        vp->count--;
        vp->bytes -= bytes;
    }
#endif

    LeaveCriticalSection( &paa->csExclusive );
}
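
// Editor's sketch (not part of the original file), completing the hypothetical
// wrapper: the record saved at allocation time goes back to
// AllocArenaRecordFree together with the block size, which is what keeps
// cBytesNow and the per-stack counts balanced for the leak dump.
static inline void SketchTrackedFree( void * pv )
{
    if ( pv == NULL )
        return;

    SketchBlockHeader * ph = ((SketchBlockHeader *) pv) - 1;
    AllocArenaRecordFree( ph->pRec, ph->cb );
    free( ph );
}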
STDAPI_( void )
AllocArenaDumpRecord( HeapAllocRec FAR *bp )
{
#if defined( CANDOSTACK )
    char achBuffer[ MAX_TRANSLATED_LEN ], *p;
    static int FirstTime = TRUE;

    // make sure we print the nice undecorated names
    if ( FirstTime )
    {
        LocalSymSetOptions( SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS );
        FirstTime = FALSE;
    }

    heapDebugOut((DEB_WARN, "*** %d allocs, %u bytes:\n",
                  bp->count, bp->bytes ));

    HANDLE hProcess = GetCurrentProcess();

    for( int j=0; j<DEPTHTRACE && bp->fTrace[j]; j++ )
    {
        BYTE symbolInfo[sizeof(IMAGEHLP_SYMBOL) + MAX_TRANSLATED_LEN];
        PIMAGEHLP_SYMBOL psym = (PIMAGEHLP_SYMBOL) &symbolInfo;
        DWORD_PTR dwDisplacement;

        psym->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL);
        psym->MaxNameLength = MAX_TRANSLATED_LEN;

        if ( LocalSymGetSymFromAddr( hProcess,
                                     (ULONG_PTR)(bp->fTrace[j]),
                                     &dwDisplacement,
                                     psym ) )
        {
            if ( LocalSymUnDName( psym, achBuffer, MAX_TRANSLATED_LEN ) )
            {
                heapDebugOut((DEB_WARN,
                              " %s+0x%p (0x%p)\n",
                              achBuffer,
                              dwDisplacement,
                              (ULONG_PTR)(bp->fTrace[j]) ));
            }
            else
            {
                heapDebugOut(( DEB_WARN,
                               " %s+%#x (%#x)\n",
                               psym->Name,
                               dwDisplacement,
                               bp->fTrace[j] ));
            }
        }
        else
        {
            heapDebugOut(( DEB_WARN,
                           " 0x%x (symbolic name unavailable)\n",
                           (ULONG_PTR)bp->fTrace[j] ));
        }
    }
#endif
}

extern "C" ULONG DbgPrint( PCH Format, ... );

STDAPI_( void )
AllocArenaDump( AllocArena *paa )
{
    if( paa == NULL ) {
        for( int i = 0; i < MAXARENAS && AllocArenas[i]; i++ )
            AllocArenaDump( AllocArenas[i] );
        return;
    }

    EnterCriticalSection( &paa->csExclusive );

    char *cmdline = GetCommandLineA();
    if( cmdline == NULL )
        cmdline = "???";

    HeapAllocRec *bp = paa->AllocRec;
    HeapAllocRec *ep = bp + paa->cRecords;

    if( paa->cBytesNow )
        heapDebugOut((DEB_WARN, "***** CiExcept.Lib: %u bytes leaked mem for %s in '%s'\n",
                      paa->cBytesNow, paa->comment, cmdline ));

    // always dump leaks
    ULONG oldLevel = heapInfoLevel;
    heapInfoLevel |= DEB_TRACE;

#if defined( CANDOSTACK )
    if( paa->cBytesNow && paa->flags.KeepStackTrace )
    {
        int cleaks = 0;

        for( ; bp < ep; bp++) {
            if( bp->count )
                ++cleaks;
        }

        if( cleaks ) {
            heapDebugOut((DEB_WARN, "***** %s %u MEM LEAKS\n",
                          paa->comment, cleaks ));

            if( heapInfoLevel & DEB_TRACE ) {
                HeapAllocRec *bp;
                UINT maxdump = MAXDUMP;

                for( bp = paa->AllocRec; maxdump && bp<ep; bp++)
                    if( bp->count ) {
                        heapDebugOut((DEB_TRACE, "\n"));
                        AllocArenaDumpRecord( bp );
                        maxdump--;
                    }
            } else if( cleaks )
                heapDebugOut((DEB_WARN, "** Set query!heapInfoLevel to x707 for leak backtrace\n"));
        }
    }
#endif

    heapInfoLevel = oldLevel;

    if( (heapInfoLevel & DEB_TRACE) && paa->cBytesTotal )
    {
        heapDebugOut((DEB_TRACE,"\n"));
        heapDebugOut((DEB_TRACE,
                      "'%s' Memory Stats: %u allocations, %u frees, %u missed\n",
                      cmdline, paa->cAllocs, paa->cFrees, paa->cMissed ));

        if( paa->czAllocs )
            heapDebugOut((DEB_TRACE,
                          "\t%u zero allocs\n", paa->czAllocs ));

        // i64s are not handled by debugout
        char acBuf[100];
        sprintf( acBuf, "\t%I64u bytes allocated\n", paa->cBytesTotal );
        heapDebugOut((DEB_TRACE, acBuf ));

        heapDebugOut((DEB_TRACE,
                      "*** Histogram of Allocated Mem Sizes ***\n"));
        heapDebugOut((DEB_TRACE,
                      " Min Max Tot Simul\n" ));

        for( int i=0; i < 32; i++ )
        {
            if( paa->Histogram.total[i] )
            {
                heapDebugOut((DEB_TRACE,
                              "%9u -> %9u\t%9u\t%9u\n",
                              1<<i, (1<<(i+1))-1,
                              paa->Histogram.total[i],
                              paa->Histogram.simul[i] ));
            }
        }
    }

    LeaveCriticalSection( &paa->csExclusive );
}
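
// Editor's sketch (not part of the original file): the leak report is normally
// requested at process shutdown.  Passing NULL walks every arena registered in
// AllocArenas[], so a cleanup path only needs this one call; the function name
// is hypothetical.
static inline void SketchDumpAllArenas()
{
    AllocArenaDump( NULL );     // NULL == dump every registered arena
}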
#endif // CIDBG

CStaticMutexSem g_mtxGetStackTrace;

void GetStackTrace( char * pszBuf, ULONG ccMax )
{
    // Trial and error shows that Imagehlp isn't thread-safe
    CLock lock( g_mtxGetStackTrace );

    if ( 0 == pszBuf || ccMax == 0 )
        return;

    *pszBuf = 0;

    // If we cannot get to IMAGEHLP then no stack traces are available
    if (!SnapToImageHlp( ))
        return;

    char * pszCurrent = pszBuf;

    TRY
    {
#if defined( CANDOSTACK )
        static int FirstTime = TRUE;

        // make sure we print the nice undecorated names
        if ( FirstTime )
        {
            LocalSymSetOptions( SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS );
            FirstTime = FALSE;
        }

        //
        // Determine the current stack.
        //
        void *fTrace[ DEPTHTRACE ];

        //
        // See if we find an existing record of this stack backtrace
        //
        memset( fTrace, '\0', sizeof( fTrace ) );
        DWORD sum = RecordStack( 2, fTrace );
        if ( 0 == sum )
            return;

        HANDLE hProcess = GetCurrentProcess();

        for( int j=0; j<DEPTHTRACE && fTrace[j]; j++ )
        {
            BYTE symbolInfo[sizeof(IMAGEHLP_SYMBOL) + MAX_TRANSLATED_LEN];
            PIMAGEHLP_SYMBOL psym = (PIMAGEHLP_SYMBOL) &symbolInfo;
            DWORD_PTR dwDisplacement;

            psym->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL);
            psym->MaxNameLength = MAX_TRANSLATED_LEN;

            char szTempBuf[MAX_TRANSLATED_LEN+256];

            if ( LocalSymGetSymFromAddr( hProcess,
                                         (ULONG_PTR)fTrace[j],
                                         &dwDisplacement,
                                         psym ) )
                sprintf( szTempBuf, " %s+0x%p (0x%p)\n",
                         psym->Name, dwDisplacement, fTrace[j] );
            else
                sprintf( szTempBuf, " 0x%p\n", (ULONG_PTR)fTrace[j] );

            ULONG cc = strlen(szTempBuf);
            if ( cc+pszCurrent >= pszBuf+ccMax )
                break;

            RtlCopyMemory( pszCurrent, szTempBuf, cc );
            pszCurrent += cc;
        }
#endif
    }
    CATCH( CException, e )
    {
        pszBuf[0] = 0;
    }
    END_CATCH

    *pszCurrent = 0;
}
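
// Editor's sketch (not part of the original file): GetStackTrace formats the
// current call stack, one "symbol+offset (address)" line per frame, into a
// caller-supplied buffer and stops quietly when the buffer runs out.  A caller
// just hands it a local buffer; the size and function name here are arbitrary.
static inline void SketchLogCurrentStack()
{
    char achStack[ 2048 ];

    GetStackTrace( achStack, sizeof( achStack ) );

    // achStack now holds a newline-separated backtrace (empty if imagehlp
    // could not be loaded); pass it to whatever logging is in use.
}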