Counter Strike : Global Offensive Source Code
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1626 lines
58 KiB

  1. //========= Copyright � 1996-2002, Valve LLC, All rights reserved. ============
  2. //
  3. // Purpose: Actual code for our d3d main interface wrapper
  4. //
  5. // $NoKeywords: $
  6. //=============================================================================
  7. #include "tier0/memdbgoff.h"
  8. #include "winlite.h"
  9. typedef __int16 int16;
  10. typedef unsigned __int16 uint16;
  11. typedef __int32 int32;
  12. typedef unsigned __int32 uint32;
  13. typedef __int64 int64;
  14. typedef unsigned __int64 uint64;
  15. typedef char tchar;
  16. #define DEBUG_ENABLE_ERROR_STREAM 0
  17. #define DEBUG_ENABLE_DETOUR_RECORDING 0
  18. // Suppress having to include of tier0 Assert functions
  19. // #define DBG_H
  20. // #define Assert( ... ) ( (void) 0 )
  21. // #define Msg( ... ) ( (void) 0 )
  22. // #define AssertMsg( ... ) ( (void) 0 )
  23. // #define AssertMsg3( ... ) ( (void) 0 )
  24. #include "tier0/basetypes.h"
  25. // #include "tier0/threadtools.h"
  26. #include "detourfunc.h"
  27. #include "disassembler.h"
  28. #include <map>
  29. #include <vector>
  30. #include <set>
  31. // Define this to do verbose logging of detoured calls
  32. //#define DEBUG_LOG_DETOURED_CALLS
#if DEBUG_ENABLE_ERROR_STREAM
// We dump error messages that we want the steam client to be able to read here
#pragma pack( push, 1 )
// Fixed-layout record written into the shared-memory error stream.
struct ErrorStreamMsg_t
{
uint32 unStrLen;      // number of valid bytes in rgchError (only this many are sent)
char rgchError[1024]; // error message text
};
#pragma pack( pop )
// Created lazily the first time a detour failure is reported (see HookFuncInternal).
CSharedMemStream *g_pDetourErrorStream = NULL;
// Real (empty) varargs function in this build so Log() calls still type-check.
static inline void Log( char const *, ... ) {}
#else
// Normal builds: all Log() calls compile away to nothing.
#define Log( ... ) ( (void) 0 )
#endif
#pragma pack( push, 1 ) // very important as we use structs to pack asm instructions together
// Structure that we pack ASM jump code into for hooking function calls.
// x64 absolute indirect jump: FF 25 [rel32 = 0] "jmp qword ptr [rip+0]",
// with the 8-byte target address stored immediately after the instruction.
typedef struct
{
BYTE m_JmpOpCode[2];   // 0xFF 0x25 = jmp ptr qword
DWORD m_JumpPtrOffset; // offset to jump to the qword ptr (0)
uint64 m_QWORDTarget;  // address to jump to
} JumpCodeDirectX64_t;
// This relative jump is valid in x64 and x86 (5 bytes total; reach is +/-2GB)
typedef struct
{
BYTE m_JmpOpCode;   // 0xE9 = near jmp( dword )
int32 m_JumpOffset; // offset to jump to
} JumpCodeRelative_t;
#pragma pack( pop )
// Structure to save information about hooked functions that we may need later (ie, for unhooking)
#define MAX_HOOKED_FUNCTION_PREAMBLE_LENGTH 48
typedef struct
{
BYTE *m_pFuncHookedAddr;         // address of the function whose preamble we overwrote
BYTE *m_pTrampolineRealFunc;     // trampoline address holding the relocated preamble; callable as "the original function"
BYTE *m_pTrampolineEntryPoint;   // trampoline entry the hooked function jumps to (which jumps on to the hook)
int32 m_nOriginalPreambleLength; // number of original bytes we copied out of the function
BYTE m_rgOriginalPreambleCode[ MAX_HOOKED_FUNCTION_PREAMBLE_LENGTH ]; // the saved original bytes, for restore on unhook
} HookData_t;
  72. class CDetourLock
  73. {
  74. public:
  75. CDetourLock()
  76. {
  77. InitializeCriticalSection( &m_cs );
  78. }
  79. ~CDetourLock()
  80. {
  81. DeleteCriticalSection( &m_cs );
  82. }
  83. void Lock()
  84. {
  85. EnterCriticalSection( &m_cs );
  86. }
  87. void Unlock()
  88. {
  89. LeaveCriticalSection( &m_cs );
  90. }
  91. private:
  92. CRITICAL_SECTION m_cs;
  93. // Private and unimplemented to prevent copying
  94. CDetourLock( const CDetourLock& );
  95. CDetourLock& operator=( const CDetourLock& );
  96. };
  97. class GetLock
  98. {
  99. public:
  100. GetLock( CDetourLock& lock )
  101. : m_lock( lock )
  102. {
  103. m_lock.Lock();
  104. }
  105. ~GetLock()
  106. {
  107. m_lock.Unlock();
  108. }
  109. private:
  110. GetLock( const GetLock& );
  111. GetLock& operator=( const GetLock& );
  112. CDetourLock& m_lock;
  113. };
// Guards g_mapHookedFunctions (and the already-reported failure set below).
CDetourLock g_mapLock;
// todo: add marker here so we can find this from VAC
// Set to keep track of all the functions we have hooked
// Keyed by hooked-function address; value holds everything needed to unhook.
std::map<void *, HookData_t> g_mapHookedFunctions;
#if DEBUG_ENABLE_ERROR_STREAM
// Set to keep track of functions we already reported failures hooking
std::set<void * > g_mapAlreadyReportedDetourFailures;
#endif
// We need at most this many bytes in our allocated trampoline regions, see comments below on HookFunc:
// - 14 (5 on x86) for jump to real detour address
// - 32 for copied code (really should be less than this, 5-12?, but leave some space)
// - 14 (5 on x86) for jump back into body of real function after copied code
#define BYTES_FOR_TRAMPOLINE_ALLOCATION 64
// todo: add some way to find and interpret these from VAC
// Tracking for allocated trampoline memory ready to be used by future hooks.
// All three containers below are guarded by g_TrampolineRegionMutex (declared further down).
std::vector< void *> g_vecTrampolineRegionsReady;  // free BYTES_FOR_TRAMPOLINE_ALLOCATION-sized slots awaiting a hook
std::vector< void *> g_vecTrampolinesAllocated;    // whole pages we allocated; re-protected by RegregisterTrampolines
std::set< const void * > g_setBlacklistedTrampolineSearchAddresses; // addresses near which a page search already failed
  132. class CTrampolineRegionMutex
  133. {
  134. public:
  135. CTrampolineRegionMutex()
  136. {
  137. m_hMutex = ::CreateMutexA( NULL, FALSE, NULL );
  138. }
  139. bool BLock( DWORD dwTimeout )
  140. {
  141. if( WaitForSingleObject( m_hMutex, dwTimeout ) != WAIT_OBJECT_0 )
  142. {
  143. return false;
  144. }
  145. return true;
  146. }
  147. void Release()
  148. {
  149. ReleaseMutex( m_hMutex );
  150. }
  151. private:
  152. HANDLE m_hMutex;
  153. // Private and unimplemented to prevent copying
  154. CTrampolineRegionMutex( const CTrampolineRegionMutex& );
  155. CTrampolineRegionMutex& operator=( const CTrampolineRegionMutex& );
  156. };
  157. CTrampolineRegionMutex g_TrampolineRegionMutex;
  158. static inline DWORD GetSystemPageSize()
  159. {
  160. static DWORD dwSystemPageSize = 0;
  161. if ( !dwSystemPageSize )
  162. {
  163. SYSTEM_INFO sysInfo;
  164. ::GetSystemInfo( &sysInfo );
  165. dwSystemPageSize = sysInfo.dwPageSize;
  166. Log( "System page size: %u\n", dwSystemPageSize );
  167. }
  168. return dwSystemPageSize;
  169. }
  170. //-----------------------------------------------------------------------------
  171. // Purpose: Function to find an existing trampoline region we've allocated near
  172. // the area we need it.
  173. //-----------------------------------------------------------------------------
  174. BYTE * GetTrampolineRegionNearAddress( const void *pAddressToFindNear )
  175. {
  176. if ( !g_TrampolineRegionMutex.BLock( 1000 ) )
  177. Log( "Couldn't get trampoline region lock, will continue possibly unsafely.\n" );
  178. BYTE *pTrampolineAddress = NULL;
  179. // First, see if we can find a trampoline address to use in range in our already allocated set
  180. std::vector<void *>::iterator iter;
  181. for( iter = g_vecTrampolineRegionsReady.begin(); iter != g_vecTrampolineRegionsReady.end(); ++iter )
  182. {
  183. int64 qwAddress = (int64)(*iter);
  184. int64 qwOffset = qwAddress - (int64)pAddressToFindNear;
  185. if ( qwOffset < 0 && qwOffset > LONG_MIN || qwOffset > 0 && qwOffset+BYTES_FOR_TRAMPOLINE_ALLOCATION < LONG_MAX )
  186. {
  187. pTrampolineAddress = (BYTE*)qwAddress;
  188. //Log( "Using already allocated trampoline block at %I64d, distance is %I64d\n", qwAddress, qwOffset );
  189. g_vecTrampolineRegionsReady.erase( iter );
  190. break;
  191. }
  192. }
  193. g_TrampolineRegionMutex.Release();
  194. return pTrampolineAddress;
  195. }
  196. //-----------------------------------------------------------------------------
  197. // Purpose: Return trampoline address for use, maybe we failed detours and didn't end up using
  198. //-----------------------------------------------------------------------------
  199. void ReturnTrampolineAddress( BYTE *pTrampolineAddress )
  200. {
  201. if ( !g_TrampolineRegionMutex.BLock( 1000 ) )
  202. Log( "Couldn't get trampoline region lock, will continue possibly unsafely.\n" );
  203. g_vecTrampolineRegionsReady.push_back( pTrampolineAddress );
  204. g_TrampolineRegionMutex.Release();
  205. }
  206. //-----------------------------------------------------------------------------
  207. // Purpose: Function to allocate new trampoline regions near a target address, call
  208. // only if GetTrampolineRegionNearAddress doesn't return you any existing region to use.
  209. //-----------------------------------------------------------------------------
  210. void AllocateNewTrampolineRegionsNearAddress( const void *pAddressToAllocNear )
  211. {
  212. if ( !g_TrampolineRegionMutex.BLock( 1000 ) )
  213. Log( "Couldn't get trampoline region lock, will continue possibly unsafely.\n" );
  214. // Check we didn't blacklist trying to allocate regions near this address because no memory could be found already,
  215. // otherwise we can keep trying and trying and perf is awful
  216. if ( g_setBlacklistedTrampolineSearchAddresses.find( pAddressToAllocNear ) != g_setBlacklistedTrampolineSearchAddresses.end() )
  217. {
  218. g_TrampolineRegionMutex.Release();
  219. return;
  220. }
  221. // Get handle to process
  222. HANDLE hProc = GetCurrentProcess();
  223. // First, need to know system page size, determine now if we haven't before
  224. DWORD dwSystemPageSize = GetSystemPageSize();
  225. BYTE * pTrampolineAddress = NULL;
  226. if ( pAddressToAllocNear == NULL )
  227. {
  228. //Log( "Allocating trampoline page at random location\n" );
  229. pTrampolineAddress = (BYTE *)VirtualAllocEx( hProc, NULL, dwSystemPageSize, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE );
  230. if ( !pTrampolineAddress )
  231. {
  232. Log ( "Failed allocating memory during hooking: %d\n", GetLastError() );
  233. }
  234. else
  235. {
  236. g_vecTrampolinesAllocated.push_back( pTrampolineAddress );
  237. }
  238. }
  239. else
  240. {
  241. //Log( "Allocating trampoline page at targeted location\n" );
  242. // Ok, we'll search for the closest page that is free and within +/- 2 gigs from our code.
  243. int64 qwPageToOffsetFrom = (int64)pAddressToAllocNear - ( (int64)pAddressToAllocNear % dwSystemPageSize );
  244. int64 qwPageToTryNegative = qwPageToOffsetFrom - dwSystemPageSize;
  245. int64 qwPageToTryPositive = qwPageToOffsetFrom + dwSystemPageSize;
  246. bool bLoggedFailures = false;
  247. while ( !pTrampolineAddress )
  248. {
  249. int64 *pqwPageToTry;
  250. bool bDirectionPositive = false;
  251. if ( qwPageToOffsetFrom - qwPageToTryNegative < qwPageToTryPositive - qwPageToOffsetFrom )
  252. {
  253. pqwPageToTry = &qwPageToTryNegative;
  254. }
  255. else
  256. {
  257. pqwPageToTry = &qwPageToTryPositive;
  258. bDirectionPositive = true;
  259. }
  260. //Log( "Real func at: %I64d, checking %I64d\n", (int64)pFuncToHook, (*pqwPageToTry) );
  261. MEMORY_BASIC_INFORMATION memInfo;
  262. if ( !VirtualQuery( (void *)(*pqwPageToTry), &memInfo, sizeof( memInfo ) ) )
  263. {
  264. if ( !bLoggedFailures )
  265. {
  266. Log( "VirtualQuery failures\n" );
  267. bLoggedFailures = true;
  268. }
  269. }
  270. else
  271. {
  272. if ( memInfo.State == MEM_FREE )
  273. {
  274. pTrampolineAddress = (BYTE *)VirtualAllocEx( hProc, (void*)(*pqwPageToTry), dwSystemPageSize, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE );
  275. if ( !pTrampolineAddress )
  276. {
  277. // Skip this page, another thread may have alloced it while we tried or something, just find the next usuable one
  278. if ( bDirectionPositive )
  279. qwPageToTryPositive += dwSystemPageSize;
  280. else
  281. qwPageToTryNegative -= dwSystemPageSize;
  282. continue;
  283. }
  284. g_vecTrampolinesAllocated.push_back( pTrampolineAddress );
  285. break;
  286. }
  287. }
  288. // Increment page and try again, we can skip ahead RegionSize bytes because
  289. // we know all pages in that region have identical info.
  290. if ( bDirectionPositive )
  291. qwPageToTryPositive += memInfo.RegionSize;
  292. else
  293. qwPageToTryNegative -= memInfo.RegionSize;
  294. if ( qwPageToTryPositive + dwSystemPageSize >= (int64)pAddressToAllocNear + LONG_MAX
  295. && qwPageToTryNegative <= (int64)pAddressToAllocNear - LONG_MIN )
  296. {
  297. Log ( "Could not find page for trampoline in +/- 2GB range of function to hook\n" );
  298. g_setBlacklistedTrampolineSearchAddresses.insert( pAddressToAllocNear );
  299. break;
  300. }
  301. }
  302. }
  303. // If we succeeded allocating a trampoline page, then track the extra pages for later use
  304. if ( pTrampolineAddress )
  305. {
  306. // Track the extra space in the page for future use
  307. BYTE *pNextTrampolineAddress = pTrampolineAddress;
  308. while ( pNextTrampolineAddress <= pTrampolineAddress+dwSystemPageSize-BYTES_FOR_TRAMPOLINE_ALLOCATION )
  309. {
  310. g_vecTrampolineRegionsReady.push_back( pNextTrampolineAddress );
  311. pNextTrampolineAddress += BYTES_FOR_TRAMPOLINE_ALLOCATION;
  312. }
  313. }
  314. g_TrampolineRegionMutex.Release();
  315. return;
  316. }
  317. //-----------------------------------------------------------------------------
  318. // Purpose: RegregisterTrampolines
  319. // when we first allocated these trampolines, our VirtualAlloc/Protect
  320. // monitoring wasnt set up, just re-protect them and that will get them
  321. // recorded so we know they are ours
  322. // could use this code to remove write permission from them
  323. // except that we will redo a bunch of hooking on library load ( PerformHooking )
  324. //-----------------------------------------------------------------------------
  325. void RegregisterTrampolines()
  326. {
  327. if ( !g_TrampolineRegionMutex.BLock( 1000 ) )
  328. Log( "Couldn't get trampoline region lock, will continue possibly unsafely.\n" );
  329. // First, need to know system page size, determine now if we haven't before
  330. DWORD dwSystemPageSize = GetSystemPageSize();
  331. std::vector<void *>::iterator iter;
  332. for( iter = g_vecTrampolinesAllocated.begin(); iter != g_vecTrampolinesAllocated.end(); ++iter )
  333. {
  334. DWORD flOldProtect;
  335. VirtualProtect( *iter, dwSystemPageSize, PAGE_EXECUTE_READWRITE, &flOldProtect );
  336. }
  337. g_TrampolineRegionMutex.Release();
  338. }
  339. //-----------------------------------------------------------------------------
  340. // Purpose: Check if a given address range is fully covered by executable pages
  341. //-----------------------------------------------------------------------------
  342. static bool BIsAddressRangeExecutable( const void *pAddress, size_t length )
  343. {
  344. MEMORY_BASIC_INFORMATION memInfo;
  345. if ( !VirtualQuery( (const void *)pAddress, &memInfo, sizeof( memInfo ) ) )
  346. return false;
  347. if ( memInfo.State != MEM_COMMIT )
  348. return false;
  349. if ( memInfo.Protect != PAGE_EXECUTE && memInfo.Protect != PAGE_EXECUTE_READ &&
  350. memInfo.Protect != PAGE_EXECUTE_READWRITE && memInfo.Protect != PAGE_EXECUTE_WRITECOPY )
  351. {
  352. return false;
  353. }
  354. uintp lastAddress = (uintp)pAddress + length - 1;
  355. uintp lastInRegion = (uintp)memInfo.BaseAddress + memInfo.RegionSize - 1;
  356. if ( lastAddress <= lastInRegion )
  357. return true;
  358. // Start of this address range is executable. But what about subsequent regions?
  359. return BIsAddressRangeExecutable( (const void*)(lastInRegion + 1), lastAddress - lastInRegion );
  360. }
  361. //-----------------------------------------------------------------------------
  362. // Purpose: Hook a function (at pRealFunctionAddr) causing calls to it to instead call
  363. // our own function at pHookFunctionAddr. We'll return a pointer to code that can be
  364. // called as the original function by our hook code and will have the original unhooked
  365. // behavior.
  366. //
// The nJumpsToFollowBeforeHooking parameter determines what we will do if we find an E9
  368. // or FF 25 jmp instruction at the beginning of the code to hook. This probably means the
  369. // function is already hooked. We support both hooking the original address and chaining
// to the old hook then, or alternatively following the jump and hooking its target. Sometimes
  371. // this follow then hook is preferable because other hook code may not chain nicely and may
  372. // overwrite our hook if we try to put it first (ie, FRAPS & ATI Tray Tools from Guru3d)
  373. //-----------------------------------------------------------------------------
#pragma warning( push )
#pragma warning( disable : 4127 ) // conditional expression is constant, from sizeof( intp ) checks
// Worker shared by HookFunc/HookFuncSafe; defined further below.
static bool HookFuncInternal( BYTE *pRealFunctionAddr, const BYTE *pHookFunctionAddr, void ** ppRealFunctionAdr, BYTE **ppTrampolineAddressToReturn, int nJumpsToFollowBeforeHooking );
// Convenience wrapper around HookFuncSafe(): on success returns the trampoline
// pointer (callable as the original, unhooked function); on failure returns NULL.
void * HookFunc( BYTE *pRealFunctionAddr, const BYTE *pHookFunctionAddr, int nJumpsToFollowBeforeHooking /* = 0 */ )
{
void *pTrampolineAddr = NULL;
if ( !HookFuncSafe( pRealFunctionAddr, pHookFunctionAddr, (void **)&pTrampolineAddr, nJumpsToFollowBeforeHooking ) )
return NULL;
return pTrampolineAddr;
}
  384. bool HookFuncSafe( BYTE *pRealFunctionAddr, const BYTE *pHookFunctionAddr, void ** ppRealFunctionAdr, int nJumpsToFollowBeforeHooking /* = 0 */ )
  385. {
  386. // If hook setting fails, then the trampoline is not being used, and can be returned to our pool
  387. BYTE *pTrampolineAddressToReturn = NULL;
  388. bool bRet = HookFuncInternal( pRealFunctionAddr, pHookFunctionAddr, ppRealFunctionAdr, &pTrampolineAddressToReturn, nJumpsToFollowBeforeHooking );
  389. if ( pTrampolineAddressToReturn )
  390. {
  391. ReturnTrampolineAddress( pTrampolineAddressToReturn );
  392. }
  393. return bRet;
  394. }
  395. // We detour with the following setup:
  396. //
  397. // 1) Allocate some memory within 2G range from the function we are detouring (we search with VirtualQuery to find where to alloc)
  398. // 2) Place a relative jump E9 opcode (only 5 bytes) at the beginning of the original function to jump to our allocated memory
  399. // 3) At the start of our allocated memory we place an absolute jump (FF 25, 6 bytes on x86, 14 on x64 because instead of being
  400. // an absolute dword ptr, it has a relative offset to a qword ptr). This jump goes to our hook function we are detouring to,
  401. // the E9 at the start of the original function jumps to this, then this goes to the real function which may be more than 2G away.
  402. // 4) We copy the original 5 bytes + slop for opcode boundaries into the remaining space in our allocated region, after that we place a FF 25 jump
  403. // jump back to the original function 6 bytes in (or a little more if the opcodes didn't have a boundary at 5 bytes).
  404. // 5) We return a ptr to the original 5 bytes we copied's new address and that is the "real function ptr" that our hook function can call
  405. // to call the original implementation.
  406. //
  407. // This method is good because it works with just 5 bytes overwritten in the original function on both x86 and x64, the only tricky part
  408. // is that we have to search for a page we can allocate within 2 gigabytes of the function address and if we can't find one we can fail
  409. // (which would only happen on x64, and doesn't really happen in practice). If it did start to happen more we could fallback to trying to
  410. // put the full 14 byte FF 25 x64 jmp at the start of the function, but many functions are too short or make calls that can't be easily relocated,
// or have other code jumping into them at less than 14 bytes, so that's not very safe.
  412. static bool HookFuncInternal( BYTE *pRealFunctionAddr, const BYTE *pHookFunctionAddr, void ** ppRealFunctionAdr, BYTE **ppTrampolineAddressToReturn, int nJumpsToFollowBeforeHooking )
  413. {
  414. if ( !pRealFunctionAddr )
  415. {
  416. Log( "Aborting HookFunc because pRealFunctionAddr is null\n" );
  417. return false;
  418. }
  419. if ( !pHookFunctionAddr )
  420. {
  421. Log( "Aborting HookFunc because pHookFunctionAddr is null\n" );
  422. return false;
  423. }
  424. // Make sure we aren't double-hooking a function, in case someone else installed a hook
  425. // after ours which made us think that our hook was removed when it was really just relocated.
  426. // UnhookFunc will short-circuit the trampoline and bypass our old hook even if it can't
  427. // fully undo the jump into our trampoline code.
  428. UnhookFunc( pRealFunctionAddr, false /*bLogFailures*/ );
  429. HANDLE hProc = GetCurrentProcess();
  430. BYTE *pFuncToHook = pRealFunctionAddr;
  431. // See if there is already a hook in place on this code and we have been instructed to follow it and hook
  432. // the target instead.
  433. while( nJumpsToFollowBeforeHooking > 0 )
  434. {
  435. if ( pFuncToHook[0] == 0xEB )
  436. {
  437. int8 * pOffset = (int8 *)(pFuncToHook + 1);
  438. pFuncToHook = (BYTE*)((intp)pFuncToHook + 2 + *pOffset);
  439. }
  440. else if ( pFuncToHook[0] == 0xE9 )
  441. {
  442. // Make sure the hook isn't pointing at the same place we are going to detour to (which would mean we've already hooked)
  443. int32 * pOffset = (int32 *)(pFuncToHook+1);
  444. pFuncToHook = (BYTE*)((intp)pFuncToHook + 5 + *pOffset);
  445. }
  446. #ifdef _WIN64
  447. else if ( pFuncToHook[0] == 0xFF && pFuncToHook[1] == 0x25 )
  448. {
  449. // On x64 we have a signed 32-bit relative offset to an absolute qword ptr
  450. int32 * pOffset = (int32 *)(pFuncToHook+2);
  451. intp *pTarget = (intp*)(pFuncToHook + 6 + *pOffset);
  452. pFuncToHook = (BYTE*)*pTarget;
  453. }
  454. #endif
  455. else
  456. {
  457. // Done, no more chained jumps
  458. break;
  459. }
  460. --nJumpsToFollowBeforeHooking;
  461. }
  462. // If the function pointer isn't marked as executable code, or there isn't enough room for our jump, warn
  463. if ( !BIsAddressRangeExecutable( pFuncToHook, sizeof( JumpCodeRelative_t ) ) )
  464. {
  465. Log( "Warning: hook target starting at %#p covers a non-executable page\n", (void*)pFuncToHook );
  466. // non-fatal, as system may not be enforcing Data Execution Prevention / hardware NX-bit.
  467. }
  468. // Special blacklist: if the function begins with an unconditional 2-byte jump, it is unhookable!
  469. // If this becomes necessary, we could follow the jump to see where it goes, and hook there instead.
  470. if ( (BYTE) pFuncToHook[0] == 0xEB )
  471. {
  472. Log( "Warning: hook target starting at %#p begins with uncoditional 2-byte jump, skipping\n", (void*)pFuncToHook );
  473. return false;
  474. }
  475. // This struct will get reused a bunch to compose jumps
  476. JumpCodeRelative_t sRelativeJumpCode;
  477. sRelativeJumpCode.m_JmpOpCode = 0xE9;
  478. //sRelativeJumpCode.m_JumpOffset = ...;
  479. // On X64, we use this struct for jumps > +/-2GB
  480. JumpCodeDirectX64_t sDirectX64JumpCode;
  481. sDirectX64JumpCode.m_JmpOpCode[0] = 0xFF;
  482. sDirectX64JumpCode.m_JmpOpCode[1] = 0x25;
  483. sDirectX64JumpCode.m_JumpPtrOffset = 0;
  484. //sDirectX64JumpCode.m_QWORDTarget = ...;
  485. // We need to figure out if we recognize the preamble for the
  486. // current function so we can match it up with a good hook code length
  487. int32 nHookCodeLength = 0;
  488. BYTE *pOpcode = pFuncToHook;
  489. bool bParsedRETOpcode = false;
  490. BYTE rgCopiedCode[ MAX_HOOKED_FUNCTION_PREAMBLE_LENGTH ];
  491. // we just need a minimum of 5 bytes for our hook code
  492. while ( nHookCodeLength < sizeof( JumpCodeRelative_t ) )
  493. {
  494. int nLength;
  495. EOpCodeOffsetType eOffsetType;
  496. bool bKnown = ParseOpcode( pOpcode, nLength, eOffsetType );
  497. if ( bKnown )
  498. {
  499. // Make sure that if we hook a RET, it is the last byte, or followed by only INT 3 or NOP
  500. // inter-function padding. If this causes hooks to fail, then we need to be smarter
  501. // about examining relative jumps to determine the boundaries of the function, so
  502. // that we know if the RET is an early-out and the function continues onward or not.
  503. // We are trying hard to avoid overwriting the start of another function, in case
  504. // the target function is very small and there is no padding afterwards.
  505. if ( bParsedRETOpcode && *pOpcode != 0xCC && *pOpcode != 0x90 )
  506. {
  507. Log( "Warning: hook target starting at %#p contains early RET\n", (void*)pFuncToHook );
  508. // fall through to expanded error reporting below by setting bKnown to false
  509. bKnown = false;
  510. }
  511. if ( *pOpcode == 0xC3 || *pOpcode == 0xC2 )
  512. {
  513. bParsedRETOpcode = true;
  514. }
  515. }
  516. if ( !bKnown ||
  517. ( eOffsetType != k_ENoRelativeOffsets && eOffsetType != k_EDWORDOffsetAtByteTwo && eOffsetType != k_EDWORDOffsetAtByteThree
  518. && eOffsetType != k_EBYTEOffsetAtByteTwo && eOffsetType != k_EDWORDOffsetAtByteFour ) )
  519. {
  520. #if DEBUG_ENABLE_ERROR_STREAM
  521. bool bAlreadyReported = true;
  522. {
  523. GetLock getLock( g_mapLock );
  524. if ( g_mapAlreadyReportedDetourFailures.find( pFuncToHook ) == g_mapAlreadyReportedDetourFailures.end() )
  525. {
  526. bAlreadyReported = false;
  527. g_mapAlreadyReportedDetourFailures.insert( pFuncToHook );
  528. }
  529. }
  530. ErrorStreamMsg_t msg;
  531. _snprintf( msg.rgchError, sizeof( msg.rgchError ), "Unknown opcodes for %s at %d bytes for func %#p: %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
  532. #ifdef _WIN64
  533. "AMD64",
  534. #else
  535. "X86",
  536. #endif
  537. nHookCodeLength,
  538. pFuncToHook,
  539. pFuncToHook[0], pFuncToHook[1], pFuncToHook[2], pFuncToHook[3],
  540. pFuncToHook[4], pFuncToHook[5], pFuncToHook[6], pFuncToHook[7],
  541. pFuncToHook[8], pFuncToHook[9], pFuncToHook[10], pFuncToHook[11],
  542. pFuncToHook[12], pFuncToHook[13], pFuncToHook[14], pFuncToHook[15]
  543. );
  544. Log( msg.rgchError );
  545. if ( !bAlreadyReported )
  546. {
  547. msg.unStrLen = (uint32)strlen( msg.rgchError );
  548. if ( !g_pDetourErrorStream )
  549. g_pDetourErrorStream = new CSharedMemStream( "GameOverlayRender_DetourErrorStream", SHMEMSTREAM_SIZE_ONE_KBYTE*32, 50 );
  550. g_pDetourErrorStream->Put( &msg, sizeof( msg.unStrLen ) + msg.unStrLen );
  551. }
  552. #endif
  553. return false;
  554. }
  555. // make sure we have enough room, we should always have enough unless an opcode is huge!
  556. if ( sizeof( rgCopiedCode ) - nHookCodeLength - nLength < 0 )
  557. {
  558. Log( "Not enough room to copy function preamble\n" );
  559. return false;
  560. }
  561. // Copy the bytes into our local buffer
  562. memcpy( &rgCopiedCode[ nHookCodeLength ], pOpcode, nLength );
  563. pOpcode += nLength;
  564. nHookCodeLength += nLength;
  565. }
  566. // We only account for a max of 32 bytes that needs relocating in our allocated trampoline area
  567. // if we are over that complain and fail, should never hit this
  568. if ( nHookCodeLength > MAX_HOOKED_FUNCTION_PREAMBLE_LENGTH )
  569. {
  570. Log( "Copied more than MAX_HOOKED_FUNCTION_PREAMBLE_LENGTH bytes to make room for E9 jmp of 5 bytes? Bad opcode parsing?\n" );
  571. return false;
  572. }
  573. // We need to find/allocate a region for our trampoline that is within +/-2GB of the function we are hooking.
  574. BYTE *pTrampolineAddress = GetTrampolineRegionNearAddress( pFuncToHook );
  575. if ( !pTrampolineAddress )
  576. {
  577. AllocateNewTrampolineRegionsNearAddress( pFuncToHook );
  578. pTrampolineAddress = GetTrampolineRegionNearAddress( pFuncToHook );
  579. }
  580. // Total failure at this point, couldn't allocate memory close enough.
  581. if ( !pTrampolineAddress )
  582. {
  583. Log( "Error allocating trampoline memory (no memory within +/-2gb? prior failures?)\n" );
  584. return false;
  585. }
  586. // Store the trampoline address to output parameter so caller can clean up on failure
  587. *ppTrampolineAddressToReturn = pTrampolineAddress;
  588. // Save the original function preamble so we can restore it later
  589. HookData_t SavedData;
  590. memcpy( SavedData.m_rgOriginalPreambleCode, rgCopiedCode, MAX_HOOKED_FUNCTION_PREAMBLE_LENGTH );
  591. SavedData.m_nOriginalPreambleLength = nHookCodeLength;
  592. SavedData.m_pFuncHookedAddr = pFuncToHook;
  593. SavedData.m_pTrampolineRealFunc = NULL;
  594. SavedData.m_pTrampolineEntryPoint = NULL;
  595. // Now fixup any relative offsets in our copied code to account for the new relative base pointer,
  596. // since the copied code will be executing from the trampoline area instead of its original location
  597. int nFixupPosition = 0;
  598. while( nFixupPosition < nHookCodeLength )
  599. {
  600. int nLength;
  601. EOpCodeOffsetType eOffsetType;
  602. bool bKnown = ParseOpcode( &rgCopiedCode[nFixupPosition], nLength, eOffsetType );
  603. if ( !bKnown ||
  604. ( eOffsetType != k_ENoRelativeOffsets && eOffsetType != k_EDWORDOffsetAtByteTwo && eOffsetType != k_EDWORDOffsetAtByteThree
  605. && eOffsetType != k_EBYTEOffsetAtByteTwo && eOffsetType != k_EDWORDOffsetAtByteFour ) )
  606. {
  607. Log( "Failed parsing copied bytes during detour -- shouldn't happen as this is a second pass: position %d\n"
  608. "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", nFixupPosition,
  609. rgCopiedCode[0], rgCopiedCode[1], rgCopiedCode[2], rgCopiedCode[3],
  610. rgCopiedCode[4], rgCopiedCode[5], rgCopiedCode[6], rgCopiedCode[7],
  611. rgCopiedCode[8], rgCopiedCode[9], rgCopiedCode[10], rgCopiedCode[11],
  612. rgCopiedCode[12], rgCopiedCode[13], rgCopiedCode[14], rgCopiedCode[15],
  613. rgCopiedCode[16], rgCopiedCode[17], rgCopiedCode[18], rgCopiedCode[19] );
  614. return false;
  615. }
  616. // If there is a relative offset, we need to fix it up according to how far we moved the code
  617. int iPositionOfDWORDFixup = -1;
  618. switch ( eOffsetType )
  619. {
  620. case k_ENoRelativeOffsets:
  621. break;
  622. case k_EDWORDOffsetAtByteTwo:
  623. iPositionOfDWORDFixup = 1;
  624. break;
  625. case k_EDWORDOffsetAtByteThree:
  626. iPositionOfDWORDFixup = 2;
  627. break;
  628. case k_EDWORDOffsetAtByteFour:
  629. iPositionOfDWORDFixup = 3;
  630. break;
  631. case k_EBYTEOffsetAtByteTwo:
  632. // We need explicit knowledge of the opcode here so that we can convert it to DWORD-offset form
  633. if ( (BYTE)rgCopiedCode[nFixupPosition] == 0xEB && nLength == 2 )
  634. {
  635. if ( nHookCodeLength + 3 > MAX_HOOKED_FUNCTION_PREAMBLE_LENGTH )
  636. {
  637. Log( "Can't fixup EB jmp because there isn't enough room to expand to E9 jmp\n" );
  638. return false;
  639. }
  640. rgCopiedCode[nFixupPosition] = 0xE9;
  641. memmove( &rgCopiedCode[nFixupPosition + 5], &rgCopiedCode[nFixupPosition + 2], nHookCodeLength - nFixupPosition - 2 );
  642. // Expand from 8-bit signed offset to 32-bit signed offset, and remember it for address fixup below
  643. // (subtract 3 from offset to account for additional length of the replacement JMP instruction)
  644. int32 iOffset = (int8) rgCopiedCode[nFixupPosition + 1] - 3;
  645. memcpy( &rgCopiedCode[nFixupPosition + 1], &iOffset, 4 );
  646. iPositionOfDWORDFixup = 1;
  647. // This opcode and the total amount of copied data grew by 3 bytes
  648. nLength += 3;
  649. nHookCodeLength += 3;
  650. }
  651. else
  652. {
  653. Log( "Opcode %x of type k_EBYTEOffsetAtByteTwo can't be converted to larger relative address\n", rgCopiedCode[nFixupPosition] );
  654. return false;
  655. }
  656. break;
  657. default:
  658. Log( "Unknown opcode relative-offset enum value %d\n", (int)eOffsetType );
  659. return false;
  660. }
  661. if ( iPositionOfDWORDFixup != -1 )
  662. {
  663. int32 iOffset;
  664. memcpy( &iOffset, &rgCopiedCode[ nFixupPosition + iPositionOfDWORDFixup ], 4 );
  665. intp iNewOffset = iOffset + (intp)pFuncToHook - (intp)pTrampolineAddress;
  666. iOffset = (int32)iNewOffset;
  667. // On 32-bit platforms, 32-bit relative mode can reach any valid address.
  668. // On 64-bit platforms, 32-bit relative mode can only reach addresses +/- 2GB.
  669. if ( sizeof(intp) > sizeof(int32) && (intp)iOffset != iNewOffset )
  670. {
  671. Log( "Can't relocate and adjust offset because offset is too big after relocation.\n" );
  672. return false;
  673. }
  674. memcpy( &rgCopiedCode[ nFixupPosition + iPositionOfDWORDFixup ], &iOffset, 4 );
  675. }
  676. nFixupPosition += nLength;
  677. }
  678. // Copy out the original code to our allocated memory to save it, keep track of original trampoline beginning
  679. BYTE *pBeginTrampoline = pTrampolineAddress;
  680. SavedData.m_pTrampolineRealFunc = pTrampolineAddress;
  681. memcpy( pTrampolineAddress, rgCopiedCode, nHookCodeLength );
  682. pTrampolineAddress += nHookCodeLength; // move pointer forward past copied code
  683. // Place a jump at the end of the copied code to jump back to the rest of the post-hook function body
  684. intp lJumpTarget = (intp)pFuncToHook + nHookCodeLength;
  685. intp lJumpInstruction = (intp)pTrampolineAddress;
  686. intp lJumpOffset = lJumpTarget - lJumpInstruction - sizeof( JumpCodeRelative_t );
  687. sRelativeJumpCode.m_JumpOffset = (int32)lJumpOffset;
  688. // On 64-bit platforms, 32-bit relative addressing can only reach addresses +/- 2GB.
  689. if ( sizeof(intp) > sizeof(int32) && (intp)sRelativeJumpCode.m_JumpOffset != lJumpOffset )
  690. {
  691. // Use a direct 64-bit jump instead
  692. sDirectX64JumpCode.m_QWORDTarget = lJumpTarget;
  693. memcpy( pTrampolineAddress, &sDirectX64JumpCode, sizeof( JumpCodeDirectX64_t ) );
  694. pTrampolineAddress += sizeof( JumpCodeDirectX64_t );
  695. }
  696. else
  697. {
  698. memcpy( pTrampolineAddress, &sRelativeJumpCode, sizeof( JumpCodeRelative_t ) );
  699. pTrampolineAddress += sizeof( JumpCodeRelative_t );
  700. }
  701. // Ok, now write the other half of the trampoline, which is the entry point that we will make the
  702. // hooked function jump to. This will in turn jump into our hook function, which may then call the
  703. // original function bytes that we relocated into the start of the trampoline.
  704. SavedData.m_pTrampolineEntryPoint = pTrampolineAddress;
  705. BYTE *pIntermediateJumpLocation = pTrampolineAddress;
  706. lJumpTarget = (intp)pHookFunctionAddr;
  707. lJumpInstruction = (intp)pIntermediateJumpLocation;
  708. lJumpOffset = lJumpTarget - lJumpInstruction - sizeof( JumpCodeRelative_t );
  709. sRelativeJumpCode.m_JumpOffset = (int32)lJumpOffset;
  710. if ( sizeof(intp) > sizeof(int32) && (intp)sRelativeJumpCode.m_JumpOffset != lJumpOffset )
  711. {
  712. sDirectX64JumpCode.m_QWORDTarget = lJumpTarget;
  713. memcpy( pTrampolineAddress, &sDirectX64JumpCode, sizeof( JumpCodeDirectX64_t ) );
  714. pTrampolineAddress += sizeof( JumpCodeDirectX64_t );
  715. }
  716. else
  717. {
  718. memcpy( pTrampolineAddress, &sRelativeJumpCode, sizeof( JumpCodeRelative_t ) );
  719. pTrampolineAddress += sizeof( JumpCodeRelative_t );
  720. }
  721. // Now flush instruction cache to ensure the processor detects the changed memory.
  722. FlushInstructionCache( hProc, pBeginTrampoline, pTrampolineAddress - pBeginTrampoline );
  723. // Trampoline is done; write jump-into-trampoline over the original function body
  724. lJumpTarget = (intp)pIntermediateJumpLocation;
  725. lJumpInstruction = (intp)pFuncToHook;
  726. lJumpOffset = lJumpTarget - lJumpInstruction - sizeof( JumpCodeRelative_t );
  727. sRelativeJumpCode.m_JumpOffset = (int32)lJumpOffset;
  728. if ( sizeof(intp) > sizeof(int32) && (intp)sRelativeJumpCode.m_JumpOffset != lJumpOffset )
  729. {
  730. // Shouldn't ever hit this, since we explicitly found an address to place the intermediate
  731. // trampoline which was close enough.
  732. Log( "Warning: Jump from function to intermediate trampoline is too far! Shouldn't happen." );
  733. return false;
  734. }
  735. // Jump is prepared for writing, now adjust virtual protection and overwrite the function start
  736. DWORD dwSystemPageSize = GetSystemPageSize();
  737. void *pLastHookByte = pFuncToHook + sizeof( JumpCodeRelative_t ) - 1;
  738. bool bHookSpansTwoPages = ( (uintp)pFuncToHook / dwSystemPageSize != (uintp)pLastHookByte / dwSystemPageSize );
  739. DWORD dwOldProtectionLevel = 0;
  740. DWORD dwOldProtectionLevel2 = 0;
  741. DWORD dwIgnore;
  742. // Fix up the protection on the memory where the functions current asm code is
  743. // so that we will be able read/write it.
  744. if( !VirtualProtect( pFuncToHook, 1, PAGE_EXECUTE_READWRITE, &dwOldProtectionLevel ) )
  745. {
  746. Log( "Warning: VirtualProtect call failed during hook attempt\n" );
  747. return false;
  748. }
  749. // In case the hook spans a page boundary, also adjust protections on the last byte,
  750. // and track the memory protection for the second page in a separate variable since
  751. // it could theoretically be different (although that would be very odd).
  752. if ( bHookSpansTwoPages && !VirtualProtect( pLastHookByte, 1, PAGE_EXECUTE_READWRITE, &dwOldProtectionLevel2 ) )
  753. {
  754. // Restore original protection on first page.
  755. VirtualProtect( pFuncToHook, 1, dwOldProtectionLevel, &dwIgnore );
  756. Log( "Warning: VirtualProtect (2) call failed during hook attempt\n" );
  757. return false;
  758. }
  759. bool bSuccess = false;
  760. // We must store the relocated function address to the output variable after the trampoline
  761. // is written, but BEFORE the hook is written, because once the hook is written it could be
  762. // executed by anybody on any thread, and it needs to know the real function address.
  763. *ppRealFunctionAdr = pBeginTrampoline;
  764. // Write new function body which jumps to trampoline which runs our hook and then relocated function bits
  765. SIZE_T cBytesWritten;
  766. if( !WriteProcessMemory( hProc, (void *)pFuncToHook, &sRelativeJumpCode, sizeof( JumpCodeRelative_t ), &cBytesWritten ) )
  767. {
  768. Log( "WriteProcessMemory() call failed trying to overwrite first 5 bytes of function body during hook\n" );
  769. }
  770. else
  771. {
  772. // From this point on, we must return success because we wrote a live jump into the trampoline
  773. *ppTrampolineAddressToReturn = NULL;
  774. bSuccess = true;
  775. if ( !FlushInstructionCache( hProc, (void*)pFuncToHook, sizeof( JumpCodeRelative_t ) ) )
  776. {
  777. // if flush instruction cache fails what should we do?
  778. Log( "FlushInstructionCache() call failed trying to overwrite first 5 bytes of function body during hook\n" );
  779. }
  780. }
  781. // Restore the original protection flags regardless of success, unless they already matched, then don't bother
  782. if ( bHookSpansTwoPages && dwOldProtectionLevel2 != PAGE_EXECUTE_READWRITE && dwOldProtectionLevel2 != PAGE_EXECUTE_WRITECOPY )
  783. {
  784. if ( !VirtualProtect( pLastHookByte, 1, dwOldProtectionLevel2, &dwIgnore ) )
  785. {
  786. Log( "Warning: VirtualProtect (2) call failed to restore protection flags during hook attempt\n" );
  787. }
  788. }
  789. if ( dwOldProtectionLevel != PAGE_EXECUTE_READWRITE && dwOldProtectionLevel != PAGE_EXECUTE_WRITECOPY )
  790. {
  791. if( !VirtualProtect( pFuncToHook, 1, dwOldProtectionLevel, &dwIgnore ) )
  792. {
  793. Log( "Warning: VirtualProtect call failed to restore protection flags during hook attempt\n" );
  794. }
  795. }
  796. // Track that we have hooked the function at this address
  797. if ( bSuccess )
  798. {
  799. GetLock getLock( g_mapLock );
  800. g_mapHookedFunctions[ (void *)pRealFunctionAddr ] = SavedData;
  801. }
  802. return bSuccess;
  803. }
  804. //-----------------------------------------------------------------------------
  805. // Purpose: Check if windows says a given address is committed. Used to make
  806. // sure we don't follow jumps into unloaded modules due to other apps bad detours
  807. // code.
  808. //-----------------------------------------------------------------------------
  809. static bool BIsAddressCommited( const void *pAddress )
  810. {
  811. MEMORY_BASIC_INFORMATION memInfo;
  812. if ( !VirtualQuery( pAddress, &memInfo, sizeof( memInfo ) ) )
  813. {
  814. return false;
  815. }
  816. if ( memInfo.State == MEM_COMMIT )
  817. return true;
  818. return false;
  819. }
//-----------------------------------------------------------------------------
// Purpose: Check if we have already hooked a function at a given address.
// Params: pRealFunctionAddr -- the address of the function to detour.
// pHookFunc -- optional, and if given is the function we want to detour to.
// Providing it will allow additional detection to make sure a detour to
// the target isn't already set via an E9 or chain of E9 calls at the start
// of the function.
// Returns: true if we recorded a hook here, or if the jump chain starting at
// the function already leads to pHookFunc; false otherwise.
//-----------------------------------------------------------------------------
bool bIsFuncHooked( BYTE *pRealFunctionAddr, void *pHookFunc /* = NULL */ )
{
	if ( !pRealFunctionAddr )
		return false;
	// Cheap check first: did we record a hook at this address ourselves?
	{
		GetLock getLock( g_mapLock );
		if ( g_mapHookedFunctions.find( (void*)pRealFunctionAddr ) != g_mapHookedFunctions.end() )
		{
			// Sanity check that the detour jump we wrote (E9 rel32, or on x64
			// possibly FF 25 indirect) is still the first instruction.
			if ( *pRealFunctionAddr != 0xE9
#ifdef _WIN64
				&& ( *pRealFunctionAddr != 0xFF || *(pRealFunctionAddr+1) != 0x25 )
#endif
				)
			{
				Log( "Warning: Function we had previously hooked now appears unhooked.\n" );
			}
			return true;
		}
	}
	// If we were told what the hook func address is we can do more checking to avoid infinite recursion
	BYTE *pFuncToHook = pRealFunctionAddr;
	// Follow at most 5 chained jumps before giving up on the chain.
	int nJumpsToCheckForExistingHook = 5;
	BYTE * pCurrentDetour = pFuncToHook;
	while( nJumpsToCheckForExistingHook )
	{
		// We defensively check all the pointers we find following the detour chain
		// to make sure they are at least in commited pages to try to avoid following
		// bad jmps. We can end up in bad jmps due to badly behaving third-party detour
		// code.
		if ( !BIsAddressCommited( pCurrentDetour ) )
			return false;
		if ( pCurrentDetour[0] == 0xE9 )
		{
			// E9 = jmp rel32: target is the next instruction (+5) plus the
			// signed 32-bit displacement that follows the opcode.
			// Make sure the hook isn't pointing at the same place we are going to detour to (which would mean we've already hooked)
			int32 * pOffset = (int32 *)(pCurrentDetour+1);
			if ( !BIsAddressCommited( pOffset ) )
				return false;
			pCurrentDetour = (BYTE*)((int64)pCurrentDetour + 5 + *pOffset);
			if ( pCurrentDetour == pHookFunc )
			{
				Log ( "Trying to hook when already detoured to target addr (by E9)\n" );
				return true;
			}
		}
#ifdef _WIN64
		else if ( pCurrentDetour[0] == 0xFF && pCurrentDetour[1] == 0x25 )
		{
			// On x64 we have a relative offset to an absolute qword ptr
			// (FF 25 = jmp [rip+disp32]); dereference the qword to get the target.
			DWORD * pOffset = (DWORD *)(pCurrentDetour+2);
			if ( !BIsAddressCommited( pOffset ) )
				return false;
			int64 *pTarget = (int64*)(pCurrentDetour + 6 + *pOffset);
			if ( !BIsAddressCommited( pTarget ) )
				return false;
			pCurrentDetour = (BYTE*)*pTarget;
			if ( (void *)pCurrentDetour == pHookFunc )
			{
				Log ( "Trying to hook when already detoured to target addr (by FF 25)\n" );
				return true;
			}
		}
#endif
		else
		{
			// Done, no more chained jumps
			break;
		}
		--nJumpsToCheckForExistingHook;
	}
	return false;
}
  899. //-----------------------------------------------------------------------------
  900. // Purpose: Check if any of the functions in our map of already hooked ones appears
  901. // to no longer exist in a valid module, if that has happened its likely the following
  902. // sequence of events has occurred:
  903. //
  904. // hMod = LoadLibrary( "target.dll" );
  905. // ...
  906. // DetourFunc called on method in target.dll
  907. // ...
  908. // FreeLibrary( hMod ); // ref count to 0 for dll in process
  909. //
  910. // If that has happened, we want to remove the address from the list of hooked code as
  911. // if the DLL is reloaded the address will likely be the same but the code will be restored
  912. // and no longer hooked.
  913. //-----------------------------------------------------------------------------
  914. void DetectUnloadedHooks()
  915. {
  916. void **pTestAddresses = NULL;
  917. uint32 nTestAddresses = 0;
  918. // Build an array of function addresses to test, naturally sorted ascending due to std::map.
  919. // Don't hold the lock while we call GetModuleHandleEx or there will be potential to deadlock!
  920. {
  921. GetLock getLock( g_mapLock );
  922. nTestAddresses = (uint32)g_mapHookedFunctions.size();
  923. pTestAddresses = (void**) malloc( sizeof(void*) * nTestAddresses );
  924. uint32 i = 0;
  925. for ( const auto &entry : g_mapHookedFunctions )
  926. {
  927. pTestAddresses[i++] = entry.first;
  928. if ( nTestAddresses == i )
  929. break;
  930. }
  931. }
  932. // Iterate from high addresses to low, can eliminate some GetModuleHandleExA calls since
  933. // the HMODULE returned is the module's base address, defining a known-valid module range.
  934. BYTE *pLoadedModuleBase = NULL;
  935. for ( uint32 i = nTestAddresses; i--; )
  936. {
  937. if ( !pLoadedModuleBase || pLoadedModuleBase > (BYTE*)pTestAddresses[i] )
  938. {
  939. HMODULE hMod = NULL;
  940. if ( !GetModuleHandleExA( GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT, (LPCSTR)pTestAddresses[i], &hMod ) || !hMod )
  941. {
  942. // leave entry alone so that it is erased from map below.
  943. Log( "Found a hooked function in now unloaded module, removing from map.\n" );
  944. pLoadedModuleBase = NULL;
  945. continue;
  946. }
  947. pLoadedModuleBase = (BYTE*)hMod;
  948. }
  949. // Either we shortcut the test because we already know this module is loaded, or
  950. // we looked up the function's module and found it to be valid (and remembered it).
  951. // Swap from back and shorten array.
  952. pTestAddresses[i] = pTestAddresses[--nTestAddresses];
  953. }
  954. // Lock again and delete the entries that we found to be pointing at unloaded modules
  955. if ( nTestAddresses )
  956. {
  957. GetLock getLock( g_mapLock );
  958. for ( uint32 i = 0; i < nTestAddresses; ++i )
  959. {
  960. g_mapHookedFunctions.erase( pTestAddresses[i] );
  961. }
  962. }
  963. free( pTestAddresses );
  964. }
  965. //-----------------------------------------------------------------------------
  966. // Purpose: Unhook a function, this doesn't remove the jump code, it just makes
  967. // it jump back to the original code directly
  968. //-----------------------------------------------------------------------------
  969. void UnhookFunc( BYTE *pRealFunctionAddr, BYTE *pOriginalFunctionAddr_DEPRECATED )
  970. {
  971. (void)pOriginalFunctionAddr_DEPRECATED;
  972. UnhookFunc( pRealFunctionAddr, true );
  973. }
// Unhook a previously hooked function. Redirects the trampoline entry point
// back at the saved original bytes, then (if still safe) restores the saved
// preamble bytes over the 0xE9 jump we wrote into the function itself.
// bLogFailures controls whether non-fatal abort paths emit Log() output.
void UnhookFunc( BYTE *pRealFunctionAddr, bool bLogFailures )
{
	if ( !pRealFunctionAddr )
	{
		if ( bLogFailures )
			Log( "Aborting UnhookFunc because pRealFunctionAddr is null\n" );
		return;
	}
	// Look up and remove our bookkeeping under the lock; the actual code
	// patching below happens outside the lock.
	HookData_t hookData;
	{
		GetLock getLock( g_mapLock );
		std::map<void *, HookData_t>::iterator iter;
		iter = g_mapHookedFunctions.find( (void*)pRealFunctionAddr );
		if ( iter == g_mapHookedFunctions.end() )
		{
			if ( bLogFailures )
				Log( "Aborting UnhookFunc because pRealFunctionAddr is not hooked\n" );
			return;
		}
		else
		{
			hookData = iter->second;
			g_mapHookedFunctions.erase( iter );
		}
	}
	DWORD dwSystemPageSize = GetSystemPageSize();
	HANDLE hProc = GetCurrentProcess();
	BYTE *pFuncToUnhook = hookData.m_pFuncHookedAddr;
	// Last byte that the restored preamble will touch; used to detect whether
	// the write crosses a page boundary (two VirtualProtect calls needed).
	void *pLastHookByte = pFuncToUnhook + hookData.m_nOriginalPreambleLength - 1;
	bool bHookSpansTwoPages = ( (uintp)pFuncToUnhook / dwSystemPageSize != (uintp)pLastHookByte / dwSystemPageSize );
	// Write a 2-byte 0xEB jump into the trampoline at the entry point (the jump to our hook function)
	// to cause it to jump again to the start of the saved function bytes instead of calling our hook.
	COMPILE_TIME_ASSERT( BYTES_FOR_TRAMPOLINE_ALLOCATION < 128 );
	union {
		struct {
			uint8 opcode;
			int8 offset;
		} s;
		uint16 u16;
	} smalljump;
	smalljump.s.opcode = 0xEB; // tiny jump to 8-bit immediate offset from next instruction
	smalljump.s.offset = (int8)( hookData.m_pTrampolineRealFunc - ( hookData.m_pTrampolineEntryPoint + 2 ) );
	// Both bytes are stored with a single 16-bit write — presumably so a thread
	// racing through the trampoline never sees a half-written instruction.
	*(UNALIGNED uint16*)hookData.m_pTrampolineEntryPoint = smalljump.u16;
	FlushInstructionCache( hProc, hookData.m_pTrampolineEntryPoint, 2 );
	if ( !BIsAddressCommited( pFuncToUnhook ) )
	{
		if ( bLogFailures )
			Log( "UnhookFunc not restoring original bytes - function is unmapped\n" );
		return;
	}
	// Check that the function still starts with our 0xE9 jump before slamming it back to original code
	if ( *pFuncToUnhook != 0xE9 )
	{
		if ( bLogFailures )
			Log( "UnhookFunc not restoring original bytes - jump instruction not found\n" );
		return;
	}
	// Verify the E9 still targets our trampoline; if somebody else re-patched
	// the function since we hooked it, restoring our saved bytes would clobber
	// their hook, so leave the code alone.
	BYTE *pJumpTarget = pFuncToUnhook + 5 + *(UNALIGNED int32*)(pFuncToUnhook + 1);
	if ( pJumpTarget != hookData.m_pTrampolineEntryPoint )
	{
		if ( bLogFailures )
			Log( "UnhookFunc not restoring original bytes - jump target has changed\n" );
		return;
	}
	DWORD dwOldProtectionLevel = 0;
	DWORD dwOldProtectionLevel2 = 0;
	DWORD dwIgnore;
	// Fix up the protection on the memory where the functions current asm code is
	// so that we will be able read/write it
	if( !VirtualProtect( pFuncToUnhook, hookData.m_nOriginalPreambleLength, PAGE_EXECUTE_READWRITE, &dwOldProtectionLevel ) )
	{
		if ( bLogFailures )
			Log( "Warning: VirtualProtect call failed during unhook\n" );
		return;
	}
	// In case the hook spans a page boundary, also adjust protections on the last byte,
	// and track the memory protection for the second page in a separate variable since
	// it could theoretically be different (although that would be very odd).
	if ( bHookSpansTwoPages && !VirtualProtect( pLastHookByte, 1, PAGE_EXECUTE_READWRITE, &dwOldProtectionLevel2 ) )
	{
		// Restore original protection on first page.
		VirtualProtect( pFuncToUnhook, 1, dwOldProtectionLevel, &dwIgnore );
		if ( bLogFailures )
			Log( "Warning: VirtualProtect (2) call failed during unhook\n" );
		return;
	}
	// Put the saved original preamble bytes back over our jump.
	memcpy( pFuncToUnhook, hookData.m_rgOriginalPreambleCode, hookData.m_nOriginalPreambleLength );
	// Must flush instruction cache to ensure the processor detects the changed memory
	FlushInstructionCache( hProc, pFuncToUnhook, hookData.m_nOriginalPreambleLength );
	// Restore the original protection flags regardless of success, unless they already matched, then don't bother
	if ( bHookSpansTwoPages && dwOldProtectionLevel2 != PAGE_EXECUTE_READWRITE && dwOldProtectionLevel2 != PAGE_EXECUTE_WRITECOPY )
	{
		if ( !VirtualProtect( pLastHookByte, 1, dwOldProtectionLevel2, &dwIgnore ) )
		{
			if ( bLogFailures )
				Log( "Warning: VirtualProtect (2) call failed to restore protection flags during unhook\n" );
		}
	}
	if ( dwOldProtectionLevel != PAGE_EXECUTE_READWRITE && dwOldProtectionLevel != PAGE_EXECUTE_WRITECOPY )
	{
		if ( !VirtualProtect( pFuncToUnhook, 1, dwOldProtectionLevel, &dwIgnore ) )
		{
			if ( bLogFailures )
				Log( "Warning: VirtualProtect call failed to restore protection flags during unhook\n" );
		}
	}
}
  1081. void UnhookFuncByRelocAddr( BYTE *pRelocFunctionAddr, bool bLogFailures )
  1082. {
  1083. if ( !pRelocFunctionAddr )
  1084. {
  1085. if ( bLogFailures )
  1086. Log( "Aborting UnhookFunc because pRelocFunctionAddr is null\n" );
  1087. return;
  1088. }
  1089. BYTE *pOrigFunc = NULL;
  1090. {
  1091. GetLock getLock( g_mapLock );
  1092. for ( const auto &entry : g_mapHookedFunctions )
  1093. {
  1094. if ( entry.second.m_pTrampolineRealFunc == pRelocFunctionAddr )
  1095. {
  1096. pOrigFunc = (BYTE*)entry.first;
  1097. break;
  1098. }
  1099. }
  1100. }
  1101. if ( !pOrigFunc )
  1102. {
  1103. if ( bLogFailures )
  1104. Log( "Aborting UnhookFuncByRelocAddr because no matching function is hooked\n" );
  1105. return;
  1106. }
  1107. UnhookFunc( pOrigFunc, bLogFailures );
  1108. }
  1109. #if DEBUG_ENABLE_DETOUR_RECORDING
//-----------------------------------------------------------------------------
// CRecordDetouredCalls
//-----------------------------------------------------------------------------
// Constructor: stamps begin/end GUID markers and the sizes of the record
// sub-structures — presumably so an external tool scanning memory can locate
// this block and validate its layout/version (TODO confirm against consumer).
// Recording starts disabled until m_bMasterSwitch is turned on.
CRecordDetouredCalls::CRecordDetouredCalls()
{
	// Fixed GUID placed before the payload as a scan marker.
	m_guidMarkerBegin = { 0xb6a8cedf, 0x3296, 0x43d2, { 0xae, 0xc1, 0xa5, 0x96, 0xea, 0xb7, 0x6c, 0xc2 } };
	m_nVersionNumber = 1;
	// Self-describing sizes let a reader detect layout mismatches.
	m_cubRecordDetouredCalls = sizeof(CRecordDetouredCalls);
	m_cubGetAsyncKeyStateCallRecord = sizeof( m_GetAsyncKeyStateCallRecord );
	m_cubVirtualAllocCallRecord = sizeof( m_VirtualAllocCallRecord );
	m_cubVirtualProtectCallRecord = sizeof( m_VirtualProtectCallRecord );
	m_cubLoadLibraryCallRecord = sizeof( m_LoadLibraryCallRecord );
	m_bMasterSwitch = false;	// recording off by default
	// Distinct fixed GUID marks the end of the block.
	m_guidMarkerEnd = { 0xff84867e, 0x86e0, 0x4c0f, { 0x81, 0xf5, 0x8f, 0xe5, 0x48, 0x72, 0xa7, 0xe5 } };
}
  1125. //-----------------------------------------------------------------------------
  1126. // BShouldRecordProtectFlags
  1127. // only want to track PAGE_EXECUTE_READWRITE for now
  1128. //-----------------------------------------------------------------------------
  1129. bool CRecordDetouredCalls::BShouldRecordProtectFlags( DWORD flProtect )
  1130. {
  1131. return flProtect == PAGE_EXECUTE_READWRITE;
  1132. }
  1133. //-----------------------------------------------------------------------------
  1134. // RecordGetAsyncKeyState
  1135. // only care about callers address, not params or results
  1136. //-----------------------------------------------------------------------------
  1137. void CRecordDetouredCalls::RecordGetAsyncKeyState( DWORD vKey,
  1138. PVOID lpCallersAddress, PVOID lpCallersCallerAddress
  1139. )
  1140. {
  1141. GetAsyncKeyStateCallRecord_t fcr;
  1142. fcr.InitGetAsyncKeyState( vKey, lpCallersAddress, lpCallersCallerAddress );
  1143. int iCall = m_GetAsyncKeyStateCallRecord.AddFunctionCallRecord( fcr );
  1144. #ifdef DEBUG_LOG_DETOURED_CALLS
  1145. Log( "GetAsyncKeyState called %d from %p %p\n", iCall, lpCallersAddress, lpCallersCallerAddress );
  1146. #else
  1147. iCall;
  1148. #endif
  1149. }
  1150. //-----------------------------------------------------------------------------
  1151. // RecordVirtualAlloc
  1152. //-----------------------------------------------------------------------------
  1153. void CRecordDetouredCalls::RecordVirtualAlloc( LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect,
  1154. LPVOID lpvResult, DWORD dwGetLastError,
  1155. PVOID lpCallersAddress, PVOID lpCallersCallerAddress
  1156. )
  1157. {
  1158. VirtualAllocCallRecord_t fcr;
  1159. fcr.InitVirtualAlloc( lpAddress, dwSize, flAllocationType, flProtect, lpvResult, dwGetLastError, lpCallersAddress, lpCallersCallerAddress );
  1160. int iCall = m_VirtualAllocCallRecord.AddFunctionCallRecord( fcr );
  1161. #ifdef DEBUG_LOG_DETOURED_CALLS
  1162. Log( "VirtualAlloc called %d : %p %llx %x %x result %p from %p %p\n", iCall, lpAddress, (uint64)dwSize, flAllocationType, flProtect, lpvResult, lpCallersAddress, lpCallersCallerAddress );
  1163. #else
  1164. iCall;
  1165. #endif
  1166. }
  1167. //-----------------------------------------------------------------------------
  1168. // RecordVirtualProtect
  1169. //-----------------------------------------------------------------------------
  1170. void CRecordDetouredCalls::RecordVirtualProtect( LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, DWORD flOldProtect,
  1171. BOOL bResult, DWORD dwGetLastError,
  1172. PVOID lpCallersAddress, PVOID lpCallersCallerAddress
  1173. )
  1174. {
  1175. VirtualAllocCallRecord_t fcr;
  1176. fcr.InitVirtualProtect( lpAddress, dwSize, flNewProtect, flOldProtect, bResult, dwGetLastError, lpCallersAddress, lpCallersCallerAddress );
  1177. int iCall = m_VirtualProtectCallRecord.AddFunctionCallRecord( fcr );
  1178. #ifdef DEBUG_LOG_DETOURED_CALLS
  1179. Log( "VirtualProtect called %d : %p %llx %x %x result %x from %p %p\n", iCall, lpAddress, (uint64)dwSize, flNewProtect, flOldProtect, bResult, lpCallersAddress, lpCallersCallerAddress );
  1180. #else
  1181. iCall;
  1182. #endif
  1183. }
  1184. //-----------------------------------------------------------------------------
  1185. // RecordVirtualAllocEx
  1186. //-----------------------------------------------------------------------------
  1187. void CRecordDetouredCalls::RecordVirtualAllocEx( HANDLE hProcess, LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect,
  1188. LPVOID lpvResult, DWORD dwGetLastError,
  1189. PVOID lpCallersAddress, PVOID lpCallersCallerAddress
  1190. )
  1191. {
  1192. VirtualAllocCallRecord_t fcr;
  1193. fcr.InitVirtualAllocEx( hProcess, lpAddress, dwSize, flAllocationType, flProtect, lpvResult, dwGetLastError, lpCallersAddress, lpCallersCallerAddress );
  1194. int iCall = m_VirtualAllocCallRecord.AddFunctionCallRecord( fcr );
  1195. #ifdef DEBUG_LOG_DETOURED_CALLS
  1196. Log( "VirtualAllocEx called %d : %p %llx %x %x result %p from %p %p\n", iCall, lpAddress, (uint64)dwSize, flAllocationType, flProtect, lpvResult, lpCallersAddress, lpCallersCallerAddress );
  1197. #else
  1198. iCall;
  1199. #endif
  1200. }
  1201. //-----------------------------------------------------------------------------
  1202. // RecordVirtualProtectEx
  1203. //-----------------------------------------------------------------------------
  1204. void CRecordDetouredCalls::RecordVirtualProtectEx( HANDLE hProcess, LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, DWORD flOldProtect,
  1205. BOOL bResult, DWORD dwGetLastError,
  1206. PVOID lpCallersAddress, PVOID lpCallersCallerAddress
  1207. )
  1208. {
  1209. VirtualAllocCallRecord_t fcr;
  1210. fcr.InitVirtualProtectEx( hProcess, lpAddress, dwSize, flNewProtect, flOldProtect, bResult, dwGetLastError, lpCallersAddress, lpCallersCallerAddress );
  1211. int iCall = m_VirtualProtectCallRecord.AddFunctionCallRecord( fcr );
  1212. #ifdef DEBUG_LOG_DETOURED_CALLS
  1213. Log( "VirtualProtectEx called %d : %p %llx %x %x result %x from %p %p\n", iCall, lpAddress, (uint64)dwSize, flNewProtect, flOldProtect, bResult, lpCallersAddress, lpCallersCallerAddress );
  1214. #else
  1215. iCall;
  1216. #endif
  1217. }
  1218. //-----------------------------------------------------------------------------
  1219. // RecordLoadLibraryW
  1220. //-----------------------------------------------------------------------------
  1221. void CRecordDetouredCalls::RecordLoadLibraryW(
  1222. LPCWSTR lpLibFileName, HANDLE hFile, DWORD dwFlags,
  1223. HMODULE hModule, DWORD dwGetLastError,
  1224. PVOID lpCallersAddress, PVOID lpCallersCallerAddress
  1225. )
  1226. {
  1227. LoadLibraryCallRecord_t fcr;
  1228. fcr.InitLoadLibraryW( lpLibFileName, hFile, dwFlags, hModule, dwGetLastError, lpCallersAddress, lpCallersCallerAddress );
  1229. int iCall = m_LoadLibraryCallRecord.AddFunctionCallRecord( fcr );
  1230. if ( iCall >= 0 )
  1231. {
  1232. // keep updating the last callers address, so we will have the first and last caller, but lose any in between
  1233. m_LoadLibraryCallRecord.m_rgElements[iCall].m_lpLastCallerAddress = lpCallersAddress;
  1234. }
  1235. #ifdef DEBUG_LOG_DETOURED_CALLS
  1236. char rgchCopy[500];
  1237. wcstombs( rgchCopy, lpLibFileName, 500 );
  1238. Log( "LoadLibraryW called %d : %s result %p from %p %p\n", iCall, rgchCopy, hModule, lpCallersAddress, lpCallersCallerAddress );
  1239. #else
  1240. iCall;
  1241. #endif
  1242. }
  1243. //-----------------------------------------------------------------------------
  1244. // RecordLoadLibraryA
  1245. //-----------------------------------------------------------------------------
  1246. void CRecordDetouredCalls::RecordLoadLibraryA(
  1247. LPCSTR lpLibFileName, HANDLE hFile, DWORD dwFlags,
  1248. HMODULE hModule, DWORD dwGetLastError,
  1249. PVOID lpCallersAddress, PVOID lpCallersCallerAddress
  1250. )
  1251. {
  1252. LoadLibraryCallRecord_t fcr;
  1253. fcr.InitLoadLibraryA( lpLibFileName, hFile, dwFlags, hModule, dwGetLastError, lpCallersAddress, lpCallersCallerAddress );
  1254. int iCall = m_LoadLibraryCallRecord.AddFunctionCallRecord( fcr );
  1255. if ( iCall >= 0 )
  1256. {
  1257. // keep updating the last callers address, so we will have the first and last caller, but lose any in between
  1258. m_LoadLibraryCallRecord.m_rgElements[iCall].m_lpLastCallerAddress = lpCallersAddress;
  1259. }
  1260. #ifdef DEBUG_LOG_DETOURED_CALLS
  1261. Log( "LoadLibraryA called %d : %s result %p from %p %p\n", iCall, lpLibFileName, hModule, lpCallersAddress, lpCallersCallerAddress );
  1262. #else
  1263. iCall;
  1264. #endif
  1265. }
//-----------------------------------------------------------------------------
// SharedInit
// Common initialization for every call-record type: stores the call result,
// GetLastError value and the first caller's address. The last-caller slot is
// cleared here and (for some record types) updated later by the Record*
// functions.
//-----------------------------------------------------------------------------
void CRecordDetouredCalls::FunctionCallRecordBase_t::SharedInit(
	DWORD dwResult, DWORD dwGetLastError,
	PVOID lpCallersAddress, PVOID lpCallersCallerAddress
	)
{
	m_dwResult = dwResult;
	m_dwGetLastError = dwGetLastError;
	m_lpFirstCallersAddress = lpCallersAddress;
	m_lpLastCallerAddress = NULL;	// filled in by callers that track repeats
	lpCallersCallerAddress;	// currently unrecorded; statement silences unused-parameter warning
}
  1280. //-----------------------------------------------------------------------------
  1281. // CRecordDetouredCalls private implementations
  1282. //-----------------------------------------------------------------------------
  1283. void CRecordDetouredCalls::GetAsyncKeyStateCallRecord_t::InitGetAsyncKeyState( DWORD vKey,
  1284. PVOID lpCallersAddress, PVOID lpCallersCallerAddress
  1285. )
  1286. {
  1287. vKey;
  1288. SharedInit( 0, 0, lpCallersAddress, lpCallersCallerAddress );
  1289. }
// Fill this record from a VirtualAlloc call (current-process variant).
void CRecordDetouredCalls::VirtualAllocCallRecord_t::InitVirtualAlloc( LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect,
	LPVOID lpvResult, DWORD dwGetLastError,
	PVOID lpCallersAddress, PVOID lpCallersCallerAddress
	)
{
	// NOTE(review): casting the returned pointer to DWORD truncates it to the
	// low 32 bits on 64-bit builds (SharedInit's result field is 32 bits wide).
	SharedInit( (DWORD)lpvResult, dwGetLastError, lpCallersAddress, lpCallersCallerAddress );
	m_dwProcessId = 0;	// plain VirtualAlloc always acts on the current process
	m_lpAddress = lpAddress;
	m_dwSize = dwSize;
	m_flProtect = flProtect;
	m_dw2 = flAllocationType;	// generic slot; meaning depends on record type
}
// VirtualAllocEx
// Fill this record from a VirtualAllocEx call; additionally resolves and
// stores the target process id from the handle.
void CRecordDetouredCalls::VirtualAllocCallRecord_t::InitVirtualAllocEx( HANDLE hProcess, LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect,
	LPVOID lpvResult, DWORD dwGetLastError,
	PVOID lpCallersAddress, PVOID lpCallersCallerAddress
	)
{
	// NOTE(review): casting the returned pointer to DWORD truncates it to the
	// low 32 bits on 64-bit builds (SharedInit's result field is 32 bits wide).
	SharedInit( (DWORD)lpvResult, dwGetLastError, lpCallersAddress, lpCallersCallerAddress );
	m_dwProcessId = GetProcessId( hProcess );
	m_lpAddress = lpAddress;
	m_dwSize = dwSize;
	m_flProtect = flProtect;
	m_dw2 = flAllocationType;	// generic slot; meaning depends on record type
}
  1315. // VirtualProtect
  1316. void CRecordDetouredCalls::VirtualAllocCallRecord_t::InitVirtualProtect( LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, DWORD flOldProtect,
  1317. BOOL bResult, DWORD dwGetLastError,
  1318. PVOID lpCallersAddress, PVOID lpCallersCallerAddress
  1319. )
  1320. {
  1321. SharedInit( (DWORD)bResult, dwGetLastError, lpCallersAddress, lpCallersCallerAddress );
  1322. m_dwProcessId = 0;
  1323. m_lpAddress = lpAddress;
  1324. m_dwSize = dwSize;
  1325. m_flProtect = flNewProtect;
  1326. m_dw2 = flOldProtect;
  1327. }
  1328. // VirtualProtectEx
  1329. void CRecordDetouredCalls::VirtualAllocCallRecord_t::InitVirtualProtectEx( HANDLE hProcess, LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, DWORD flOldProtect,
  1330. BOOL bResult, DWORD dwGetLastError,
  1331. PVOID lpCallersAddress, PVOID lpCallersCallerAddress
  1332. )
  1333. {
  1334. SharedInit( (DWORD)bResult, dwGetLastError, lpCallersAddress, lpCallersCallerAddress );
  1335. m_dwProcessId = GetProcessId( hProcess );
  1336. m_lpAddress = lpAddress;
  1337. m_dwSize = dwSize;
  1338. m_flProtect = flNewProtect;
  1339. m_dw2 = flOldProtect;
  1340. }
  1341. // LoadLibraryExW
  1342. void CRecordDetouredCalls::LoadLibraryCallRecord_t::InitLoadLibraryW(
  1343. LPCWSTR lpLibFileName, HANDLE hFile, DWORD dwFlags,
  1344. HMODULE hModule, DWORD dwGetLastError,
  1345. PVOID lpCallersAddress, PVOID lpCallersCallerAddress
  1346. )
  1347. {
  1348. SharedInit( (DWORD)hModule, dwGetLastError, lpCallersAddress, lpCallersCallerAddress );
  1349. m_hFile = hFile;
  1350. m_dwFlags = dwFlags;
  1351. memset( m_rgubFileName, 0, sizeof(m_rgubFileName) );
  1352. if ( hModule != NULL && lpLibFileName != NULL )
  1353. {
  1354. // record as many of the tail bytes as will fit in m_rgubFileName
  1355. size_t cubLibFileName = wcslen( lpLibFileName )* sizeof(WCHAR);
  1356. size_t cubToCopy = cubLibFileName;
  1357. size_t nOffset = 0;
  1358. if ( cubToCopy > sizeof(m_rgubFileName) )
  1359. {
  1360. nOffset = cubToCopy - sizeof(m_rgubFileName);
  1361. cubToCopy = sizeof(m_rgubFileName);
  1362. }
  1363. memcpy( m_rgubFileName, ((uint8 *)lpLibFileName) + nOffset, cubToCopy );
  1364. }
  1365. }
  1366. // LoadLibraryExA
  1367. void CRecordDetouredCalls::LoadLibraryCallRecord_t::InitLoadLibraryA(
  1368. LPCSTR lpLibFileName, HANDLE hFile, DWORD dwFlags,
  1369. HMODULE hModule, DWORD dwGetLastError,
  1370. PVOID lpCallersAddress, PVOID lpCallersCallerAddress
  1371. )
  1372. {
  1373. SharedInit( (DWORD)hModule, dwGetLastError, lpCallersAddress, lpCallersCallerAddress );
  1374. m_hFile = hFile;
  1375. m_dwFlags = dwFlags;
  1376. memset( m_rgubFileName, 0, sizeof(m_rgubFileName) );
  1377. if ( hModule != NULL && lpLibFileName != NULL )
  1378. {
  1379. // record as many of the tail bytes as will fit in m_rgubFileName
  1380. size_t cubLibFileName = strlen( lpLibFileName );
  1381. size_t cubToCopy = cubLibFileName;
  1382. size_t nOffset = 0;
  1383. if ( cubToCopy > sizeof(m_rgubFileName) )
  1384. {
  1385. nOffset = cubToCopy - sizeof(m_rgubFileName);
  1386. cubToCopy = sizeof(m_rgubFileName);
  1387. }
  1388. memcpy( m_rgubFileName, ((uint8 *)lpLibFileName) + nOffset, cubToCopy );
  1389. }
  1390. }
  1391. #endif // DEBUG_ENABLE_DETOUR_RECORDING
  1392. #pragma warning( pop )