Counter-Strike: Global Offensive Source Code


// mach_override.c semver:1.2.0
// Copyright (c) 2003-2012 Jonathan 'Wolf' Rentzsch: http://rentzsch.com
// Some rights reserved: http://opensource.org/licenses/mit
// https://github.com/rentzsch/mach_override

#include "mach_override.h"

#include <mach-o/dyld.h>
#include <mach/mach_host.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <sys/mman.h>

#include <libkern/OSAtomic.h>
#include <CoreServices/CoreServices.h>

/**************************
*
* Constants
*
**************************/
#pragma mark -
#pragma mark (Constants)

#define kPageSize 4096

#if defined(__ppc__) || defined(__POWERPC__)

long kIslandTemplate[] = {
    0x9001FFFC, // stw r0,-4(SP)
    0x3C00DEAD, // lis r0,0xDEAD
    0x6000BEEF, // ori r0,r0,0xBEEF
    0x7C0903A6, // mtctr r0
    0x8001FFFC, // lwz r0,-4(SP)
    0x60000000, // nop ; optionally replaced
    0x4E800420  // bctr
};

#define kAddressHi 3
#define kAddressLo 5
#define kInstructionHi 10
#define kInstructionLo 11

#elif defined(__i386__)
#define kOriginalInstructionsSize 16
// On X86 we might need to insert an add with a 32-bit immediate after the
// original instructions.
#define kMaxFixupSizeIncrease 5

unsigned char kIslandTemplate[] = {
    // kOriginalInstructionsSize nop instructions so that we
    // have enough space to host the original instructions
    0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
    0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
    // Now the real jump instruction
    0xE9, 0xEF, 0xBE, 0xAD, 0xDE
};

#define kInstructions 0
#define kJumpAddress kInstructions + kOriginalInstructionsSize + 1
#elif defined(__x86_64__)

#define kOriginalInstructionsSize 32
// On X86-64 we never need to insert a new instruction.
#define kMaxFixupSizeIncrease 0

#define kJumpAddress kOriginalInstructionsSize + 6

unsigned char kIslandTemplate[] = {
    // kOriginalInstructionsSize nop instructions so that we
    // have enough space to host the original instructions
    0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
    0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
    0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
    0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
    // Now the real jump instruction
    0xFF, 0x25, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00
};
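// (In the template above, the trailing 14 bytes are an absolute indirect jump:
// "FF 25 00 00 00 00" encodes jmp *0(%rip), and the eight zero bytes that follow
// hold the 64-bit target address that setBranchIslandTarget_i386 patches in.)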
#endif

/**************************
*
* Data Types
*
**************************/
#pragma mark -
#pragma mark (Data Types)

typedef struct {
    char instructions[sizeof(kIslandTemplate)];
} BranchIsland;

/**************************
*
* Funky Protos
*
**************************/
#pragma mark -
#pragma mark (Funky Protos)

static mach_error_t
allocateBranchIsland(
        BranchIsland **island,
        void *originalFunctionAddress);

mach_error_t
freeBranchIsland(
        BranchIsland *island );

#if defined(__ppc__) || defined(__POWERPC__)
mach_error_t
setBranchIslandTarget(
        BranchIsland *island,
        const void *branchTo,
        long instruction );
#endif
#if defined(__i386__) || defined(__x86_64__)
mach_error_t
setBranchIslandTarget_i386(
        BranchIsland *island,
        const void *branchTo,
        char* instructions );

void
atomic_mov64(
        uint64_t *targetAddress,
        uint64_t value );

static Boolean
eatKnownInstructions(
        unsigned char *code,
        uint64_t *newInstruction,
        int *howManyEaten,
        char *originalInstructions,
        int *originalInstructionCount,
        uint8_t *originalInstructionSizes );

static void
fixupInstructions(
        uint32_t offset,
        void *instructionsToFix,
        int instructionCount,
        uint8_t *instructionSizes );
#endif
/*******************************************************************************
*
* Interface
*
*******************************************************************************/
#pragma mark -
#pragma mark (Interface)

#if defined(__i386__) || defined(__x86_64__)
mach_error_t makeIslandExecutable(void *address) {
    mach_error_t err = err_none;
    uintptr_t page = (uintptr_t)address & ~(uintptr_t)(kPageSize-1);
    int e = err_none;
    e |= mprotect((void *)page, kPageSize, PROT_EXEC | PROT_READ | PROT_WRITE);
    e |= msync((void *)page, kPageSize, MS_INVALIDATE );
    if (e) {
        err = err_cannot_override;
    }
    return err;
}
#endif
mach_error_t
mach_override_ptr(
    void *originalFunctionAddress,
    const void *overrideFunctionAddress,
    void **originalFunctionReentryIsland )
{
    assert( originalFunctionAddress );
    assert( overrideFunctionAddress );

    // this addresses overriding such functions as AudioOutputUnitStart()
    // test with modified DefaultOutputUnit project
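    // (The loops below follow any leading "jmp *<addr>" stubs -- as seen for
    // functions like AudioOutputUnitStart() -- so the patch is applied to the
    // real implementation rather than to the stub.)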
#if defined(__x86_64__)
    for (;;) {
        if (*(uint16_t*)originalFunctionAddress == 0x25FF) // jmp qword near [rip+0x????????]
            originalFunctionAddress = *(void**)((char*)originalFunctionAddress + 6 + *(int32_t *)((uint16_t*)originalFunctionAddress + 1));
        else
            break;
    }
#elif defined(__i386__)
    for (;;) {
        if (*(uint16_t*)originalFunctionAddress == 0x25FF) // jmp *0x????????
            originalFunctionAddress = **(void***)((uint16_t*)originalFunctionAddress + 1);
        else
            break;
    }
#endif
    long *originalFunctionPtr = (long*) originalFunctionAddress;
    mach_error_t err = err_none;

#if defined(__ppc__) || defined(__POWERPC__)
    // Ensure first instruction isn't 'mfctr'.
    #define kMFCTRMask 0xfc1fffff
    #define kMFCTRInstruction 0x7c0903a6

    long originalInstruction = *originalFunctionPtr;
    if( !err && ((originalInstruction & kMFCTRMask) == kMFCTRInstruction) )
        err = err_cannot_override;
#elif defined(__i386__) || defined(__x86_64__)
    int eatenCount = 0;
    int originalInstructionCount = 0;
    char originalInstructions[kOriginalInstructionsSize];
    uint8_t originalInstructionSizes[kOriginalInstructionsSize];
    uint64_t jumpRelativeInstruction = 0; // JMP

    Boolean overridePossible = eatKnownInstructions ((unsigned char *)originalFunctionPtr,
                                    &jumpRelativeInstruction, &eatenCount,
                                    originalInstructions, &originalInstructionCount,
                                    originalInstructionSizes );
    if (eatenCount + kMaxFixupSizeIncrease > kOriginalInstructionsSize) {
        //printf ("Too many instructions eaten\n");
        overridePossible = false;
    }
    if (!overridePossible) err = err_cannot_override;
    if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
#endif
    // Make the original function implementation writable.
    if( !err ) {
        err = vm_protect( mach_task_self(),
                (vm_address_t) originalFunctionPtr, 8, false,
                (VM_PROT_ALL | VM_PROT_COPY) );
        if( err )
            err = vm_protect( mach_task_self(),
                    (vm_address_t) originalFunctionPtr, 8, false,
                    (VM_PROT_DEFAULT | VM_PROT_COPY) );
    }
    if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);

    // Allocate and target the escape island to the overriding function.
    BranchIsland *escapeIsland = NULL;
    if( !err )
        err = allocateBranchIsland( &escapeIsland, originalFunctionAddress );
    if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);

#if defined(__ppc__) || defined(__POWERPC__)
    if( !err )
        err = setBranchIslandTarget( escapeIsland, overrideFunctionAddress, 0 );

    // Build the branch absolute instruction to the escape island.
    long branchAbsoluteInstruction = 0; // Set to 0 just to silence warning.
    if( !err ) {
        long escapeIslandAddress = ((long) escapeIsland) & 0x3FFFFFF;
        branchAbsoluteInstruction = 0x48000002 | escapeIslandAddress;
    }
#elif defined(__i386__) || defined(__x86_64__)
    if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);

    if( !err )
        err = setBranchIslandTarget_i386( escapeIsland, overrideFunctionAddress, 0 );

    if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__);
    // Build the jump relative instruction to the escape island
#endif

#if defined(__i386__) || defined(__x86_64__)
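    // (The 64-bit value assembled below is what will overwrite the start of the
    // function: "E9 <rel32>" -- a 5-byte jmp to the escape island -- followed by
    // the original bytes 6-8, which eatKnownInstructions preserved, so the single
    // atomic 8-byte store only effectively changes the first 5 bytes.)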
    if (!err) {
        uint32_t addressOffset = ((char*)escapeIsland - (char*)originalFunctionPtr - 5);
        addressOffset = OSSwapInt32(addressOffset);

        jumpRelativeInstruction |= 0xE900000000000000LL;
        jumpRelativeInstruction |= ((uint64_t)addressOffset & 0xffffffff) << 24;
        jumpRelativeInstruction = OSSwapInt64(jumpRelativeInstruction);
    }
#endif

    // Optionally allocate & return the reentry island. This may contain relocated
    // jmp instructions and so has all the same addressing reachability requirements
    // the escape island has to the original function, except the escape island is
    // technically our original function.
    BranchIsland *reentryIsland = NULL;
    if( !err && originalFunctionReentryIsland ) {
        err = allocateBranchIsland( &reentryIsland, escapeIsland);
        if( !err )
            *originalFunctionReentryIsland = reentryIsland;
    }
#if defined(__ppc__) || defined(__POWERPC__)
    // Atomically:
    //  o If the reentry island was allocated:
    //    o Insert the original instruction into the reentry island.
    //    o Target the reentry island at the 2nd instruction of the
    //      original function.
    //  o Replace the original instruction with the branch absolute.
    if( !err ) {
        int escapeIslandEngaged = false;
        do {
            if( reentryIsland )
                err = setBranchIslandTarget( reentryIsland,
                        (void*) (originalFunctionPtr+1), originalInstruction );
            if( !err ) {
                escapeIslandEngaged = CompareAndSwap( originalInstruction,
                                        branchAbsoluteInstruction,
                                        (UInt32*)originalFunctionPtr );
                if( !escapeIslandEngaged ) {
                    // Someone replaced the instruction out from under us,
                    // re-read the instruction, make sure it's still not
                    // 'mfctr' and try again.
                    originalInstruction = *originalFunctionPtr;
                    if( (originalInstruction & kMFCTRMask) == kMFCTRInstruction)
                        err = err_cannot_override;
                }
            }
        } while( !err && !escapeIslandEngaged );
    }
#elif defined(__i386__) || defined(__x86_64__)
    // Atomically:
    //  o If the reentry island was allocated:
    //    o Insert the original instructions into the reentry island.
    //    o Target the reentry island at the first non-replaced
    //      instruction of the original function.
    //  o Replace the original first instructions with the jump relative.
    //
    // Note that on i386, we do not support someone else changing the code under our feet
    if ( !err ) {
        uint32_t offset = (uintptr_t)originalFunctionPtr - (uintptr_t)reentryIsland;
        fixupInstructions(offset, originalInstructions,
                    originalInstructionCount, originalInstructionSizes );

        if( reentryIsland )
            err = setBranchIslandTarget_i386( reentryIsland,
                    (void*) ((char *)originalFunctionPtr+eatenCount), originalInstructions );
        // try making islands executable before planting the jmp
#if defined(__x86_64__) || defined(__i386__)
        if( !err )
            err = makeIslandExecutable(escapeIsland);
        if( !err && reentryIsland )
            err = makeIslandExecutable(reentryIsland);
#endif
        if ( !err )
            atomic_mov64((uint64_t *)originalFunctionPtr, jumpRelativeInstruction);
    }
#endif

    // Clean up on error.
    if( err ) {
        if( reentryIsland )
            freeBranchIsland( reentryIsland );
        if( escapeIsland )
            freeBranchIsland( escapeIsland );
    }

    return err;
}
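/* A minimal usage sketch for mach_override_ptr (illustrative only; the target
   function and the names below are hypothetical, not part of this file):

       static int (*gOrigGetValue)(int) = NULL;   // filled with the reentry island

       static int my_get_value(int x) {           // the override
           return gOrigGetValue(x) + 1;           // call through to the original
       }

       // somewhere during startup:
       //   mach_error_t err = mach_override_ptr( (void*)GetValue,
       //                                         (void*)my_get_value,
       //                                         (void**)&gOrigGetValue );
       //   if (err) { ...handle failure... }
*/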
/*******************************************************************************
*
* Implementation
*
*******************************************************************************/
#pragma mark -
#pragma mark (Implementation)
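// Returns true when `to` is reachable from `from` with a 5-byte "jmp rel32",
// i.e. when the displacement (to - from - 5) fits in a signed 32-bit field.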
static bool jump_in_range(intptr_t from, intptr_t to) {
    intptr_t field_value = to - from - 5;
    int32_t field_value_32 = field_value;
    return field_value == field_value_32;
}
/*******************************************************************************
    Implementation: Allocates memory for a branch island.

    @param island  <-  The allocated island.
    @result        <-  mach_error_t
***************************************************************************/
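// (The island must stay within rel32 range of the original function -- see
// jump_in_range above -- so allocateBranchIslandAux walks the neighbouring VM
// regions outward from the function, trying to vm_allocate a page just before
// or just after each region, until an allocation succeeds or the candidate
// address drifts out of range.)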
static mach_error_t
allocateBranchIslandAux(
        BranchIsland **island,
        void *originalFunctionAddress,
        bool forward)
{
    assert( island );
    assert( sizeof( BranchIsland ) <= kPageSize );

    vm_map_t task_self = mach_task_self();
    vm_address_t original_address = (vm_address_t) originalFunctionAddress;
    vm_address_t address = original_address;

    for (;;) {
        vm_size_t vmsize = 0;
        memory_object_name_t object = 0;
        kern_return_t kr = 0;
        vm_region_flavor_t flavor = VM_REGION_BASIC_INFO;
        // Find the region the address is in.
#if __WORDSIZE == 32
        vm_region_basic_info_data_t info;
        mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
        kr = vm_region(task_self, &address, &vmsize, flavor,
                (vm_region_info_t)&info, &info_count, &object);
#else
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
        kr = vm_region_64(task_self, &address, &vmsize, flavor,
                (vm_region_info_t)&info, &info_count, &object);
#endif
        if (kr != KERN_SUCCESS)
            return kr;
        assert((address & (kPageSize - 1)) == 0);

        // Go to the first page before or after this region
        vm_address_t new_address = forward ? address + vmsize : address - kPageSize;
#if __WORDSIZE == 64
        if(!jump_in_range(original_address, new_address))
            break;
#endif
        address = new_address;

        // Try to allocate this page.
        kr = vm_allocate(task_self, &address, kPageSize, 0);
        if (kr == KERN_SUCCESS) {
            *island = (BranchIsland*) address;
            return err_none;
        }
        if (kr != KERN_NO_SPACE)
            return kr;
    }
    return KERN_NO_SPACE;
}

static mach_error_t
allocateBranchIsland(
        BranchIsland **island,
        void *originalFunctionAddress)
{
    mach_error_t err =
        allocateBranchIslandAux(island, originalFunctionAddress, true);
    if (!err)
        return err;
    return allocateBranchIslandAux(island, originalFunctionAddress, false);
}
/*******************************************************************************
    Implementation: Deallocates memory for a branch island.

    @param island  ->  The island to deallocate.
    @result        <-  mach_error_t
***************************************************************************/
mach_error_t
freeBranchIsland(
    BranchIsland *island )
{
    assert( island );
    assert( (*(long*)&island->instructions[0]) == kIslandTemplate[0] );
    assert( sizeof( BranchIsland ) <= kPageSize );

    return vm_deallocate( mach_task_self(), (vm_address_t) island,
            kPageSize );
}

/*******************************************************************************
    Implementation: Sets the branch island's target, with an optional
    instruction.

    @param island       ->  The branch island to insert target into.
    @param branchTo     ->  The address of the target.
    @param instruction  ->  Optional instruction to execute prior to branch. Set
                            to zero for nop.
    @result             <-  mach_error_t
***************************************************************************/
#if defined(__ppc__) || defined(__POWERPC__)
mach_error_t
setBranchIslandTarget(
    BranchIsland *island,
    const void *branchTo,
    long instruction )
{
    // Copy over the template code.
    bcopy( kIslandTemplate, island->instructions, sizeof( kIslandTemplate ) );

    // Fill in the address.
    ((short*)island->instructions)[kAddressLo] = ((long) branchTo) & 0x0000FFFF;
    ((short*)island->instructions)[kAddressHi]
        = (((long) branchTo) >> 16) & 0x0000FFFF;

    // Fill in the (optional) instruction.
    if( instruction != 0 ) {
        ((short*)island->instructions)[kInstructionLo]
            = instruction & 0x0000FFFF;
        ((short*)island->instructions)[kInstructionHi]
            = (instruction >> 16) & 0x0000FFFF;
    }

    //MakeDataExecutable( island->instructions, sizeof( kIslandTemplate ) );
    msync( island->instructions, sizeof( kIslandTemplate ), MS_INVALIDATE );

    return err_none;
}
#endif
#if defined(__i386__)
mach_error_t
setBranchIslandTarget_i386(
    BranchIsland *island,
    const void *branchTo,
    char* instructions )
{
    // Copy over the template code.
    bcopy( kIslandTemplate, island->instructions, sizeof( kIslandTemplate ) );

    // copy original instructions
    if (instructions) {
        bcopy (instructions, island->instructions + kInstructions, kOriginalInstructionsSize);
    }

    // Fill in the address.
    int32_t addressOffset = (char *)branchTo - (island->instructions + kJumpAddress + 4);
    *((int32_t *)(island->instructions + kJumpAddress)) = addressOffset;

    msync( island->instructions, sizeof( kIslandTemplate ), MS_INVALIDATE );
    return err_none;
}

#elif defined(__x86_64__)
mach_error_t
setBranchIslandTarget_i386(
    BranchIsland *island,
    const void *branchTo,
    char* instructions )
{
    // Copy over the template code.
    bcopy( kIslandTemplate, island->instructions, sizeof( kIslandTemplate ) );

    // Copy original instructions.
    if (instructions) {
        bcopy (instructions, island->instructions, kOriginalInstructionsSize);
    }

    // Fill in the address.
    *((uint64_t *)(island->instructions + kJumpAddress)) = (uint64_t)branchTo;
    msync( island->instructions, sizeof( kIslandTemplate ), MS_INVALIDATE );

    return err_none;
}
#endif
#if defined(__i386__) || defined(__x86_64__)
// simplistic instruction matching
typedef struct {
    unsigned int length;          // max 15
    unsigned char mask[15];       // sequence of bytes in memory order
    unsigned char constraint[15]; // sequence of bytes in memory order
} AsmInstructionMatch;
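// (A candidate byte sequence matches an entry when, for every i < length,
// (code[i] & mask[i]) == constraint[i] -- see codeMatchesInstruction below.
// For example, the entry { 0x3, {0xFF, 0xFF, 0x00}, {0x83, 0xEC, 0x00} } accepts
// "sub $imm8, %esp" with any 8-bit immediate, because the third byte is unmasked.)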
#if defined(__i386__)
static AsmInstructionMatch possibleInstructions[] = {
    { 0x5, {0xFF, 0x00, 0x00, 0x00, 0x00}, {0xE9, 0x00, 0x00, 0x00, 0x00} }, // jmp 0x????????
    { 0x5, {0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, {0x55, 0x89, 0xe5, 0xc9, 0xc3} }, // push %ebp; mov %esp,%ebp; leave; ret
    { 0x1, {0xFF}, {0x90} }, // nop
    { 0x1, {0xFF}, {0x55} }, // push %ebp
    { 0x2, {0xFF, 0xFF}, {0x89, 0xE5} }, // mov %esp,%ebp
    { 0x1, {0xFF}, {0x53} }, // push %ebx
    { 0x3, {0xFF, 0xFF, 0x00}, {0x83, 0xEC, 0x00} }, // sub 0x??, %esp
    { 0x6, {0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00}, {0x81, 0xEC, 0x00, 0x00, 0x00, 0x00} }, // sub 0x??, %esp with 32bit immediate
    { 0x1, {0xFF}, {0x57} }, // push %edi
    { 0x1, {0xFF}, {0x56} }, // push %esi
    { 0x2, {0xFF, 0xFF}, {0x31, 0xC0} }, // xor %eax, %eax
    { 0x3, {0xFF, 0x4F, 0x00}, {0x8B, 0x45, 0x00} }, // mov $imm(%ebp), %reg
    { 0x3, {0xFF, 0x4C, 0x00}, {0x8B, 0x40, 0x00} }, // mov $imm(%eax-%edx), %reg
    { 0x4, {0xFF, 0xFF, 0xFF, 0x00}, {0x8B, 0x4C, 0x24, 0x00} }, // mov $imm(%esp), %ecx
    { 0x5, {0xFF, 0x00, 0x00, 0x00, 0x00}, {0xB8, 0x00, 0x00, 0x00, 0x00} }, // mov $imm, %eax
    { 0x6, {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, {0xE8, 0x00, 0x00, 0x00, 0x00, 0x58} }, // call $imm; pop %eax
    { 0x0 }
};
#elif defined(__x86_64__)
static AsmInstructionMatch possibleInstructions[] = {
    { 0x5, {0xFF, 0x00, 0x00, 0x00, 0x00}, {0xE9, 0x00, 0x00, 0x00, 0x00} }, // jmp 0x????????
    { 0x1, {0xFF}, {0x90} }, // nop
    { 0x1, {0xF8}, {0x50} }, // push %rX
    { 0x3, {0xFF, 0xFF, 0xFF}, {0x48, 0x89, 0xE5} }, // mov %rsp,%rbp
    { 0x4, {0xFF, 0xFF, 0xFF, 0x00}, {0x48, 0x83, 0xEC, 0x00} }, // sub 0x??, %rsp
    { 0x4, {0xFB, 0xFF, 0x00, 0x00}, {0x48, 0x89, 0x00, 0x00} }, // move onto rbp
    { 0x4, {0xFF, 0xFF, 0xFF, 0xFF}, {0x40, 0x0f, 0xbe, 0xce} }, // movsbl %sil, %ecx
    { 0x2, {0xFF, 0x00}, {0x41, 0x00} }, // push %rXX
    { 0x2, {0xFF, 0x00}, {0x85, 0x00} }, // test %rX,%rX
    { 0x5, {0xF8, 0x00, 0x00, 0x00, 0x00}, {0xB8, 0x00, 0x00, 0x00, 0x00} }, // mov $imm, %reg
    { 0x3, {0xFF, 0xFF, 0x00}, {0xFF, 0x77, 0x00} }, // pushq $imm(%rdi)
    { 0x2, {0xFF, 0xFF}, {0x31, 0xC0} }, // xor %eax, %eax
    { 0x2, {0xFF, 0xFF}, {0x89, 0xF8} }, // mov %edi, %eax
    // leaq offset(%rip),%rax
    { 0x7, {0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00}, {0x48, 0x8d, 0x05, 0x00, 0x00, 0x00, 0x00} },
    { 0x0 }
};
#endif
static Boolean codeMatchesInstruction(unsigned char *code, AsmInstructionMatch* instruction)
{
    Boolean match = true;

    size_t i;
    for (i = 0; i < instruction->length; i++) {
        unsigned char mask = instruction->mask[i];
        unsigned char constraint = instruction->constraint[i];
        unsigned char codeValue = code[i];

        match = ((codeValue & mask) == constraint);
        if (!match) break;
    }

    return match;
}

#if defined(__i386__) || defined(__x86_64__)
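// Consumes whole instructions (matched against possibleInstructions above) starting
// at `code` until at least 5 bytes -- the size of the relative jmp to be planted --
// have been covered. The consumed bytes are copied into originalInstructions (padded
// with NOPs), their individual sizes are recorded in originalInstructionSizes, and
// the last 3 bytes of the function's first 8 bytes are preserved in *newInstruction
// so that the later atomic 64-bit store only rewrites the first 5 bytes. Returns
// false if an unknown instruction is encountered or the eaten bytes don't fit in
// an island.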
static Boolean
eatKnownInstructions(
    unsigned char *code,
    uint64_t *newInstruction,
    int *howManyEaten,
    char *originalInstructions,
    int *originalInstructionCount,
    uint8_t *originalInstructionSizes )
{
    Boolean allInstructionsKnown = true;
    int totalEaten = 0;
    unsigned char* ptr = code;
    int remainsToEat = 5; // a JMP instruction takes 5 bytes
    int instructionIndex = 0;

    if (howManyEaten) *howManyEaten = 0;
    if (originalInstructionCount) *originalInstructionCount = 0;
    while (remainsToEat > 0) {
        Boolean curInstructionKnown = false;

        // See if instruction matches one we know
        AsmInstructionMatch* curInstr = possibleInstructions;
        do {
            if ((curInstructionKnown = codeMatchesInstruction(ptr, curInstr))) break;
            curInstr++;
        } while (curInstr->length > 0);

        // If all instruction matches failed, we don't know the current instruction, so stop here.
        if (!curInstructionKnown) {
            allInstructionsKnown = false;
            fprintf(stderr, "mach_override: some instructions unknown! Need to update mach_override.c\n");
            break;
        }

        // At this point, we've matched curInstr
        int eaten = curInstr->length;
        ptr += eaten;
        remainsToEat -= eaten;
        totalEaten += eaten;

        if (originalInstructionSizes) originalInstructionSizes[instructionIndex] = eaten;
        instructionIndex += 1;
        if (originalInstructionCount) *originalInstructionCount = instructionIndex;
    }

    if (howManyEaten) *howManyEaten = totalEaten;

    if (originalInstructions) {
        Boolean enoughSpaceForOriginalInstructions = (totalEaten < kOriginalInstructionsSize);

        if (enoughSpaceForOriginalInstructions) {
            memset(originalInstructions, 0x90 /* NOP */, kOriginalInstructionsSize); // fill instructions with NOP
            bcopy(code, originalInstructions, totalEaten);
        } else {
            // printf ("Not enough space in island to store original instructions. Adapt the island definition and kOriginalInstructionsSize\n");
            return false;
        }
    }

    if (allInstructionsKnown) {
        // Save the last 3 bytes of the first 64 bits of code we'll replace.
        uint64_t currentFirst64BitsOfCode = *((uint64_t *)code);
        currentFirst64BitsOfCode = OSSwapInt64(currentFirst64BitsOfCode); // back to memory representation
        currentFirst64BitsOfCode &= 0x0000000000FFFFFFLL;

        // Keep only the last 3 instruction bytes; the first 5 will be replaced by the JMP instruction.
        *newInstruction &= 0xFFFFFFFFFF000000LL; // clear last 3 bytes
        *newInstruction |= (currentFirst64BitsOfCode & 0x0000000000FFFFFFLL); // set last 3 bytes
    }

    return allInstructionsKnown;
}
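// Adjusts the instructions that were relocated into the reentry island so that
// pc-relative references still resolve: `offset` is (original function address -
// reentry island address) and is added to the rel32 field of a relocated
// "jmp rel32" (0xE9) and to the displacement of "leaq offset(%rip),%rax".
// For the i386 "call; pop %eax" idiom (0xE8 ... 0x58), an "add $offset, %eax"
// (0x05 imm32) is appended instead, which is why kMaxFixupSizeIncrease is 5 on i386.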
static void
fixupInstructions(
    uint32_t offset,
    void *instructionsToFix,
    int instructionCount,
    uint8_t *instructionSizes )
{
    // The start of "leaq offset(%rip),%rax"
    static const uint8_t LeaqHeader[] = {0x48, 0x8d, 0x05};

    int index;
    for (index = 0; index < instructionCount; index += 1)
    {
        if (*(uint8_t*)instructionsToFix == 0xE9) // 32-bit jump relative
        {
            uint32_t *jumpOffsetPtr = (uint32_t*)((uintptr_t)instructionsToFix + 1);
            *jumpOffsetPtr += offset;
        }

        // leaq offset(%rip),%rax
        if (memcmp(instructionsToFix, LeaqHeader, 3) == 0) {
            uint32_t *LeaqOffsetPtr = (uint32_t*)((uintptr_t)instructionsToFix + 3);
            *LeaqOffsetPtr += offset;
        }

        // 32-bit call relative to the next addr; pop %eax
        if (*(uint8_t*)instructionsToFix == 0xE8)
        {
            // Just this call is larger than the jump we use, so we
            // know this is the last instruction.
            assert(index == (instructionCount - 1));
            assert(instructionSizes[index] == 6);

            // Insert "addl $offset, %eax" in the end so that when
            // we jump to the rest of the function %eax has the
            // value it would have if eip had been pushed by the
            // call in its original position.
            uint8_t *op = (uint8_t*)instructionsToFix;
            op += 6;
            *op = 0x05; // addl
            uint32_t *addImmPtr = (uint32_t*)(op + 1);
            *addImmPtr = offset;
        }
        instructionsToFix = (void*)((uintptr_t)instructionsToFix + instructionSizes[index]);
    }
}
#endif

#if defined(__i386__)
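// On i386 a plain 8-byte store is not a single atomic operation, so loop on a
// 64-bit compare-and-swap until the new first eight bytes of the function are
// published in one shot; the x86-64 variant below relies on a plain 64-bit store.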
void atomic_mov64(
    uint64_t *targetAddress,
    uint64_t value)
{
    while (true)
    {
        uint64_t old_value = *targetAddress;
        if (OSAtomicCompareAndSwap64(old_value, value, (int64_t*)targetAddress)) return;
    }
}
#elif defined(__x86_64__)
void atomic_mov64(
    uint64_t *targetAddress,
    uint64_t value )
{
    *targetAddress = value;
}
#endif
#endif