Leaked source code of Windows Server 2003

  1. /*++
  2. Copyright (c) 1996-2000 Microsoft Corporation
  3. Module Name:
  4. Process.c
  5. Abstract:
  6. This module contains the entrypoints for processing instructions.
  7. Author:
  8. Barry Bond (barrybo) creation-date 1-Apr-1996
  9. Revision History:
  10. 24-Aug-1999 [askhalid] copied from the 32-bit wx86 directory and made it work for 64-bit.
  11. 20-Sept-1999 [barrybo] added FRAG2REF(LockCmpXchg8bFrag32, ULONGLONG)
  12. --*/
  13. #include <nt.h>
  14. #include <ntrtl.h>
  15. #include <nturtl.h>
  16. #include <windows.h>
  17. #define _WX86CPUAPI_
  18. #include <wx86.h>
  19. #include <wx86nt.h>
  20. #include <wx86cpu.h>
  21. #include <cpuassrt.h>
  22. #include <config.h>
  23. #include <instr.h>
  24. #include <threadst.h>
  25. #include <frag.h>
  26. #include <compiler.h>
  27. #include <ptchstrc.h>
  28. #include <codeseq.h>
  29. #include <findpc.h>
  30. #include <tc.h>
  31. #include <opt.h>
  32. #include <atomic.h>
  33. #include <cpunotif.h>
  34. #define _codegen_
  35. #if _PPC_
  36. #include <soppc.h>
  37. #elif _MIPS_
  38. #include <somips.h>
  39. #elif _ALPHA_
  40. #include <soalpha.h>
  41. ENTRYPOINT EntrypointECU;
  42. #endif
  43. #include <process.h>
  44. ASSERTNAME;
  45. #define MAX_OPERAND_SIZE 32 // allow up to 32 instructions per operand
  46. DWORD RegCache[NUM_CACHE_REGS]; // One entry for each cached register
  47. DWORD LastRegDeleted;
  48. DWORD Arg1Contents; // GP_ number of x86 reg held in A1, or NO_REG
  49. DWORD Arg2Contents; // GP_ number of x86 reg held in A2, or NO_REG
  50. typedef enum _Operand_Op {
  51. #if _ALPHA_
  52. OP_MovRegToReg8B,
  53. #endif
  54. OP_MovToMem32B,
  55. OP_MovToMem32W,
  56. OP_MovToMem32D,
  57. OP_MovToMem16B,
  58. OP_MovToMem16W,
  59. OP_MovToMem16D,
  60. OP_MovToMem8B,
  61. OP_MovToMem8D,
  62. OP_MovRegToReg32,
  63. OP_MovRegToReg16,
  64. OP_MovRegToReg8
  65. } OPERAND_OP;
  66. CONST PPLACEOPERANDFN OpFragments[] = {
  67. #if _ALPHA_
  68. GenOperandMovRegToReg8B,
  69. #endif
  70. GenOperandMovToMem32B,
  71. GenOperandMovToMem32B,
  72. GenOperandMovToMem32D,
  73. GenOperandMovToMem16B,
  74. GenOperandMovToMem16W,
  75. #if _ALPHA_
  76. GenOperandMovToMem16D,
  77. GenOperandMovToMem8B,
  78. GenOperandMovToMem8D,
  79. #else
  80. GenOperandMovToMem16W,
  81. GenOperandMovToMem8B,
  82. GenOperandMovToMem8B,
  83. #endif
  84. GenOperandMovRegToReg32,
  85. GenOperandMovRegToReg16,
  86. GenOperandMovRegToReg8
  87. };
  88. VOID
  89. UpdateEntrypointNativeInfo(
  90. PCHAR NativeEnd
  91. );
  92. ULONG
  93. PlaceExceptionData(
  94. PCHAR Location,
  95. DWORD cEntryPoints
  96. );
  97. ULONG
  98. PlaceNativeCode(
  99. PCHAR CodeLocation
  100. );
  101. VOID
  102. DetermineOperandAlignment(
  103. BOOL EbpAligned,
  104. POPERAND Operand
  105. );
  106. ULONG
  107. DetermineInstructionAlignment(
  108. PINSTRUCTION Instruction
  109. );
  110. ULONG
  111. PlaceOperand(
  112. ULONG OperandNumber,
  113. POPERAND Operand,
  114. PINSTRUCTION Instruction,
  115. PCHAR Location
  116. );
  117. PCHAR
  118. InterleaveInstructions(
  119. OUT PCHAR CodeLocation,
  120. IN PCHAR Op1Code,
  121. IN ULONG Op1Count,
  122. IN PCHAR Op2Code,
  123. IN ULONG Op2Count
  124. );
  125. ULONG
  126. LookupRegInCache(
  127. ULONG Reg
  128. )
  129. /*++
  130. Routine Description:
  131. Determines if an x86 register is cached in a RISC register or not, and
  132. if so, which RISC register contains the x86 register.
  133. Arguments:
  134. Reg - one of the GP_ constants or NO_REG.
  135. Return Value:
  136. Offset into RegCache[] array if the x86 register is cached in a RISC
  137. register, or NO_REG if the x86 register is not cached.
  138. --*/
  139. {
  140. int RegCacheNum;
  141. //
  142. // Map the register number into one of the 32-bit x86 regs.
  143. // i.e. GP_AH, GP_AL, and GP_AX all map to GP_EAX.
  144. //
  145. if (Reg == NO_REG) {
  146. return NO_REG;
  147. } else if (Reg >= GP_AH) {
  148. Reg -= GP_AH;
  149. } else if (Reg >= GP_AL) {
  150. Reg -= GP_AL;
  151. } else if (Reg >= GP_AX) {
  152. Reg -= GP_AX;
  153. } else if (Reg >= REG_ES) {
  154. return NO_REG;
  155. }
  156. //
  157. // Search the register cache to see if the 32-bit x86 register
  158. // is loaded into a RISC register already.
  159. //
  160. for (RegCacheNum=0; RegCacheNum<NUM_CACHE_REGS; ++RegCacheNum) {
  161. if (RegCache[RegCacheNum] == Reg) {
  162. return RegCacheNum;
  163. }
  164. }
  165. return NO_REG;
  166. }
  167. VOID SetArgContents(
  168. ULONG OperandNumber,
  169. ULONG Reg
  170. )
  171. /*++
  172. Routine Description:
  173. Updates information about what argument registers are known to
  174. contain x86 register values.
  175. Arguments:
  176. OperandNumber - Number of ArgReg to update
  177. (0 means no ArgReg caches the x86 register)
  178. Reg - New contents of AREG_NP(OperandNumber)
  179. (NO_REG means the ArgReg does not cache an x86 register)
  180. Return Value:
  181. None.
  182. --*/
  183. {
  184. ULONG Reg2;
  185. ULONG Reg3;
  186. ULONG Reg4;
  187. //
  188. // If an 8- or 16-bit register is known to be in a particular
  189. // argreg, then older copies of the 32-bit register are invalid.
  190. // ie. if a fragment calls SetArgContents(1, GP_AH) and Arg2Contents
  191. // is GP_AX, then Arg2Contents must be invalidated.
  192. //
  193. if (Reg >= GP_AH) {
  194. //
  195. // For a hi-8 register, invalidate the 16- and 32-bit versions
  196. //
  197. Reg2 = GP_AX + Reg-GP_AH;
  198. Reg3 = GP_EAX + Reg-GP_AH;
  199. Reg4 = NO_REG;
  200. } else if (Reg >= GP_AL) {
  201. //
  202. // For a low-8 register, invalidate the 16-bit and 32-bit versions
  203. //
  204. Reg2 = GP_AX + Reg-GP_AL;
  205. Reg3 = GP_EAX + Reg-GP_AL;
  206. Reg4 = NO_REG;
  207. } else if (Reg >= GP_AX) {
  208. //
  209. // For a 16-bit register, invalidate the lo-8, high-8 and 32-bit versions
  210. //
  211. Reg2 = GP_EAX + Reg-GP_AX;
  212. Reg3 = GP_AH + Reg-GP_AX;
  213. Reg4 = GP_AL + Reg-GP_AX;
  214. } else {
  215. //
  216. // For a 32-bit register, invalidate the low-8, high-8, and 16-bit versions
  217. //
  218. Reg2 = GP_AH + Reg-GP_EAX;
  219. Reg3 = GP_AL + Reg-GP_EAX;
  220. Reg4 = GP_AX + Reg-GP_EAX;
  221. }
  222. //
  223. // Assume that all other registers known to hold Reg are invalid, as
  224. // SetArgContents() is called only after a new value is stored from the
  225. // argreg into memory.
  226. //
  227. if (Arg1Contents == Reg || Arg1Contents == Reg2 || Arg1Contents == Reg3 || Arg1Contents == Reg4) {
  228. Arg1Contents = NO_REG;
  229. }
  230. if (Arg2Contents == Reg || Arg2Contents == Reg2 || Arg2Contents == Reg3 || Arg2Contents == Reg4) {
  231. Arg2Contents = NO_REG;
  232. }
  233. if (OperandNumber == 1) {
  234. Arg1Contents = Reg;
  235. } else if (OperandNumber == 2) {
  236. Arg2Contents = Reg;
  237. }
  238. }
  239. ULONG
  240. LoadRegCacheForInstruction(
  241. DWORD RegsToCache,
  242. PCHAR CodeLocation
  243. )
  244. /*++
  245. Routine Description:
  246. Loads x86 registers into RISC registers based on information the
  247. analysis phase placed into RegsToCache and the current contents of
  248. the register cache.
  249. Arguments:
  250. RegsToCache - list of x86 registers which will be referenced frequently
  251. in subsequent instructions
  252. CodeLocation - pointer to place to generate code
  253. Return Value:
  254. Count of bytes of code generated to load x86 registers into the cache.
  255. --*/
  256. {
  257. DWORD i;
  258. int RegCacheNum;
  259. PCHAR Location = CodeLocation;
  260. //
  261. // Iterate over the 8 32-bit x86 general-purpose registers
  262. //
  263. for (i=0; i<REGCOUNT; ++i, RegsToCache >>= REGSHIFT) {
  264. if (RegsToCache & REGMASK) {
  265. //
  266. // There is a register to cache. See if it is already cached.
  267. //
  268. for (RegCacheNum = 0; RegCacheNum<NUM_CACHE_REGS; ++RegCacheNum) {
  269. if (RegCache[RegCacheNum] == i) {
  270. //
  271. // Register is already cached. Nothing to do.
  272. //
  273. goto NextCachedReg;
  274. }
  275. }
  276. //
  277. // The register is not already cached, so cache it.
  278. //
  279. for (RegCacheNum = 0; RegCacheNum<NUM_CACHE_REGS; ++RegCacheNum) {
  280. if (RegCache[RegCacheNum] == NO_REG) {
  281. //
  282. // This slot is empty, so use it.
  283. //
  284. RegCache[RegCacheNum] = i;
  285. //
  286. // Generate code to load the register
  287. //
  288. Location += GenLoadCacheReg(
  289. (PULONG)Location,
  290. NULL,
  291. RegCacheNum
  292. );
  293. goto NextCachedReg;
  294. }
  295. }
  296. //
  297. // There is no free register to cache the value in.
  298. // Select a cached register and use it.
  299. //
  300. LastRegDeleted = (LastRegDeleted+1) % NUM_CACHE_REGS;
  301. RegCache[LastRegDeleted] = i;
  302. //
  303. // Generate code to load the register
  304. //
  305. Location += GenLoadCacheReg(
  306. (PULONG)Location,
  307. NULL,
  308. LastRegDeleted
  309. );
  310. }
  311. NextCachedReg:;
  312. }
  313. return (ULONG) (ULONGLONG)(Location - CodeLocation);
  314. }
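
/*
 * Illustrative sketch (not part of the original module): the packed
 * RegsToCache walk used above, shown standalone. REGCOUNT_EX, REGSHIFT_EX
 * and REGMASK_EX are assumed stand-ins for the real constants from the
 * wx86 headers (here one bit per x86 general-purpose register); the real
 * encoding may differ.
 */
#include <stdio.h>

#define REGCOUNT_EX 8   /* assumed: 8 x86 general-purpose registers */
#define REGSHIFT_EX 1   /* assumed: one bit-field per register */
#define REGMASK_EX  1

static void WalkRegsToCacheSketch(unsigned long RegsToCache)
{
    unsigned long i;

    /* Shift one register field out of the mask per iteration. */
    for (i = 0; i < REGCOUNT_EX; ++i, RegsToCache >>= REGSHIFT_EX) {
        if (RegsToCache & REGMASK_EX) {
            /* This is where LoadRegCacheForInstruction would cache reg i. */
            printf("would cache x86 reg %lu\n", i);
        }
    }
}
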
  315. VOID
  316. ResetRegCache(
  317. VOID
  318. )
  319. /*++
  320. Routine Description:
  321. Invalidates the entire register cache by marking RISC registers as free.
  322. Functionally the same as:
  323. InvalidateRegCacheForInstruction(0xffffffff)
  324. LastRegDeleted = 0;
  325. Arguments:
  326. None.
  327. Return Value:
  328. None.
  329. --*/
  330. {
  331. int CacheRegNum;
  332. for (CacheRegNum = 0; CacheRegNum<NUM_CACHE_REGS; CacheRegNum++) {
  333. RegCache[CacheRegNum] = NO_REG;
  334. }
  335. LastRegDeleted = 0;
  336. }
  337. VOID
  338. InvalidateRegCacheForInstruction(
  339. DWORD RegsSet
  340. )
  341. /*++
  342. Routine Description:
  343. Invalidates the register cache by marking RISC registers as free if
  344. RegsSet indicates the previous instruction modified the x86 register
  345. in the cache.
  346. Arguments:
  347. RegsSet - list of x86 registers which have been modified.
  348. Return Value:
  349. None.
  350. --*/
  351. {
  352. int CacheRegNum;
  353. //
  354. // Invalidate cached registers which have been altered
  355. //
  356. for (CacheRegNum = 0; CacheRegNum<NUM_CACHE_REGS; CacheRegNum++) {
  357. if (RegCache[CacheRegNum] != NO_REG &&
  358. ((REGMASK << (REGSHIFT*RegCache[CacheRegNum])) & RegsSet)) {
  359. RegCache[CacheRegNum] = NO_REG;
  360. LastRegDeleted = CacheRegNum;
  361. }
  362. }
  363. }
  364. VOID
  365. CleanupMovInstruction(
  366. PINSTRUCTION pInstr
  367. )
  368. /*++
  369. Routine Description:
  370. Performs some final optimizations on MOV instructions. This cannot
  371. be performed during the x86 analysis phase as it needs to know
  372. about register caching.
  373. Arguments:
  374. pInstr - MOV instruction to clean up.
  375. Return Value:
  376. None. pInstr modified.
  377. --*/
  378. {
  379. if (pInstr->Operand1.Type == OPND_REGREF) {
  380. ULONG Reg;
  381. if (pInstr->Operand2.Type == OPND_REGVALUE &&
  382. pInstr->Operand2.Reg < GP_AH &&
  383. (Reg = LookupRegInCache(pInstr->Operand2.Reg)) != NO_REG) {
  384. //
  385. // pInstr is a MOV reg1, reg2 (Where reg2 is not a Hi8),
  386. // and reg2 is cached. Set Operand1 to be an OPND_MOVREGTOREG
  387. // with Reg=destination register and IndexReg = source register
  388. // (in the cache).
  389. //
  390. pInstr->Operand2.Type = OPND_NONE;
  391. pInstr->Operand1.Type = OPND_MOVREGTOREG;
  392. pInstr->Operand1.IndexReg = pInstr->Operand1.Reg;
  393. pInstr->Operand1.Reg = Reg;
  394. pInstr->Operand1.Immed = pInstr->Operation;
  395. } else {
  396. //
  397. // pInstr is a MOV reg, X. Rewrite it to be a NOP
  398. // with Operand1 set to X, Operand2 set to OPND_NONE,
  399. // and Operand3 set to OPND_MOVTOREG.
  400. //
  401. Reg = pInstr->Operand1.Reg;
  402. pInstr->Operand1 = pInstr->Operand2;
  403. pInstr->Operand2.Type = OPND_NONE;
  404. pInstr->Operand3.Type = OPND_MOVTOREG;
  405. pInstr->Operand3.Reg = Reg;
  406. pInstr->Operand3.Immed = pInstr->Operation;
  407. }
  408. } else {
  409. pInstr->Operand3.Type = OPND_MOVTOMEM;
  410. pInstr->Operand3.Immed = pInstr->Operation;
  411. }
  412. }
  413. ULONG PlaceInstructions(
  414. PCHAR CodeLocation,
  415. DWORD cEntryPoints
  416. )
  417. /*++
  418. Routine Description:
  419. Generates optimized native code for the entire InstructionStream[] array.
  420. Arguments:
  421. CodeLocation -- place to write the native code
  422. cEntryPoints -- count of ENTRYPOINT structures describing the x86 code
  423. Return Value:
  424. Size of native code generated, in bytes.
  425. --*/
  426. {
  427. ULONG NativeSize;
  428. int i;
  429. ULONG IntelNext;
  430. PULONG NextCompilationUnitStart;
  431. FixupCount = 0;
  432. //
  433. // Generate native code
  434. //
  435. NativeSize = PlaceNativeCode(CodeLocation);
  436. //
  437. // Generate the JumpToNextCompilationUnit code. It loads
  438. // RegEip with the intel address of the Intel instruction following
  439. // this run of code.
  440. //
  441. // First, find the last instruction in the stream with a nonzero x86
  442. // size. Zero-size NOPs are only present if there is an OPT_ instruction
  443. // in the stream, so the loop is guaranteed to terminate.
  444. //
  445. for (i=NumberOfInstructions-1; InstructionStream[i].Size == 0; i--)
  446. ;
  447. IntelNext = InstructionStream[i].IntelAddress +
  448. InstructionStream[i].Size;
  449. NextCompilationUnitStart = (PULONG)(CodeLocation+NativeSize);
  450. NativeSize += GenJumpToNextCompilationUnit(NextCompilationUnitStart,
  451. #if _ALPHA_
  452. (ULONG)(ULONGLONG)&EntrypointECU,
  453. #endif
  454. (PINSTRUCTION)IntelNext);
  455. #if _ALPHA_
  456. //
  457. // Fixups which reference EntrypointECU will be patched by ApplyFixups()
  458. // to point at the EndCompilationUnit fragment generated here
  459. //
  460. EntrypointECU.nativeStart = CodeLocation + NativeSize;
  461. NativeSize += GenEndCompilationUnit((PULONG)(CodeLocation + NativeSize), 0, NULL);
  462. #endif
  463. //
  464. // Update the nativeStart and nativeEnd fields in Entrypoints
  465. //
  466. UpdateEntrypointNativeInfo(CodeLocation + NativeSize);
  467. //
  468. // Use fixup information to finish generation
  469. //
  470. ApplyFixups(NextCompilationUnitStart);
  471. //
  472. // Optimize the resulting code
  473. //
  474. PeepNativeCode(CodeLocation, NativeSize);
  475. //
  476. // Generate the information required to regenerate EIP after
  477. // an exception
  478. //
  479. NativeSize += PlaceExceptionData(CodeLocation + NativeSize, cEntryPoints);
  480. return NativeSize;
  481. }
  482. VOID
  483. UpdateEntrypointNativeInfo(
  484. PCHAR NativeEnd
  485. )
  486. /*++
  487. Routine Description:
  488. After native code is generated, this function sets the nativeStart and
  489. nativeEnd fields of entrypoints.
  490. Arguments:
  491. NativeEnd -- highest native address used for the generated code.
  492. Return Value:
  493. None. EntryPoints updated.
  494. --*/
  495. {
  496. PENTRYPOINT EntryPoint = NULL;
  497. ULONG i;
  498. BYTE InstrCount;
  499. InstrCount = 0;
  500. for (i=0; i<NumberOfInstructions; ++i) {
  501. //
  502. // Keep count of the number of x86 instructions within the
  503. // entrypoint (not counting 0-byte NOPs)
  504. //
  505. if (InstructionStream[i].Operation != OP_Nop ||
  506. InstructionStream[i].Size != 0) {
  507. InstrCount++;
  508. }
  509. if (EntryPoint != InstructionStream[i].EntryPoint) {
  510. if (EntryPoint) {
  511. EntryPoint->nativeEnd = InstructionStream[i].NativeStart-1;
  512. }
  513. InstrCount = 1;
  514. EntryPoint = InstructionStream[i].EntryPoint;
  515. EntryPoint->nativeStart = InstructionStream[i].NativeStart;
  516. }
  517. }
  518. EntryPoint->nativeEnd = NativeEnd;
  519. }
  520. ULONG
  521. PlaceExceptionData(
  522. PCHAR Location,
  523. DWORD cEntryPoints
  524. )
  525. /*++
  526. Routine Description:
  527. Places the data required to regenerate EIP after an exception occurs.
  528. Arguments:
  529. Location -- address to store the exception data at
  530. cEntryPoints -- count of EntryPoints describing the x86 code generated
  531. Return Value:
  532. Size of exception data, in bytes.
  533. --*/
  534. {
  535. DWORD i;
  536. PENTRYPOINT EP;
  537. PULONG pData;
  538. PINSTRUCTION pInstr;
  539. //
  540. // The format of the Exception data is a series of ULONGs:
  541. // EXCEPTIONDATA_SIGNATURE (an illegal RISC instruction)
  542. // cEntryPoints (count of ENTRYPOINTs in InstructionStream[])
  543. // for each ENTRYPOINT in the InstructionStream {
  544. // ptr to ENTRYPOINT
  545. // for each x86 instruction with non-zero x86 size {
  546. // MAKELONG(offset of first RISC instr in the x86 instr from
  547. // EP->nativeStart,
  548. // offset of start of x86 instr from EP->intelStart)
  549. // }
  550. // }
  551. //
  552. // The last RISC offset in each EntryPoint has the low bit set to
  553. // mark it as the last offset.
  554. //
  555. //
  556. pData = (PULONG)Location;
  557. *pData = EXCEPTIONDATA_SIGNATURE;
  558. pData++;
  559. *pData = cEntryPoints;
  560. pData++;
  561. EP = NULL;
  562. pInstr = &InstructionStream[0];
  563. for (i=0; i<NumberOfInstructions; ++i, pInstr++) {
  564. if (EP != pInstr->EntryPoint) {
  565. if (EP) {
  566. //
  567. // flag the previous offset NativeStart as the last one for
  568. // that EntryPoint.
  569. //
  570. *(pData-1) |= 1;
  571. }
  572. EP = pInstr->EntryPoint;
  573. *pData = (ULONG)(ULONGLONG)EP;
  574. pData++;
  575. }
  576. if (pInstr->Operation != OP_Nop || pInstr->Size != 0) {
  577. *pData = MAKELONG(
  578. (USHORT)(pInstr->NativeStart - (PCHAR)EP->nativeStart),
  579. (USHORT)(pInstr->IntelAddress - (ULONG)(ULONGLONG)EP->intelStart));
  580. pData++;
  581. }
  582. }
  583. *(pData-1) |= 1; // Flag the pair of offsets as the last.
  584. return (ULONG)(LONGLONG) ( (PCHAR)pData - Location);
  585. }
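
/*
 * Illustrative sketch (not part of the original module): encoding and
 * decoding one exception-data pair as built above. The low word is the
 * RISC offset from EP->nativeStart, the high word is the x86 offset from
 * EP->intelStart, and the low bit marks the last pair of an ENTRYPOINT.
 * MAKELONG_EX/LOWORD_EX/HIWORD_EX are local stand-ins for the Win32
 * macros so the sketch stays self-contained.
 */
#include <stdio.h>

#define MAKELONG_EX(lo, hi) (((unsigned long)(unsigned short)(lo)) | \
                             ((unsigned long)(unsigned short)(hi) << 16))
#define LOWORD_EX(l)        ((unsigned short)((l) & 0xffff))
#define HIWORD_EX(l)        ((unsigned short)(((l) >> 16) & 0xffff))

static void ExceptionPairSketch(void)
{
    unsigned long Pair;

    Pair = MAKELONG_EX(0x40, 0x0a); /* RISC offset 0x40, x86 offset 0x0a */
    Pair |= 1;                      /* flag it as the last pair for this ENTRYPOINT */

    printf("risc offset %#x, x86 offset %#x, last=%d\n",
           (unsigned)(LOWORD_EX(Pair) & 0xfffe), /* strip the flag bit, as GetEipFromException does */
           (unsigned)HIWORD_EX(Pair),
           (int)(Pair & 1));
}
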
  586. VOID
  587. GetEipFromException(
  588. PCPUCONTEXT cpu,
  589. PEXCEPTION_POINTERS pExceptionPointers
  590. )
  591. /*++
  592. Routine Description:
  593. This routine derives the value of EIP from a RISC exception record.
  594. 1. Walk the stack until the instruction pointer points into the
  595. Translation Cache.
  596. 2. Walk forward through the Translation Cache until the
  597. EXCEPTIONDATA_SIGNATURE signature is found.
  598. 3. Find the ENTRYPOINT which describes the faulting instruction.
  599. 4. Find the correct x86 instruction by examining the pairs of
  600. RISC offsets of the starts of x86 instructions.
  601. Arguments:
  602. cpu -- current cpu state
  603. pExceptionPointers -- state of the thread when the exception occurred
  604. Return Value:
  605. None. cpu->Eip now points at faulting x86 instruction.
  606. --*/
  607. {
  608. ULONG NextPc;
  609. PENTRYPOINT EP;
  610. PULONG Location;
  611. ULONG i;
  612. ULONG cEntryPoints;
  613. ULONG RiscStart;
  614. ULONG RiscEnd;
  615. //
  616. // 1. Walk the stack until the instruction pointer points into the
  617. // Translation Cache
  618. //
  619. NextPc = FindPcInTranslationCache(pExceptionPointers);
  620. if (!NextPc) {
  621. //
  622. // The Translation Cache is not on the stack. Nothing we can do.
  623. //
  624. CPUASSERTMSG(FALSE, "FindPcInTranslationCache failed");
  625. cpu->eipReg.i4 = 0x81234567;
  626. return;
  627. }
  628. //
  629. // 2. Walk forwards through the Translation Cache until the
  630. // EXCEPTIONDATA_SIGNATURE signature is found
  631. //
  632. CPUASSERTMSG((NextPc & 3) == 0, "NextPc is not DWORD-aligned");
  633. Location = (PULONG)NextPc;
  634. while (*Location != EXCEPTIONDATA_SIGNATURE) {
  635. Location++;
  636. if (!AddressInTranslationCache((ULONG) (ULONGLONG) Location)) {
  637. cpu->eipReg.i4 = 0x80012345;
  638. CPUASSERTMSG(FALSE, "EXCEPTIONDATA_SIGNATURE not found");
  639. return;
  640. }
  641. }
  642. //
  643. // 3. Find the ENTRYPOINT which describes the address within
  644. // the Cache.
  645. //
  646. Location++; // skip over EXCEPTIONDATA_SIGNATURE
  647. cEntryPoints = *Location;
  648. Location++; // skip over cEntryPoints
  649. for (i=0; i<cEntryPoints; ++i) {
  650. EP = (PENTRYPOINT)*Location;
  651. if ((ULONG)(ULONGLONG)EP->nativeStart <= NextPc && (ULONG)(ULONGLONG)EP->nativeEnd > NextPc) {
  652. //
  653. // This EntryPoint describes the Pc value in the cache
  654. //
  655. break;
  656. }
  657. //
  658. // Skip over the pairs of x86 instruction starts and RISC
  659. // instruction starts.
  660. //
  661. do {
  662. Location++;
  663. } while ((*Location & 1) == 0);
  664. Location++;
  665. }
  666. if (i == cEntryPoints) {
  667. CPUASSERTMSG(FALSE, "Entrypoint not found in EXCEPTIONDATA");
  668. cpu->eipReg.i4 = 0x80001234;
  669. return;
  670. }
  671. //
  672. // 4. Find the correct x86 instruction by examining the pairs of
  673. // RISC offsets of the starts of x86 instructions.
  674. //
  675. NextPc -= (ULONG)(ULONGLONG)EP->nativeStart; // Make relative to nativeStart of EP
  676. RiscStart = 0; // Also relative to nativeStart of EP
  677. Location++;
  678. while ((*Location & 1) == 0) {
  679. RiscEnd = LOWORD(*(Location + 1)) & 0xfffe; // RiscEnd = RiscStart of next instr
  680. if (RiscStart <= NextPc && NextPc < RiscEnd) {
  681. cpu->eipReg.i4 = (ULONG)(ULONGLONG)EP->intelStart + HIWORD(*Location);
  682. return;
  683. }
  684. RiscStart = RiscEnd;
  685. Location++;
  686. }
  687. cpu->eipReg.i4 = (ULONG)(ULONGLONG)EP->intelStart + HIWORD(*Location);
  688. }
  689. ULONG
  690. PlaceNativeCode(
  691. PCHAR CodeLocation
  692. )
  693. /*++
  694. Routine Description:
  695. Generates native code for the set of x86 instructions described by
  696. InstructionStream[] and NumberOfInstructions.
  697. Arguments:
  698. CodeLocation -- pointer to location to generate native code into.
  699. Return Value:
  700. Returns the number of bytes in the native code for this compilation unit
  701. Notes:
  702. None.
  703. --*/
  704. {
  705. PENTRYPOINT EntryPoint = NULL;
  706. PINSTRUCTION pInstr;
  707. PBYTE Location;
  708. PBYTE StartLocation;
  709. ULONG Size;
  710. ULONG i;
  711. OPERATION Op;
  712. CHAR Op1Buffer[MAX_OPERAND_SIZE*sizeof(ULONG)];
  713. CHAR Op2Buffer[MAX_OPERAND_SIZE*sizeof(ULONG)];
  714. ULONG Op1Size;
  715. ULONG Op2Size;
  716. BOOLEAN fMovInstruction;
  717. Location = CodeLocation;
  718. pInstr = &InstructionStream[0];
  719. for (i=NumberOfInstructions; i > 0; --i, pInstr++) {
  720. Op = pInstr->Operation;
  721. pInstr->NativeStart = Location;
  722. if (EntryPoint != pInstr->EntryPoint) {
  723. //
  724. // This instruction begins an EntryPoint
  725. //
  726. EntryPoint = pInstr->EntryPoint;
  727. StartLocation = Location;
  728. //
  729. // Reset per-basic-block state
  730. //
  731. ResetRegCache();
  732. Arg1Contents = Arg2Contents = NO_REG;
  733. Location += GenStartBasicBlock((PULONG)Location,
  734. #if _ALPHA_
  735. (ULONG)(ULONGLONG)&EntrypointECU,
  736. #endif
  737. pInstr);
  738. }
  739. if (pInstr->RegsToCache) {
  740. //
  741. // Load up frequently-used x86 registers into RISC registers
  742. //
  743. Location += LoadRegCacheForInstruction(pInstr->RegsToCache,
  744. Location);
  745. }
  746. if ((Op==OP_Mov32) || (Op==OP_Mov16) || (Op==OP_Mov8)) {
  747. //
  748. // Make some final x86 code optimizations based on the
  749. // register caching info.
  750. //
  751. CleanupMovInstruction(pInstr);
  752. fMovInstruction = TRUE;
  753. } else {
  754. fMovInstruction = FALSE;
  755. }
  756. //
  757. // Generate code for the operands
  758. //
  759. Op1Size = PlaceOperand(1, &pInstr->Operand1, pInstr, Op1Buffer);
  760. Op2Size = PlaceOperand(2, &pInstr->Operand2, pInstr, Op2Buffer);
  761. #if _PPC_
  762. if (pInstr->Operand1.Type == OPND_ADDRVALUE32 &&
  763. pInstr->Operand1.Alignment != ALIGN_DWORD_ALIGNED &&
  764. pInstr->Operand2.Type == OPND_ADDRVALUE32 &&
  765. pInstr->Operand2.Alignment != ALIGN_DWORD_ALIGNED) {
  766. //
  767. // Two MakeValue32 operands cannot be interleaved on PPC due
  768. // to the fact that they share registers RegUt1, RegUt2, RegUt3
  769. //
  770. memcpy(Location, Op1Buffer, Op1Size);
  771. Location += Op1Size;
  772. memcpy(Location, Op2Buffer, Op2Size);
  773. Location += Op2Size;
  774. } else {
  775. Location = InterleaveInstructions(Location,
  776. Op1Buffer,
  777. Op1Size,
  778. Op2Buffer,
  779. Op2Size);
  780. }
  781. #elif _ALPHA_
  782. memcpy(Location, Op1Buffer, Op1Size);
  783. Location += Op1Size;
  784. memcpy(Location, Op2Buffer, Op2Size);
  785. Location += Op2Size;
  786. #else
  787. Location = InterleaveInstructions(Location,
  788. Op1Buffer,
  789. Op1Size,
  790. Op2Buffer,
  791. Op2Size);
  792. #endif
  793. Location += PlaceOperand(3, &pInstr->Operand3, pInstr, Location);
  794. if (DetermineInstructionAlignment(pInstr)) {
  795. //
  796. // The instruction has an aligned version and the operands
  797. // are sufficiently aligned to use it.
  798. //
  799. Op++;
  800. pInstr->Operation = Op;
  801. }
  802. //
  803. // Generate the body of the instruction
  804. //
  805. if (CompilerFlags & COMPFL_FAST) {
  806. Location += (*PlaceFn[Fragments[Op].FastPlaceFn])((PULONG)Location,
  807. #if _ALPHA_
  808. (ULONG)(ULONGLONG)&EntrypointECU,
  809. #endif
  810. pInstr);
  811. } else {
  812. Location += (*PlaceFn[Fragments[Op].SlowPlaceFn])((PULONG)Location,
  813. #if _ALPHA_
  814. (ULONG)(ULONGLONG)&EntrypointECU,
  815. #endif
  816. pInstr);
  817. }
  818. if (pInstr->RegsSet) {
  819. //
  820. // Mark RISC registers in the cache as invalid if this instruction
  821. // modified the matching x86 register.
  822. //
  823. InvalidateRegCacheForInstruction(pInstr->RegsSet);
  824. }
  825. if (!fMovInstruction) {
  826. //
  827. // If the instruction isn't a MOV, then assume the arg regs
  828. // were modified by the fragment
  829. //
  830. Arg1Contents = Arg2Contents = NO_REG;
  831. }
  832. }
  833. return (ULONG)(ULONGLONG)(Location - CodeLocation);
  834. }
  835. VOID
  836. DetermineOperandAlignment(
  837. BOOL EbpAligned,
  838. POPERAND Operand
  839. )
  840. /*++
  841. Routine Description:
  842. This function determines the alignment of an operand. It also sets the
  843. alignment field in the specified operand. The alignment returned indicates
  844. the best we can determine at compile time. An operand that is specified
  845. as byte aligned may actually turn out to be dword aligned.
  846. Arguments:
  847. Operand -- Supplies the operand
  848. Return Value:
  849. Returns the value specifying the alignment
  850. Notes:
  851. It would be really handy here to have an idea what the register
  852. contents were. It would allow us to try to be more optimistic
  853. about the alignment.
  854. This routine should be expanded for all of the alignment cases
  855. assuming it's possible.
  856. --*/
  857. {
  858. USHORT LowBits;
  859. switch (Operand->Type) {
  860. //
  861. // All of the following are regarded as dword aligned, including
  862. // high register references. The code for handling high-half registers
  863. // takes care of alignment
  864. //
  865. case OPND_MOVREGTOREG :
  866. #if _ALPHA_
  867. if (Operand->IndexReg >= GP_AH) {
  868. // The Hi8 registers are considered to be only BYTE-aligned
  869. // on Alpha. This matters for 'mov bh, val' instructions.
  870. // We need to select the MovFrag8B fragment in this case.
  871. Operand->Alignment = ALIGN_BYTE_ALIGNED;
  872. } else {
  873. Operand->Alignment = ALIGN_DWORD_ALIGNED;
  874. }
  875. break;
  876. #endif
  877. // fall into the other cases on MIPS and PPC.
  878. case OPND_REGREF :
  879. case OPND_MOVTOREG :
  880. #if _ALPHA_
  881. if (Operand->Reg >= GP_AH) {
  882. // The Hi8 registers are considered to be only BYTE-aligned
  883. // on Alpha. This matters for 'mov bh, val' instructions.
  884. // We need to select the MovFrag8B fragment in this case.
  885. Operand->Alignment = ALIGN_BYTE_ALIGNED;
  886. break;
  887. }
  888. #endif
  889. // fall into the other cases on MIPS and PPC
  890. case OPND_NONE :
  891. case OPND_NOCODEGEN :
  892. case OPND_REGVALUE :
  893. case OPND_IMM:
  894. Operand->Alignment = ALIGN_DWORD_ALIGNED;
  895. break;
  896. //
  897. // All of the following have alignment depending on the formation
  898. // of the operand
  899. //
  900. case OPND_ADDRREF :
  901. case OPND_ADDRVALUE32 :
  902. case OPND_ADDRVALUE16 :
  903. case OPND_ADDRVALUE8 :
  904. if ((Operand->Reg != NO_REG) && (Operand->Reg != GP_ESP) && (Operand->Reg != GP_EBP || !EbpAligned)) {
  905. //
  906. // We have a reg + ... form. Since we have no idea what the
  907. // contents of the register are, we can't guess about the
  908. // alignment.
  909. //
  910. Operand->Alignment = ALIGN_BYTE_ALIGNED;
  911. } else {
  912. //
  913. // Figure out low two bits
  914. //
  915. LowBits = (USHORT)(Operand->Immed & 0x3);
  916. if ((Operand->IndexReg != NO_REG) && (Operand->IndexReg != GP_ESP) && (Operand->IndexReg != GP_EBP || !EbpAligned)) {
  917. LowBits = (LowBits | (1 << Operand->Scale)) & 0x3;
  918. }
  919. //
  920. // Convert lowbits into alignment
  921. //
  922. if (!LowBits) {
  923. Operand->Alignment = ALIGN_DWORD_ALIGNED;
  924. } else if (!(LowBits & 0x1)){
  925. Operand->Alignment = ALIGN_WORD_ALIGNED;
  926. } else {
  927. Operand->Alignment = ALIGN_BYTE_ALIGNED;
  928. }
  929. }
  930. break;
  931. case OPND_MOVTOMEM:
  932. //
  933. // No alignment issue with this operand.
  934. //
  935. break;
  936. default :
  937. CPUASSERTMSG(FALSE, "Bad Operand type");
  938. }
  939. }
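
/*
 * Illustrative sketch (not part of the original module): the low-two-bits
 * alignment guess made above for displacement + scaled-index address
 * forms. The ALIGN_*_EX values are assumed stand-ins for the real ALIGN_
 * constants; only their ordering matters here.
 */
enum { ALIGN_BYTE_EX = 0, ALIGN_WORD_EX = 1, ALIGN_DWORD_EX = 2 };

static int GuessOperandAlignmentSketch(
    unsigned long Displacement,     /* operand displacement (Operand->Immed) */
    int HasUnknownIndexReg,         /* nonzero if an index reg of unknown value is present */
    unsigned long Scale)            /* index scale: 0..3 (x1, x2, x4, x8) */
{
    unsigned short LowBits = (unsigned short)(Displacement & 0x3);

    if (HasUnknownIndexReg) {
        /*
         * An unknown index register can only disturb the low two bits by
         * multiples of its scale factor (1 << Scale bytes).
         */
        LowBits = (LowBits | (1 << Scale)) & 0x3;
    }

    if (!LowBits) {
        return ALIGN_DWORD_EX;      /* low two bits clear: dword aligned */
    } else if (!(LowBits & 0x1)) {
        return ALIGN_WORD_EX;       /* low bit clear: word aligned */
    }
    return ALIGN_BYTE_EX;           /* otherwise assume byte aligned */
}
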
  940. ULONG
  941. DetermineInstructionAlignment(
  942. PINSTRUCTION Instruction
  943. )
  944. /*++
  945. Routine Description:
  946. This routine determines if the aligned form of an instruction can
  947. be used.
  948. Arguments:
  949. Instruction - Supplies a pointer to the instruction
  950. Return Value:
  951. Returns the alignment condition of the instruction
  952. Notes:
  953. The results of this are pretty much ignored for inline mov's. They are
  954. currently the only instructions that care about the details of the
  955. alignment. For the rest, naturally aligned or un-aligned is sufficient.
  956. --*/
  957. {
  958. OPERATION Op = Instruction->Operation;
  959. //
  960. // If the instruction does not have an aligned version, then
  961. // there is no work to do.
  962. //
  963. if (!(Fragments[Op].Flags & OPFL_ALIGN)) {
  964. return FALSE;
  965. }
  966. if (Instruction->Operand1.Type != OPND_ADDRREF) {
  967. ;
  968. } else if (Instruction->Operand1.Alignment == ALIGN_DWORD_ALIGNED) {
  969. ;
  970. } else if ((Instruction->Operand1.Alignment == ALIGN_WORD_ALIGNED) &&
  971. (Fragments[Op].Flags & OPFL_ADDR16)
  972. ) {
  973. ;
  974. } else {
  975. return FALSE;
  976. }
  977. if (Instruction->Operand2.Type != OPND_ADDRREF) {
  978. ;
  979. } else if (Instruction->Operand2.Alignment == ALIGN_DWORD_ALIGNED) {
  980. ;
  981. } else if ((Instruction->Operand2.Alignment == ALIGN_WORD_ALIGNED) &&
  982. (Fragments[Op].Flags & OPFL_ADDR16)
  983. ) {
  984. ;
  985. } else {
  986. return FALSE;
  987. }
  988. return TRUE;
  989. }
  990. ULONG
  991. PlaceOperand(
  992. ULONG OperandNumber,
  993. POPERAND Operand,
  994. PINSTRUCTION Instruction,
  995. PCHAR Location
  996. )
  997. /*++
  998. Routine Description:
  999. This routine generates the fragments necessary to form an operand.
  1000. Arguments:
  1001. OperandNumber - number of operand (selects arg register number to target)
  1002. Operand - Supplies the operand
  1003. Instruction - The instruction containing the operand
  1004. Location - Location to generate code into
  1005. Return Value:
  1006. The size in bytes of the fragments selected.
  1007. --*/
  1008. {
  1009. OPERAND_OP Op;
  1010. ULONG RegCacheNum;
  1011. PCHAR StartLocation;
  1012. #define GEN_OPERAND(Op) (OpFragments[Op])((PULONG)Location, Operand, OperandNumber)
  1013. //
  1014. // Early return for no operands
  1015. //
  1016. if (Operand->Type == OPND_NONE || Operand->Type == OPND_NOCODEGEN) {
  1017. return 0;
  1018. }
  1019. StartLocation = Location;
  1020. DetermineOperandAlignment(Instruction->EbpAligned, Operand);
  1021. switch (Operand->Type) {
  1022. case OPND_REGVALUE:
  1023. if ((CompilerFlags & COMPFL_FAST)
  1024. && (Fragments[Instruction->Operation].Flags & OPFL_INLINEARITH)) {
  1025. break;
  1026. } else {
  1027. Location += GenOperandRegVal((PULONG)Location,
  1028. Operand,
  1029. OperandNumber
  1030. );
  1031. }
  1032. break;
  1033. case OPND_REGREF:
  1034. if ((CompilerFlags & COMPFL_FAST)
  1035. && (Fragments[Instruction->Operation].Flags & OPFL_INLINEARITH)) {
  1036. break;
  1037. } else {
  1038. Location += GenOperandRegRef((PULONG)Location,
  1039. Operand,
  1040. OperandNumber
  1041. );
  1042. }
  1043. break;
  1044. case OPND_ADDRREF:
  1045. case OPND_ADDRVALUE8:
  1046. case OPND_ADDRVALUE16:
  1047. case OPND_ADDRVALUE32:
  1048. Location += GenOperandAddr((PULONG)Location,
  1049. Operand,
  1050. OperandNumber,
  1051. Instruction->FsOverride
  1052. );
  1053. break;
  1054. case OPND_IMM :
  1055. if ((CompilerFlags & COMPFL_FAST)
  1056. && (Fragments[Instruction->Operation].Flags & OPFL_INLINEARITH)) {
  1057. break;
  1058. } else {
  1059. Location += GenOperandImm((PULONG)Location,
  1060. Operand,
  1061. OperandNumber);
  1062. }
  1063. break;
  1064. case OPND_MOVTOREG:
  1065. Location += GenOperandMovToReg((PULONG)Location,
  1066. Operand,
  1067. OperandNumber);
  1068. break;
  1069. case OPND_MOVREGTOREG:
  1070. switch (Operand->Immed) {
  1071. case OP_Mov32:
  1072. Op = OP_MovRegToReg32;
  1073. break;
  1074. case OP_Mov16:
  1075. Op = OP_MovRegToReg16;
  1076. break;
  1077. case OP_Mov8:
  1078. #if _ALPHA_
  1079. if (Operand->Alignment == ALIGN_BYTE_ALIGNED) {
  1080. Op = OP_MovRegToReg8B;
  1081. break;
  1082. }
  1083. #endif
  1084. Op = OP_MovRegToReg8;
  1085. break;
  1086. default:
  1087. CPUASSERT(FALSE);
  1088. }
  1089. Location += GEN_OPERAND(Op);
  1090. break;
  1091. case OPND_MOVTOMEM:
  1092. switch (Operand->Immed) {
  1093. case OP_Mov32:
  1094. Op = OP_MovToMem32B + Instruction->Operand1.Alignment;
  1095. break;
  1096. case OP_Mov16:
  1097. Op = OP_MovToMem16B + Instruction->Operand1.Alignment;
  1098. break;
  1099. case OP_Mov8:
  1100. Op = OP_MovToMem8D;
  1101. #if _ALPHA_
  1102. if (Instruction->Operand1.Alignment != ALIGN_DWORD_ALIGNED) {
  1103. Op = OP_MovToMem8B;
  1104. }
  1105. #endif
  1106. break;
  1107. default:
  1108. CPUASSERT(FALSE); // unknown MOV opcode
  1109. }
  1110. //
  1111. // Generate the correct code based on the alignment of the operand
  1112. //
  1113. Location += GEN_OPERAND(Op);
  1114. break;
  1115. default:
  1116. //
  1117. // This is an internal error
  1118. //
  1119. CPUASSERT(FALSE); // Unknown operand type!!!!
  1120. }
  1121. return (ULONG)(ULONGLONG)(Location - StartLocation);
  1122. }
  1123. PCHAR
  1124. InterleaveInstructions(
  1125. OUT PCHAR CodeLocation,
  1126. IN PCHAR Op1Code,
  1127. IN ULONG Op1Count,
  1128. IN PCHAR Op2Code,
  1129. IN ULONG Op2Count
  1130. )
  1131. /*++
  1132. Routine Description:
  1133. This routine interleaves two streams of native code into one stream
  1134. to try to avoid pipeline stalls. It assumes that the two streams
  1135. have no interdependencies (i.e. they must not use the same registers).
  1136. Arguments:
  1137. CodeLocation -- Supplies the location to place the code at
  1138. Op1Code -- Code for the first operand
  1139. Op1Count -- Count of BYTES in the first operand
  1140. Op2Code -- Code for the second operand
  1141. Op2Count -- Count of BYTES in the second operand
  1142. Return Value:
  1143. New value for CodeLocation - just past the end of the operands.
  1144. Notes:
  1145. None
  1146. --*/
  1147. {
  1148. PULONG pCode = (PULONG)CodeLocation;
  1149. PULONG LongCode;
  1150. PULONG ShortCode;
  1151. ULONG LongCount;
  1152. ULONG ShortCount;
  1153. ULONG LongTail;
  1154. //
  1155. // Figure out which operand has more instructions - it starts first
  1156. //
  1157. if (Op1Count > Op2Count) {
  1158. LongCode = (PULONG)Op1Code;
  1159. LongCount = Op1Count / sizeof(ULONG);
  1160. ShortCode = (PULONG)Op2Code;
  1161. ShortCount = Op2Count / sizeof(ULONG);
  1162. } else {
  1163. LongCode = (PULONG)Op2Code;
  1164. LongCount = Op2Count / sizeof(ULONG);
  1165. ShortCode = (PULONG)Op1Code;
  1166. ShortCount = Op1Count / sizeof(ULONG);
  1167. }
  1168. // get the length of the part of the longer operand which
  1169. // goes after the interleaved part (in BYTES)
  1170. LongTail = (LongCount - ShortCount) * sizeof(ULONG);
  1171. //
  1172. // Interleave instructions from both operands
  1173. //
  1174. while (ShortCount) {
  1175. *pCode++ = *LongCode++;
  1176. *pCode++ = *ShortCode++;
  1177. ShortCount--;
  1178. }
  1179. //
  1180. // Copy in the remaining instructions from the longer operand
  1181. //
  1182. if (LongTail) {
  1183. memcpy(pCode, LongCode, LongTail);
  1184. }
  1185. return CodeLocation + Op1Count + Op2Count;
  1186. }
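
/*
 * Illustrative sketch (not part of the original module): the output order
 * InterleaveInstructions produces for a 3-DWORD operand and a 2-DWORD
 * operand. The instruction values are made up purely to show the layout.
 */
#include <stdio.h>

static void InterleaveOrderSketch(void)
{
    unsigned long Op1[3] = { 0x11, 0x12, 0x13 };    /* longer operand: starts first */
    unsigned long Op2[2] = { 0x21, 0x22 };          /* shorter operand */
    unsigned long Out[5];
    unsigned long i, o = 0;

    /* Alternate one instruction from each stream while both have some left. */
    for (i = 0; i < 2; ++i) {
        Out[o++] = Op1[i];
        Out[o++] = Op2[i];
    }
    /* Copy the remaining tail of the longer operand. */
    Out[o++] = Op1[2];

    for (i = 0; i < 5; ++i) {
        printf("%#lx ", Out[i]);    /* prints 0x11 0x21 0x12 0x22 0x13 */
    }
    printf("\n");
}
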
  1187. USHORT
  1188. ChecksumMemory(
  1189. ENTRYPOINT *pEP
  1190. )
  1191. /*++
  1192. Routine Description:
  1193. Perform a simple checksum on the range of Intel addresses specified
  1194. in an Entrypoint.
  1195. Arguments:
  1196. pEP -- entrypoint describing Intel memory to checksum
  1197. Return Value:
  1198. Checksum for the memory
  1199. Notes:
  1200. None
  1201. --*/
  1202. {
  1203. USHORT Checksum = 0;
  1204. PBYTE pb = (PBYTE)pEP->intelStart;
  1205. while (pb != (PBYTE)pEP->intelEnd) {
  1206. Checksum = ((Checksum << 1) | ((Checksum >> 15) & 1)) + (USHORT)*pb;
  1207. pb++;
  1208. };
  1209. return Checksum;
  1210. }
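
/*
 * Illustrative sketch (not part of the original module): the same
 * rotate-left-by-one-then-add 16-bit checksum as ChecksumMemory, applied
 * to an arbitrary byte buffer instead of an ENTRYPOINT range.
 */
#include <stddef.h>

static unsigned short ChecksumBufferSketch(const unsigned char *p, size_t cb)
{
    unsigned short Checksum = 0;

    while (cb--) {
        /* Rotate the running checksum left by one bit, then add the next byte. */
        Checksum = (unsigned short)(((Checksum << 1) | ((Checksum >> 15) & 1)) + *p++);
    }
    return Checksum;
}
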
  1211. DWORD
  1212. SniffMemory(
  1213. ENTRYPOINT *pEP,
  1214. USHORT Checksum
  1215. )
  1216. /*++
  1217. Routine Description:
  1218. Called from the StartBasicBlock code for regions of memory which
  1219. must be sniffed to determine if the x86 app has modified its code or not.
  1220. Arguments:
  1221. pEP -- entrypoint describing Intel memory to checksum
  1222. Checksum -- checksum of the code at compile-time
  1223. Return Value:
  1224. TRUE - code has not changed...native translation OK
  1225. FALSE - code has been modified. CpuNotify has been set to flush
  1226. the cache on the next CpuSimulateLoop. Caller must jump
  1227. to EndTranslatedCode immediately!
  1228. Notes:
  1229. None
  1230. --*/
  1231. {
  1232. USHORT NewChecksum = ChecksumMemory(pEP);
  1233. if (NewChecksum != Checksum) {
  1234. DECLARE_CPU;
  1235. //
  1236. // Intel code has been modified!!!!! We must flush the cache and
  1237. // recompile!!!!
  1238. //
  1239. #if DBG
  1240. LOGPRINT((TRACELOG, "WX86CPU: Intel code at %x modified!\n", pEP->intelStart));
  1241. #endif
  1242. #undef CpuNotify // soalpha.h defines this to be offset of CpuNotify
  1243. InterlockedOr(&cpu->CpuNotify, CPUNOTIFY_MODECHANGE);
  1244. cpu->eipReg.i4 = (ULONG)(ULONGLONG)pEP->intelStart;
  1245. return FALSE;
  1246. }
  1247. //
  1248. // Intel code has not been modified. Continue simulation without
  1249. // recompilation
  1250. //
  1251. return TRUE;
  1252. }
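
/*
 * Illustrative sketch (not part of the original module): how a caller of
 * SniffMemory would use the compile-time checksum, per the routine
 * description above. ExecuteTranslation and ReturnToDispatcher are
 * hypothetical placeholders for the generated basic-block body and for
 * the jump back to EndTranslatedCode; they are not functions from this
 * module.
 */
extern VOID ExecuteTranslation(VOID);   /* hypothetical */
extern VOID ReturnToDispatcher(VOID);   /* hypothetical */

VOID
SniffedBlockSketch(
    ENTRYPOINT *pEP,
    USHORT CompileTimeChecksum      /* computed by ChecksumMemory() at compile time */
    )
{
    if (!SniffMemory(pEP, CompileTimeChecksum)) {
        /*
         * The x86 code was modified. SniffMemory has already set CpuNotify,
         * so hand control back so the cache can be flushed and recompiled.
         */
        ReturnToDispatcher();
        return;
    }
    /* Code is unchanged: run the cached native translation. */
    ExecuteTranslation();
}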