Counter Strike : Global Offensive Source Code
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1070 lines
35 KiB

  1. //====== Copyright 1996-2004, Valve Corporation, All rights reserved. =======
  2. //
  3. // Purpose:
  4. //
  5. // Build Notes: In order for the coroutine system to work a few build options
  6. // need to be set for coroutine.cpp itself. These are the VPC
  7. // entries for those options:
  8. // $Compiler
  9. // {
  10. // $EnableC++Exceptions "No"
  11. // $BasicRuntimeChecks "Default"
  12. // $EnableFloatingPointExceptions "No"
  13. // }
  14. //
  15. // If you have not set these options you will get a strange popup in
  16. // Visual Studio at the end of Coroutine_Continue().
  17. //
  18. //=============================================================================
  19. //#include "pch_vstdlib.h"
  20. #if defined(_DEBUG)
  21. // Verify that something is false
  22. #define DbgVerifyNot(x) Assert(!x)
  23. #else
  24. #define DbgVerifyNot(x) x
  25. #endif
  26. #include "vstdlib/coroutine.h"
  27. #include "tier0/vprof.h"
  28. #include "tier0/minidump.h"
  29. #include "tier1/utllinkedlist.h"
  30. #include "tier1/utlvector.h"
  31. #include <setjmp.h>
  32. // for debugging
  33. //#define CHECK_STACK_CORRUPTION
  34. #ifndef STEAM
  35. #define PvAlloc(x) malloc(x)
  36. #define FreePv(x) free(x)
  37. #endif
  38. #ifdef CHECK_STACK_CORRUPTION
  39. #include "tier1/checksum_md5.h"
  40. #include "../tier1/checksum_md5.cpp"
  41. #endif // CHECK_STACK_CORRUPTION
  42. //#define COROUTINE_TRACE
  43. #ifdef COROUTINE_TRACE
  44. #include "tier1/fmtstr.h"
  45. static CFmtStr g_fmtstr;
  46. #ifdef WIN32
  47. extern "C" __declspec(dllimport) void __stdcall OutputDebugStringA( const char * );
  48. #else
  49. void OutputDebugStringA( const char *pchMsg ) { fprintf( stderr, pchMsg ); fflush( stderr ); }
  50. #endif
  51. #define CoroutineDbgMsg( fmt, ... ) \
  52. { \
  53. g_fmtstr.sprintf( fmt, ##__VA_ARGS__ ); \
  54. OutputDebugStringA( g_fmtstr ); \
  55. }
  56. #else
  57. #define CoroutineDbgMsg( pchMsg, ... )
  58. #endif // COROUTINE_TRACE
  59. // memdbgon must be the last include file in a .cpp file!!!
  60. #include "tier0/memdbgon.h"
  61. #if defined( _MSC_VER ) && ( _MSC_VER >= 1900 ) && defined( PLATFORM_64BITS )
  62. // The VS2015 longjmp() seems to freak out jumping back into a coroutine (just like Linux if _FORTIFY_SOURCE is defined)
  63. // I can't find an analogy to _FORTIFY_SOURCE for MSVC at the moment, so I wrote a quick assembly to longjmp() without any safety checks
  64. extern "C" void Coroutine_LongJmp_Unchecked(jmp_buf buffer, int nResult);
  65. #define Coroutine_longjmp Coroutine_LongJmp_Unchecked
  66. #else
  67. #define Coroutine_longjmp longjmp
  68. #endif
  69. // it *feels* like we should need barriers around our setjmp/longjmp calls, and the memcpy's
  70. // to make sure the optimizer doesn't reorder us across register load/stores, so I've put them
  71. // in what seem like the appropriate spots, but we seem to run ok without them, so...
  72. #ifdef GNUC
  73. #define RW_MEMORY_BARRIER /* __sync_synchronize() */
  74. #else
  75. #define RW_MEMORY_BARRIER /* _ReadWriteBarrier() */
  76. #endif
  77. #if defined(VALGRIND_HINTING)
  78. #include <valgrind/valgrind.h>
  79. #include <valgrind/memcheck.h>
  80. #define MARK_AS_STACK(start,len) VALGRIND_STACK_REGISTER(start,start+len); \
  81. CoroutineDbgMsg( "STACK_REGISTER() [%x - %x] (%x)\n", start, start+len, len ); \
  82. VALGRIND_MAKE_MEM_DEFINED(start,len); \
  83. CoroutineDbgMsg( "MAKE_MEM_DEFINED() [%x - %x] (%x)\n", start, start+len, len );
  84. #define UNMARK_AS_STACK(id,start,len)
  85. // VALGRIND_STACK_DEREGISTER(id);
  86. // VALGRIND_MAKE_MEM_UNDEFINED(start,len)
  87. #else
  88. #define MARK_AS_STACK(start,len) 0
  89. #define UNMARK_AS_STACK(id,start,len)
  90. #endif
  91. // it *feels* like we should need barriers around our setjmp/longjmp calls, and the memcpy's
  92. // to make sure the optimizer doesn't reorder us across register load/stores, so I've put them
  93. // in what seem like the appropriate spots, but we seem to run ok without them, so...
  94. #ifdef GNUC
  95. #define RW_MEMORY_BARRIER /* __sync_synchronize() */
  96. #else
  97. #define RW_MEMORY_BARRIER /* _ReadWriteBarrier() */
  98. #endif
  99. // return values from setjmp()
  100. static const int k_iSetJmpStateSaved = 0x00;
  101. static const int k_iSetJmpContinue = 0x01;
  102. static const int k_iSetJmpDone = 0x02;
  103. static const int k_iSetJmpDbgBreak = 0x03;
  104. // distance up the stack that coroutine functions stacks' start
  105. #ifdef _PS3
  106. // PS3 has a small stack. Hopefully we don't need 64k of padding!
  107. static const int k_cubCoroutineStackGap = (3 * 1024);
  108. static const int k_cubCoroutineStackGapSmall = 64;
  109. #else
  110. static const int k_cubCoroutineStackGap = (64 * 1024);
  111. static const int k_cubCoroutineStackGapSmall = 64;
  112. #endif
  113. // cap the size of allocated stacks
  114. static const int k_cubMaxCoroutineStackSize = (32 * 1024);
  115. #ifdef _WIN64
  116. extern "C" byte *GetStackPtr64();
  117. #define GetStackPtr( pStackPtr) byte *pStackPtr = GetStackPtr64();
  118. #else
  119. #ifdef WIN32
  120. #define GetStackPtr( pStackPtr ) byte *pStackPtr; __asm mov pStackPtr, esp
  121. #elif defined(GNUC)
  122. #define GetStackPtr( pStackPtr ) byte *pStackPtr = (byte*)__builtin_frame_address(0)
  123. #elif defined(__SNC__)
  124. #define GetStackPtr( pStackPtr ) byte *pStackPtr = (byte*)__builtin_frame_address(0)
  125. #else
  126. #error
  127. #endif
  128. #endif
  129. #ifdef _M_X64
  130. #define _REGISTER_ALIGNMENT 16ull
  131. int CalcAlignOffset( const unsigned char *p )
  132. {
  133. return static_cast<int>( AlignValue( p, _REGISTER_ALIGNMENT ) - p );
  134. }
  135. #endif
//-----------------------------------------------------------------------------
// Purpose: single coroutine descriptor
//
// A coroutine is a saved register context (jmp_buf) plus a heap copy of the
// slice of the thread's stack it occupied when it last yielded. Resuming a
// coroutine memcpy's that slice back onto the real stack and longjmp's into
// it; yielding does the reverse (see SaveStack / RestoreStack).
//-----------------------------------------------------------------------------
#if defined( _PS3 ) && defined( _DEBUG )
// scratch buffer RestoreStack() uses so the heap copy can be freed while the
// in-place stack frames are still valid (see the PS3 comment in RestoreStack)
byte rgStackTempBuffer[65535];
#endif
class CCoroutine
{
public:
	// Initializes an empty, not-yet-started coroutine descriptor.
	CCoroutine()
	{
		m_pSavedStack = NULL;
		m_pStackHigh = m_pStackLow = NULL;
		m_cubSavedStack = 0;
		m_nStackId = 0;
		m_pFunc = NULL;
		m_pchName = "(none)";
		m_iJumpCode = 0;
		m_pchDebugMsg = NULL;
#ifdef COROUTINE_TRACE
		m_hCoroutine = -1;
#endif
#ifdef _M_X64
		// the jmp_buf lives inside an over-sized byte array; compute the
		// offset that 16-byte aligns it (see GetRegisters)
		m_nAlignmentBytes = CalcAlignOffset( m_rgubRegisters );
#else
		memset( &m_Registers, 0, sizeof( m_Registers ) );
#endif
#if defined( VPROF_ENABLED )
		m_pVProfNodeScope = NULL;
#endif
	}

	// Returns the jmp_buf passed to setjmp/longjmp for this coroutine.
	jmp_buf &GetRegisters()
	{
#ifdef _M_X64
		// Did we get moved in memory in such a way that the registers became unaligned?
		// If so, fix them up now
		size_t align = _REGISTER_ALIGNMENT - 1;
		unsigned char *pRegistersCur = &m_rgubRegisters[m_nAlignmentBytes];
		if ( (size_t)pRegistersCur & align )
		{
			m_nAlignmentBytes = CalcAlignOffset( m_rgubRegisters );
			unsigned char *pRegistersNew = &m_rgubRegisters[m_nAlignmentBytes];
			Q_memmove( pRegistersNew, pRegistersCur, sizeof(jmp_buf) );
			pRegistersCur = pRegistersNew;
		}
		return *reinterpret_cast<jmp_buf *>( pRegistersCur );
#else
		return m_Registers;
#endif
	}

	~CCoroutine()
	{
		// release any heap-saved stack still owned by this descriptor
		if ( m_pSavedStack )
		{
			FreePv( m_pSavedStack );
		}
	}

	// Copies the heap-saved stack image back into place on the real stack and
	// frees the heap copy. After the interior memcpy, the current frame's own
	// locals (including `this`) may have been overwritten — hence the pThis
	// dance below. The caller must already have moved its stack pointer below
	// the region being restored.
	FORCEINLINE void RestoreStack()
	{
		if ( m_cubSavedStack )
		{
			Assert( m_pStackHigh );
			Assert( m_pSavedStack );

#if defined( _PS3 ) && defined( _DEBUG )
			// Our (and Sony's) memory tracking tools may try to walk the stack during a free() call
			// if we do the free here at our normal point though the stack is invalid since it's in
			// the middle of swapping. Instead move it to a temp buffer now and free while the stack
			// frames in place are still ok.
			Assert( m_cubSavedStack < Q_ARRAYSIZE( rgStackTempBuffer ) );
			memcpy( &rgStackTempBuffer[0], m_pSavedStack, m_cubSavedStack );
			FreePv( m_pSavedStack );
			m_pSavedStack = &rgStackTempBuffer[0];
#endif

			// Assert we're not about to trash our own immediate stack
			GetStackPtr( pStack );
			if ( pStack >= m_pStackLow && pStack <= m_pStackHigh )
			{
				CoroutineDbgMsg( g_fmtstr.sprintf( "Restoring stack over ESP (%p, %p, %p)\n", pStack, m_pStackLow, m_pStackHigh ) );
				AssertMsg3( false, "Restoring stack over ESP (%p, %p, %p)\n", pStack, m_pStackLow, m_pStackHigh );
			}

			// Make sure we can access the our instance pointer after restoring the stack. This function is inlined, so the compiler could decide to
			// use an existing coroutine pointer that is already on the stack from the previous function (does so on the PS3), and will be overwritten
			// when we memcpy below. Any allocations here should be ok, as the caller should have advanced the stack past the stack area where the
			// new stack will be copied
			CCoroutine *pThis = (CCoroutine*)stackalloc( sizeof( CCoroutine* ) );
			pThis = this;

			RW_MEMORY_BARRIER;
			memcpy( m_pStackLow, m_pSavedStack, m_cubSavedStack );
			pThis->m_nStackId = MARK_AS_STACK( pThis->m_pStackLow, pThis->m_cubSavedStack );
			// WARNING: The stack has been replaced.. do not use previous stack variables or this

#ifdef CHECK_STACK_CORRUPTION
			// verify the restored bytes match the digest computed in SaveStack()
			MD5Init( &pThis->m_md52 );
			MD5Update( &pThis->m_md52, pThis->m_pStackLow, pThis->m_cubSavedStack );
			MD5Final( pThis->m_digest2, &pThis->m_md52 );
			Assert( 0 == Q_memcmp( pThis->m_digest, pThis->m_digest2, MD5_DIGEST_LENGTH ) );
#endif

			// free the saved stack info
			pThis->m_cubSavedStack = 0;
#if !defined( _PS3 ) || !defined( _DEBUG )
			// (on PS3 debug the copy already lives in rgStackTempBuffer and was freed above)
			FreePv( pThis->m_pSavedStack );
#endif
			pThis->m_pSavedStack = NULL;

			// If we were the "main thread", reset our stack pos to zero
			if ( NULL == pThis->m_pFunc )
			{
				pThis->m_pStackLow = pThis->m_pStackHigh = 0;
			}

			// resume accounting against the vprof node we were in when we yielded
			// Make sure we are added after the coroutine we just copied onto the stack
#if defined( VPROF_ENABLED )
			pThis->m_pVProfNodeScope = g_VProfCurrentProfile.GetCurrentNode();
			if ( g_VProfCurrentProfile.IsEnabled() )
			{
				// re-enter the scopes saved by SaveStack(), innermost last
				FOR_EACH_VEC_BACK( pThis->m_vecProfNodeStack, i )
				{
					g_VProfCurrentProfile.EnterScope(
						pThis->m_vecProfNodeStack[i]->GetName(),
						0,
						g_VProfCurrentProfile.GetBudgetGroupName( pThis->m_vecProfNodeStack[i]->GetBudgetGroupID() ),
						false,
						g_VProfCurrentProfile.GetBudgetGroupFlags( pThis->m_vecProfNodeStack[i]->GetBudgetGroupID() )
						);
				}
			}
			pThis->m_vecProfNodeStack.Purge();
#endif
		}
	}

	// Copies [current stack ptr, m_pStackHigh) into a freshly allocated heap
	// buffer so the real stack can be reused by another coroutine. Also
	// unwinds any vprof scopes entered since this coroutine started, recording
	// them so RestoreStack() can re-enter them.
	FORCEINLINE void SaveStack()
	{
		MEM_ALLOC_CREDIT_( "Coroutine saved stack" );
		if ( m_pSavedStack )
		{
			FreePv( m_pSavedStack );
		}
		GetStackPtr( pLocal );
		m_pStackLow = pLocal;
		m_cubSavedStack = (m_pStackHigh - m_pStackLow);
		m_pSavedStack = (byte *)PvAlloc( m_cubSavedStack );

		// if you hit this assert, it's because you're allocating way too much stuff on the stack in your job
		// check you haven't got any overly large string buffers allocated on the stack
		Assert( m_cubSavedStack < k_cubMaxCoroutineStackSize );

#if defined( VPROF_ENABLED )
		// Exit any current vprof scope when we yield, and remember the vprof stack so we can restore it when we run again
		m_vecProfNodeStack.RemoveAll();
		CVProfNode *pCurNode = g_VProfCurrentProfile.GetCurrentNode();
		while ( pCurNode && m_pVProfNodeScope && pCurNode != m_pVProfNodeScope && pCurNode != g_VProfCurrentProfile.GetRoot() )
		{
			m_vecProfNodeStack.AddToTail( pCurNode );
			g_VProfCurrentProfile.ExitScope();
			pCurNode = g_VProfCurrentProfile.GetCurrentNode();
		}
		m_pVProfNodeScope = NULL;
#endif

		RW_MEMORY_BARRIER;
		// save the stack in the newly allocated slot
		memcpy( m_pSavedStack, m_pStackLow, m_cubSavedStack );
		UNMARK_AS_STACK( m_nStackId, m_pStackLow, m_cubSavedStack );

#ifdef CHECK_STACK_CORRUPTION
		// digest of the saved image; checked against the restored image later
		MD5Init( &m_md5 );
		MD5Update( &m_md5, m_pSavedStack, m_cubSavedStack );
		MD5Final( m_digest, &m_md5 );
#endif
	}

#ifdef DBGFLAG_VALIDATE
	// Memory-validation hook: claims the heap-saved stack buffer.
	void Validate( CValidator &validator, const char *pchName )
	{
		validator.Push( "CCoroutine", this, pchName );
		validator.ClaimMemory( m_pSavedStack );
		validator.Pop();
	}
#endif

#ifdef _M_X64
	// jmp_buf plus slack bytes so it can be kept 16-byte aligned even if the
	// containing object moves (see GetRegisters)
	unsigned char m_rgubRegisters[sizeof(jmp_buf) + _REGISTER_ALIGNMENT];
	int m_nAlignmentBytes;	// current offset of the aligned jmp_buf within m_rgubRegisters
#else
	jmp_buf m_Registers;
#endif
	byte *m_pStackHigh; // position of initial entry to the coroutine (stack ptr before continue is ran)
	byte *m_pStackLow; // low point on the stack we plan on saving (stack ptr when we yield)
	byte *m_pSavedStack; // pointer to the saved stack (allocated on heap)
	int m_cubSavedStack; // amount of data on stack
	int m_nStackId; // id from MARK_AS_STACK (valgrind builds; otherwise 0)
	const char *m_pchName; // debug name
	int m_iJumpCode; // setjmp return code to deliver when this coroutine is resumed
	const char *m_pchDebugMsg; // assert message to raise inside the coroutine, or NULL
#ifdef COROUTINE_TRACE
	HCoroutine m_hCoroutine; // for debugging
#endif
	CoroutineFunc_t m_pFunc; // entry function; NULL for the main-thread pseudo-coroutine
	void *m_pvParam; // argument passed to m_pFunc
#if defined( VPROF_ENABLED )
	CUtlVector<CVProfNode *> m_vecProfNodeStack; // vprof scopes exited on yield, re-entered on resume
	CVProfNode *m_pVProfNodeScope; // vprof node that was current when this coroutine last started running
#endif
#ifdef CHECK_STACK_CORRUPTION
	MD5Context_t m_md5;
	unsigned char m_digest[MD5_DIGEST_LENGTH];
	MD5Context_t m_md52;
	unsigned char m_digest2[MD5_DIGEST_LENGTH];
#endif
};
  338. //-----------------------------------------------------------------------------
  339. // Purpose: manages list of all coroutines
  340. //-----------------------------------------------------------------------------
  341. class CCoroutineMgr
  342. {
  343. public:
  344. CCoroutineMgr()
  345. {
  346. m_topofexceptionchain = 0;
  347. // reserve the 0 index as the main coroutine
  348. HCoroutine hMainCoroutine = m_ListCoroutines.AddToTail();
  349. m_ListCoroutines[hMainCoroutine].m_pchName = "(main)";
  350. #ifdef COROUTINE_TRACE
  351. m_ListCoroutines[hMainCoroutine].m_hCoroutine = hMainCoroutine;
  352. #endif
  353. // mark it as currently running
  354. m_VecCoroutineStack.AddToTail( hMainCoroutine );
  355. }
  356. HCoroutine CreateCoroutine( CoroutineFunc_t pFunc, void *pvParam )
  357. {
  358. HCoroutine hCoroutine = m_ListCoroutines.AddToTail();
  359. CoroutineDbgMsg( g_fmtstr.sprintf( "Coroutine_Create() hCoroutine = %x pFunc = 0x%x pvParam = 0x%x\n", hCoroutine, pFunc, pvParam ) );
  360. m_ListCoroutines[hCoroutine].m_pFunc = pFunc;
  361. m_ListCoroutines[hCoroutine].m_pvParam = pvParam;
  362. m_ListCoroutines[hCoroutine].m_pSavedStack = NULL;
  363. m_ListCoroutines[hCoroutine].m_cubSavedStack = 0;
  364. m_ListCoroutines[hCoroutine].m_pStackHigh = m_ListCoroutines[hCoroutine].m_pStackLow = NULL;
  365. m_ListCoroutines[hCoroutine].m_pchName = "(no name set)";
  366. #ifdef COROUTINE_TRACE
  367. m_ListCoroutines[hCoroutine].m_hCoroutine = hCoroutine;
  368. #endif
  369. return hCoroutine;
  370. }
  371. HCoroutine GetActiveCoroutineHandle()
  372. {
  373. // look up the coroutine of the last item on the stack
  374. return m_VecCoroutineStack[m_VecCoroutineStack.Count() - 1];
  375. }
  376. CCoroutine &GetActiveCoroutine()
  377. {
  378. // look up the coroutine of the last item on the stack
  379. return m_ListCoroutines[GetActiveCoroutineHandle()];
  380. }
  381. CCoroutine &GetPreviouslyActiveCoroutine()
  382. {
  383. // look up the coroutine that ran the current coroutine
  384. return m_ListCoroutines[m_VecCoroutineStack[m_VecCoroutineStack.Count() - 2]];
  385. }
  386. bool IsValidCoroutine( HCoroutine hCoroutine )
  387. {
  388. return m_ListCoroutines.IsValidIndex( hCoroutine ) && hCoroutine > 0;
  389. }
  390. void SetActiveCoroutine( HCoroutine hCoroutine )
  391. {
  392. m_VecCoroutineStack.AddToTail( hCoroutine );
  393. }
  394. void PopCoroutineStack()
  395. {
  396. Assert( m_VecCoroutineStack.Count() > 1 );
  397. m_VecCoroutineStack.Remove( m_VecCoroutineStack.Count() - 1 );
  398. }
  399. bool IsAnyCoroutineActive()
  400. {
  401. return m_VecCoroutineStack.Count() > 1;
  402. }
  403. void DeleteCoroutine( HCoroutine hCoroutine )
  404. {
  405. m_ListCoroutines.Remove( hCoroutine );
  406. }
  407. #ifdef DBGFLAG_VALIDATE
  408. void Validate( CValidator &validator, const char *pchName )
  409. {
  410. validator.Push( "CCoroutineMgr", this, pchName );
  411. ValidateObj( m_ListCoroutines );
  412. FOR_EACH_LL( m_ListCoroutines, iRoutine )
  413. {
  414. ValidateObj( m_ListCoroutines[iRoutine] );
  415. }
  416. ValidateObj( m_VecCoroutineStack );
  417. validator.Pop();
  418. }
  419. #endif // DBGFLAG_VALIDATE
  420. uint32 m_topofexceptionchain;
  421. private:
  422. CUtlLinkedList<CCoroutine, HCoroutine> m_ListCoroutines;
  423. CUtlVector<HCoroutine> m_VecCoroutineStack;
  424. };
// Per-thread coroutine manager, created lazily on first use (see GCoroutineMgr).
CTHREADLOCALPTR( CCoroutineMgr ) g_ThreadLocalCoroutineMgr;
//GenericThreadLocals::CThreadLocalPtr< CCoroutineMgr >
// Every manager ever created, so Coroutine_ReleaseThreadMemory can find and
// free the calling thread's instance. Guarded by g_ThreadMutexCoroutineMgr.
CUtlVector< CCoroutineMgr * > g_VecPCoroutineMgr;
CThreadMutex g_ThreadMutexCoroutineMgr;
  429. CCoroutineMgr &GCoroutineMgr()
  430. {
  431. if ( !g_ThreadLocalCoroutineMgr )
  432. {
  433. AUTO_LOCK( g_ThreadMutexCoroutineMgr );
  434. g_ThreadLocalCoroutineMgr = new CCoroutineMgr();
  435. g_VecPCoroutineMgr.AddToTail( g_ThreadLocalCoroutineMgr );
  436. }
  437. return *g_ThreadLocalCoroutineMgr;
  438. }
  439. //-----------------------------------------------------------------------------
  440. // Purpose: call when a thread is quiting to release any per-thread memory
  441. //-----------------------------------------------------------------------------
  442. void Coroutine_ReleaseThreadMemory()
  443. {
  444. AUTO_LOCK( g_ThreadMutexCoroutineMgr );
  445. if ( g_ThreadLocalCoroutineMgr != static_cast<const void*>( nullptr ) )
  446. {
  447. int iCoroutineMgr = g_VecPCoroutineMgr.Find( g_ThreadLocalCoroutineMgr );
  448. delete g_VecPCoroutineMgr[iCoroutineMgr];
  449. g_VecPCoroutineMgr.Remove( iCoroutineMgr );
  450. }
  451. }
  452. // predecs
  453. void Coroutine_Launch( CCoroutine &coroutine );
  454. void Coroutine_Finish();
  455. //-----------------------------------------------------------------------------
  456. // Purpose: Creates a soroutine, specified by the function, returns a handle
  457. //-----------------------------------------------------------------------------
  458. HCoroutine Coroutine_Create( CoroutineFunc_t pFunc, void *pvParam )
  459. {
  460. return GCoroutineMgr().CreateCoroutine( pFunc, pvParam );
  461. }
//-----------------------------------------------------------------------------
// Purpose: Continues a current coroutine
// input:	hCoroutine - the coroutine to continue
//			pchDebugMsg - if non-NULL, it will generate an assertion in
//							that coroutine, then that coroutine will
//							immediately yield back to this thread
//			pchName - optional debug name to assign to the coroutine
// returns: true if the coroutine is still suspended (yielded), false if it
//			ran to completion and has been deleted
//-----------------------------------------------------------------------------
// sentinel value (not a real string) meaning "break in the coroutine, no assert message"
static const char *k_pchDebugMsg_GenericBreak = (const char *)1;
bool Internal_Coroutine_Continue( HCoroutine hCoroutine, const char *pchDebugMsg, const char *pchName )
{
	Assert( GCoroutineMgr().IsValidCoroutine(hCoroutine) );

	bool bInCoroutineAlready = GCoroutineMgr().IsAnyCoroutineActive();

#ifdef _WIN32
#ifndef _WIN64
	// make sure nobody has a try/catch block and then yielded
	// because we hate that and we will crash
	uint32 topofexceptionchain;
	__asm mov eax, dword ptr fs:[0]
	__asm mov topofexceptionchain, eax
	if ( GCoroutineMgr().m_topofexceptionchain == 0 )
		GCoroutineMgr().m_topofexceptionchain = topofexceptionchain;
	else
	{
		Assert( topofexceptionchain == GCoroutineMgr().m_topofexceptionchain );
	}
#endif
#endif

	// start the new coroutine
	GCoroutineMgr().SetActiveCoroutine( hCoroutine );
	CCoroutine &coroutinePrev = GCoroutineMgr().GetPreviouslyActiveCoroutine();
	CCoroutine &coroutine = GCoroutineMgr().GetActiveCoroutine();
	if ( pchName )
		coroutine.m_pchName = pchName;

	CoroutineDbgMsg( g_fmtstr.sprintf( "Coroutine_Continue() %s#%x -> %s#%x\n", coroutinePrev.m_pchName, coroutinePrev.m_hCoroutine, coroutine.m_pchName, coroutine.m_hCoroutine ) );

	bool bStillRunning = true;

	// set the point for the coroutine to jump back to
	RW_MEMORY_BARRIER;
	int iResult = setjmp( coroutinePrev.GetRegisters() );
	if ( iResult == k_iSetJmpStateSaved )
	{
		// copy the new stack in place
		if ( coroutine.m_pSavedStack )
		{
			// save any of the main stack that overlaps where the coroutine stack is going to go
			GetStackPtr( pStackSavePoint );
			if ( pStackSavePoint <= coroutine.m_pStackHigh )
			{
				// save the main stack from where the coroutine stack wishes to start
				// if the previous coroutine already had a stack save point, just save
				// the whole thing.
				if ( NULL == coroutinePrev.m_pStackHigh )
				{
					coroutinePrev.m_pStackHigh = coroutine.m_pStackHigh;
				}
				else
				{
					Assert( coroutine.m_pStackHigh <= coroutinePrev.m_pStackHigh );
				}
				coroutinePrev.SaveStack();
				CoroutineDbgMsg( g_fmtstr.sprintf( "SaveStack() %s#%x [%x - %x]\n", coroutinePrev.m_pchName, coroutinePrev.m_hCoroutine, coroutinePrev.m_pStackLow, coroutinePrev.m_pStackHigh ) );
			}

			// If the coroutine's stack is close enough to where we are on the stack, we need to push ourselves
			// down past it, so that the memcpy() doesn't screw up the RestoreStack->memcpy call chain.
			if ( coroutine.m_pStackHigh > ( pStackSavePoint - 2048 ) )
			{
				// If the entire CR stack is above us, we don't need to pad ourselves.
				if ( coroutine.m_pStackLow < pStackSavePoint )
				{
					// push ourselves down
					int cubPush = pStackSavePoint - coroutine.m_pStackLow + 512;
					volatile byte *pvStackGap = (byte*)stackalloc( cubPush );
					// touch the far end so the allocation can't be optimized away
					pvStackGap[ cubPush-1 ] = 0xF;
					CoroutineDbgMsg( g_fmtstr.sprintf( "Adjusting stack point by %d (%x <- %x)\n", cubPush, pvStackGap, &pvStackGap[cubPush] ) );
				}
			}

			// This needs to go right here - after we've maybe padded the stack (so that iJumpCode does not
			// get stepped on) and before the RestoreStack() call (because that might step on pchDebugMsg!).
			if ( pchDebugMsg == NULL )
			{
				// normal resume
				coroutine.m_iJumpCode = k_iSetJmpContinue;
				coroutine.m_pchDebugMsg = NULL;
			}
			else if ( pchDebugMsg == k_pchDebugMsg_GenericBreak )
			{
				// debugger break in the coroutine, no assert message
				coroutine.m_iJumpCode = k_iSetJmpDbgBreak;
				coroutine.m_pchDebugMsg = NULL;
			}
			else
			{
				// raise an assert with this message inside the coroutine
				coroutine.m_iJumpCode = k_iSetJmpDbgBreak;
				coroutine.m_pchDebugMsg = pchDebugMsg;
			}

			// restore the coroutine stack
			CoroutineDbgMsg( g_fmtstr.sprintf( "RestoreStack() %s#%x [%x - %x] (current %x)\n", coroutine.m_pchName, coroutine.m_hCoroutine, coroutine.m_pStackLow, coroutine.m_pStackHigh, pStackSavePoint ) );
			coroutine.RestoreStack();

			// the new stack is in place, so no code here can reference local stack vars
			// move the program counter
			RW_MEMORY_BARRIER;
			Coroutine_longjmp( GCoroutineMgr().GetActiveCoroutine().GetRegisters(), GCoroutineMgr().GetActiveCoroutine().m_iJumpCode );
		}
		else
		{
			// set the stack pos for the new coroutine
			// jump a long way forward on the stack
			// this needs to be a stackalloc() instead of a static buffer, so it won't get optimized out in release build
			int cubGap = bInCoroutineAlready ? k_cubCoroutineStackGapSmall : k_cubCoroutineStackGap;
			volatile byte *pvStackGap = (byte*)stackalloc( cubGap );
			pvStackGap[ cubGap-1 ] = 0xF;
			// hasn't started yet, so launch
			Coroutine_Launch( coroutine );
		}

		// when the job yields, the above setjmp() will be called again with non-zero value
		// code here will never run
	}
	else if ( iResult == k_iSetJmpContinue )
	{
		// just pass through
	}
	else if ( iResult == k_iSetJmpDone )
	{
		// we're done, remove the coroutine
		GCoroutineMgr().DeleteCoroutine( Coroutine_GetCurrentlyActive() );
		bStillRunning = false;
	}

	// job has suspended itself, we'll get back to it later
	GCoroutineMgr().PopCoroutineStack();

	return bStillRunning;
}
  590. //-----------------------------------------------------------------------------
  591. // Purpose: Continues a current coroutine
  592. //-----------------------------------------------------------------------------
  593. bool Coroutine_Continue( HCoroutine hCoroutine, const char *pchName )
  594. {
  595. return Internal_Coroutine_Continue( hCoroutine, NULL, pchName );
  596. }
//-----------------------------------------------------------------------------
// Purpose: launches a coroutine way ahead on the stack
//
// Called (NOINLINE, after the caller pushed a large stack gap) so the
// coroutine body runs well below the resuming frames. Records the current
// stack pointer as m_pStackHigh — the top of the region saved/restored on
// yield — then runs the coroutine function to completion.
//-----------------------------------------------------------------------------
void NOINLINE Coroutine_Launch( CCoroutine &coroutine )
{
#if defined( VPROF_ENABLED )
	// remember the vprof node we started in so SaveStack() knows where to unwind to
	coroutine.m_pVProfNodeScope = g_VProfCurrentProfile.GetCurrentNode();
#endif

	// set our marker
#ifndef _PS3
	GetStackPtr( pEsp );
#else
	// The stack pointer for the current stack frame points to the top of the stack which already includes space for the
	// ABI linkage area. We need to include this area as part of our coroutine stack, as the calling function will copy
	// the link register (return address to this function) into this area after calling m_pFunc below. Failing to do so
	// could result in the coroutine to return to garbage when complete
	uint64 *pStackFrameTwoUp = (uint64*)__builtin_frame_address(2);

	// Need to terminate the stack frame sequence so if someone tries to walk the stack in a co-routine they don't go forever.
	*pStackFrameTwoUp = 0;

	// Need to track where we we save up to on yield, add a few bytes so we save just the beginning linkage area of the stack frame
	// we added the null termination to.
	byte * pEsp = ((byte*)pStackFrameTwoUp)+32;
#endif

#ifdef _WIN64
	// Add a little extra padding, to capture the spill space for the registers
	// that is required for us to reserve ABOVE the return address), and also
	// align the stack
	coroutine.m_pStackHigh = (byte *)( ((uintptr_t)pEsp + 32 + 15) & ~(uintptr_t)15 );

	// On Win64, we need to be able to find an exception handler
	// if we walk the stack to this point. Currently,
	// this is as close to the root as we can go. If we
	// try to go higher, we wil fail. That's actually
	// OK at run time, because Coroutine_Finish doesn't
	// return!
	CatchAndWriteMiniDumpForVoidPtrFn( coroutine.m_pFunc, coroutine.m_pvParam, /*bExitQuietly*/ true );
#else
	coroutine.m_pStackHigh = (byte *)pEsp;

	// run the function directly
	coroutine.m_pFunc( coroutine.m_pvParam );
#endif

	// longjmp back to the main 'thread'
	Coroutine_Finish();
}
  640. //-----------------------------------------------------------------------------
  641. // Purpose: cancels a currently running coroutine
  642. //-----------------------------------------------------------------------------
  643. void Coroutine_Cancel( HCoroutine hCoroutine )
  644. {
  645. GCoroutineMgr().DeleteCoroutine( hCoroutine );
  646. }
  647. //-----------------------------------------------------------------------------
  648. // Purpose: cause a debug break in the specified coroutine
  649. //-----------------------------------------------------------------------------
  650. void Coroutine_DebugBreak( HCoroutine hCoroutine )
  651. {
  652. Internal_Coroutine_Continue( hCoroutine, k_pchDebugMsg_GenericBreak, NULL );
  653. }
  654. //-----------------------------------------------------------------------------
  655. // Purpose: generate an assert (perhaps generating a minidump), with the
  656. // specified failure message, in the specified coroutine
  657. //-----------------------------------------------------------------------------
  658. void Coroutine_DebugAssert( HCoroutine hCoroutine, const char *pchMsg )
  659. {
  660. Assert( pchMsg );
  661. Internal_Coroutine_Continue( hCoroutine, pchMsg, NULL );
  662. }
  663. //-----------------------------------------------------------------------------
  664. // Purpose: returns true if the code is currently running inside of a coroutine
  665. //-----------------------------------------------------------------------------
  666. bool Coroutine_IsActive()
  667. {
  668. return GCoroutineMgr().IsAnyCoroutineActive();
  669. }
  670. //-----------------------------------------------------------------------------
  671. // Purpose: returns a handle the currently active coroutine
  672. //-----------------------------------------------------------------------------
  673. HCoroutine Coroutine_GetCurrentlyActive()
  674. {
  675. Assert( Coroutine_IsActive() );
  676. return GCoroutineMgr().GetActiveCoroutineHandle();
  677. }
//-----------------------------------------------------------------------------
// Purpose: lets the main thread continue
//
// Saves this coroutine's registers and stack to the heap, restores the stack
// of whoever resumed us, and longjmp's back to them. When this coroutine is
// later continued, execution resumes just after the setjmp() below with a
// non-zero result.
//-----------------------------------------------------------------------------
void Coroutine_YieldToMain()
{
	// if you've hit this assert, it's because you're calling yield when not in a coroutine
	Assert( Coroutine_IsActive() );

	CCoroutine &coroutinePrev = GCoroutineMgr().GetPreviouslyActiveCoroutine();
	CCoroutine &coroutine = GCoroutineMgr().GetActiveCoroutine();

	CoroutineDbgMsg( g_fmtstr.sprintf( "Coroutine_YieldToMain() %s#%x -> %s#%x\n", coroutine.m_pchName, coroutine.m_hCoroutine, coroutinePrev.m_pchName, coroutinePrev.m_hCoroutine ) );

#ifdef _WIN32
#ifndef _WIN64
	// make sure nobody has a try/catch block and then yielded
	// because we hate that and we will crash
	uint32 topofexceptionchain;
	__asm mov eax, dword ptr fs:[0]
	__asm mov topofexceptionchain, eax
	if ( GCoroutineMgr().m_topofexceptionchain == 0 )
		GCoroutineMgr().m_topofexceptionchain = topofexceptionchain;
	else
	{
		Assert( topofexceptionchain == GCoroutineMgr().m_topofexceptionchain );
	}
#endif
#endif

	RW_MEMORY_BARRIER;
	// k_iSetJmpStateSaved: first pass, we are yielding now.
	// k_iSetJmpDbgBreak: we were resumed only to raise an assert/break, and
	// must immediately yield again.
	int iResult = setjmp( coroutine.GetRegisters() );
	if ( ( iResult == k_iSetJmpStateSaved ) || ( iResult == k_iSetJmpDbgBreak ) )
	{
		// break / assert requested?
		if ( iResult == k_iSetJmpDbgBreak )
		{
			// Assert (minidump) requested?
			if ( coroutine.m_pchDebugMsg )
			{
				// Generate a failed assertion
				AssertMsg1( !"Coroutine assert requested", "%s", coroutine.m_pchDebugMsg );
			}
			else
			{
				// If we were loaded only to debug, call a break
				DebuggerBreakIfDebugging();
			}

			// Now IMMEDIATELY yield back to the main thread
		}

		// Clear message, regardless
		coroutine.m_pchDebugMsg = NULL;

		// save our stack - all the way to the top, err bottom err, the end of it ( where esp is )
		coroutine.SaveStack();
		CoroutineDbgMsg( g_fmtstr.sprintf( "SaveStack() %s#%x [%x - %x]\n", coroutine.m_pchName, coroutine.m_hCoroutine, coroutine.m_pStackLow, coroutine.m_pStackHigh ) );

		// restore the main thread stack
		// allocate a bunch of stack padding so we don't kill ourselves while in stack restoration
		// If the coroutine's stack is close enough to where we are on the stack, we need to push ourselves
		// down past it, so that the memcpy() doesn't screw up the RestoreStack->memcpy call chain.
		GetStackPtr( pStackPtr );
		if ( pStackPtr >= (coroutinePrev.m_pStackHigh - coroutinePrev.m_cubSavedStack) && ( pStackPtr - 2048 ) <= coroutinePrev.m_pStackHigh )
		{
			int cubPush = coroutinePrev.m_cubSavedStack + 512;
			volatile byte *pvStackGap = (byte*)stackalloc( cubPush );
			// touch the far end so the allocation can't be optimized away
			pvStackGap[ cubPush - 1 ] = 0xF;
			CoroutineDbgMsg( g_fmtstr.sprintf( "Adjusting stack point by %d (%x <- %x)\n", cubPush, pvStackGap, &pvStackGap[cubPush] ) );
		}

		CoroutineDbgMsg( g_fmtstr.sprintf( "RestoreStack() %s#%x [%x - %x]\n", coroutinePrev.m_pchName, coroutinePrev.m_hCoroutine, coroutinePrev.m_pStackLow, coroutinePrev.m_pStackHigh ) );
		coroutinePrev.RestoreStack();

		// jump back to the main thread
		// Our stack may have been mucked with, can't use local vars anymore!
		RW_MEMORY_BARRIER;
		Coroutine_longjmp( GCoroutineMgr().GetPreviouslyActiveCoroutine().GetRegisters(), k_iSetJmpContinue );
	}
	else
	{
		// we've been restored, now continue on our merry way
	}
}
//-----------------------------------------------------------------------------
// Purpose: done with the Coroutine, terminate safely
//
// Restores the previously active coroutine's stack and longjmps back to it
// with k_iSetJmpDone. Never returns. Must be called from inside a coroutine.
//-----------------------------------------------------------------------------
void Coroutine_Finish()
{
	Assert( Coroutine_IsActive() );
	CoroutineDbgMsg( g_fmtstr.sprintf( "Coroutine_Finish() %s#%x -> %s#%x\n", GCoroutineMgr().GetActiveCoroutine().m_pchName, GCoroutineMgr().GetActiveCoroutineHandle(), GCoroutineMgr().GetPreviouslyActiveCoroutine().m_pchName, &GCoroutineMgr().GetPreviouslyActiveCoroutine() ) );
	// allocate a bunch of stack padding so we don't kill ourselves while in stack restoration
	volatile byte *pvStackGap = (byte*)stackalloc( GCoroutineMgr().GetPreviouslyActiveCoroutine().m_cubSavedStack + 512 );
	// touch the last byte so the compiler can't elide the alloca
	pvStackGap[ GCoroutineMgr().GetPreviouslyActiveCoroutine().m_cubSavedStack + 511 ] = 0xf;
	GCoroutineMgr().GetPreviouslyActiveCoroutine().RestoreStack();
	RW_MEMORY_BARRIER;
	// go back to the main thread, signaling that we're done
	Coroutine_longjmp( GCoroutineMgr().GetPreviouslyActiveCoroutine().GetRegisters(), k_iSetJmpDone );
}
  767. #ifdef STEAM
  768. //-----------------------------------------------------------------------------
  769. // Purpose: Coroutine that spawns another coroutine
  770. //-----------------------------------------------------------------------------
  771. void CoroutineTestFunc( void *pvRelaunch )
  772. {
  773. static const char *g_pchTestString = "test string";
  774. char rgchT[256];
  775. Q_strncpy( rgchT, g_pchTestString, sizeof(rgchT) );
  776. // yield
  777. Coroutine_YieldToMain();
  778. // ensure the string is still valid
  779. DbgVerifyNot( Q_strcmp( rgchT, g_pchTestString ) );
  780. if ( !pvRelaunch )
  781. {
  782. // test launching coroutines inside of coroutines
  783. HCoroutine hCoroutine = Coroutine_Create( &CoroutineTestFunc, (void *)(size_t)0xFFFFFFFF );
  784. // first pass the coroutines should all still be running
  785. DbgVerify( Coroutine_Continue( hCoroutine, NULL ) );
  786. // second pass the coroutines should all be finished
  787. DbgVerifyNot( Coroutine_Continue( hCoroutine, NULL ) );
  788. }
  789. }
  790. // test that just spins a few times
  791. void CoroutineTestL2( void * )
  792. {
  793. // spin a few times
  794. for ( int i = 0; i < 5; i++ )
  795. {
  796. Coroutine_YieldToMain();
  797. }
  798. }
  799. // level 1 of a test
  800. void CoroutineTestL1( void *pvecCoroutineL2 )
  801. {
  802. CUtlVector<HCoroutine> &vecCoroutineL2 = *(CUtlVector<HCoroutine> *)pvecCoroutineL2;
  803. int i = 20;
  804. // launch a set of coroutines
  805. for ( i = 0; i < 20; i++ )
  806. {
  807. HCoroutine hCoroutine = Coroutine_Create( &CoroutineTestL2, NULL );
  808. vecCoroutineL2.AddToTail( hCoroutine );
  809. Coroutine_Continue( hCoroutine, NULL );
  810. // now yield back to main occasionally
  811. if ( i % 2 == 1 )
  812. Coroutine_YieldToMain();
  813. }
  814. Assert( i == 20 );
  815. }
//-----------------------------------------------------------------------------
// Purpose: runs a self-test of the coroutine system
// it's working if it doesn't crash
//
// Exercises: basic create/continue/finish, nested coroutines, interleaved
// scheduling, a deliberate stack-position collision, and bulk creation.
// Always returns true (failure manifests as an assert or crash).
//-----------------------------------------------------------------------------
bool Coroutine_Test()
{
	// basic calling of a coroutine
	HCoroutine hCoroutine = Coroutine_Create( &CoroutineTestFunc, NULL );
	Coroutine_Continue( hCoroutine, NULL );
	Coroutine_Continue( hCoroutine, NULL );
	// now test
	CUtlVector<HCoroutine> vecCoroutineL2;
	hCoroutine = Coroutine_Create( &CoroutineTestL1, &vecCoroutineL2 );
	Coroutine_Continue( hCoroutine, NULL );
	// run the sub-coroutines until they're all done
	while ( vecCoroutineL2.Count() )
	{
		// keep ticking the launcher coroutine until it finishes
		if ( hCoroutine && !Coroutine_Continue( hCoroutine, NULL ) )
			hCoroutine = NULL;
		// iterate backwards so Remove() doesn't disturb unvisited indices
		FOR_EACH_VEC_BACK( vecCoroutineL2, i )
		{
			if ( !Coroutine_Continue( vecCoroutineL2[i], NULL ) )
				vecCoroutineL2.Remove( i );
		}
	}
	// new one
	hCoroutine = Coroutine_Create( &CoroutineTestFunc, NULL );
	// it has yielded, now continue it's call
	{
		// pop our stack up so it collides with the coroutine stack position
		Coroutine_Continue( hCoroutine, NULL );
		// the alloca below moves our stack pointer into the coroutine's saved
		// range to exercise the stack-gap adjustment in the yield/continue path
		volatile byte *pvAlloca = (byte*)stackalloc( k_cubCoroutineStackGapSmall );
		pvAlloca[ k_cubCoroutineStackGapSmall-1 ] = 0xF;
		Coroutine_Continue( hCoroutine, NULL );
	}
	// now do a whole bunch of them
	static const int k_nSimultaneousCoroutines = 10 * 1000;
	CUtlVector<HCoroutine> coroutines;
	Assert( coroutines.Base() == NULL );
	for (int i = 0; i < k_nSimultaneousCoroutines; i++)
	{
		coroutines.AddToTail( Coroutine_Create( &CoroutineTestFunc, NULL ) );
	}
	for (int i = 0; i < coroutines.Count(); i++)
	{
		// first pass the coroutines should all still be running
		DbgVerify( Coroutine_Continue( coroutines[i], NULL ) );
	}
	for (int i = 0; i < coroutines.Count(); i++)
	{
		// second pass the coroutines should all be finished
		DbgVerifyNot( Coroutine_Continue( coroutines[i], NULL ) );
	}
	return true;
}
  871. #endif
  872. //-----------------------------------------------------------------------------
  873. // Purpose: returns approximate stack depth of current coroutine.
  874. //-----------------------------------------------------------------------------
  875. size_t Coroutine_GetStackDepth()
  876. {
  877. // should only get called from a coroutine
  878. Assert( GCoroutineMgr().IsAnyCoroutineActive() );
  879. if ( !GCoroutineMgr().IsAnyCoroutineActive() )
  880. return 0;
  881. GetStackPtr( pLocal );
  882. CCoroutine &coroutine = GCoroutineMgr().GetActiveCoroutine();
  883. return ( coroutine.m_pStackHigh - pLocal );
  884. }
  885. //-----------------------------------------------------------------------------
  886. // Purpose: validates memory
  887. //-----------------------------------------------------------------------------
  888. void Coroutine_ValidateGlobals( class CValidator &validator )
  889. {
  890. #ifdef DBGFLAG_VALIDATE
  891. AUTO_LOCK( g_ThreadMutexCoroutineMgr );
  892. for ( int i = 0; i < g_VecPCoroutineMgr.Count(); i++ )
  893. {
  894. ValidatePtr( g_VecPCoroutineMgr[i] );
  895. }
  896. ValidateObj( g_VecPCoroutineMgr );
  897. #endif
  898. }