Team Fortress 2 Source Code as of 22/4/2020
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1157 lines
37 KiB

  1. //========= Copyright Valve Corporation, All rights reserved. ============//
  2. //
  3. // Purpose:
  4. //
  5. // Build Notes: In order for the coroutine system to work a few build options
  6. // need to be set for coroutine.cpp itself. These are the VPC
  7. // entries for those options:
  8. // $Compiler
  9. // {
  10. // $EnableC++Exceptions "No"
  11. // $BasicRuntimeChecks "Default"
  12. // $EnableFloatingPointExceptions "No"
  13. // }
  14. //
  15. // If you have not set these options you will get a strange popup in
  16. // Visual Studio at the end of Coroutine_Continue().
  17. //
  18. //=============================================================================
//#include "pch_vstdlib.h"
#if defined(_DEBUG)
// Verify that something is false
#define DbgVerifyNot(x) Assert(!x)
#else
// NOTE: in release builds the expression is still evaluated (for its side
// effects) — only the assertion check disappears.
#define DbgVerifyNot(x) x
#endif
#include "vstdlib/coroutine.h"
#include "tier0/vprof.h"
#include "tier0/minidump.h"
#include "tier1/utllinkedlist.h"
#include "tier1/utlvector.h"
#include <setjmp.h>
// for debugging
//#define CHECK_STACK_CORRUPTION
#ifndef STEAM
// Outside the Steam build, map the Steam allocator wrappers straight onto the
// CRT heap so the coroutine stack-save buffers still work.
#define PvAlloc(x) malloc(x)
#define FreePv(x) free(x)
#endif
#ifdef CHECK_STACK_CORRUPTION
// Pulls in the MD5 implementation directly so the debug-only stack checksums
// don't add a link dependency in normal builds.
#include "tier1/checksum_md5.h"
#include "../tier1/checksum_md5.cpp"
#endif // CHECK_STACK_CORRUPTION
  42. //#define COROUTINE_TRACE
  43. #ifdef COROUTINE_TRACE
  44. #include "tier1/fmtstr.h"
  45. static CFmtStr g_fmtstr;
  46. #ifdef WIN32
  47. extern "C" __declspec(dllimport) void __stdcall OutputDebugStringA( const char * );
  48. #else
  49. void OutputDebugStringA( const char *pchMsg ) { fprintf( stderr, pchMsg ); fflush( stderr ); }
  50. #endif
  51. #define CoroutineDbgMsg( fmt, ... ) \
  52. { \
  53. g_fmtstr.sprintf( fmt, ##__VA_ARGS__ ); \
  54. OutputDebugStringA( g_fmtstr ); \
  55. }
  56. #else
  57. #define CoroutineDbgMsg( pchMsg, ... )
  58. #endif // COROUTINE_TRACE
  59. // memdbgon must be the last include file in a .cpp file!!!
  60. #include "tier0/memdbgon.h"
#if defined( _MSC_VER ) && ( _MSC_VER >= 1900 ) && defined( PLATFORM_64BITS )
// the VS2015 longjmp() seems to freak out jumping back into a coroutine (just like linux if _FORTIFY_SOURCE is defined)
// I can't find an analogy to _FORTIFY_SOURCE for MSVC at the moment, so I wrote a quick assembly to longjmp() without any safety checks
extern "C" NORETURN void Coroutine_LongJmp_Unchecked( jmp_buf buffer, int nResult );
#define Coroutine_longjmp Coroutine_LongJmp_Unchecked
#ifdef _WIN64
// offsetof substitute that avoids MSVC's warnings about casting a null
// pointer on 64-bit (goes through ptrdiff_t first).
#define Q_offsetof(s,m) (size_t)( (ptrdiff_t)&reinterpret_cast<const volatile char&>((((s *)0)->m)) )
#else
#define Q_offsetof(s,m) (size_t)&reinterpret_cast<const volatile char&>((((s *)0)->m))
#endif
#define SIZEOF_MEMBER( className, memberName ) sizeof( ((className*)nullptr)->memberName )
// Asserts that a member has the same offset AND the same size in the CRT's
// _JUMP_BUFFER and in our hand-maintained duplicate below. If the CRT layout
// ever changes, these fire at compile time instead of corrupting registers
// at run time inside the unchecked longjmp.
#define Validate_Jump_Buffer( _Member ) COMPILE_TIME_ASSERT( (Q_offsetof( _JUMP_BUFFER, _Member ) == Q_offsetof( _Duplicate_JUMP_BUFFER, _Member )) && (SIZEOF_MEMBER( _JUMP_BUFFER, _Member ) == SIZEOF_MEMBER( _Duplicate_JUMP_BUFFER, _Member )) )
//validate that the structure in assembly matches what the crt setjmp thinks it is
# if defined( PLATFORM_64BITS )
// Mirror of the x64 CRT _JUMP_BUFFER: nonvolatile integer registers,
// FP control words, and the nonvolatile XMM6-XMM15 registers.
struct _Duplicate_JUMP_BUFFER
{
	unsigned __int64 Frame;
	unsigned __int64 Rbx;
	unsigned __int64 Rsp;
	unsigned __int64 Rbp;
	unsigned __int64 Rsi;
	unsigned __int64 Rdi;
	unsigned __int64 R12;
	unsigned __int64 R13;
	unsigned __int64 R14;
	unsigned __int64 R15;
	unsigned __int64 Rip;
	unsigned long MxCsr;
	unsigned short FpCsr;
	unsigned short Spare;
	SETJMP_FLOAT128 Xmm6;
	SETJMP_FLOAT128 Xmm7;
	SETJMP_FLOAT128 Xmm8;
	SETJMP_FLOAT128 Xmm9;
	SETJMP_FLOAT128 Xmm10;
	SETJMP_FLOAT128 Xmm11;
	SETJMP_FLOAT128 Xmm12;
	SETJMP_FLOAT128 Xmm13;
	SETJMP_FLOAT128 Xmm14;
	SETJMP_FLOAT128 Xmm15;
};
COMPILE_TIME_ASSERT( sizeof( _JUMP_BUFFER ) == sizeof( _Duplicate_JUMP_BUFFER ) );
Validate_Jump_Buffer( Frame );
Validate_Jump_Buffer( Rbx );
Validate_Jump_Buffer( Rsp );
Validate_Jump_Buffer( Rbp );
Validate_Jump_Buffer( Rsi );
Validate_Jump_Buffer( Rdi );
Validate_Jump_Buffer( R12 );
Validate_Jump_Buffer( R13 );
Validate_Jump_Buffer( R14 );
Validate_Jump_Buffer( R15 );
Validate_Jump_Buffer( Rip );
Validate_Jump_Buffer( MxCsr );
Validate_Jump_Buffer( FpCsr );
Validate_Jump_Buffer( Spare );
Validate_Jump_Buffer( Xmm6 );
Validate_Jump_Buffer( Xmm7 );
Validate_Jump_Buffer( Xmm8 );
Validate_Jump_Buffer( Xmm9 );
Validate_Jump_Buffer( Xmm10 );
Validate_Jump_Buffer( Xmm11 );
Validate_Jump_Buffer( Xmm12 );
Validate_Jump_Buffer( Xmm13 );
Validate_Jump_Buffer( Xmm14 );
Validate_Jump_Buffer( Xmm15 );
# else
// Mirror of the x86 CRT _JUMP_BUFFER (callee-saved registers + SEH state).
struct _Duplicate_JUMP_BUFFER
{
	unsigned long Ebp;
	unsigned long Ebx;
	unsigned long Edi;
	unsigned long Esi;
	unsigned long Esp;
	unsigned long Eip;
	unsigned long Registration;
	unsigned long TryLevel;
	unsigned long Cookie;
	unsigned long UnwindFunc;
	unsigned long UnwindData[6];
};
COMPILE_TIME_ASSERT( sizeof( _JUMP_BUFFER ) == sizeof( _Duplicate_JUMP_BUFFER ) );
Validate_Jump_Buffer( Ebp );
Validate_Jump_Buffer( Ebx );
Validate_Jump_Buffer( Edi );
Validate_Jump_Buffer( Esi );
Validate_Jump_Buffer( Esp );
Validate_Jump_Buffer( Eip );
Validate_Jump_Buffer( Registration );
Validate_Jump_Buffer( TryLevel );
Validate_Jump_Buffer( Cookie );
Validate_Jump_Buffer( UnwindFunc );
// NOTE(review): UnwindData[6] is one past the last element (valid indices are
// 0-5). The offset/size comparison still matches between the two structs, but
// this presumably meant UnwindData[5] — confirm before touching.
Validate_Jump_Buffer( UnwindData[6] );
# endif
#else
// Every other platform/toolchain: the CRT longjmp is safe to use directly.
#define Coroutine_longjmp longjmp
#endif
// it *feels* like we should need barriers around our setjmp/longjmp calls, and the memcpy's
// to make sure the optimizer doesn't reorder us across register load/stores, so I've put them
// in what seem like the appropriate spots, but we seem to run ok without them, so...
#ifdef GNUC
#define RW_MEMORY_BARRIER /* __sync_synchronize() */
#else
#define RW_MEMORY_BARRIER /* _ReadWriteBarrier() */
#endif
// return values from setjmp()
static const int k_iSetJmpStateSaved = 0x00;	// initial return: registers were just saved
static const int k_iSetJmpContinue = 0x01;		// longjmp'd back in: resume normally
static const int k_iSetJmpDone = 0x02;			// longjmp'd back in: coroutine finished
static const int k_iSetJmpDbgBreak = 0x03;		// longjmp'd back in: raise a debug assert/break
// distance up the stack that coroutine functions stacks' start
#ifdef _PS3
// PS3 has a small stack. Hopefully we dont need 64k of padding!
static const int k_cubCoroutineStackGap = (3 * 1024);
static const int k_cubCoroutineStackGapSmall = 64;
#else
static const int k_cubCoroutineStackGap = (64 * 1024);
static const int k_cubCoroutineStackGapSmall = 64;
#endif
// Warning size for allocated stacks
#ifdef _DEBUG
// In debug builds, we'll end up with much more stack usage in some scenarios that isn't representative of release
// builds. We should still warn if we're going way above what we could expect the optimizer to save us from, but the
// warning is more salient in release.
static const int k_cubMaxCoroutineStackSize = (48 * 1024);
#else
static const int k_cubMaxCoroutineStackSize = (32 * 1024);
#endif // defined( _DEBUG )
// GetStackPtr( var ) declares 'var' as a byte* holding the current stack
// pointer, using whatever mechanism the platform allows.
#ifdef _WIN64
extern "C" byte *GetStackPtr64();	// implemented in assembly (no inline asm on MSVC x64)
#define GetStackPtr( pStackPtr) byte *pStackPtr = GetStackPtr64();
#else
#ifdef WIN32
#define GetStackPtr( pStackPtr ) byte *pStackPtr; __asm mov pStackPtr, esp
#elif defined(GNUC)
// Apple's version of gcc/g++ doesn't return the expected value using the intrinsic, so
// do it the old fashioned way - this will also use asm on linux (since we don't compile
// with llvm/clang there) but that seems fine.
#if defined(__llvm__) || defined(__clang__)
#define GetStackPtr( pStackPtr ) byte *pStackPtr = (byte*)__builtin_frame_address(0)
#else
#define GetStackPtr( pStackPtr ) register byte *pStackPtr __asm__( "esp" )
#endif
#elif defined(__SNC__)
#define GetStackPtr( pStackPtr ) byte *pStackPtr = (byte*)__builtin_frame_address(0)
#else
#error
#endif
#endif
#ifdef _M_X64
// x64 requires the saved jmp_buf to be 16-byte aligned (it holds XMM saves).
#define _REGISTER_ALIGNMENT 16ull
// Returns how many bytes to skip past p to reach the next
// _REGISTER_ALIGNMENT boundary (0.._REGISTER_ALIGNMENT-1... may equal the
// alignment when p is already aligned, per AlignValue semantics).
int CalcAlignOffset( const unsigned char *p )
{
	return static_cast<int>( AlignValue( p, _REGISTER_ALIGNMENT ) - p );
}
#endif
//-----------------------------------------------------------------------------
// Purpose: single coroutine descriptor.
//
// A coroutine's stack is not a separate allocation: the coroutine runs on the
// real thread stack (far below the caller, see Coroutine_Launch), and when it
// yields, the live region [m_pStackLow, m_pStackHigh) is memcpy'd to a heap
// buffer (SaveStack) and memcpy'd back before it is resumed (RestoreStack).
// The setjmp register state lives in GetRegisters().
//-----------------------------------------------------------------------------
#if defined( _PS3 ) && defined( _DEBUG )
// Scratch area used by RestoreStack() so the heap buffer can be freed while
// the stack frames are still valid (see comment in RestoreStack).
byte rgStackTempBuffer[65535];
#endif
class CCoroutine
{
public:
	CCoroutine()
	{
		m_pSavedStack = NULL;
		m_pStackHigh = m_pStackLow = NULL;
		m_cubSavedStack = 0;
		m_pFunc = NULL;
		m_pchName = "(none)";
		m_iJumpCode = 0;
		m_pchDebugMsg = NULL;
#ifdef COROUTINE_TRACE
		m_hCoroutine = -1;
#endif
#ifdef _M_X64
		m_nAlignmentBytes = CalcAlignOffset( m_rgubRegisters );
#endif
#if defined( VPROF_ENABLED )
		m_pVProfNodeScope = NULL;
#endif
	}

	// Returns the jmp_buf for this coroutine. On x64 the buffer lives inside
	// an over-sized byte array and is re-aligned on demand, because the
	// containing CUtlLinkedList may move this object in memory and break the
	// 16-byte alignment setjmp requires.
	jmp_buf &GetRegisters()
	{
#ifdef _M_X64
		// Did we get moved in memory in such a way that the registers became unaligned?
		// If so, fix them up now
		size_t align = _REGISTER_ALIGNMENT - 1;
		unsigned char *pRegistersCur = &m_rgubRegisters[m_nAlignmentBytes];
		if ( (size_t)pRegistersCur & align )
		{
			m_nAlignmentBytes = CalcAlignOffset( m_rgubRegisters );
			unsigned char *pRegistersNew = &m_rgubRegisters[m_nAlignmentBytes];
			Q_memmove( pRegistersNew, pRegistersCur, sizeof(jmp_buf) );
			pRegistersCur = pRegistersNew;
		}
		return *reinterpret_cast<jmp_buf *>( pRegistersCur );
#else
		return m_Registers;
#endif
	}

	~CCoroutine()
	{
		// free(NULL) would be fine, but keep the explicit guard to match the
		// codebase's FreePv conventions.
		if ( m_pSavedStack )
		{
			FreePv( m_pSavedStack );
		}
	}

	// Copies the heap-saved stack image back onto the real thread stack and
	// releases the save buffer. After the inner memcpy, every local variable
	// and 'this' from before the copy is garbage — hence the pThis dance.
	// Must be FORCEINLINE: a call frame here would sit inside the region
	// being overwritten.
	FORCEINLINE void RestoreStack()
	{
		if ( m_cubSavedStack )
		{
			Assert( m_pStackHigh );
			Assert( m_pSavedStack );
#if defined( _PS3 ) && defined( _DEBUG )
			// Our (and Sony's) memory tracking tools may try to walk the stack during a free() call
			// if we do the free here at our normal point though the stack is invalid since it's in
			// the middle of swapping. Instead move it to a temp buffer now and free while the stack
			// frames in place are still ok.
			Assert( m_cubSavedStack < Q_ARRAYSIZE( rgStackTempBuffer ) );
			memcpy( &rgStackTempBuffer[0], m_pSavedStack, m_cubSavedStack );
			FreePv( m_pSavedStack );
			m_pSavedStack = &rgStackTempBuffer[0];
#endif
			// Assert we're not about to trash our own immediate stack
			GetStackPtr( pStack );
			if ( pStack >= m_pStackLow && pStack <= m_pStackHigh )
			{
				CoroutineDbgMsg( g_fmtstr.sprintf( "Restoring stack over ESP (%x, %x, %x)\n", pStack, m_pStackLow, m_pStackHigh ) );
				AssertMsg3( false, "Restoring stack over ESP (%p, %p, %p)\n", pStack, m_pStackLow, m_pStackHigh );
			}
			// Make sure we can access our instance pointer after restoring the stack. This function is inlined, so the compiler could decide to
			// use an existing coroutine pointer that is already on the stack from the previous function (does so on the PS3), and will be overwritten
			// when we memcpy below. Any allocations here should be ok, as the caller should have advanced the stack past the stack area where the
			// new stack will be copied
			CCoroutine *pThis = (CCoroutine*)stackalloc( sizeof( CCoroutine* ) );
			pThis = this;
			RW_MEMORY_BARRIER;
			memcpy( m_pStackLow, m_pSavedStack, m_cubSavedStack );
			// WARNING: The stack has been replaced.. do not use previous stack variables or this
#ifdef CHECK_STACK_CORRUPTION
			// Verify the restored bytes hash to the same digest computed at save time.
			MD5Init( &pThis->m_md52 );
			MD5Update( &pThis->m_md52, pThis->m_pStackLow, pThis->m_cubSavedStack );
			MD5Final( pThis->m_digest2, &pThis->m_md52 );
			Assert( 0 == Q_memcmp( pThis->m_digest, pThis->m_digest2, MD5_DIGEST_LENGTH ) );
#endif
			// free the saved stack info
			pThis->m_cubSavedStack = 0;
#if !defined( _PS3 ) || !defined( _DEBUG )
			// (on PS3 debug the buffer was already freed above)
			FreePv( pThis->m_pSavedStack );
#endif
			pThis->m_pSavedStack = NULL;
			// If we were the "main thread", reset our stack pos to zero
			if ( NULL == pThis->m_pFunc )
			{
				pThis->m_pStackLow = pThis->m_pStackHigh = 0;
			}
			// resume accounting against the vprof node we were in when we yielded
			// Make sure we are added after the coroutine we just copied onto the stack
#if defined( VPROF_ENABLED )
			pThis->m_pVProfNodeScope = g_VProfCurrentProfile.GetCurrentNode();
			if ( g_VProfCurrentProfile.IsEnabled() )
			{
				// Re-enter the vprof scopes in reverse of the order SaveStack exited them.
				FOR_EACH_VEC_BACK( pThis->m_vecProfNodeStack, i )
				{
					g_VProfCurrentProfile.EnterScope(
						pThis->m_vecProfNodeStack[i]->GetName(),
						0,
						g_VProfCurrentProfile.GetBudgetGroupName( pThis->m_vecProfNodeStack[i]->GetBudgetGroupID() ),
						false,
						g_VProfCurrentProfile.GetBudgetGroupFlags( pThis->m_vecProfNodeStack[i]->GetBudgetGroupID() )
					);
				}
			}
			pThis->m_vecProfNodeStack.Purge();
#endif
		}
	}

	// Snapshots [current stack pointer, m_pStackHigh) into a fresh heap
	// buffer. Called at yield time; must be FORCEINLINE so its own frame is
	// included in the region being saved rather than sitting below it.
	FORCEINLINE void SaveStack()
	{
		MEM_ALLOC_CREDIT_( "Coroutine saved stack" );
		if ( m_pSavedStack )
		{
			FreePv( m_pSavedStack );
		}
		GetStackPtr( pLocal );
		m_pStackLow = pLocal;
		m_cubSavedStack = (m_pStackHigh - m_pStackLow);
		m_pSavedStack = (byte *)PvAlloc( m_cubSavedStack );
		// if you hit this assert, it's because you're allocating way too much stuff on the stack in your job
		// check you haven't got any overly large string buffers allocated on the stack
		Assert( m_cubSavedStack < k_cubMaxCoroutineStackSize );
#if defined( VPROF_ENABLED )
		// Exit any current vprof scope when we yield, and remember the vprof stack so we can restore it when we run again
		m_vecProfNodeStack.RemoveAll();
		CVProfNode *pCurNode = g_VProfCurrentProfile.GetCurrentNode();
		while ( pCurNode && m_pVProfNodeScope && pCurNode != m_pVProfNodeScope && pCurNode != g_VProfCurrentProfile.GetRoot() )
		{
			m_vecProfNodeStack.AddToTail( pCurNode );
			g_VProfCurrentProfile.ExitScope();
			pCurNode = g_VProfCurrentProfile.GetCurrentNode();
		}
		m_pVProfNodeScope = NULL;
#endif
		RW_MEMORY_BARRIER;
		// save the stack in the newly allocated slot
		memcpy( m_pSavedStack, m_pStackLow, m_cubSavedStack );
#ifdef CHECK_STACK_CORRUPTION
		// Hash the saved image; RestoreStack re-hashes and compares.
		MD5Init( &m_md5 );
		MD5Update( &m_md5, m_pSavedStack, m_cubSavedStack );
		MD5Final( m_digest, &m_md5 );
#endif
	}

#ifdef DBGFLAG_VALIDATE
	// Memory-validation hook: claims the heap-saved stack buffer.
	void Validate( CValidator &validator, const char *pchName )
	{
		validator.Push( "CCoroutine", this, pchName );
		validator.ClaimMemory( m_pSavedStack );
		validator.Pop();
	}
#endif

#ifdef _M_X64
	// Raw storage for the jmp_buf plus alignment slack; see GetRegisters().
	unsigned char m_rgubRegisters[sizeof(jmp_buf) + _REGISTER_ALIGNMENT];
	int m_nAlignmentBytes;		// current offset of the aligned jmp_buf within m_rgubRegisters
#else
	jmp_buf m_Registers;
#endif
	byte *m_pStackHigh;			// position of initial entry to the coroutine (stack ptr before continue is ran)
	byte *m_pStackLow;			// low point on the stack we plan on saving (stack ptr when we yield)
	byte *m_pSavedStack;		// pointer to the saved stack (allocated on heap)
	int m_cubSavedStack;		// amount of data on stack
	const char *m_pchName;		// debug name; never owned by this object
	int m_iJumpCode;			// k_iSetJmp* value passed to Coroutine_longjmp on resume
	const char *m_pchDebugMsg;	// assert message to raise inside the coroutine (k_iSetJmpDbgBreak)
#ifdef COROUTINE_TRACE
	HCoroutine m_hCoroutine;	// for debugging
#endif
	CoroutineFunc_t m_pFunc;	// entry point; NULL marks the per-thread "main" pseudo-coroutine
	void *m_pvParam;			// opaque argument passed to m_pFunc
#if defined( VPROF_ENABLED )
	CUtlVector<CVProfNode *> m_vecProfNodeStack;	// vprof scopes exited at yield, re-entered at resume
	CVProfNode *m_pVProfNodeScope;					// vprof node active when this coroutine last started/resumed
#endif
#ifdef CHECK_STACK_CORRUPTION
	MD5Context_t m_md5;
	unsigned char m_digest[MD5_DIGEST_LENGTH];
	MD5Context_t m_md52;
	unsigned char m_digest2[MD5_DIGEST_LENGTH];
#endif
};
  413. //-----------------------------------------------------------------------------
  414. // Purpose: manages list of all coroutines
  415. //-----------------------------------------------------------------------------
  416. class CCoroutineMgr
  417. {
  418. public:
  419. CCoroutineMgr()
  420. {
  421. m_topofexceptionchain = 0;
  422. // reserve the 0 index as the main coroutine
  423. HCoroutine hMainCoroutine = m_ListCoroutines.AddToTail();
  424. m_ListCoroutines[hMainCoroutine].m_pchName = "(main)";
  425. #ifdef COROUTINE_TRACE
  426. m_ListCoroutines[hMainCoroutine].m_hCoroutine = hMainCoroutine;
  427. #endif
  428. // mark it as currently running
  429. m_VecCoroutineStack.AddToTail( hMainCoroutine );
  430. }
  431. HCoroutine CreateCoroutine( CoroutineFunc_t pFunc, void *pvParam )
  432. {
  433. HCoroutine hCoroutine = m_ListCoroutines.AddToTail();
  434. CoroutineDbgMsg( g_fmtstr.sprintf( "Coroutine_Create() hCoroutine = %x pFunc = 0x%x pvParam = 0x%x\n", hCoroutine, pFunc, pvParam ) );
  435. m_ListCoroutines[hCoroutine].m_pFunc = pFunc;
  436. m_ListCoroutines[hCoroutine].m_pvParam = pvParam;
  437. m_ListCoroutines[hCoroutine].m_pSavedStack = NULL;
  438. m_ListCoroutines[hCoroutine].m_cubSavedStack = 0;
  439. m_ListCoroutines[hCoroutine].m_pStackHigh = m_ListCoroutines[hCoroutine].m_pStackLow = NULL;
  440. m_ListCoroutines[hCoroutine].m_pchName = "(no name set)";
  441. #ifdef COROUTINE_TRACE
  442. m_ListCoroutines[hCoroutine].m_hCoroutine = hCoroutine;
  443. #endif
  444. return hCoroutine;
  445. }
  446. HCoroutine GetActiveCoroutineHandle()
  447. {
  448. // look up the coroutine of the last item on the stack
  449. return m_VecCoroutineStack[m_VecCoroutineStack.Count() - 1];
  450. }
  451. CCoroutine &GetActiveCoroutine()
  452. {
  453. // look up the coroutine of the last item on the stack
  454. return m_ListCoroutines[GetActiveCoroutineHandle()];
  455. }
  456. CCoroutine &GetPreviouslyActiveCoroutine()
  457. {
  458. // look up the coroutine that ran the current coroutine
  459. return m_ListCoroutines[m_VecCoroutineStack[m_VecCoroutineStack.Count() - 2]];
  460. }
  461. bool IsValidCoroutine( HCoroutine hCoroutine )
  462. {
  463. return m_ListCoroutines.IsValidIndex( hCoroutine ) && hCoroutine > 0;
  464. }
  465. void SetActiveCoroutine( HCoroutine hCoroutine )
  466. {
  467. m_VecCoroutineStack.AddToTail( hCoroutine );
  468. }
  469. void PopCoroutineStack()
  470. {
  471. Assert( m_VecCoroutineStack.Count() > 1 );
  472. m_VecCoroutineStack.Remove( m_VecCoroutineStack.Count() - 1 );
  473. }
  474. bool IsAnyCoroutineActive()
  475. {
  476. return m_VecCoroutineStack.Count() > 1;
  477. }
  478. void DeleteCoroutine( HCoroutine hCoroutine )
  479. {
  480. m_ListCoroutines.Remove( hCoroutine );
  481. }
  482. #ifdef DBGFLAG_VALIDATE
  483. void Validate( CValidator &validator, const char *pchName )
  484. {
  485. validator.Push( "CCoroutineMgr", this, pchName );
  486. ValidateObj( m_ListCoroutines );
  487. FOR_EACH_LL( m_ListCoroutines, iRoutine )
  488. {
  489. ValidateObj( m_ListCoroutines[iRoutine] );
  490. }
  491. ValidateObj( m_VecCoroutineStack );
  492. validator.Pop();
  493. }
  494. #endif // DBGFLAG_VALIDATE
  495. uint32 m_topofexceptionchain;
  496. private:
  497. CUtlLinkedList<CCoroutine, HCoroutine> m_ListCoroutines;
  498. CUtlVector<HCoroutine> m_VecCoroutineStack;
  499. };
// Per-thread coroutine manager, created lazily on first use.
CThreadLocalPtr< CCoroutineMgr > g_ThreadLocalCoroutineMgr;
// Global registry of every thread's manager (guarded by the mutex below),
// used by Coroutine_ReleaseThreadMemory to find and free them.
CUtlVector< CCoroutineMgr * > g_VecPCoroutineMgr;
CThreadMutex g_ThreadMutexCoroutineMgr;
// Returns this thread's coroutine manager, creating it on first call.
// The lock only guards the shared registry vector; the thread-local
// pointer itself is only ever touched by its own thread.
CCoroutineMgr &GCoroutineMgr()
{
	if ( !g_ThreadLocalCoroutineMgr )
	{
		AUTO_LOCK( g_ThreadMutexCoroutineMgr );
		g_ThreadLocalCoroutineMgr = new CCoroutineMgr();
		g_VecPCoroutineMgr.AddToTail( g_ThreadLocalCoroutineMgr );
	}
	return *g_ThreadLocalCoroutineMgr;
}
  513. //-----------------------------------------------------------------------------
  514. // Purpose: call when a thread is quiting to release any per-thread memory
  515. //-----------------------------------------------------------------------------
  516. void Coroutine_ReleaseThreadMemory()
  517. {
  518. AUTO_LOCK( g_ThreadMutexCoroutineMgr );
  519. if ( g_ThreadLocalCoroutineMgr != NULL )
  520. {
  521. int iCoroutineMgr = g_VecPCoroutineMgr.Find( g_ThreadLocalCoroutineMgr );
  522. delete g_VecPCoroutineMgr[iCoroutineMgr];
  523. g_VecPCoroutineMgr.Remove( iCoroutineMgr );
  524. }
  525. }
// predecs
void Coroutine_Launch( CCoroutine &coroutine );
void Coroutine_Finish();
//-----------------------------------------------------------------------------
// Purpose: Creates a coroutine, specified by the function, returns a handle.
//          The coroutine does not run until Coroutine_Continue() is called on
//          the returned handle.
//-----------------------------------------------------------------------------
HCoroutine Coroutine_Create( CoroutineFunc_t pFunc, void *pvParam )
{
	return GCoroutineMgr().CreateCoroutine( pFunc, pvParam );
}
//-----------------------------------------------------------------------------
// Purpose: Continues a current coroutine
// input:	hCoroutine - the coroutine to continue
//			pchDebugMsg - if non-NULL, it will generate an assertion in
//					that coroutine, then that coroutine will
//					immediately yield back to this thread
// returns:	true while the coroutine is still running (it yielded),
//			false once it has finished and been deleted.
//-----------------------------------------------------------------------------
// Sentinel (not a real string!) meaning "break, but with no message".
static const char *k_pchDebugMsg_GenericBreak = (const char *)1;
bool Internal_Coroutine_Continue( HCoroutine hCoroutine, const char *pchDebugMsg, const char *pchName )
{
	Assert( GCoroutineMgr().IsValidCoroutine(hCoroutine) );
	bool bInCoroutineAlready = GCoroutineMgr().IsAnyCoroutineActive();
#ifdef _WIN32
#ifndef _WIN64
	// make sure nobody has a try/catch block and then yielded
	// because we hate that and we will crash
	// (fs:[0] is the head of the x86 SEH registration chain; it must not
	// point into a stack region that gets swapped out)
	uint32 topofexceptionchain;
	__asm mov eax, dword ptr fs:[0]
	__asm mov topofexceptionchain, eax
	if ( GCoroutineMgr().m_topofexceptionchain == 0 )
		GCoroutineMgr().m_topofexceptionchain = topofexceptionchain;
	else
	{
		Assert( topofexceptionchain == GCoroutineMgr().m_topofexceptionchain );
	}
#endif
#endif
	// start the new coroutine
	GCoroutineMgr().SetActiveCoroutine( hCoroutine );
	CCoroutine &coroutinePrev = GCoroutineMgr().GetPreviouslyActiveCoroutine();
	CCoroutine &coroutine = GCoroutineMgr().GetActiveCoroutine();
	if ( pchName )
		coroutine.m_pchName = pchName;
	CoroutineDbgMsg( g_fmtstr.sprintf( "Coroutine_Continue() %s#%x -> %s#%x\n", coroutinePrev.m_pchName, coroutinePrev.m_hCoroutine, coroutine.m_pchName, coroutine.m_hCoroutine ) );
	bool bStillRunning = true;
	// set the point for the coroutine to jump back to
	RW_MEMORY_BARRIER;
	int iResult = setjmp( coroutinePrev.GetRegisters() );
	if ( iResult == k_iSetJmpStateSaved )
	{
		// copy the new stack in place
		if ( coroutine.m_pSavedStack )
		{
			// save any of the main stack that overlaps where the coroutine stack is going to go
			GetStackPtr( pStackSavePoint );
			if ( pStackSavePoint <= coroutine.m_pStackHigh )
			{
				// save the main stack from where the coroutine stack wishes to start
				// if the previous coroutine already had a stack save point, just save
				// the whole thing.
				if ( NULL == coroutinePrev.m_pStackHigh )
				{
					coroutinePrev.m_pStackHigh = coroutine.m_pStackHigh;
				}
				else
				{
					Assert( coroutine.m_pStackHigh <= coroutinePrev.m_pStackHigh );
				}
				coroutinePrev.SaveStack();
				CoroutineDbgMsg( g_fmtstr.sprintf( "SaveStack() %s#%x [%x - %x]\n", coroutinePrev.m_pchName, coroutinePrev.m_hCoroutine, coroutinePrev.m_pStackLow, coroutinePrev.m_pStackHigh ) );
			}
			// If the coroutine's stack is close enough to where we are on the stack, we need to push ourselves
			// down past it, so that the memcpy() doesn't screw up the RestoreStack->memcpy call chain.
			if ( coroutine.m_pStackHigh > ( pStackSavePoint - 2048 ) )
			{
				// If the entire CR stack is above us, we don't need to pad ourselves.
				if ( coroutine.m_pStackLow < pStackSavePoint )
				{
					// push ourselves down (the write to the last byte keeps the
					// stackalloc from being optimized away)
					int cubPush = pStackSavePoint - coroutine.m_pStackLow + 512;
					volatile byte *pvStackGap = (byte*)stackalloc( cubPush );
					pvStackGap[ cubPush-1 ] = 0xF;
					CoroutineDbgMsg( g_fmtstr.sprintf( "Adjusting stack point by %d (%x <- %x)\n", cubPush, pvStackGap, &pvStackGap[cubPush] ) );
				}
			}
			// This needs to go right here - after we've maybe padded the stack (so that iJumpCode does not
			// get stepped on) and before the RestoreStack() call (because that might step on pchDebugMsg!).
			if ( pchDebugMsg == NULL )
			{
				coroutine.m_iJumpCode = k_iSetJmpContinue;
				coroutine.m_pchDebugMsg = NULL;
			}
			else if ( pchDebugMsg == k_pchDebugMsg_GenericBreak )
			{
				coroutine.m_iJumpCode = k_iSetJmpDbgBreak;
				coroutine.m_pchDebugMsg = NULL;
			}
			else
			{
				coroutine.m_iJumpCode = k_iSetJmpDbgBreak;
				coroutine.m_pchDebugMsg = pchDebugMsg;
			}
			// restore the coroutine stack
			CoroutineDbgMsg( g_fmtstr.sprintf( "RestoreStack() %s#%x [%x - %x] (current %x)\n", coroutine.m_pchName, coroutine.m_hCoroutine, coroutine.m_pStackLow, coroutine.m_pStackHigh, pStackSavePoint ) );
			coroutine.RestoreStack();
			// the new stack is in place, so no code here can reference local stack vars
			// move the program counter
			RW_MEMORY_BARRIER;
			Coroutine_longjmp( GCoroutineMgr().GetActiveCoroutine().GetRegisters(), GCoroutineMgr().GetActiveCoroutine().m_iJumpCode );
		}
		else
		{
			// set the stack pos for the new coroutine
			// jump a long way forward on the stack
			// this needs to be a stackalloc() instead of a static buffer, so it won't get optimized out in release build
			int cubGap = bInCoroutineAlready ? k_cubCoroutineStackGapSmall : k_cubCoroutineStackGap;
			volatile byte *pvStackGap = (byte*)stackalloc( cubGap );
			pvStackGap[ cubGap-1 ] = 0xF;
			// hasn't started yet, so launch
			Coroutine_Launch( coroutine );
		}
		// when the job yields, the above setjmp() will be called again with non-zero value
		// code here will never run
	}
	else if ( iResult == k_iSetJmpContinue )
	{
		// just pass through
	}
	else if ( iResult == k_iSetJmpDone )
	{
		// we're done, remove the coroutine
		GCoroutineMgr().DeleteCoroutine( Coroutine_GetCurrentlyActive() );
		bStillRunning = false;
	}
	// job has suspended itself, we'll get back to it later
	GCoroutineMgr().PopCoroutineStack();
	return bStillRunning;
}
//-----------------------------------------------------------------------------
// Purpose: Continues a current coroutine (public wrapper; no debug message).
//          Returns true while the coroutine is still running, false once done.
//-----------------------------------------------------------------------------
bool Coroutine_Continue( HCoroutine hCoroutine, const char *pchName )
{
	return Internal_Coroutine_Continue( hCoroutine, NULL, pchName );
}
//-----------------------------------------------------------------------------
// Purpose: launches a coroutine way ahead on the stack
//          (the caller has already stackalloc'd a large gap, so this frame —
//          and everything the coroutine function allocates — lives below the
//          continuing code's frames). Must be NOINLINE so it has its own
//          frame at a predictable stack position. Never returns normally:
//          ends in Coroutine_Finish(), which longjmps away.
//-----------------------------------------------------------------------------
void NOINLINE Coroutine_Launch( CCoroutine &coroutine )
{
#if defined( VPROF_ENABLED )
	coroutine.m_pVProfNodeScope = g_VProfCurrentProfile.GetCurrentNode();
#endif
	// set our marker
#ifndef _PS3
	GetStackPtr( pEsp );
#else
	// The stack pointer for the current stack frame points to the top of the stack which already includes space for the
	// ABI linkage area. We need to include this area as part of our coroutine stack, as the calling function will copy
	// the link register (return address to this function) into this area after calling m_pFunc below. Failing to do so
	// could result in the coroutine to return to garbage when complete
	uint64 *pStackFrameTwoUp = (uint64*)__builtin_frame_address(2);
	// Need to terminate the stack frame sequence so if someone tries to walk the stack in a co-routine they don't go forever.
	*pStackFrameTwoUp = 0;
	// Need to track where we we save up to on yield, add a few bytes so we save just the beginning linkage area of the stack frame
	// we added the null termination to.
	byte * pEsp = ((byte*)pStackFrameTwoUp)+32;
#endif
#ifdef _WIN64
	// Add a little extra padding, to capture the spill space for the registers
	// that is required for us to reserve ABOVE the return address), and also
	// align the stack
	coroutine.m_pStackHigh = (byte *)( ((uintptr_t)pEsp + 32 + 15) & ~(uintptr_t)15 );
	// On Win64, we need to be able to find an exception handler
	// if we walk the stack to this point. Currently,
	// this is as close to the root as we can go. If we
	// try to go higher, we wil fail. That's actually
	// OK at run time, because Coroutine_Finish doesn't
	// return!
	CatchAndWriteMiniDumpForVoidPtrFn( coroutine.m_pFunc, coroutine.m_pvParam, /*bExitQuietly*/ true );
#else
	coroutine.m_pStackHigh = (byte *)pEsp;
	// run the function directly
	coroutine.m_pFunc( coroutine.m_pvParam );
#endif
	// longjmp back to the main 'thread'
	Coroutine_Finish();
}
//-----------------------------------------------------------------------------
// Purpose: cancels a currently running coroutine.
//          NOTE: only deletes the record (and its saved stack via the
//          CCoroutine destructor); the coroutine must not be the active one.
//-----------------------------------------------------------------------------
void Coroutine_Cancel( HCoroutine hCoroutine )
{
	GCoroutineMgr().DeleteCoroutine( hCoroutine );
}
//-----------------------------------------------------------------------------
// Purpose: cause a debug break in the specified coroutine
//          (resumes it with the generic-break sentinel; it yields right back)
//-----------------------------------------------------------------------------
void Coroutine_DebugBreak( HCoroutine hCoroutine )
{
	Internal_Coroutine_Continue( hCoroutine, k_pchDebugMsg_GenericBreak, NULL );
}
//-----------------------------------------------------------------------------
// Purpose: generate an assert (perhaps generating a minidump), with the
//          specified failure message, in the specified coroutine.
//          pchMsg must be non-NULL (and must not equal the internal sentinel).
//-----------------------------------------------------------------------------
void Coroutine_DebugAssert( HCoroutine hCoroutine, const char *pchMsg )
{
	Assert( pchMsg );
	Internal_Coroutine_Continue( hCoroutine, pchMsg, NULL );
}
  737. //-----------------------------------------------------------------------------
  738. // Purpose: returns true if the code is currently running inside of a coroutine
  739. //-----------------------------------------------------------------------------
  740. bool Coroutine_IsActive()
  741. {
  742. return GCoroutineMgr().IsAnyCoroutineActive();
  743. }
  744. //-----------------------------------------------------------------------------
  745. // Purpose: returns a handle the currently active coroutine
  746. //-----------------------------------------------------------------------------
  747. HCoroutine Coroutine_GetCurrentlyActive()
  748. {
  749. Assert( Coroutine_IsActive() );
  750. return GCoroutineMgr().GetActiveCoroutineHandle();
  751. }
//-----------------------------------------------------------------------------
// Purpose: lets the main thread continue
//
// Called from inside a coroutine: snapshots this coroutine's registers and
// stack, restores the previously-active context's stack, and longjmps back
// to it. From the coroutine's point of view this function "returns" when
// someone resumes us again via Coroutine_Continue().
//-----------------------------------------------------------------------------
void Coroutine_YieldToMain()
{
	// if you've hit this assert, it's because you're calling yield when not in a coroutine
	Assert( Coroutine_IsActive() );
	CCoroutine &coroutinePrev = GCoroutineMgr().GetPreviouslyActiveCoroutine();
	CCoroutine &coroutine = GCoroutineMgr().GetActiveCoroutine();
	CoroutineDbgMsg( g_fmtstr.sprintf( "Coroutine_YieldToMain() %s#%x -> %s#%x\n", coroutine.m_pchName, coroutine.m_hCoroutine, coroutinePrev.m_pchName, coroutinePrev.m_hCoroutine ) );
#ifdef _WIN32
#ifndef _WIN64
	// make sure nobody has a try/catch block and then yielded
	// because we hate that and we will crash
	// (read the head of the SEH chain from fs:[0] and require that it is the
	// same at every yield - a live try/catch frame on the coroutine stack
	// would change it)
	uint32 topofexceptionchain;
	__asm mov eax, dword ptr fs:[0]
	__asm mov topofexceptionchain, eax
	if ( GCoroutineMgr().m_topofexceptionchain == 0 )
		GCoroutineMgr().m_topofexceptionchain = topofexceptionchain;
	else
	{
		Assert( topofexceptionchain == GCoroutineMgr().m_topofexceptionchain );
	}
#endif
#endif
	RW_MEMORY_BARRIER;
	// Snapshot our registers. setjmp returns k_iSetJmpStateSaved on the
	// initial call; later, Coroutine_longjmp() lands us back here with
	// k_iSetJmpContinue (normal resume) or k_iSetJmpDbgBreak (a break/assert
	// was requested via Coroutine_DebugBreak / Coroutine_DebugAssert).
	int iResult = setjmp( coroutine.GetRegisters() );
	if ( ( iResult == k_iSetJmpStateSaved ) || ( iResult == k_iSetJmpDbgBreak ) )
	{
		// break / assert requested?
		if ( iResult == k_iSetJmpDbgBreak )
		{
			// Assert (minidump) requested?
			if ( coroutine.m_pchDebugMsg )
			{
				// Generate a failed assertion
				AssertMsg1( !"Coroutine assert requested", "%s", coroutine.m_pchDebugMsg );
			}
			else
			{
				// If we were loaded only to debug, call a break
				DebuggerBreakIfDebugging();
			}
			// Now IMMEDIATELY yield back to the main thread
		}
		// Clear message, regardless
		coroutine.m_pchDebugMsg = NULL;
		// save our stack - all the way to the top, err bottom err, the end of it ( where esp is )
		coroutine.SaveStack();
		CoroutineDbgMsg( g_fmtstr.sprintf( "SaveStack() %s#%x [%x - %x]\n", coroutine.m_pchName, coroutine.m_hCoroutine, coroutine.m_pStackLow, coroutine.m_pStackHigh ) );
		// restore the main thread stack
		// allocate a bunch of stack padding so we don't kill ourselves while in stack restoration
		// If the coroutine's stack is close enough to where we are on the stack, we need to push ourselves
		// down past it, so that the memcpy() doesn't screw up the RestoreStack->memcpy call chain.
		GetStackPtr( pStackPtr );
		if ( pStackPtr >= (coroutinePrev.m_pStackHigh - coroutinePrev.m_cubSavedStack) && ( pStackPtr - 2048 ) <= coroutinePrev.m_pStackHigh )
		{
			int cubPush = coroutinePrev.m_cubSavedStack + 512;
			volatile byte *pvStackGap = (byte*)stackalloc( cubPush );
			// touch the far end of the alloca so it can't be optimized away
			pvStackGap[ cubPush - 1 ] = 0xF;
			CoroutineDbgMsg( g_fmtstr.sprintf( "Adjusting stack point by %d (%x <- %x)\n", cubPush, pvStackGap, &pvStackGap[cubPush] ) );
		}
		CoroutineDbgMsg( g_fmtstr.sprintf( "RestoreStack() %s#%x [%x - %x]\n", coroutinePrev.m_pchName, coroutinePrev.m_hCoroutine, coroutinePrev.m_pStackLow, coroutinePrev.m_pStackHigh ) );
		coroutinePrev.RestoreStack();
		// jump back to the main thread
		// Our stack may have been mucked with, can't use local vars anymore!
		RW_MEMORY_BARRIER;
		Coroutine_longjmp( GCoroutineMgr().GetPreviouslyActiveCoroutine().GetRegisters(), k_iSetJmpContinue );
		UNREACHABLE();
	}
	else
	{
		// we've been restored, now continue on our merry way
	}
}
//-----------------------------------------------------------------------------
// Purpose: done with the Coroutine, terminate safely
//
// Restores the previously-active context's stack and longjmps back to it
// with k_iSetJmpDone so the resumer knows the coroutine completed. Never
// returns. NOTE: locals become unusable after RestoreStack(), so all state
// is re-fetched through GCoroutineMgr() on every access.
//-----------------------------------------------------------------------------
void Coroutine_Finish()
{
	Assert( Coroutine_IsActive() );
	CoroutineDbgMsg( g_fmtstr.sprintf( "Coroutine_Finish() %s#%x -> %s#%x\n", GCoroutineMgr().GetActiveCoroutine().m_pchName, GCoroutineMgr().GetActiveCoroutineHandle(), GCoroutineMgr().GetPreviouslyActiveCoroutine().m_pchName, &GCoroutineMgr().GetPreviouslyActiveCoroutine() ) );
	// allocate a bunch of stack padding so we don't kill ourselves while in stack restoration
	volatile byte *pvStackGap = (byte*)stackalloc( GCoroutineMgr().GetPreviouslyActiveCoroutine().m_cubSavedStack + 512 );
	// touch the last byte so the alloca can't be optimized away
	pvStackGap[ GCoroutineMgr().GetPreviouslyActiveCoroutine().m_cubSavedStack + 511 ] = 0xf;
	GCoroutineMgr().GetPreviouslyActiveCoroutine().RestoreStack();
	RW_MEMORY_BARRIER;
	// go back to the main thread, signaling that we're done
	Coroutine_longjmp( GCoroutineMgr().GetPreviouslyActiveCoroutine().GetRegisters(), k_iSetJmpDone );
	UNREACHABLE();
}
  843. //-----------------------------------------------------------------------------
  844. // Purpose: Coroutine that spawns another coroutine
  845. //-----------------------------------------------------------------------------
  846. void CoroutineTestFunc( void *pvRelaunch )
  847. {
  848. static const char *g_pchTestString = "test string";
  849. char rgchT[256];
  850. Q_strncpy( rgchT, g_pchTestString, sizeof(rgchT) );
  851. // yield
  852. Coroutine_YieldToMain();
  853. // ensure the string is still valid
  854. DbgVerifyNot( Q_strcmp( rgchT, g_pchTestString ) );
  855. if ( !pvRelaunch )
  856. {
  857. // test launching coroutines inside of coroutines
  858. HCoroutine hCoroutine = Coroutine_Create( &CoroutineTestFunc, (void *)(size_t)0xFFFFFFFF );
  859. // first pass the coroutines should all still be running
  860. DbgVerify( Coroutine_Continue( hCoroutine, NULL ) );
  861. // second pass the coroutines should all be finished
  862. DbgVerifyNot( Coroutine_Continue( hCoroutine, NULL ) );
  863. }
  864. }
  865. // test that just spins a few times
  866. void CoroutineTestL2( void * )
  867. {
  868. // spin a few times
  869. for ( int i = 0; i < 5; i++ )
  870. {
  871. Coroutine_YieldToMain();
  872. }
  873. }
  874. // level 1 of a test
  875. void CoroutineTestL1( void *pvecCoroutineL2 )
  876. {
  877. CUtlVector<HCoroutine> &vecCoroutineL2 = *(CUtlVector<HCoroutine> *)pvecCoroutineL2;
  878. int i = 20;
  879. // launch a set of coroutines
  880. for ( i = 0; i < 20; i++ )
  881. {
  882. HCoroutine hCoroutine = Coroutine_Create( &CoroutineTestL2, NULL );
  883. vecCoroutineL2.AddToTail( hCoroutine );
  884. Coroutine_Continue( hCoroutine, NULL );
  885. // now yield back to main occasionally
  886. if ( i % 2 == 1 )
  887. Coroutine_YieldToMain();
  888. }
  889. Assert( i == 20 );
  890. }
//-----------------------------------------------------------------------------
// Purpose: runs a self-test of the coroutine system
//			it's working if it doesn't crash
//
// Exercises: a basic run-to-completion coroutine, nested coroutines driven
// from a parent coroutine, a deliberate stack-position collision between the
// caller and a coroutine's saved stack, and bulk creation of 10k coroutines.
//-----------------------------------------------------------------------------
bool Coroutine_Test()
{
	// basic calling of a coroutine
	HCoroutine hCoroutine = Coroutine_Create( &CoroutineTestFunc, NULL );
	Coroutine_Continue( hCoroutine, NULL );
	Coroutine_Continue( hCoroutine, NULL );
	// now test
	CUtlVector<HCoroutine> vecCoroutineL2;
	hCoroutine = Coroutine_Create( &CoroutineTestL1, &vecCoroutineL2 );
	Coroutine_Continue( hCoroutine, NULL );
	// run the sub-coroutines until they're all done
	while ( vecCoroutineL2.Count() )
	{
		// keep stepping the L1 parent until it finishes (Continue returns false)
		if ( hCoroutine && !Coroutine_Continue( hCoroutine, NULL ) )
			hCoroutine = NULL;
		// iterate backwards so Remove() doesn't disturb unvisited indices
		FOR_EACH_VEC_BACK( vecCoroutineL2, i )
		{
			if ( !Coroutine_Continue( vecCoroutineL2[i], NULL ) )
				vecCoroutineL2.Remove( i );
		}
	}
	// new one
	hCoroutine = Coroutine_Create( &CoroutineTestFunc, NULL );
	// it has yielded, now continue it's call
	{
		// pop our stack up so it collides with the coroutine stack position
		Coroutine_Continue( hCoroutine, NULL );
		// alloca a gap so our frame overlaps where the coroutine's saved
		// stack will be restored - exercises the stack-collision handling
		// in Coroutine_YieldToMain / Coroutine_Continue
		volatile byte *pvAlloca = (byte*)stackalloc( k_cubCoroutineStackGapSmall );
		pvAlloca[ k_cubCoroutineStackGapSmall-1 ] = 0xF;
		Coroutine_Continue( hCoroutine, NULL );
	}
	// now do a whole bunch of them
	static const int k_nSimultaneousCoroutines = 10 * 1000;
	CUtlVector<HCoroutine> coroutines;
	Assert( coroutines.Base() == NULL );
	for (int i = 0; i < k_nSimultaneousCoroutines; i++)
	{
		coroutines.AddToTail( Coroutine_Create( &CoroutineTestFunc, NULL ) );
	}
	for (int i = 0; i < coroutines.Count(); i++)
	{
		// first pass the coroutines should all still be running
		DbgVerify( Coroutine_Continue( coroutines[i], NULL ) );
	}
	for (int i = 0; i < coroutines.Count(); i++)
	{
		// second pass the coroutines should all be finished
		DbgVerifyNot( Coroutine_Continue( coroutines[i], NULL ) );
	}
	return true;
}
  946. //-----------------------------------------------------------------------------
  947. // Purpose: returns approximate stack depth of current coroutine.
  948. //-----------------------------------------------------------------------------
  949. size_t Coroutine_GetStackDepth()
  950. {
  951. // should only get called from a coroutine
  952. Assert( GCoroutineMgr().IsAnyCoroutineActive() );
  953. if ( !GCoroutineMgr().IsAnyCoroutineActive() )
  954. return 0;
  955. GetStackPtr( pLocal );
  956. CCoroutine &coroutine = GCoroutineMgr().GetActiveCoroutine();
  957. return ( coroutine.m_pStackHigh - pLocal );
  958. }
  959. //-----------------------------------------------------------------------------
  960. // Purpose: validates memory
  961. //-----------------------------------------------------------------------------
  962. void Coroutine_ValidateGlobals( class CValidator &validator )
  963. {
  964. #ifdef DBGFLAG_VALIDATE
  965. AUTO_LOCK( g_ThreadMutexCoroutineMgr );
  966. for ( int i = 0; i < g_VecPCoroutineMgr.Count(); i++ )
  967. {
  968. ValidatePtr( g_VecPCoroutineMgr[i] );
  969. }
  970. ValidateObj( g_VecPCoroutineMgr );
  971. #endif
  972. }