Team Fortress 2 Source Code as on 22/4/2020

# Assuming all functions begin with ')' followed by '{', just find the matching brace and
# add a line with 'g_pVCR->SyncToken("<random string here>");'

import dlexer
import sys


class BlankStruct:
    pass


def MatchParensBack( list, iStart ):
    parenCount = -1
    for i in range( 0, iStart ):
        if list[iStart-i].id == __TOKEN_OPENPAREN:
            parenCount += 1
        elif list[iStart-i].id == __TOKEN_CLOSEPAREN:
            parenCount -= 1

        if parenCount == 0:
            return iStart - i

    return -1


if len( sys.argv ) >= 2:
    # Setup the parser.
    parser = dlexer.DLexer( 0 )

    __TOKEN_NEWLINE = parser.AddToken( '\n' )
    __TOKEN_WHITESPACE = parser.AddToken( '[ \\t\\f\\v]+' )
    __TOKEN_OPENBRACE = parser.AddToken( '{' )
    __TOKEN_CLOSEBRACE = parser.AddToken( '}' )
    __TOKEN_OPENPAREN = parser.AddToken( '\(' )
    __TOKEN_CLOSEPAREN = parser.AddToken( '\)' )
    __TOKEN_COMMENT = parser.AddToken( r"\/\/.*" )
    __TOKEN_CONST = parser.AddToken( "const" )
    __TOKEN_IF = parser.AddToken( "if" )
    __TOKEN_WHILE = parser.AddToken( "while" )
    __TOKEN_FOR = parser.AddToken( "for" )
    __TOKEN_SWITCH = parser.AddToken( "switch" )

    validChars = r"\~\@\#\$\%\^\&\!\,\w\.-/\[\]\<\>\""
    __TOKEN_IDENT = parser.AddToken( '[' + validChars + ']+' )
    __TOKEN_OPERATOR = parser.AddToken( "\=|\+" )
    __TOKEN_SCOPE_OPERATOR = parser.AddToken( "::" )
    __TOKEN_IGNORE = parser.AddToken( r"\#|\;|\:|\||\?|\'|\\|\*|\-|\`" )

    head = None

    # First, read all the tokens into a list.
    list = []
    parser.BeginReadFile( sys.argv[1] )
    while 1:
        m = parser.GetToken()
        if m:
            list.append( m )
        else:
            break

    # Make a list of all the non-whitespace ones.
    nw = []
    for token in list:
        if token.id == __TOKEN_NEWLINE or token.id == __TOKEN_WHITESPACE:
            token.iNonWhitespace = -2222
        else:
            token.iNonWhitespace = len( nw )
            nw.append( token )

    # Get ready to output sync tokens.
    file = open( sys.argv[1], 'r' )
    fileLines = file.readlines()
    file.close()

    curLine = 1
    iCur = 0
    file = open( sys.argv[1], 'w' )

    # Now, search for the patterns we're interested in.
    # Look for <ident>::<ident> '(' <idents...> ')' followed by a '{'. This would be a function.
    for token in list:
        file.write( token.val )

        if token.id == __TOKEN_NEWLINE:
            curLine += 1

        if token.id == __TOKEN_OPENBRACE:
            i = token.iNonWhitespace
            if i >= 6:
                if nw[i-1].id == __TOKEN_CLOSEPAREN:
                    pos = MatchParensBack( nw, i-2 )
                    if pos != -1:
                        if nw[pos-1].id == __TOKEN_IDENT:
                            #ADD PROLOGUE CODE HERE
                            #file.write( "\n\tg_pVCR->SyncToken( \"%d_%s\" ); // AUTO-GENERATED SYNC TOKEN\n" % (iCur, nw[pos-1].val) )
                            iCur += 1

                            # TEST CODE TO PRINT OUT FUNCTION NAMES
                            #if nw[pos-2].id == __TOKEN_SCOPE_OPERATOR:
                            #    print "%d: %s::%s" % ( curLine, nw[pos-3].val, nw[pos-1].val )
                            #else:
                            #    print "%d: %s" % ( curLine, nw[pos-1].val )

    file.close()
else:
    print "VCRMode_AddSyncTokens <filename>"
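
The script above is meant to rewrite a C++ source file in place, inserting a g_pVCR->SyncToken() call with a unique string right after each function's opening brace (the actual write is currently commented out under "ADD PROLOGUE CODE HERE"). As a rough illustration of that intended effect, the standalone sketch below applies a much cruder line-based heuristic to a hard-coded snippet; the sample C++ code, the add_sync_tokens helper, and the heuristic itself are assumptions made for this demo, not part of the original tool, which tokenizes the file with the custom dlexer module instead.

# Hypothetical standalone sketch, NOT part of the original file: it imitates the
# transformation the script is meant to perform, i.e. inserting a
# g_pVCR->SyncToken() call right after each function's opening brace.
import re

SAMPLE_CPP = (
    "void CGameClient::ProcessUsercmds( int a, int b )\n"
    "{\n"
    "\tDoWork( a, b );\n"
    "}\n"
)

def add_sync_tokens( text ):
    iCur = 0
    out = []
    lines = text.splitlines( True )
    for i, line in enumerate( lines ):
        out.append( line )
        # Crude heuristic standing in for the token-based check in the real script:
        # an opening brace on its own line directly after a line ending in ')' is
        # treated as the start of a function body.
        if line.strip() == '{' and i > 0 and lines[i-1].rstrip().endswith( ')' ):
            names = re.findall( r"(\w+)\s*\(", lines[i-1] )
            funcName = names[-1] if names else "unknown"
            out.append( '\tg_pVCR->SyncToken( "%d_%s" ); // AUTO-GENERATED SYNC TOKEN\n' % (iCur, funcName) )
            iCur += 1
    return ''.join( out )

print( add_sync_tokens( SAMPLE_CPP ) )

Note that the real script reads sys.argv[1], then reopens the same path for writing and emits every token back out, so the edit happens in place on the target file, whereas this sketch only prints the modified text.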
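
For the helper itself: MatchParensBack is called with the index of the token just before a closing ')', and its parenCount starts at -1 to account for that closing paren; it then walks backwards, adding 1 for each '(' and subtracting 1 for each ')', and returns the index where the count balances, which is the matching '('. The small demo below mimics that walk with a hypothetical Tok class and hard-coded integer token ids standing in for the dlexer tokens; it is an illustration only.

# Hypothetical standalone demo, NOT part of the original file: shows the backward
# parenthesis walk used by MatchParensBack on a tiny mock token list.
OPEN, CLOSE, IDENT = 0, 1, 2

class Tok:
    def __init__( self, id, val ):
        self.id = id
        self.val = val

def match_parens_back( toks, iStart ):
    # Start at -1 to account for the closing ')' that sits just past iStart.
    parenCount = -1
    for i in range( 0, iStart ):
        if toks[iStart-i].id == OPEN:
            parenCount += 1
        elif toks[iStart-i].id == CLOSE:
            parenCount -= 1
        if parenCount == 0:
            return iStart - i
    return -1

# Tokens for: Foo ( Bar ( x ) )  -- the outer ')' is at index 6, so the search
# starts at index 5, the token just before it (mirroring the MatchParensBack( nw, i-2 ) call).
toks = [ Tok(IDENT, 'Foo'), Tok(OPEN, '('), Tok(IDENT, 'Bar'), Tok(OPEN, '('),
         Tok(IDENT, 'x'), Tok(CLOSE, ')'), Tok(CLOSE, ')') ]
print( match_parens_back( toks, 5 ) )  # prints 1, the index of the matching outer '('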