Team Fortress 2 Source Code as on 22/4/2020
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

401 lines
14 KiB

  1. //========= Copyright Valve Corporation, All rights reserved. ============//
  2. //
  3. // Purpose:
  4. //
  5. // $NoKeywords: $
  6. //=============================================================================//
  7. #ifndef AI_SPEECH_H
  8. #define AI_SPEECH_H
  9. #include "utlmap.h"
  10. #include "soundflags.h"
  11. #include "AI_ResponseSystem.h"
  12. #include "utldict.h"
  13. #if defined( _WIN32 )
  14. #pragma once
  15. #endif
  16. class KeyValues;
  17. class AI_CriteriaSet;
  18. //-----------------------------------------------------------------------------
  19. // Purpose: Used to share a global resource or prevent a system stepping on
  20. // own toes.
  21. //-----------------------------------------------------------------------------
  22. class CAI_TimedSemaphore
  23. {
  24. public:
  25. CAI_TimedSemaphore()
  26. : m_ReleaseTime( 0 )
  27. {
  28. m_hCurrentTalker = NULL;
  29. }
  30. void Acquire( float time, CBaseEntity *pTalker ) { m_ReleaseTime = gpGlobals->curtime + time; m_hCurrentTalker = pTalker; }
  31. void Release() { m_ReleaseTime = 0; m_hCurrentTalker = NULL; }
  32. // Current owner of the semaphore is always allowed to talk
  33. bool IsAvailable( CBaseEntity *pTalker ) const { return ((gpGlobals->curtime > m_ReleaseTime) || (m_hCurrentTalker == pTalker)); }
  34. float GetReleaseTime() const { return m_ReleaseTime; }
  35. CBaseEntity *GetOwner() { return m_hCurrentTalker; }
  36. private:
  37. float m_ReleaseTime;
  38. EHANDLE m_hCurrentTalker;
  39. };
  40. //-----------------------------------------------------------------------------
  41. extern CAI_TimedSemaphore g_AIFriendliesTalkSemaphore;
  42. extern CAI_TimedSemaphore g_AIFoesTalkSemaphore;
  43. #define GetSpeechSemaphore( pNpc ) (((pNpc)->IsPlayerAlly()) ? &g_AIFriendliesTalkSemaphore : &g_AIFoesTalkSemaphore )
  44. //-----------------------------------------------------------------------------
  45. // Basic speech system types
  46. //-----------------------------------------------------------------------------
  47. //-------------------------------------
  48. // Constants
  49. const float AIS_DEF_MIN_DELAY = 2.8; // Minimum amount of time an NPCs will wait after someone has spoken before considering speaking again
  50. const float AIS_DEF_MAX_DELAY = 3.2; // Maximum amount of time an NPCs will wait after someone has spoken before considering speaking again
  51. const float AIS_NO_DELAY = 0;
  52. const soundlevel_t AIS_DEF_SNDLVL = SNDLVL_TALKING;
  53. #define AI_NULL_CONCEPT NULL
  54. #define AI_NULL_SENTENCE NULL
  55. // Sentence prefix constants
  56. #define AI_SP_SPECIFIC_SENTENCE '!'
  57. #define AI_SP_WAVFILE '^'
  58. #define AI_SP_SCENE_GROUP '='
  59. #define AI_SP_SPECIFIC_SCENE '?'
  60. #define AI_SPECIFIC_SENTENCE(str_constant) "!" str_constant
  61. #define AI_WAVFILE(str_constant) "^" str_constant
  62. // @Note (toml 09-12-02): as scene groups are not currently implemented, the string is a semi-colon delimited list
  63. #define AI_SCENE_GROUP(str_constant) "=" str_constant
  64. #define AI_SPECIFIC_SCENE(str_constant) "?" str_constant
  65. // Designer overriding modifiers
  66. #define AI_SPECIFIC_SCENE_MODIFIER "scene:"
  67. //-------------------------------------
  68. //-------------------------------------
  69. // An id that represents the core meaning of a spoken phrase,
  70. // eventually to be mapped to a sentence group or scene
  71. typedef const char *AIConcept_t;
  72. inline bool CompareConcepts( AIConcept_t c1, AIConcept_t c2 )
  73. {
  74. return ( (void *)c1 == (void *)c2 || ( c1 && c2 && Q_stricmp( c1, c2 ) == 0 ) );
  75. }
  76. //-------------------------------------
  77. // Specifies and stores the base timing and attentuation values for concepts
  78. //
  79. class AI_Response;
  80. //-----------------------------------------------------------------------------
  81. // CAI_Expresser
  82. //
  83. // Purpose: Provides the functionality of going from abstract concept ("hello")
  84. // to specific sentence/scene/wave
  85. //
  86. //-------------------------------------
  87. // Sink supports behavior control and receives notifications of internal events
  88. class CAI_ExpresserSink
  89. {
  90. public:
  91. virtual void OnSpokeConcept( AIConcept_t concept, AI_Response *response ) {};
  92. virtual void OnStartSpeaking() {}
  93. virtual bool UseSemaphore() { return true; }
  94. };
  95. struct ConceptHistory_t
  96. {
  97. DECLARE_SIMPLE_DATADESC();
  98. ConceptHistory_t(float timeSpoken = -1 )
  99. : timeSpoken( timeSpoken ), response( NULL )
  100. {
  101. }
  102. ConceptHistory_t( const ConceptHistory_t& src );
  103. ConceptHistory_t& operator = ( const ConceptHistory_t& src );
  104. ~ConceptHistory_t();
  105. float timeSpoken;
  106. AI_Response *response;
  107. };
  108. //-------------------------------------
  109. class CAI_Expresser : public IResponseFilter
  110. {
  111. public:
  112. CAI_Expresser( CBaseFlex *pOuter = NULL );
  113. ~CAI_Expresser();
  114. // --------------------------------
  115. bool Connect( CAI_ExpresserSink *pSink ) { m_pSink = pSink; return true; }
  116. bool Disconnect( CAI_ExpresserSink *pSink ) { m_pSink = NULL; return true;}
  117. void TestAllResponses();
  118. // --------------------------------
  119. bool Speak( AIConcept_t concept, const char *modifiers = NULL, char *pszOutResponseChosen = NULL, size_t bufsize = 0, IRecipientFilter *filter = NULL );
  120. // These two methods allow looking up a response and dispatching it to be two different steps
  121. bool SpeakFindResponse( AI_Response &response, AIConcept_t concept, const char *modifiers = NULL );
  122. bool SpeakDispatchResponse( AIConcept_t concept, AI_Response &response, IRecipientFilter *filter = NULL );
  123. float GetResponseDuration( AI_Response &response );
  124. virtual int SpeakRawSentence( const char *pszSentence, float delay, float volume = VOL_NORM, soundlevel_t soundlevel = SNDLVL_TALKING, CBaseEntity *pListener = NULL );
  125. bool SemaphoreIsAvailable( CBaseEntity *pTalker );
  126. float GetSemaphoreAvailableTime( CBaseEntity *pTalker );
  127. // --------------------------------
  128. virtual bool IsSpeaking();
  129. bool CanSpeak();
  130. bool CanSpeakAfterMyself();
  131. float GetTimeSpeechComplete() const { return m_flStopTalkTime; }
  132. void BlockSpeechUntil( float time );
  133. // --------------------------------
  134. bool CanSpeakConcept( AIConcept_t concept );
  135. bool SpokeConcept( AIConcept_t concept );
  136. float GetTimeSpokeConcept( AIConcept_t concept ); // returns -1 if never
  137. void SetSpokeConcept( AIConcept_t concept, AI_Response *response, bool bCallback = true );
  138. void ClearSpokeConcept( AIConcept_t concept );
  139. // --------------------------------
  140. void SetVoicePitch( int voicePitch ) { m_voicePitch = voicePitch; }
  141. int GetVoicePitch() const;
  142. void NoteSpeaking( float duration, float delay = 0 );
  143. // Force the NPC to release the semaphore & clear next speech time
  144. void ForceNotSpeaking( void );
  145. protected:
  146. CAI_TimedSemaphore *GetMySpeechSemaphore( CBaseEntity *pNpc );
  147. bool SpeakRawScene( const char *pszScene, float delay, AI_Response *response, IRecipientFilter *filter = NULL );
  148. // This will create a fake .vcd/CChoreoScene to wrap the sound to be played
  149. bool SpeakAutoGeneratedScene( char const *soundname, float delay );
  150. void DumpHistories();
  151. void SpeechMsg( CBaseEntity *pFlex, PRINTF_FORMAT_STRING const char *pszFormat, ... );
  152. // --------------------------------
  153. CAI_ExpresserSink *GetSink() { return m_pSink; }
  154. private:
  155. // --------------------------------
  156. virtual bool IsValidResponse( ResponseType_t type, const char *pszValue );
  157. // --------------------------------
  158. CAI_ExpresserSink *m_pSink;
  159. // --------------------------------
  160. //
  161. // Speech concept data structures
  162. //
  163. CUtlDict< ConceptHistory_t, int > m_ConceptHistories;
  164. // --------------------------------
  165. //
  166. // Speaking states
  167. //
  168. float m_flStopTalkTime; // when in the future that I'll be done saying this sentence.
  169. float m_flStopTalkTimeWithoutDelay; // same as the above, but minus the delay before other people can speak
  170. float m_flBlockedTalkTime;
  171. int m_voicePitch; // pitch of voice for this head
  172. float m_flLastTimeAcceptedSpeak; // because speech may not be blocked until NoteSpeaking called by scene ent, this handles in-think blocking
  173. DECLARE_SIMPLE_DATADESC();
  174. // --------------------------------
  175. //
  176. public:
  177. virtual void SetOuter( CBaseFlex *pOuter ) { m_pOuter = pOuter; }
  178. CBaseFlex * GetOuter() { return m_pOuter; }
  179. const CBaseFlex * GetOuter() const { return m_pOuter; }
  180. private:
  181. CHandle<CBaseFlex> m_pOuter;
  182. };
  183. class CMultiplayer_Expresser : public CAI_Expresser
  184. {
  185. public:
  186. CMultiplayer_Expresser( CBaseFlex *pOuter = NULL );
  187. //~CMultiplayer_Expresser();
  188. virtual bool IsSpeaking();
  189. void AllowMultipleScenes();
  190. void DisallowMultipleScenes();
  191. private:
  192. bool m_bAllowMultipleScenes;
  193. };
  194. //-----------------------------------------------------------------------------
  195. //
  196. // An NPC base class to assist a branch of the inheritance graph
  197. // in utilizing CAI_Expresser
  198. //
  199. template <class BASE_NPC>
  200. class CAI_ExpresserHost : public BASE_NPC, protected CAI_ExpresserSink
  201. {
  202. DECLARE_CLASS_NOFRIEND( CAI_ExpresserHost, BASE_NPC );
  203. public:
  204. virtual void NoteSpeaking( float duration, float delay );
  205. virtual bool Speak( AIConcept_t concept, const char *modifiers = NULL, char *pszOutResponseChosen = NULL, size_t bufsize = 0, IRecipientFilter *filter = NULL );
  206. // These two methods allow looking up a response and dispatching it to be two different steps
  207. bool SpeakFindResponse( AI_Response& response, AIConcept_t concept, const char *modifiers = NULL );
  208. bool SpeakDispatchResponse( AIConcept_t concept, AI_Response& response );
  209. virtual void PostSpeakDispatchResponse( AIConcept_t concept, AI_Response& response ) { return; }
  210. float GetResponseDuration( AI_Response& response );
  211. float GetTimeSpeechComplete() const { return this->GetExpresser()->GetTimeSpeechComplete(); }
  212. bool IsSpeaking() { return this->GetExpresser()->IsSpeaking(); }
  213. bool CanSpeak() { return this->GetExpresser()->CanSpeak(); }
  214. bool CanSpeakAfterMyself() { return this->GetExpresser()->CanSpeakAfterMyself(); }
  215. void SetSpokeConcept( AIConcept_t concept, AI_Response *response, bool bCallback = true ) { this->GetExpresser()->SetSpokeConcept( concept, response, bCallback ); }
  216. float GetTimeSpokeConcept( AIConcept_t concept ) { return this->GetExpresser()->GetTimeSpokeConcept( concept ); }
  217. bool SpokeConcept( AIConcept_t concept ) { return this->GetExpresser()->SpokeConcept( concept ); }
  218. protected:
  219. int PlaySentence( const char *pszSentence, float delay, float volume = VOL_NORM, soundlevel_t soundlevel = SNDLVL_TALKING, CBaseEntity *pListener = NULL );
  220. virtual void ModifyOrAppendCriteria( AI_CriteriaSet& set );
  221. virtual IResponseSystem *GetResponseSystem();
  222. // Override of base entity response input handler
  223. virtual void DispatchResponse( const char *conceptName );
  224. };
  225. //-----------------------------------------------------------------------------
  226. //-----------------------------------------------------------------------------
  227. template <class BASE_NPC>
  228. inline void CAI_ExpresserHost<BASE_NPC>::NoteSpeaking( float duration, float delay )
  229. {
  230. this->GetExpresser()->NoteSpeaking( duration, delay );
  231. }
  232. //-----------------------------------------------------------------------------
  233. //-----------------------------------------------------------------------------
  234. template <class BASE_NPC>
  235. inline bool CAI_ExpresserHost<BASE_NPC>::Speak( AIConcept_t concept, const char *modifiers /*= NULL*/, char *pszOutResponseChosen /*=NULL*/, size_t bufsize /* = 0 */, IRecipientFilter *filter /* = NULL */ )
  236. {
  237. AssertOnce( this->GetExpresser()->GetOuter() == this );
  238. return this->GetExpresser()->Speak( concept, modifiers, pszOutResponseChosen, bufsize, filter );
  239. }
  240. //-----------------------------------------------------------------------------
  241. //-----------------------------------------------------------------------------
  242. template <class BASE_NPC>
  243. inline int CAI_ExpresserHost<BASE_NPC>::PlaySentence( const char *pszSentence, float delay, float volume, soundlevel_t soundlevel, CBaseEntity *pListener )
  244. {
  245. return this->GetExpresser()->SpeakRawSentence( pszSentence, delay, volume, soundlevel, pListener );
  246. }
  247. //-----------------------------------------------------------------------------
  248. //-----------------------------------------------------------------------------
  249. extern void CAI_ExpresserHost_NPC_DoModifyOrAppendCriteria( CAI_BaseNPC *pSpeaker, AI_CriteriaSet& criteriaSet );
  250. template <class BASE_NPC>
  251. inline void CAI_ExpresserHost<BASE_NPC>::ModifyOrAppendCriteria( AI_CriteriaSet& criteriaSet )
  252. {
  253. BaseClass::ModifyOrAppendCriteria( criteriaSet );
  254. if ( this->MyNPCPointer() )
  255. {
  256. CAI_ExpresserHost_NPC_DoModifyOrAppendCriteria( this->MyNPCPointer(), criteriaSet );
  257. }
  258. }
  259. //-----------------------------------------------------------------------------
  260. //-----------------------------------------------------------------------------
  261. template <class BASE_NPC>
  262. inline IResponseSystem *CAI_ExpresserHost<BASE_NPC>::GetResponseSystem()
  263. {
  264. extern IResponseSystem *g_pResponseSystem;
  265. // Expressive NPC's use the general response system
  266. return g_pResponseSystem;
  267. }
  268. //-----------------------------------------------------------------------------
  269. //-----------------------------------------------------------------------------
  270. template <class BASE_NPC>
  271. inline bool CAI_ExpresserHost<BASE_NPC>::SpeakFindResponse( AI_Response& response, AIConcept_t concept, const char *modifiers /*= NULL*/ )
  272. {
  273. return this->GetExpresser()->SpeakFindResponse( response, concept, modifiers );
  274. }
  275. //-----------------------------------------------------------------------------
  276. //-----------------------------------------------------------------------------
  277. template <class BASE_NPC>
  278. inline bool CAI_ExpresserHost<BASE_NPC>::SpeakDispatchResponse( AIConcept_t concept, AI_Response& response )
  279. {
  280. if ( this->GetExpresser()->SpeakDispatchResponse( concept, response ) )
  281. {
  282. PostSpeakDispatchResponse( concept, response );
  283. return true;
  284. }
  285. return false;
  286. }
  287. //-----------------------------------------------------------------------------
  288. //-----------------------------------------------------------------------------
  289. template <class BASE_NPC>
  290. inline float CAI_ExpresserHost<BASE_NPC>::GetResponseDuration( AI_Response& response )
  291. {
  292. return this->GetExpresser()->GetResponseDuration( response );
  293. }
  294. //-----------------------------------------------------------------------------
  295. // Override of base entity response input handler
  296. //-----------------------------------------------------------------------------
  297. template <class BASE_NPC>
  298. inline void CAI_ExpresserHost<BASE_NPC>::DispatchResponse( const char *conceptName )
  299. {
  300. Speak( (AIConcept_t)conceptName );
  301. }
  302. //-----------------------------------------------------------------------------
  303. #endif // AI_SPEECH_H