Team Fortress 2 Source Code as of 22/4/2020

1799 lines
40 KiB

  1. //========= Copyright Valve Corporation, All rights reserved. ============//
  2. //
  3. // Purpose:
  4. //
  5. // $NoKeywords: $
  6. //
  7. //===========================================================================//
  8. #define WIN32_LEAN_AND_MEAN
  9. #include <windows.h>
  10. #pragma warning( disable : 4201 )
  11. #include <mmsystem.h>
  12. #include <stdio.h>
  13. #include <math.h>
  14. #include "snd_audio_source.h"
  15. #include "AudioWaveOutput.h"
  16. #include "ifaceposersound.h"
  17. #include "StudioModel.h"
  18. #include "hlfaceposer.h"
  19. #include "expressions.h"
  20. #include "expclass.h"
  21. #include "PhonemeConverter.h"
  22. #include "utlvector.h"
  23. #include "filesystem.h"
  24. #include "sentence.h"
  25. #include "faceposer_models.h"
  26. #include "iclosecaptionmanager.h"
  27. #include "phonemeeditor.h"
  28. #include "wavebrowser.h"
  29. #include "choreoscene.h"
  30. #include "choreoview.h"
  31. #include "KeyValues.h"
  32. extern ISoundEmitterSystemBase *soundemitter;
  33. typedef struct channel_s
  34. {
  35. int leftvol;
  36. int rightvol;
  37. int rleftvol;
  38. int rrightvol;
  39. float pitch;
  40. } channel_t;
  41. #define INPUT_BUFFER_COUNT 32
  42. class CAudioWaveInput : public CAudioInput
  43. {
  44. public:
  45. CAudioWaveInput( void );
  46. ~CAudioWaveInput( void );
  47. // Returns the current count of available samples
  48. int SampleCount( void );
  49. // returns the size of each sample in bytes
  50. int SampleSize( void ) { return m_sampleSize; }
  51. // returns the sampling rate of the data
  52. int SampleRate( void ) { return m_sampleRate; }
  53. // returns a pointer to the actual data
  54. void *SampleData( void );
  55. // release the available data (mark as done)
  56. void SampleRelease( void );
  57. // returns the mono/stereo status of this device (true if stereo)
  58. bool IsStereo( void ) { return m_isStereo; }
  59. // begin sampling
  60. void Start( void );
  61. // stop sampling
  62. void Stop( void );
  63. void WaveMessage( HWAVEIN hdevice, UINT uMsg, DWORD dwParam1, DWORD dwParam2 );
  64. private:
  65. void OpenDevice( void );
  66. bool ValidDevice( void ) { return m_deviceId != 0xFFFFFFFF; }
  67. void ClearDevice( void ) { m_deviceId = 0xFFFFFFFF; }
  68. // returns true if the new format is better
  69. bool BetterFormat( DWORD dwNewFormat, DWORD dwOldFormat );
  70. void InitReadyList( void );
  71. void AddToReadyList( WAVEHDR *pBuffer );
  72. void PopReadyList( void );
  73. WAVEHDR *m_pReadyList;
  74. int m_sampleSize;
  75. int m_sampleRate;
  76. bool m_isStereo;
  77. UINT m_deviceId;
  78. HWAVEIN m_deviceHandle;
  79. WAVEHDR *m_buffers[ INPUT_BUFFER_COUNT ];
  80. };
  81. extern "C" void CALLBACK WaveData( HWAVEIN hwi, UINT uMsg, CAudioWaveInput *pAudio, DWORD dwParam1, DWORD dwParam2 );
  82. CAudioWaveInput::CAudioWaveInput( void )
  83. {
  84. memset( m_buffers, 0, sizeof( m_buffers ) );
  85. int deviceCount = (int)waveInGetNumDevs();
  86. UINT deviceId = 0;
  87. DWORD deviceFormat = 0;
  88. int i;
  89. for ( i = 0; i < deviceCount; i++ )
  90. {
  91. WAVEINCAPS waveCaps;
  92. MMRESULT errorCode = waveInGetDevCaps( (UINT)i, &waveCaps, sizeof(waveCaps) );
  93. if ( errorCode == MMSYSERR_NOERROR )
  94. {
  95. // valid device
  96. if ( BetterFormat( waveCaps.dwFormats, deviceFormat ) )
  97. {
  98. deviceId = i;
  99. deviceFormat = waveCaps.dwFormats;
  100. }
  101. }
  102. }
  103. if ( !deviceFormat )
  104. {
  105. m_deviceId = 0xFFFFFFFF;
  106. m_sampleSize = 0;
  107. m_sampleRate = 0;
  108. m_isStereo = false;
  109. }
  110. else
  111. {
  112. m_deviceId = deviceId;
  113. m_sampleRate = 44100;
  114. m_isStereo = false;
  115. if ( deviceFormat & WAVE_FORMAT_4M16 )
  116. {
  117. m_sampleSize = 2;
  118. }
  119. else if ( deviceFormat & WAVE_FORMAT_4M08 )
  120. {
  121. m_sampleSize = 1;
  122. }
  123. else
  124. {
  125. // ERROR! neither 44 kHz mono format is available; m_sampleSize is left unset here
  126. }
  127. OpenDevice();
  128. }
  129. InitReadyList();
  130. }
  131. CAudioWaveInput::~CAudioWaveInput( void )
  132. {
  133. if ( ValidDevice() )
  134. {
  135. Stop();
  136. waveInReset( m_deviceHandle );
  137. waveInClose( m_deviceHandle );
  138. for ( int i = 0; i < INPUT_BUFFER_COUNT; i++ )
  139. {
  140. if ( m_buffers[i] )
  141. {
  142. waveInUnprepareHeader( m_deviceHandle, m_buffers[i], sizeof( *m_buffers[i] ) );
  143. delete[] m_buffers[i]->lpData;
  144. delete m_buffers[i];
  145. }
  146. m_buffers[i] = NULL;
  147. }
  148. ClearDevice();
  149. }
  150. }
  151. void CALLBACK WaveData( HWAVEIN hwi, UINT uMsg, CAudioWaveInput *pAudio, DWORD dwParam1, DWORD dwParam2 )
  152. {
  153. if ( pAudio )
  154. {
  155. pAudio->WaveMessage( hwi, uMsg, dwParam1, dwParam2 );
  156. }
  157. }
  158. void CAudioWaveInput::WaveMessage( HWAVEIN hdevice, UINT uMsg, DWORD dwParam1, DWORD dwParam2 )
  159. {
  160. if ( hdevice != m_deviceHandle )
  161. return;
  162. switch( uMsg )
  163. {
  164. case WIM_DATA:
  165. WAVEHDR *pHeader = (WAVEHDR *)dwParam1;
  166. AddToReadyList( pHeader );
  167. break;
  168. }
  169. }
  170. void CAudioWaveInput::OpenDevice( void )
  171. {
  172. if ( !ValidDevice() )
  173. return;
  174. WAVEFORMATEX format;
  175. memset( &format, 0, sizeof(format) );
  176. format.nAvgBytesPerSec = m_sampleRate * m_sampleSize;
  177. format.nChannels = 1;
  178. format.wBitsPerSample = m_sampleSize * 8;
  179. format.nSamplesPerSec = m_sampleRate;
  180. format.wFormatTag = WAVE_FORMAT_PCM;
  181. format.nBlockAlign = m_sampleSize;
  182. MMRESULT errorCode = waveInOpen( &m_deviceHandle, m_deviceId, &format, (DWORD)WaveData, (DWORD)this, CALLBACK_FUNCTION );
  183. if ( errorCode == MMSYSERR_NOERROR )
  184. {
  185. // valid device opened
  186. int bufferSize = m_sampleSize * m_sampleRate / INPUT_BUFFER_COUNT; // total of one second of data
  187. // allocate buffers
  188. for ( int i = 0; i < INPUT_BUFFER_COUNT; i++ )
  189. {
  190. m_buffers[i] = new WAVEHDR;
  191. m_buffers[i]->lpData = new char[ bufferSize ];
  192. m_buffers[i]->dwBufferLength = bufferSize;
  193. m_buffers[i]->dwUser = 0;
  194. m_buffers[i]->dwFlags = 0;
  195. waveInPrepareHeader( m_deviceHandle, m_buffers[i], sizeof( *m_buffers[i] ) );
  196. waveInAddBuffer( m_deviceHandle, m_buffers[i], sizeof( *m_buffers[i] ) );
  197. }
  198. }
  199. else
  200. {
  201. ClearDevice();
  202. }
  203. }
  204. void CAudioWaveInput::Start( void )
  205. {
  206. if ( !ValidDevice() )
  207. return;
  208. waveInStart( m_deviceHandle );
  209. }
  210. void CAudioWaveInput::Stop( void )
  211. {
  212. if ( !ValidDevice() )
  213. return;
  214. waveInStop( m_deviceHandle );
  215. }
  216. void CAudioWaveInput::InitReadyList( void )
  217. {
  218. m_pReadyList = NULL;
  219. }
  220. void CAudioWaveInput::AddToReadyList( WAVEHDR *pBuffer )
  221. {
  222. WAVEHDR **pList = &m_pReadyList;
  223. waveInUnprepareHeader( m_deviceHandle, pBuffer, sizeof(*pBuffer) );
  224. // insert at the tail of the list
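  // Note: this runs from the waveInOpen callback path (WaveData -> WaveMessage), and the
  // WAVEHDR dwUser field is reused as a "next" pointer to chain completed buffers into a
  // singly linked ready list. The waveform-audio documentation cautions against calling
  // other waveIn* functions (such as waveInUnprepareHeader above) from inside the driver
  // callback.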
  225. while ( *pList )
  226. {
  227. pList = reinterpret_cast<WAVEHDR **>(&((*pList)->dwUser));
  228. }
  229. pBuffer->dwUser = NULL;
  230. *pList = pBuffer;
  231. }
  232. void CAudioWaveInput::PopReadyList( void )
  233. {
  234. if ( m_pReadyList )
  235. {
  236. WAVEHDR *pBuffer = m_pReadyList;
  237. m_pReadyList = reinterpret_cast<WAVEHDR *>(m_pReadyList->dwUser);
  238. waveInPrepareHeader( m_deviceHandle, pBuffer, sizeof(*pBuffer) );
  239. waveInAddBuffer( m_deviceHandle, pBuffer, sizeof(*pBuffer) );
  240. }
  241. }
  242. #define WAVE_FORMAT_STEREO (WAVE_FORMAT_1S08|WAVE_FORMAT_1S16|WAVE_FORMAT_2S08|WAVE_FORMAT_2S16|WAVE_FORMAT_4S08|WAVE_FORMAT_4S16)
  243. #define WAVE_FORMATS_UNDERSTOOD (0xFFF)
  244. #define WAVE_FORMAT_11K (WAVE_FORMAT_1M08|WAVE_FORMAT_1M16)
  245. #define WAVE_FORMAT_22K (WAVE_FORMAT_2M08|WAVE_FORMAT_2M16)
  246. #define WAVE_FORMAT_44K (WAVE_FORMAT_4M08|WAVE_FORMAT_4M16)
  247. static int HighestBit( DWORD dwFlags )
  248. {
  249. int i = 31;
  250. while ( i )
  251. {
  252. if ( dwFlags & (1<<i) )
  253. return i;
  254. i--;
  255. }
  256. return 0;
  257. }
  258. bool CAudioWaveInput::BetterFormat( DWORD dwNewFormat, DWORD dwOldFormat )
  259. {
  260. dwNewFormat &= WAVE_FORMATS_UNDERSTOOD & (~WAVE_FORMAT_STEREO);
  261. dwOldFormat &= WAVE_FORMATS_UNDERSTOOD & (~WAVE_FORMAT_STEREO);
  262. // our target format is 44.1KHz, mono, 16-bit
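  // The WAVE_FORMAT_* capability bits are ordered from 11.025 kHz / 8-bit (low bits) up to
  // 44.1 kHz / 16-bit (high bits), so after masking out the stereo formats the word whose
  // highest set bit is larger is the one closer to the target format.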
  263. if ( HighestBit(dwOldFormat) >= HighestBit(dwNewFormat) )
  264. return false;
  265. return true;
  266. }
  267. int CAudioWaveInput::SampleCount( void )
  268. {
  269. if ( !ValidDevice() )
  270. return 0;
  271. if ( m_pReadyList )
  272. {
  273. switch( SampleSize() )
  274. {
  275. case 2:
  276. return m_pReadyList->dwBytesRecorded >> 1;
  277. case 1:
  278. return m_pReadyList->dwBytesRecorded;
  279. default:
  280. break;
  281. }
  282. }
  283. return 0;
  284. }
  285. void *CAudioWaveInput::SampleData( void )
  286. {
  287. if ( !ValidDevice() )
  288. return NULL;
  289. if ( m_pReadyList )
  290. {
  291. return m_pReadyList->lpData;
  292. }
  293. return NULL;
  294. }
  295. // release the available data (mark as done)
  296. void CAudioWaveInput::SampleRelease( void )
  297. {
  298. PopReadyList();
  299. }
  300. // factory to create a suitable audio input for this system
  301. CAudioInput *CAudioInput::Create( void )
  302. {
  303. // sound source is a singleton for now
  304. static CAudioInput *pSource = NULL;
  305. if ( !pSource )
  306. {
  307. pSource = new CAudioWaveInput;
  308. }
  309. return pSource;
  310. }
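// Illustrative usage sketch (not part of this file): a caller polls the singleton input
// device roughly like this, using only the CAudioInput interface declared above.
// ProcessSamples and the bRecording flag are placeholders.
//
//   CAudioInput *pInput = CAudioInput::Create();
//   pInput->Start();
//   while ( bRecording )
//   {
//       if ( pInput->SampleCount() > 0 )
//       {
//           ProcessSamples( pInput->SampleData(), pInput->SampleCount(), pInput->SampleSize() );
//           pInput->SampleRelease();   // hand the buffer back so it can be re-queued
//       }
//   }
//   pInput->Stop();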
  311. void CAudioDeviceSWMix::Mix8Mono( channel_t *pChannel, char *pData, int outputOffset, int inputOffset, int rateScaleFix, int outCount, int timecompress, bool forward )
  312. {
  313. int sampleIndex = 0;
  314. fixedint sampleFrac = inputOffset;
  315. int fixup = 0;
  316. int fixupstep = 1;
  317. if ( !forward )
  318. {
  319. fixup = outCount - 1;
  320. fixupstep = -1;
  321. }
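  // rateScaleFix is a fixed-point (FIX_*) step used for sample-rate conversion: each output
  // sample advances the source position by rateScaleFix; the integer part moves sampleIndex
  // and the fractional remainder carries over in sampleFrac.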
  322. for ( int i = 0; i < outCount; i++, fixup += fixupstep )
  323. {
  324. int dest = max( outputOffset + fixup, 0 );
  325. m_paintbuffer[ dest ].left += pChannel->leftvol * pData[sampleIndex];
  326. m_paintbuffer[ dest ].right += pChannel->rightvol * pData[sampleIndex];
  327. sampleFrac += rateScaleFix;
  328. sampleIndex += FIX_INTPART(sampleFrac);
  329. sampleFrac = FIX_FRACPART(sampleFrac);
  330. }
  331. }
  332. void CAudioDeviceSWMix::Mix8Stereo( channel_t *pChannel, char *pData, int outputOffset, int inputOffset, int rateScaleFix, int outCount, int timecompress, bool forward )
  333. {
  334. int sampleIndex = 0;
  335. fixedint sampleFrac = inputOffset;
  336. int fixup = 0;
  337. int fixupstep = 1;
  338. if ( !forward )
  339. {
  340. fixup = outCount - 1;
  341. fixupstep = -1;
  342. }
  343. for ( int i = 0; i < outCount; i++, fixup += fixupstep )
  344. {
  345. int dest = max( outputOffset + fixup, 0 );
  346. m_paintbuffer[ dest ].left += pChannel->leftvol * pData[sampleIndex];
  347. m_paintbuffer[ dest ].right += pChannel->rightvol * pData[sampleIndex+1];
  348. sampleFrac += rateScaleFix;
  349. sampleIndex += FIX_INTPART(sampleFrac)<<1;
  350. sampleFrac = FIX_FRACPART(sampleFrac);
  351. }
  352. }
  353. void CAudioDeviceSWMix::Mix16Mono( channel_t *pChannel, short *pData, int outputOffset, int inputOffset, int rateScaleFix, int outCount, int timecompress, bool forward )
  354. {
  355. int sampleIndex = 0;
  356. fixedint sampleFrac = inputOffset;
  357. int fixup = 0;
  358. int fixupstep = 1;
  359. if ( !forward )
  360. {
  361. fixup = outCount - 1;
  362. fixupstep = -1;
  363. }
  364. for ( int i = 0; i < outCount; i++, fixup += fixupstep )
  365. {
  366. int dest = max( outputOffset + fixup, 0 );
  367. m_paintbuffer[ dest ].left += (pChannel->leftvol * pData[sampleIndex])>>8;
  368. m_paintbuffer[ dest ].right += (pChannel->rightvol * pData[sampleIndex])>>8;
  369. sampleFrac += rateScaleFix;
  370. sampleIndex += FIX_INTPART(sampleFrac);
  371. sampleFrac = FIX_FRACPART(sampleFrac);
  372. }
  373. }
  374. void CAudioDeviceSWMix::Mix16Stereo( channel_t *pChannel, short *pData, int outputOffset, int inputOffset, int rateScaleFix, int outCount, int timecompress, bool forward )
  375. {
  376. int sampleIndex = 0;
  377. fixedint sampleFrac = inputOffset;
  378. int fixup = 0;
  379. int fixupstep = 1;
  380. if ( !forward )
  381. {
  382. fixup = outCount - 1;
  383. fixupstep = -1;
  384. }
  385. for ( int i = 0; i < outCount; i++, fixup += fixupstep )
  386. {
  387. int dest = max( outputOffset + fixup, 0 );
  388. m_paintbuffer[ dest ].left += (pChannel->leftvol * pData[sampleIndex])>>8;
  389. m_paintbuffer[ dest ].right += (pChannel->rightvol * pData[sampleIndex+1])>>8;
  390. sampleFrac += rateScaleFix;
  391. sampleIndex += FIX_INTPART(sampleFrac)<<1;
  392. sampleFrac = FIX_FRACPART(sampleFrac);
  393. }
  394. }
  395. int CAudioDeviceSWMix::MaxSampleCount( void )
  396. {
  397. return PAINTBUFFER_SIZE;
  398. }
  399. void CAudioDeviceSWMix::MixBegin( void )
  400. {
  401. memset( m_paintbuffer, 0, sizeof(m_paintbuffer) );
  402. }
  403. void CAudioDeviceSWMix::TransferBufferStereo16( short *pOutput, int sampleCount )
  404. {
  405. for ( int i = 0; i < sampleCount; i++ )
  406. {
  407. if ( m_paintbuffer[i].left > 32767 )
  408. m_paintbuffer[i].left = 32767;
  409. else if ( m_paintbuffer[i].left < -32768 )
  410. m_paintbuffer[i].left = -32768;
  411. if ( m_paintbuffer[i].right > 32767 )
  412. m_paintbuffer[i].right = 32767;
  413. else if ( m_paintbuffer[i].right < -32768 )
  414. m_paintbuffer[i].right = -32768;
  415. *pOutput++ = (short)m_paintbuffer[i].left;
  416. *pOutput++ = (short)m_paintbuffer[i].right;
  417. }
  418. }
  419. CAudioWaveOutput::CAudioWaveOutput( void )
  420. {
  421. for ( int i = 0; i < OUTPUT_BUFFER_COUNT; i++ )
  422. {
  423. CAudioBuffer *buffer = &m_buffers[ i ];
  424. Assert( buffer );
  425. buffer->hdr = NULL;
  426. buffer->submitted = false;
  427. buffer->submit_sample_count = 0;
  428. }
  429. ClearDevice();
  430. OpenDevice();
  431. m_mixTime = -1;
  432. m_sampleIndex = 0;
  433. memset( m_sourceList, 0, sizeof(m_sourceList) );
  434. m_nEstimatedSamplesAhead = (int)( ( float ) OUTPUT_SAMPLE_RATE / 10.0f );
  435. }
  436. void CAudioWaveOutput::RemoveMixerChannelReferences( CAudioMixer *mixer )
  437. {
  438. for ( int i = 0; i < OUTPUT_BUFFER_COUNT; i++ )
  439. {
  440. RemoveFromReferencedList( mixer, &m_buffers[ i ] );
  441. }
  442. }
  443. void CAudioWaveOutput::AddToReferencedList( CAudioMixer *mixer, CAudioBuffer *buffer )
  444. {
  445. // Already in list
  446. for ( int i = 0; i < buffer->m_Referenced.Size(); i++ )
  447. {
  448. if ( buffer->m_Referenced[ i ].mixer == mixer )
  449. {
  450. return;
  451. }
  452. }
  453. // Not referenced yet, so add it to the tail
  454. int idx = buffer->m_Referenced.AddToTail();
  455. CAudioMixerState *state = &buffer->m_Referenced[ idx ];
  456. state->mixer = mixer;
  457. state->submit_mixer_sample = mixer->GetSamplePosition();
  458. }
  459. void CAudioWaveOutput::RemoveFromReferencedList( CAudioMixer *mixer, CAudioBuffer *buffer )
  460. {
  461. for ( int i = 0; i < buffer->m_Referenced.Size(); i++ )
  462. {
  463. if ( buffer->m_Referenced[ i ].mixer == mixer )
  464. {
  465. buffer->m_Referenced.Remove( i );
  466. break;
  467. }
  468. }
  469. }
  470. bool CAudioWaveOutput::IsSoundInReferencedList( CAudioMixer *mixer, CAudioBuffer *buffer )
  471. {
  472. for ( int i = 0; i < buffer->m_Referenced.Size(); i++ )
  473. {
  474. if ( buffer->m_Referenced[ i ].mixer == mixer )
  475. {
  476. return true;
  477. }
  478. }
  479. return false;
  480. }
  481. bool CAudioWaveOutput::IsSourceReferencedByActiveBuffer( CAudioMixer *mixer )
  482. {
  483. if ( !ValidDevice() )
  484. return false;
  485. CAudioBuffer *buffer;
  486. for ( int i = 0; i < OUTPUT_BUFFER_COUNT; i++ )
  487. {
  488. buffer = &m_buffers[ i ];
  489. if ( !buffer->submitted )
  490. continue;
  491. if ( buffer->hdr->dwFlags & WHDR_DONE )
  492. continue;
  493. // See if it's referenced
  494. if ( IsSoundInReferencedList( mixer, buffer ) )
  495. return true;
  496. }
  497. return false;
  498. }
  499. CAudioWaveOutput::~CAudioWaveOutput( void )
  500. {
  501. if ( ValidDevice() )
  502. {
  503. waveOutReset( m_deviceHandle );
  504. for ( int i = 0; i < OUTPUT_BUFFER_COUNT; i++ )
  505. {
  506. if ( m_buffers[i].hdr )
  507. {
  508. waveOutUnprepareHeader( m_deviceHandle, m_buffers[i].hdr, sizeof(*m_buffers[i].hdr) );
  509. delete[] m_buffers[i].hdr->lpData;
  510. delete m_buffers[i].hdr;
  511. }
  512. m_buffers[i].hdr = NULL;
  513. m_buffers[i].submitted = false;
  514. m_buffers[i].submit_sample_count = 0;
  515. m_buffers[i].m_Referenced.Purge();
  516. }
  517. waveOutClose( m_deviceHandle );
  518. ClearDevice();
  519. }
  520. }
  521. CAudioBuffer *CAudioWaveOutput::GetEmptyBuffer( void )
  522. {
  523. CAudioBuffer *pOutput = NULL;
  524. if ( ValidDevice() )
  525. {
  526. for ( int i = 0; i < OUTPUT_BUFFER_COUNT; i++ )
  527. {
  528. if ( !(m_buffers[ i ].submitted ) ||
  529. m_buffers[i].hdr->dwFlags & WHDR_DONE )
  530. {
  531. pOutput = &m_buffers[i];
  532. pOutput->submitted = true;
  533. pOutput->m_Referenced.Purge();
  534. break;
  535. }
  536. }
  537. }
  538. return pOutput;
  539. }
  540. void CAudioWaveOutput::SilenceBuffer( short *pSamples, int sampleCount )
  541. {
  542. int i;
  543. for ( i = 0; i < sampleCount; i++ )
  544. {
  545. // left
  546. *pSamples++ = 0;
  547. // right
  548. *pSamples++ = 0;
  549. }
  550. }
  551. void CAudioWaveOutput::Flush( void )
  552. {
  553. waveOutReset( m_deviceHandle );
  554. }
  555. // mix a buffer up to time (time is absolute)
  556. void CAudioWaveOutput::Update( float time )
  557. {
  558. if ( !ValidDevice() )
  559. return;
  560. // reset the system
  561. if ( m_mixTime < 0 || time < m_baseTime )
  562. {
  563. m_baseTime = time;
  564. m_mixTime = 0;
  565. }
  566. // put time in our coordinate frame
  567. time -= m_baseTime;
  568. if ( time > m_mixTime )
  569. {
  570. CAudioBuffer *pBuffer = GetEmptyBuffer();
  571. // no free buffers, mixing is ahead of the playback!
  572. if ( !pBuffer || !pBuffer->hdr )
  573. {
  574. //Con_Printf( "out of buffers\n" );
  575. return;
  576. }
  577. // UNDONE: These numbers are constants
  578. // calc number of sample frames: dwBufferLength / 4 (2 channels * 2 bytes per sample)
  579. int sampleCount = pBuffer->hdr->dwBufferLength >> 2;
  580. m_mixTime += sampleCount * (1.0f / OUTPUT_SAMPLE_RATE);
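  // m_mixTime tracks how far ahead of m_baseTime mixing has progressed, in seconds; each
  // buffer mixed here pushes it forward by that buffer's duration.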
  581. short *pSamples = reinterpret_cast<short *>(pBuffer->hdr->lpData);
  582. SilenceBuffer( pSamples, sampleCount );
  583. int tempCount = sampleCount;
  584. while ( tempCount > 0 )
  585. {
  586. if ( tempCount > m_audioDevice.MaxSampleCount() )
  587. sampleCount = m_audioDevice.MaxSampleCount();
  588. else
  589. sampleCount = tempCount;
  590. m_audioDevice.MixBegin();
  591. for ( int i = 0; i < MAX_CHANNELS; i++ )
  592. {
  593. CAudioMixer *pSource = m_sourceList[i];
  594. if ( !pSource )
  595. continue;
  596. StudioModel *model = NULL;
  597. int modelindex = pSource->GetModelIndex();
  598. if ( modelindex >= 0 )
  599. {
  600. model = models->GetStudioModel( modelindex );
  601. }
  602. else
  603. {
  604. if ( g_pPhonemeEditor->IsActiveTool() || g_pWaveBrowser->IsActiveTool() )
  605. {
  606. model = models->GetActiveStudioModel();
  607. }
  608. }
  609. if ( model && !model->m_mouth.IsSourceReferenced( pSource->GetSource() ) )
  610. {
  611. CChoreoScene *pScene = g_pChoreoView->GetScene();
  612. bool bIgnorePhonemes = pScene ? pScene->ShouldIgnorePhonemes() : false;
  613. model->m_mouth.AddSource( pSource->GetSource(), bIgnorePhonemes );
  614. if ( modelindex < 0 )
  615. {
  616. pSource->SetModelIndex( models->GetIndexForStudioModel( model ) );
  617. }
  618. }
  619. int currentsample = pSource->GetSamplePosition();
  620. bool forward = pSource->GetDirection();
  621. if ( pSource->GetActive() )
  622. {
  623. if ( !pSource->MixDataToDevice( &m_audioDevice, pSource->GetChannel(), currentsample, sampleCount, SampleRate(), forward ) )
  624. {
  625. // Source becomes inactive when last submitted sample is finally
  626. // submitted. But it lingers until it's no longer referenced
  627. pSource->SetActive( false );
  628. }
  629. else
  630. {
  631. AddToReferencedList( pSource, pBuffer );
  632. }
  633. }
  634. else
  635. {
  636. if ( !IsSourceReferencedByActiveBuffer( pSource ) )
  637. {
  638. if ( !pSource->GetAutoDelete() )
  639. {
  640. FreeChannel( i );
  641. }
  642. }
  643. else
  644. {
  645. pSource->IncrementSamples( pSource->GetChannel(), currentsample, sampleCount, SampleRate(), forward );
  646. }
  647. }
  648. }
  649. m_audioDevice.TransferBufferStereo16( pSamples, sampleCount );
  650. m_sampleIndex += sampleCount;
  651. tempCount -= sampleCount;
  652. pSamples += sampleCount * 2;
  653. }
  654. // if the buffers aren't aligned on sample boundaries, this will hard-lock the machine!
  655. pBuffer->submit_sample_count = GetOutputPosition();
  656. waveOutWrite( m_deviceHandle, pBuffer->hdr, sizeof(*(pBuffer->hdr)) );
  657. }
  658. }
  659. int CAudioWaveOutput::GetNumberofSamplesAhead( void )
  660. {
  661. ComputeSampleAheadAmount();
  662. return m_nEstimatedSamplesAhead;
  663. }
  664. float CAudioWaveOutput::GetAmountofTimeAhead( void )
  665. {
  666. ComputeSampleAheadAmount();
  667. return ( (float)m_nEstimatedSamplesAhead / (float)OUTPUT_SAMPLE_RATE );
  668. }
  669. // Find the most recent submitted sample that isn't flagged as whdr_done
  670. void CAudioWaveOutput::ComputeSampleAheadAmount( void )
  671. {
  672. m_nEstimatedSamplesAhead = 0;
  673. int newest_sample_index = -1;
  674. int newest_sample_count = 0;
  675. CAudioBuffer *buffer;
  676. if ( ValidDevice() )
  677. {
  678. for ( int i = 0; i < OUTPUT_BUFFER_COUNT; i++ )
  679. {
  680. buffer = &m_buffers[ i ];
  681. if ( !buffer->submitted )
  682. continue;
  683. if ( buffer->hdr->dwFlags & WHDR_DONE )
  684. continue;
  685. if ( buffer->submit_sample_count > newest_sample_count )
  686. {
  687. newest_sample_index = i;
  688. newest_sample_count = buffer->submit_sample_count;
  689. }
  690. }
  691. }
  692. if ( newest_sample_index == -1 )
  693. return;
  694. buffer = &m_buffers[ newest_sample_index ];
  695. int currentPos = GetOutputPosition() ;
  696. m_nEstimatedSamplesAhead = currentPos - buffer->submit_sample_count;
  697. }
  698. int CAudioWaveOutput::FindSourceIndex( CAudioMixer *pSource )
  699. {
  700. for ( int i = 0; i < MAX_CHANNELS; i++ )
  701. {
  702. if ( pSource == m_sourceList[i] )
  703. {
  704. return i;
  705. }
  706. }
  707. return -1;
  708. }
  709. CAudioMixer *CAudioWaveOutput::GetMixerForSource( CAudioSource *source )
  710. {
  711. for ( int i = 0; i < MAX_CHANNELS; i++ )
  712. {
  713. if ( !m_sourceList[i] )
  714. continue;
  715. if ( source == m_sourceList[i]->GetSource() )
  716. {
  717. return m_sourceList[i];
  718. }
  719. }
  720. return NULL;
  721. }
  722. void CAudioWaveOutput::AddSource( CAudioMixer *pSource )
  723. {
  724. int slot = 0;
  725. for ( int i = 0; i < MAX_CHANNELS; i++ )
  726. {
  727. if ( !m_sourceList[i] )
  728. {
  729. slot = i;
  730. break;
  731. }
  732. }
  733. if ( m_sourceList[slot] )
  734. {
  735. FreeChannel( slot );
  736. }
  737. SetChannel( slot, pSource );
  738. pSource->SetActive( true );
  739. }
  740. void CAudioWaveOutput::StopSounds( void )
  741. {
  742. for ( int i = 0; i < MAX_CHANNELS; i++ )
  743. {
  744. if ( m_sourceList[i] )
  745. {
  746. FreeChannel( i );
  747. }
  748. }
  749. }
  750. void CAudioWaveOutput::SetChannel( int channelIndex, CAudioMixer *pSource )
  751. {
  752. if ( channelIndex < 0 || channelIndex >= MAX_CHANNELS )
  753. return;
  754. m_sourceList[channelIndex] = pSource;
  755. }
  756. void CAudioWaveOutput::FreeChannel( int channelIndex )
  757. {
  758. if ( channelIndex < 0 || channelIndex >= MAX_CHANNELS )
  759. return;
  760. if ( m_sourceList[channelIndex] )
  761. {
  762. StudioModel *model = NULL;
  763. int modelindex = m_sourceList[channelIndex]->GetModelIndex();
  764. if ( modelindex >= 0)
  765. {
  766. model = models->GetStudioModel( modelindex );
  767. }
  768. if ( model )
  769. {
  770. model->m_mouth.RemoveSource( m_sourceList[channelIndex]->GetSource() );
  771. }
  772. RemoveMixerChannelReferences( m_sourceList[channelIndex] );
  773. delete m_sourceList[channelIndex];
  774. m_sourceList[channelIndex] = NULL;
  775. }
  776. }
  777. int CAudioWaveOutput::GetOutputPosition( void )
  778. {
  779. if ( !m_deviceHandle )
  780. return 0;
  781. MMTIME mmtime;
  782. mmtime.wType = TIME_SAMPLES;
  783. waveOutGetPosition( m_deviceHandle, &mmtime, sizeof( MMTIME ) );
  784. // Convert time to sample count
  785. return ( mmtime.u.sample );
  786. }
  787. void CAudioWaveOutput::OpenDevice( void )
  788. {
  789. WAVEFORMATEX waveFormat;
  790. memset( &waveFormat, 0, sizeof(waveFormat) );
  791. // Select a PCM, 16-bit stereo playback device
  792. waveFormat.cbSize = sizeof(waveFormat);
  793. waveFormat.nAvgBytesPerSec = OUTPUT_SAMPLE_RATE * 2 * 2;
  794. waveFormat.nBlockAlign = 2 * 2; // channels * sample size
  795. waveFormat.nChannels = 2; // stereo
  796. waveFormat.nSamplesPerSec = OUTPUT_SAMPLE_RATE;
  797. waveFormat.wBitsPerSample = 16;
  798. waveFormat.wFormatTag = WAVE_FORMAT_PCM;
  799. MMRESULT errorCode = waveOutOpen( &m_deviceHandle, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL );
  800. if ( errorCode == MMSYSERR_NOERROR )
  801. {
  802. int bufferSize = 4 * ( OUTPUT_SAMPLE_RATE / OUTPUT_BUFFER_COUNT ); // total of 1 second of data
  803. // Got one!
  804. for ( int i = 0; i < OUTPUT_BUFFER_COUNT; i++ )
  805. {
  806. m_buffers[i].hdr = new WAVEHDR;
  807. m_buffers[i].hdr->lpData = new char[ bufferSize ];
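  // The lines below round the buffer start up to a 4-byte boundary. Caveat: the destructor
  // later passes lpData to delete[], so if this adjustment ever fires the original new[]
  // pointer is lost, and the length is reduced by (align & 3) rather than by the number of
  // bytes actually skipped.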
  808. long align = (long)m_buffers[i].hdr->lpData;
  809. if ( align & 3 )
  810. {
  811. m_buffers[i].hdr->lpData = (char *) ( (align+3) &~3 );
  812. }
  813. m_buffers[i].hdr->dwBufferLength = bufferSize - (align&3);
  814. m_buffers[i].hdr->dwFlags = 0;
  815. if ( waveOutPrepareHeader( m_deviceHandle, m_buffers[i].hdr, sizeof(*m_buffers[i].hdr) ) != MMSYSERR_NOERROR )
  816. {
  817. ClearDevice();
  818. return;
  819. }
  820. }
  821. }
  822. else
  823. {
  824. ClearDevice();
  825. }
  826. }
  827. // factory to create a suitable audio output for this system
  828. CAudioOutput *CAudioOutput::Create( void )
  829. {
  830. // sound device is a singleton for now
  831. static CAudioOutput *pWaveOut = NULL;
  832. if ( !pWaveOut )
  833. {
  834. pWaveOut = new CAudioWaveOutput;
  835. }
  836. return pWaveOut;
  837. }
  838. struct CSoundFile
  839. {
  840. char filename[ 512 ];
  841. CAudioSource *source;
  842. long filetime;
  843. };
  844. //-----------------------------------------------------------------------------
  845. // Purpose:
  846. //-----------------------------------------------------------------------------
  847. class CFacePoserSound : public IFacePoserSound
  848. {
  849. public:
  850. ~CFacePoserSound( void );
  851. void Init( void );
  852. void Shutdown( void );
  853. void Update( float dt );
  854. void Flush( void );
  855. CAudioSource *LoadSound( const char *wavfile );
  856. void PlaySound( StudioModel *source, float volume, const char *wavfile, CAudioMixer **ppMixer );
  857. void PlaySound( CAudioSource *source, float volume, CAudioMixer **ppMixer );
  858. void PlayPartialSound( StudioModel *model, float volume, const char *wavfile, CAudioMixer **ppMixer, int startSample, int endSample );
  859. bool IsSoundPlaying( CAudioMixer *pMixer );
  860. CAudioMixer *FindMixer( CAudioSource *source );
  861. void StopAll( void );
  862. void StopSound( CAudioMixer *mixer );
  863. void RenderWavToDC( HDC dc, RECT& outrect, COLORREF clr, float starttime, float endtime,
  864. CAudioSource *pWave, bool selected = false, int selectionstart = 0, int selectionend = 0 );
  865. // void InstallPhonemecallback( IPhonemeTag *pTagInterface );
  866. float GetAmountofTimeAhead( void );
  867. int GetNumberofSamplesAhead( void );
  868. CAudioOuput *GetAudioOutput( void );
  869. virtual void EnsureNoModelReferences( CAudioSource *source );
  870. private:
  871. void AddViseme( float intensity, StudioModel *model, int phoneme, float scale );
  872. void ProcessCloseCaptionData( StudioModel *model, float curtime, CSentence* sentence );
  873. void SetupWeights( void );
  874. CAudioSource *FindOrAddSound( const char *filename );
  875. CAudioOutput *m_pAudio;
  876. float m_flElapsedTime;
  877. CUtlVector < CSoundFile > m_ActiveSounds;
  878. };
  879. static CFacePoserSound g_FacePoserSound;
  880. IFacePoserSound *sound = ( IFacePoserSound * )&g_FacePoserSound;
  881. CFacePoserSound::~CFacePoserSound( void )
  882. {
  883. OutputDebugString( va( "Removing %i sounds\n", m_ActiveSounds.Size() ) );
  884. for ( int i = 0 ; i < m_ActiveSounds.Size(); i++ )
  885. {
  886. CSoundFile *p = &m_ActiveSounds[ i ];
  887. OutputDebugString( va( "Removing sound: %s\n", p->filename ) );
  888. delete p->source;
  889. }
  890. m_ActiveSounds.RemoveAll();
  891. }
  892. //-----------------------------------------------------------------------------
  893. // Purpose:
  894. //-----------------------------------------------------------------------------
  895. CAudioOuput *CFacePoserSound::GetAudioOutput( void )
  896. {
  897. return (CAudioOuput *)m_pAudio;
  898. }
  899. CAudioSource *CFacePoserSound::FindOrAddSound( const char *filename )
  900. {
  901. CSoundFile *s;
  902. int i;
  903. for ( i = 0; i < m_ActiveSounds.Size(); i++ )
  904. {
  905. s = &m_ActiveSounds[ i ];
  906. Assert( s );
  907. if ( !stricmp( s->filename, filename ) )
  908. {
  909. long filetime = filesystem->GetFileTime( filename );
  910. if ( filetime != s->filetime )
  911. {
  912. Con_Printf( "Reloading sound %s\n", filename );
  913. delete s->source;
  914. s->source = LoadSound( filename );
  915. s->filetime = filetime;
  916. }
  917. return s->source;
  918. }
  919. }
  920. i = m_ActiveSounds.AddToTail();
  921. s = &m_ActiveSounds[ i ];
  922. strcpy( s->filename, filename );
  923. s->source = LoadSound( filename );
  924. s->filetime = filesystem->GetFileTime( filename );
  925. return s->source;
  926. }
  927. void CFacePoserSound::Init( void )
  928. {
  929. m_flElapsedTime = 0.0f;
  930. m_pAudio = CAudioOutput::Create();
  931. // Load SoundOverrides for Faceposer
  932. KeyValues *manifest = new KeyValues( "scripts/game_sounds_manifest.txt" );
  933. if ( filesystem->LoadKeyValues( *manifest, IFileSystem::TYPE_SOUNDEMITTER, "scripts/game_sounds_manifest.txt", "GAME" ) )
  934. {
  935. for ( KeyValues *sub = manifest->GetFirstSubKey(); sub != NULL; sub = sub->GetNextKey() )
  936. {
  937. if ( !Q_stricmp( sub->GetName(), "faceposer_file" ) )
  938. {
  939. soundemitter->AddSoundOverrides( sub->GetString() );
  940. continue;
  941. }
  942. }
  943. }
  944. manifest->deleteThis();
  945. }
  946. void CFacePoserSound::Shutdown( void )
  947. {
  948. }
  949. float CFacePoserSound::GetAmountofTimeAhead( void )
  950. {
  951. if ( !m_pAudio )
  952. return 0.0f;
  953. return m_pAudio->GetAmountofTimeAhead();
  954. }
  955. int CFacePoserSound::GetNumberofSamplesAhead( void )
  956. {
  957. if ( !m_pAudio )
  958. return 0;
  959. return m_pAudio->GetNumberofSamplesAhead();
  960. }
  961. CAudioSource *CFacePoserSound::LoadSound( const char *wavfile )
  962. {
  963. if ( !m_pAudio )
  964. return NULL;
  965. CAudioSource *wave = AudioSource_Create( wavfile );
  966. return wave;
  967. }
  968. void CFacePoserSound::PlaySound( StudioModel *model, float volume, const char *wavfile, CAudioMixer **ppMixer )
  969. {
  970. if ( m_pAudio )
  971. {
  972. CAudioSource *wave = FindOrAddSound( wavfile );
  973. if ( !wave )
  974. return;
  975. CAudioMixer *pMixer = wave->CreateMixer();
  976. if ( ppMixer )
  977. {
  978. *ppMixer = pMixer;
  979. }
  980. pMixer->SetVolume( volume );
  981. m_pAudio->AddSource( pMixer );
  982. if ( model )
  983. {
  984. pMixer->SetModelIndex( models->GetIndexForStudioModel( model ) );
  985. }
  986. }
  987. }
  988. void CFacePoserSound::PlayPartialSound( StudioModel *model, float volume, const char *wavfile, CAudioMixer **ppMixer, int startSample, int endSample )
  989. {
  990. if ( !m_pAudio )
  991. return;
  992. StopAll();
  993. CAudioSource *wave = FindOrAddSound( wavfile );
  994. if ( !wave )
  995. return;
  996. CAudioMixer *mixer = wave->CreateMixer();
  997. if ( ppMixer )
  998. {
  999. *ppMixer = mixer;
  1000. }
  1001. mixer->SetSamplePosition( startSample );
  1002. mixer->SetLoopPosition( endSample );
  1003. mixer->SetVolume( volume );
  1004. m_pAudio->AddSource( mixer );
  1005. }
  1006. void CFacePoserSound::PlaySound( CAudioSource *source, float volume, CAudioMixer **ppMixer )
  1007. {
  1008. if ( ppMixer )
  1009. {
  1010. *ppMixer = NULL;
  1011. }
  1012. if ( m_pAudio )
  1013. {
  1014. CAudioMixer *mixer = source->CreateMixer();
  1015. if ( ppMixer )
  1016. {
  1017. *ppMixer = mixer;
  1018. }
  1019. mixer->SetVolume( volume );
  1020. m_pAudio->AddSource( mixer );
  1021. }
  1022. }
  1023. enum
  1024. {
  1025. PHONEME_CLASS_WEAK = 0,
  1026. PHONEME_CLASS_NORMAL,
  1027. PHONEME_CLASS_STRONG,
  1028. NUM_PHONEME_CLASSES
  1029. };
  1030. struct Emphasized_Phoneme
  1031. {
  1032. char *classname;
  1033. bool required;
  1034. bool valid;
  1035. CExpClass *cl;
  1036. CExpression *exp;
  1037. float *settings;
  1038. float amount;
  1039. };
  1040. static Emphasized_Phoneme g_PhonemeClasses[ NUM_PHONEME_CLASSES ] =
  1041. {
  1042. { "phonemes_weak", false },
  1043. { "phonemes", true },
  1044. { "phonemes_strong", false },
  1045. };
  1046. #define STRONG_CROSSFADE_START 0.60f
  1047. #define WEAK_CROSSFADE_START 0.40f
  1048. void ComputeBlendedSetting( Emphasized_Phoneme *classes, float emphasis_intensity )
  1049. {
  1050. // Here's the formula
  1051. // 0.5 is neutral 100 % of the default setting
  1052. // Crossfade starts at STRONG_CROSSFADE_START and is full at STRONG_CROSSFADE_END
  1053. // If there isn't a strong then the intensity of the underlying phoneme is fixed at 2 x STRONG_CROSSFADE_START
  1054. // so we don't get huge numbers
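  // Worked example with the constants below: with a strong class present and
  // emphasis_intensity = 0.8, dist_remaining = 0.2 and frac = 0.2 / 0.4 = 0.5, so the
  // normal class gets 0.5 * 2 * 0.6 = 0.6 and the strong class gets 1 - 0.5 = 0.5.
  // At emphasis_intensity = 0.5 only the normal class is used, at amount 1.0.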
  1055. bool has_weak = classes[ PHONEME_CLASS_WEAK ].valid;
  1056. bool has_strong = classes[ PHONEME_CLASS_STRONG ].valid;
  1057. Assert( classes[ PHONEME_CLASS_NORMAL ].valid );
  1058. if ( emphasis_intensity > STRONG_CROSSFADE_START )
  1059. {
  1060. if ( has_strong )
  1061. {
  1062. // Blend in some of strong
  1063. float dist_remaining = 1.0f - emphasis_intensity;
  1064. float frac = dist_remaining / ( 1.0f - STRONG_CROSSFADE_START );
  1065. classes[ PHONEME_CLASS_NORMAL ].amount = (frac) * 2.0f * STRONG_CROSSFADE_START;
  1066. classes[ PHONEME_CLASS_STRONG ].amount = 1.0f - frac;
  1067. }
  1068. else
  1069. {
  1070. emphasis_intensity = min( emphasis_intensity, STRONG_CROSSFADE_START );
  1071. classes[ PHONEME_CLASS_NORMAL ].amount = 2.0f * emphasis_intensity;
  1072. }
  1073. }
  1074. else if ( emphasis_intensity < WEAK_CROSSFADE_START )
  1075. {
  1076. if ( has_weak )
  1077. {
  1078. // Blend in some weak
  1079. float dist_remaining = WEAK_CROSSFADE_START - emphasis_intensity;
  1080. float frac = dist_remaining / ( WEAK_CROSSFADE_START );
  1081. classes[ PHONEME_CLASS_NORMAL ].amount = (1.0f - frac) * 2.0f * WEAK_CROSSFADE_START;
  1082. classes[ PHONEME_CLASS_WEAK ].amount = frac;
  1083. }
  1084. else
  1085. {
  1086. emphasis_intensity = max( emphasis_intensity, WEAK_CROSSFADE_START );
  1087. classes[ PHONEME_CLASS_NORMAL ].amount = 2.0f * emphasis_intensity;
  1088. }
  1089. }
  1090. else
  1091. {
  1092. classes[ PHONEME_CLASS_NORMAL ].amount = 2.0f * emphasis_intensity;
  1093. }
  1094. }
  1095. void CFacePoserSound::AddViseme( float intensity, StudioModel *model, int phoneme, float scale )
  1096. {
  1097. int i;
  1098. Assert( model );
  1099. CStudioHdr *hdr = model->GetStudioHdr();
  1100. Assert( hdr );
  1101. if ( !hdr )
  1102. return;
  1103. for ( i = 0; i < NUM_PHONEME_CLASSES; i++ )
  1104. {
  1105. Emphasized_Phoneme *info = &g_PhonemeClasses[ i ];
  1106. info->valid = false;
  1107. info->exp = NULL;
  1108. info->settings = NULL;
  1109. info->amount = 0.0f;
  1110. info->cl = expressions->FindClass( info->classname, true );
  1111. if ( info->cl )
  1112. {
  1113. info->exp = info->cl->FindExpression( ConvertPhoneme( phoneme ) );
  1114. }
  1115. if ( info->required && ( !info->cl || !info->exp ) )
  1116. {
  1117. return;
  1118. }
  1119. if ( info->exp )
  1120. {
  1121. info->valid = true;
  1122. info->settings = info->exp->GetSettings();
  1123. Assert( info->settings );
  1124. }
  1125. }
  1126. ComputeBlendedSetting( g_PhonemeClasses, intensity );
  1127. // Look up the phoneme
  1128. for ( LocalFlexController_t i = LocalFlexController_t(0); i < hdr->numflexcontrollers(); i++)
  1129. {
  1130. int j = hdr->pFlexcontroller( i )->localToGlobal;
  1131. float add = 0.0f;
  1132. for ( int k = 0 ; k < NUM_PHONEME_CLASSES; k++ )
  1133. {
  1134. Emphasized_Phoneme *info = &g_PhonemeClasses[ k ];
  1135. if ( !info->valid || !info->amount )
  1136. continue;
  1137. add += info->amount * info->settings[ j ];
  1138. }
  1139. if ( add == 0.0f )
  1140. continue;
  1141. float curvalue = model->GetFlexController( i );
  1142. curvalue += add * scale;
  1143. model->SetFlexController( i, curvalue );
  1144. }
  1145. }
  1146. #define PHONEME_FILTER 0.08f
  1147. #define PHONEME_DELAY 0.0f
  1148. void CFacePoserSound::SetupWeights( void )
  1149. {
  1150. StudioModel *model;
  1151. int c = models->Count();
  1152. for ( int i = 0; i < c; i++ )
  1153. {
  1154. model = models->GetStudioModel( i );
  1155. if ( !model )
  1156. continue;
  1157. // Reset flexes
  1158. CStudioHdr *hdr = model->GetStudioHdr();
  1159. if ( !hdr )
  1160. continue;
  1161. for ( int s = 0; s < model->m_mouth.GetNumVoiceSources(); s++ )
  1162. {
  1163. CVoiceData *vd = model->m_mouth.GetVoiceSource( s );
  1164. if ( !vd || vd->ShouldIgnorePhonemes() )
  1165. continue;
  1166. CAudioSource *source = vd->GetSource();
  1167. // check for phoneme flexes
  1168. if ( !source )
  1169. continue;
  1170. CAudioMixer *mixer = FindMixer( source );
  1171. if ( !mixer )
  1172. continue;
  1173. CSentence *sentence = source->GetSentence();
  1174. if ( !sentence )
  1175. continue;
  1176. // Zero faces if needed
  1177. models->CheckResetFlexes();
  1178. float pos = (float)mixer->GetScrubPosition();
  1179. // Con_Printf( "pos %f for mixer %p\n", pos, mixer );
  1180. float soundtime = pos / source->SampleRate();
  1181. float t = soundtime - PHONEME_DELAY;
  1182. float dt = PHONEME_FILTER;
  1183. float sentence_duration = source->GetRunningLength();
  1184. float emphasis_intensity = sentence->GetIntensity( t, sentence_duration );
  1185. if ( t > 0.0f )
  1186. {
  1187. for ( int w = 0 ; w < sentence->m_Words.Size(); w++ )
  1188. {
  1189. CWordTag *word = sentence->m_Words[ w ];
  1190. if ( !word )
  1191. continue;
  1192. for ( int k = 0; k < word->m_Phonemes.Size(); k++)
  1193. {
  1194. CPhonemeTag *phoneme = word->m_Phonemes[ k ];
  1195. if ( !phoneme )
  1196. continue;
  1197. // if the filter starts within this phoneme, make sure the filter size is
  1198. // at least as long as the current phoneme, or until the end of the next phoneme,
  1199. // whichever is smaller
  1200. if (t > phoneme->GetStartTime() && t < phoneme->GetEndTime())
  1201. {
  1202. CPhonemeTag *next = NULL;
  1203. // try next phoneme, or first phoneme of next word
  1204. if (k < word->m_Phonemes.Size()-1)
  1205. {
  1206. next = word->m_Phonemes[ k+1 ];
  1207. }
  1208. else if ( w < sentence->m_Words.Size() - 1 && sentence->m_Words[ w+1 ]->m_Phonemes.Size() )
  1209. {
  1210. next = sentence->m_Words[ w+1 ]->m_Phonemes[ 0 ];
  1211. }
  1212. // if I have a neighbor
  1213. if (next)
  1214. {
  1215. // and they're touching
  1216. if (next->GetStartTime() == phoneme->GetEndTime())
  1217. {
  1218. // no gap, so increase the blend length to the end of the next phoneme, as long as it's not longer than the current phoneme
  1219. dt = max( dt, min( next->GetEndTime() - t, phoneme->GetEndTime() - phoneme->GetStartTime() ) );
  1220. }
  1221. else
  1222. {
  1223. // dead space, so increase the blend length to the start of the next phoneme, as long as it's not longer than the current phoneme
  1224. dt = max( dt, min( next->GetStartTime() - t, phoneme->GetEndTime() - phoneme->GetStartTime() ) );
  1225. }
  1226. }
  1227. else
  1228. {
  1229. // last phoneme in list, increase the blend length to the length of the current phoneme
  1230. dt = max( dt, phoneme->GetEndTime() - phoneme->GetStartTime() );
  1231. }
  1232. }
  1233. float t1 = ( phoneme->GetStartTime() - t) / dt;
  1234. float t2 = ( phoneme->GetEndTime() - t) / dt;
  1235. if (t1 < 1.0 && t2 > 0)
  1236. {
  1237. float scale;
  1238. // clamp
  1239. if (t2 > 1)
  1240. t2 = 1;
  1241. if (t1 < 0)
  1242. t1 = 0;
  1243. // FIXME: simple box filter. Should use something fancier
  1244. scale = (t2 - t1);
  1245. AddViseme( emphasis_intensity, model, phoneme->GetPhonemeCode(), scale );
  1246. }
  1247. }
  1248. }
  1249. ProcessCloseCaptionData( model, t, sentence );
  1250. }
  1251. }
  1252. }
  1253. }
  1254. static int g_nSoundFrameCount = 0;
  1255. void CFacePoserSound::ProcessCloseCaptionData( StudioModel *model, float curtime, CSentence* sentence )
  1256. {
  1257. // closecaptionmanager->Process( g_nSoundFrameCount, model, curtime, sentence, GetCloseCaptionLanguageId() );
  1258. }
  1259. void CFacePoserSound::Update( float dt )
  1260. {
  1261. // closecaptionmanager->PreProcess( g_nSoundFrameCount );
  1262. if ( m_pAudio )
  1263. {
  1264. SetupWeights();
  1265. m_pAudio->Update( m_flElapsedTime );
  1266. }
  1267. // closecaptionmanager->PostProcess( g_nSoundFrameCount, dt );
  1268. m_flElapsedTime += dt;
  1269. g_nSoundFrameCount++;
  1270. }
  1271. void CFacePoserSound::Flush( void )
  1272. {
  1273. if ( m_pAudio )
  1274. {
  1275. m_pAudio->Flush();
  1276. }
  1277. }
  1278. void CFacePoserSound::StopAll( void )
  1279. {
  1280. int c = models->Count();
  1281. for ( int i = 0; i < c; i++ )
  1282. {
  1283. StudioModel *model = models->GetStudioModel( i );
  1284. if ( model )
  1285. {
  1286. model->m_mouth.ClearVoiceSources();
  1287. }
  1288. }
  1289. if ( m_pAudio )
  1290. {
  1291. m_pAudio->StopSounds();
  1292. }
  1293. }
  1294. void CFacePoserSound::StopSound( CAudioMixer *mixer )
  1295. {
  1296. int idx = m_pAudio->FindSourceIndex( mixer );
  1297. if ( idx != -1 )
  1298. {
  1299. m_pAudio->FreeChannel( idx );
  1300. }
  1301. }
  1302. void CFacePoserSound::RenderWavToDC( HDC dc, RECT& outrect, COLORREF clr,
  1303. float starttime, float endtime, CAudioSource *pWave,
  1304. bool selected /*= false*/, int selectionstart /*= 0*/, int selectionend /*= 0*/ )
  1305. {
  1306. channel_t channel;
  1307. channel.leftvol = 127;
  1308. channel.rightvol = 127;
  1309. channel.pitch = 1.0;
  1310. if ( !pWave )
  1311. return;
  1312. CAudioWaveOutput *pWaveOutput = ( CAudioWaveOutput * )m_pAudio;
  1313. CAudioMixer *pMixer = pWave->CreateMixer();
  1314. float timeperpixel = ( endtime - starttime ) / (float)( outrect.right - outrect.left );
  1315. float samplesperpixel = timeperpixel * pWave->SampleRate();
  1316. samplesperpixel = min( samplesperpixel, (float)PAINTBUFFER_SIZE );
  1317. int intsamplesperpixel = (int)samplesperpixel;
  1318. // Determine start/stop positions
  1319. int totalsamples = (int)( pWave->GetRunningLength() * pWave->SampleRate() );
  1320. if ( totalsamples <= 0 )
  1321. return;
  1322. float selectionstarttime = pWave->GetRunningLength() * ( float )selectionstart / ( float )totalsamples;
  1323. float selectionendtime = pWave->GetRunningLength() * ( float )selectionend / ( float )totalsamples;
  1324. HPEN oldPen, pen, pen2, pen3, pen4;
  1325. pen = CreatePen( PS_SOLID, 1, RGB( 175, 175, 250 ) );
  1326. pen2 = CreatePen( PS_SOLID, 1, clr );
  1327. pen3 = CreatePen( PS_SOLID, 1, RGB( 127, 200, 249 ) );
  1328. pen4 = CreatePen( PS_SOLID, 2, RGB( 0, 0, 200 ) );
  1329. oldPen = (HPEN)SelectObject( dc, pen );
  1330. MoveToEx( dc, outrect.left, ( outrect.bottom + outrect.top ) / 2, NULL );
  1331. LineTo( dc, outrect.right, ( outrect.bottom + outrect.top ) / 2 );
  1332. SelectObject( dc, pen2 );
  1333. // Now iterate the samples
  1334. float currenttime = 0.0f;
  1335. int pixel = 0;
  1336. int height = ( outrect.bottom - outrect.top ) / 2;
  1337. int midy = ( outrect.bottom + outrect.top ) / 2;
  1338. int bufferlen = ( intsamplesperpixel + 3 ) & ~3;
  1339. short *samples = new short[ 2 * bufferlen ];
  1340. bool drawingselection = false;
  1341. int maxsamples = max( 32, intsamplesperpixel / 16 );
  1342. int currentsample = 0;
  1343. while ( currenttime < endtime )
  1344. {
  1345. pWaveOutput->m_audioDevice.MixBegin();
  1346. int samplecount = min( maxsamples, intsamplesperpixel );
  1347. if ( !pMixer->MixDataToDevice( &pWaveOutput->m_audioDevice, &channel, currentsample, samplecount, pWave->SampleRate(), true ) )
  1348. break;
  1349. currentsample = pMixer->GetSamplePosition();
  1350. // Jump ahead by diff
  1351. int diff = intsamplesperpixel - samplecount;
  1352. if ( diff > 0 )
  1353. {
  1354. if ( !pMixer->SkipSamples( &channel, currentsample, diff, pWave->SampleRate(), true ) )
  1355. break;
  1356. }
  1357. currentsample = pMixer->GetSamplePosition();
  1358. pWaveOutput->m_audioDevice.TransferBufferStereo16( samples, samplecount );
  1359. if ( currenttime >= starttime )
  1360. {
  1361. if ( selected )
  1362. {
  1363. bool boundary = false;
  1364. bool inselection = ( currenttime >= selectionstarttime &&
  1365. currenttime <= selectionendtime );
  1366. if ( inselection )
  1367. {
  1368. if ( !drawingselection )
  1369. {
  1370. drawingselection = true;
  1371. boundary = true;
  1372. }
  1373. }
  1374. else if ( drawingselection )
  1375. {
  1376. boundary = true;
  1377. drawingselection = false;
  1378. }
  1379. if ( inselection || boundary )
  1380. {
  1381. int top, bottom;
  1382. bottom = outrect.bottom;
  1383. HPEN *usePen;
  1384. if ( boundary )
  1385. {
  1386. usePen = &pen4;
  1387. top = outrect.top;
  1388. }
  1389. else
  1390. {
  1391. usePen = &pen3;
  1392. top = outrect.bottom - 19;
  1393. }
  1394. HPEN old = (HPEN)SelectObject( dc, *usePen );
  1395. MoveToEx( dc, outrect.left + pixel, top, NULL );
  1396. LineTo( dc, outrect.left + pixel, bottom-1 );
  1397. SelectObject( dc, old );
  1398. }
  1399. }
  1400. int maxvalue = -65536;
  1401. int minvalue = 65536;
  1402. short *pData = samples;
  1403. // average each left/right pair into a single value
  1404. int step = 2;
  1405. int count = 2 * samplecount;
  1406. for ( int i = 0; i < count; i+=step )
  1407. {
  1408. int val = (int)( pData[i] + pData[i+1] ) / 2;
  1409. if ( val > maxvalue )
  1410. {
  1411. maxvalue = val;
  1412. }
  1413. if ( val < minvalue )
  1414. {
  1415. minvalue = val;
  1416. }
  1417. }
  1418. float maxv = (float)( maxvalue ) / 32768.0f;
  1419. float minv = (float)( minvalue ) / 32768.0f;
  1420. MoveToEx( dc, outrect.left + pixel, midy + ( int ) ( maxv * height ), NULL );
  1421. LineTo( dc, outrect.left + pixel, midy + ( int ) ( minv * height ) );
  1422. pixel++;
  1423. }
  1424. currenttime += timeperpixel;
  1425. }
  1426. delete[] samples;
  1427. SelectObject( dc, oldPen );
  1428. DeleteObject( pen );
  1429. DeleteObject( pen2 );
  1430. DeleteObject( pen3 );
  DeleteObject( pen4 );
  1431. delete pMixer;
  1432. }
  1433. bool CFacePoserSound::IsSoundPlaying( CAudioMixer *pMixer )
  1434. {
  1435. if ( !m_pAudio || !pMixer )
  1436. {
  1437. return false;
  1438. }
  1439. //
  1440. int index = m_pAudio->FindSourceIndex( pMixer );
  1441. if ( index != -1 )
  1442. return true;
  1443. return false;
  1444. }
  1445. CAudioMixer *CFacePoserSound::FindMixer( CAudioSource *source )
  1446. {
  1447. if ( !m_pAudio )
  1448. return NULL;
  1449. return m_pAudio->GetMixerForSource( source );
  1450. }
  1451. void CFacePoserSound::EnsureNoModelReferences( CAudioSource *source )
  1452. {
  1453. int c = models->Count();
  1454. for ( int i = 0; i < c; i++ )
  1455. {
  1456. StudioModel *model = models->GetStudioModel( i );
  1457. if ( model->m_mouth.IsSourceReferenced( source ) )
  1458. {
  1459. model->m_mouth.RemoveSource( source );
  1460. }
  1461. }
  1462. }