Source code of Windows XP (NT5)
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

4876 lines
131 KiB

  1. #include "precomp.h"
  2. //
  3. // MGC.CPP
  4. // MCS Glue Layer, Legacy from simultaneous R.11 and T.120 support
  5. //
  6. // Copyright(c) Microsoft 1997-
  7. //
  8. #define MLZ_FILE_ZONE ZONE_NET
  9. //
  10. //
  11. // CONSTANT DATA
  12. //
// These arrays map between MCAT and DC-Groupware constants. They are not
// in a separate data file since they are only referenced from this source file.
  15. //
  16. //
// Forward declaration: maps a raw MCS return code onto the corresponding
// NET_RC_ error code (uses the tables below).
UINT McsErrToNetErr (UINT rcMCS);

//
// First MCS-to-NET return code map, indexed by the raw MCS return code.
// Entry 0 is success.  Several distinct MCS codes deliberately collapse
// onto NET_RC_MGC_NOT_SUPPORTED.
//
const UINT c_RetCodeMap1[] =
{
    0,                                      // success
    NET_RC_MGC_NOT_SUPPORTED,
    NET_RC_MGC_NOT_INITIALIZED,
    NET_RC_MGC_ALREADY_INITIALIZED,
    NET_RC_MGC_INIT_FAIL,
    NET_RC_MGC_INVALID_REMOTE_ADDRESS,
    NET_RC_NO_MEMORY,
    NET_RC_MGC_CALL_FAILED,
    NET_RC_MGC_NOT_SUPPORTED,
    NET_RC_MGC_NOT_SUPPORTED,
    NET_RC_MGC_NOT_SUPPORTED,               // security failed
};
//
// Second MCS-to-NET return code map, for a higher range of MCS return
// codes (the offset into this table is applied by McsErrToNetErr).
//
const UINT c_RetCodeMap2[] =
{
    NET_RC_MGC_DOMAIN_IN_USE,
    NET_RC_MGC_INVALID_DOMAIN,
    NET_RC_MGC_NOT_ATTACHED,
    NET_RC_MGC_INVALID_USER_HANDLE,
    NET_RC_MGC_TOO_MUCH_IN_USE,
    NET_RC_MGC_INVALID_CONN_HANDLE,
    NET_RC_MGC_INVALID_UP_DOWN_PARM,
    NET_RC_MGC_NOT_SUPPORTED,
    NET_RC_MGC_TOO_MUCH_IN_USE
};
// Number of MCS result codes covered by c_ResultMap.  Index
// MG_INVALID_MCS_RESULT (== MG_NUM_OF_MCS_RESULTS) is the extra,
// final catch-all entry (NET_RESULT_UNKNOWN).
#define MG_NUM_OF_MCS_RESULTS 15
#define MG_INVALID_MCS_RESULT MG_NUM_OF_MCS_RESULTS

//
// Maps MCS result codes onto NET_RESULT_ values; indexed by the MCS
// result code, clamped to MG_INVALID_MCS_RESULT for out-of-range input.
//
NET_RESULT c_ResultMap[MG_NUM_OF_MCS_RESULTS+1] =
{
    NET_RESULT_OK,
    NET_RESULT_NOK,
    NET_RESULT_NOK,
    NET_RESULT_CHANNEL_UNAVAILABLE,
    NET_RESULT_DOMAIN_UNAVAILABLE,
    NET_RESULT_NOK,
    NET_RESULT_REJECTED,
    NET_RESULT_NOK,
    NET_RESULT_NOK,
    NET_RESULT_TOKEN_ALREADY_GRABBED,
    NET_RESULT_TOKEN_NOT_OWNED,
    NET_RESULT_NOK,
    NET_RESULT_NOK,
    NET_RESULT_NOT_SPECIFIED,
    NET_RESULT_USER_REJECTED,
    NET_RESULT_UNKNOWN              // catch-all for unrecognized results
};
  65. //
  66. // MG_Register()
  67. //
  68. BOOL MG_Register
  69. (
  70. MGTASK task,
  71. PMG_CLIENT * ppmgClient,
  72. PUT_CLIENT putTask
  73. )
  74. {
  75. PMG_CLIENT pmgClient = NULL;
  76. CMTASK cmTask;
  77. BOOL rc = FALSE;
  78. DebugEntry(MG_Register);
  79. UT_Lock(UTLOCK_T120);
  80. ASSERT(task >= MGTASK_FIRST);
  81. ASSERT(task < MGTASK_MAX);
  82. //
  83. // Check the putTask passed in:
  84. //
  85. ValidateUTClient(putTask);
  86. //
  87. // Does this already exist?
  88. //
  89. if (g_amgClients[task].putTask != NULL)
  90. {
  91. ERROR_OUT(("MG task %d already exists", task));
  92. DC_QUIT;
  93. }
  94. pmgClient = &(g_amgClients[task]);
  95. ZeroMemory(pmgClient, sizeof(MG_CLIENT));
  96. pmgClient->putTask = putTask;
  97. //
  98. // Register an exit procedure
  99. //
  100. UT_RegisterExit(putTask, MGExitProc, pmgClient);
  101. pmgClient->exitProcReg = TRUE;
  102. //
  103. // We register a high priority event handler (join by key handler) to
  104. // intercept various events which are generated as part of the join by
  105. // key processing. We register it now, before the call to
  106. // MG_ChannelJoin below, to prevent events which we cant stop from
  107. // going to the client if UT_RegisterEvent fails. This high priority
  108. // handler also looks after our internal scheduling of pending
  109. // requests.
  110. //
  111. UT_RegisterEvent(putTask, MGEventHandler, pmgClient, UT_PRIORITY_OBMAN);
  112. pmgClient->eventProcReg = TRUE;
  113. //
  114. // Register our hidden event handler for the client (the parameter to
  115. // be passed to the event handler is the pointer to the client CB):
  116. //
  117. UT_RegisterEvent(putTask, MGLongStopHandler, pmgClient, UT_PRIORITY_NETWORK);
  118. pmgClient->lowEventProcReg = TRUE;
  119. //
  120. // Register as a call manager secondary.
  121. //
  122. switch (task)
  123. {
  124. case MGTASK_OM:
  125. cmTask = CMTASK_OM;
  126. break;
  127. case MGTASK_DCS:
  128. cmTask = CMTASK_DCS;
  129. break;
  130. default:
  131. ASSERT(FALSE);
  132. }
  133. if (!CMS_Register(putTask, cmTask, &(pmgClient->pcmClient)))
  134. {
  135. ERROR_OUT(("CMS_Register failed"));
  136. DC_QUIT;
  137. }
  138. rc = TRUE;
  139. DC_EXIT_POINT:
  140. *ppmgClient = pmgClient;
  141. UT_Unlock(UTLOCK_T120);
  142. DebugExitBOOL(MG_Register, rc);
  143. return(rc);
  144. }
//
// MG_Deregister(...)
//
// Deregisters an MG client previously returned by MG_Register.  Runs the
// exit proc (which detaches, frees buffers, and deregisters handlers),
// then deregisters from the call manager and NULLs the caller's pointer.
//
void MG_Deregister(PMG_CLIENT * ppmgClient)
{
    PMG_CLIENT pmgClient;

    DebugEntry(MG_Deregister);

    UT_Lock(UTLOCK_T120);

    ASSERT(ppmgClient);
    pmgClient = *ppmgClient;
    ValidateMGClient(pmgClient);

    // MGExitProc performs the bulk of the cleanup (detach, buffer free,
    // handler deregistration) and marks the client CB unused.
    MGExitProc(pmgClient);

    //
    // Dereg CMS handler.  In abnormal situations, the CMS exit proc will
    // clean it up for us.
    //
    if (pmgClient->pcmClient)
    {
        CMS_Deregister(&pmgClient->pcmClient);
    }

    // Prevent the caller from using the stale CB pointer.
    *ppmgClient = NULL;

    UT_Unlock(UTLOCK_T120);

    DebugExitVOID(MG_Deregister);
}
//
// MGExitProc()
//
// Exit procedure for an MG client; also called directly by
// MG_Deregister.  Detaches the client if attached, frees every buffer CB
// on its list, deregisters the exit proc and both event handlers, and
// finally clears putTask (the "in use" marker for the client slot).
//
void CALLBACK MGExitProc(LPVOID uData)
{
    PMG_CLIENT  pmgClient = (PMG_CLIENT)uData;
    PMG_BUFFER  pmgBuffer;

    DebugEntry(MGExitProc);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    //
    // If the client has attached, detach it
    //
    if (pmgClient->userAttached)
    {
        MG_Detach(pmgClient);
    }

    //
    // Free all buffers the client may be using:
    //
    pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->buffers), FIELD_OFFSET(MG_BUFFER, clientChain));
    while (pmgBuffer != NULL)
    {
        ValidateMGBuffer(pmgBuffer);

        //
        // This implicitly frees any user memory or MCS memory associated
        // with the buffer CB.
        //
        MGFreeBuffer(pmgClient, &pmgBuffer);

        //
        // MGFreeBuffer removed this CB from the list, so we get the first
        // one in what's left of the list - if the list is now empty, this
        // will give us NULL and we will break out of the while loop:
        //
        pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->buffers), FIELD_OFFSET(MG_BUFFER, clientChain));
    }

    //
    // Deregister our event handler and exit procedure:
    //
    if (pmgClient->exitProcReg)
    {
        UT_DeregisterExit(pmgClient->putTask, MGExitProc, pmgClient);
        pmgClient->exitProcReg = FALSE;
    }

    if (pmgClient->lowEventProcReg)
    {
        UT_DeregisterEvent(pmgClient->putTask, MGLongStopHandler, pmgClient);
        pmgClient->lowEventProcReg = FALSE;
    }

    if (pmgClient->eventProcReg)
    {
        UT_DeregisterEvent(pmgClient->putTask, MGEventHandler, pmgClient);
        pmgClient->eventProcReg = FALSE;
    }

    //
    // We should only ever be asked to free a client CB which has had all
    // of its child resources already freed, so do a quick sanity check:
    //
    ASSERT(pmgClient->buffers.next == 0);

    //
    // Set the putTask to NULL; that's how we know if a client is in use or
    // not.
    //
    pmgClient->putTask = NULL;

    UT_Unlock(UTLOCK_T120);

    DebugExitVOID(MGExitProc);
}
  236. //
  237. // MG_Attach(...)
  238. //
  239. UINT MG_Attach
  240. (
  241. PMG_CLIENT pmgClient,
  242. UINT callID,
  243. PNET_FLOW_CONTROL pFlowControl
  244. )
  245. {
  246. UINT rc = 0;
  247. DebugEntry(MG_Attach);
  248. UT_Lock(UTLOCK_T120);
  249. ValidateCMP(g_pcmPrimary);
  250. ValidateMGClient(pmgClient);
  251. if (!g_pcmPrimary->callID)
  252. {
  253. //
  254. // We aren't in a call yet/anymore.
  255. //
  256. WARNING_OUT(("MG_Attach failing; not in T.120 call"));
  257. rc = NET_RC_MGC_NOT_CONNECTED;
  258. DC_QUIT;
  259. }
  260. ASSERT(callID == g_pcmPrimary->callID);
  261. ASSERT(!pmgClient->userAttached);
  262. pmgClient->userIDMCS = NET_UNUSED_IDMCS;
  263. ZeroMemory(&pmgClient->flo, sizeof(FLO_STATIC_DATA));
  264. pmgClient->userAttached = TRUE;
  265. //
  266. // Call through to the underlying MCS layer (normally, we need our
  267. // callbacks to happen with a task switch but since this is Windows it
  268. // doesn't really matter anyway):
  269. //
  270. rc = MCS_AttachRequest(&(pmgClient->m_piMCSSap),
  271. (DomainSelector) &g_pcmPrimary->callID,
  272. sizeof(g_pcmPrimary->callID),
  273. (MCSCallBack) MGCallback,
  274. (void *) pmgClient,
  275. ATTACHMENT_DISCONNECT_IN_DATA_LOSS);
  276. if (rc != 0)
  277. {
  278. WARNING_OUT(("MCSAttachUserRequest failed with error %x", rc));
  279. MGDetach(pmgClient);
  280. rc = McsErrToNetErr(rc);
  281. DC_QUIT;
  282. }
  283. if (++g_mgAttachCount == 1)
  284. {
  285. UT_PostEvent(pmgClient->putTask,
  286. pmgClient->putTask,
  287. MG_TIMER_PERIOD,
  288. NET_MG_WATCHDOG,
  289. 0, 0);
  290. }
  291. ASSERT(g_mgAttachCount <= MGTASK_MAX);
  292. //
  293. // It is assumed that the client will use the same latencies for every
  294. // attachment, so we keep them at the client level.
  295. //
  296. pmgClient->flowControl = *pFlowControl;
  297. DC_EXIT_POINT:
  298. UT_Unlock(UTLOCK_T120);
  299. DebugExitDWORD(MG_Attach, rc);
  300. return(rc);
  301. }
//
// MG_Detach(...)
//
// Detaches the client from the MCS domain.  Stops flow control on all
// channels controlled on the client's behalf, then tears down the
// attachment state (buffers, variables) via MGDetach.  Safe to call when
// the client is not attached (no-op with a trace).
//
void MG_Detach
(
    PMG_CLIENT          pmgClient
)
{
    DebugEntry(MG_Detach);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MG_Detach: client %x not attached", pmgClient));
        DC_QUIT;
    }

    //
    // Call FLO_UserTerm to ensure that flow control is stopped on all the
    // channels that have been flow controlled on our behalf.
    //
    FLO_UserTerm(pmgClient);

    //
    // Clear out the buffers, variabls.
    //
    MGDetach(pmgClient);

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_T120);

    DebugExitVOID(MG_Detach);
}
  331. //
  332. // MG_ChannelJoin(...)
  333. //
  334. UINT MG_ChannelJoin
  335. (
  336. PMG_CLIENT pmgClient,
  337. NET_CHANNEL_ID * pCorrelator,
  338. NET_CHANNEL_ID channel
  339. )
  340. {
  341. PMG_BUFFER pmgBuffer;
  342. UINT rc = 0;
  343. DebugEntry(MG_ChannelJoin);
  344. UT_Lock(UTLOCK_T120);
  345. ValidateMGClient(pmgClient);
  346. if (!pmgClient->userAttached)
  347. {
  348. TRACE_OUT(("MG_ChannelJoin: client %x not attached", pmgClient));
  349. rc = NET_RC_MGC_INVALID_USER_HANDLE;
  350. DC_QUIT;
  351. }
  352. //
  353. // MCAT may bounce this request, so we must queue the request
  354. //
  355. rc = MGNewBuffer(pmgClient, MG_RQ_CHANNEL_JOIN, &pmgBuffer);
  356. if (rc != 0)
  357. {
  358. DC_QUIT;
  359. }
  360. MGNewCorrelator(pmgClient, pCorrelator);
  361. pmgBuffer->work = *pCorrelator;
  362. pmgBuffer->channelId = (ChannelID)channel;
  363. TRACE_OUT(("Inserting join message 0x%08x into pending chain", pmgBuffer));
  364. COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));
  365. UT_PostEvent(pmgClient->putTask,
  366. pmgClient->putTask,
  367. NO_DELAY,
  368. NET_MG_SCHEDULE,
  369. 0,
  370. 0);
  371. DC_EXIT_POINT:
  372. UT_Unlock(UTLOCK_T120);
  373. DebugExitDWORD(MG_ChannelJoin, rc);
  374. return(rc);
  375. }
  376. //
  377. // MG_ChannelJoinByKey(...)
  378. //
  379. UINT MG_ChannelJoinByKey
  380. (
  381. PMG_CLIENT pmgClient,
  382. NET_CHANNEL_ID * pCorrelator,
  383. WORD channelKey
  384. )
  385. {
  386. PMG_BUFFER pmgBuffer;
  387. UINT rc = 0;
  388. DebugEntry(MG_ChannelJoinByKey);
  389. UT_Lock(UTLOCK_T120);
  390. ValidateMGClient(pmgClient);
  391. if (!pmgClient->userAttached)
  392. {
  393. TRACE_OUT(("MG_ChannelJoinByKey: client %x not attached", pmgClient));
  394. rc = NET_RC_MGC_INVALID_USER_HANDLE;
  395. DC_QUIT;
  396. }
  397. //
  398. // MCAT may bounce this request, so we must queue the request
  399. //
  400. rc = MGNewBuffer(pmgClient, MG_RQ_CHANNEL_JOIN_BY_KEY, &pmgBuffer);
  401. if (rc != 0)
  402. {
  403. DC_QUIT;
  404. }
  405. //
  406. // Store the various pieces of information in the joinByKeyInfo
  407. // structure of the client CB
  408. //
  409. MGNewCorrelator(pmgClient, pCorrelator);
  410. pmgBuffer->work = *pCorrelator;
  411. pmgBuffer->channelKey = (ChannelID)channelKey;
  412. pmgBuffer->channelId = 0;
  413. TRACE_OUT(("Inserting join message 0x%08x into pending chain", pmgBuffer));
  414. COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));
  415. UT_PostEvent(pmgClient->putTask,
  416. pmgClient->putTask,
  417. NO_DELAY,
  418. NET_MG_SCHEDULE,
  419. 0,
  420. 0);
  421. DC_EXIT_POINT:
  422. UT_Unlock(UTLOCK_T120);
  423. DebugExitDWORD(MG_ChannelJoinByKey, rc);
  424. return(rc);
  425. }
  426. //
  427. // MG_ChannelLeave(...)
  428. //
  429. void MG_ChannelLeave
  430. (
  431. PMG_CLIENT pmgClient,
  432. NET_CHANNEL_ID channel
  433. )
  434. {
  435. PMG_BUFFER pmgBuffer;
  436. DebugEntry(MG_ChannelLeave);
  437. UT_Lock(UTLOCK_T120);
  438. ValidateMGClient(pmgClient);
  439. if (!pmgClient->userAttached)
  440. {
  441. TRACE_OUT(("MG_ChannelLeave: client %x not attached", pmgClient));
  442. DC_QUIT;
  443. }
  444. //
  445. // MCAT may bounce this request, so instead of processing it straight
  446. // away, we put it on the user's request queue and kick off a process
  447. // queue loop: This is a request CB, but we don't need any data buffer
  448. //
  449. if (MGNewBuffer(pmgClient, MG_RQ_CHANNEL_LEAVE, &pmgBuffer) != 0)
  450. {
  451. DC_QUIT;
  452. }
  453. //
  454. // Fill in the specific data fields in the request CB:
  455. //
  456. pmgBuffer->channelId = (ChannelID)channel;
  457. COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));
  458. UT_PostEvent(pmgClient->putTask,
  459. pmgClient->putTask,
  460. NO_DELAY,
  461. NET_MG_SCHEDULE,
  462. 0,
  463. 0);
  464. DC_EXIT_POINT:
  465. UT_Unlock(UTLOCK_T120);
  466. DebugExitVOID(MG_ChannelLeave);
  467. }
//
// MG_SendData(...)
//
// Queues a previously-acquired buffer (see MG_GetBuffer) for sending on
// the given channel at the given priority.  If the buffer was acquired
// with NET_SEND_ALL_PRIORITIES, one buffer CB per priority was created
// and all of them are queued here.  On success, *ppData is NULLed so the
// caller cannot touch the buffer again.  Returns 0 or a NET_RC_ code.
//
UINT MG_SendData
(
    PMG_CLIENT      pmgClient,
    NET_PRIORITY    priority,
    NET_CHANNEL_ID  channel,
    UINT            length,
    void **         ppData
)
{
    PMG_BUFFER      pmgBuffer;
    UINT            numControlBlocks;
    UINT            i;
    UINT            rc;

    DebugEntry(MG_SendData);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MG_SendData: client %x not attached", pmgClient));
        rc = NET_RC_MGC_INVALID_USER_HANDLE;
        DC_QUIT;
    }

    //
    // Check for a packet greater than the permitted size
    // It must not cause the length to wrap into the flow flag
    //
    ASSERT(TSHR_MAX_SEND_PKT + sizeof(TSHR_NET_PKT_HEADER) < TSHR_PKT_FLOW);
    ASSERT(length <= TSHR_MAX_SEND_PKT);

    //
    // Ensure we have a priority which is valid for our use of MCS.
    //
    priority = (NET_PRIORITY)(MG_VALID_PRIORITY(priority));

    if (pmgClient->userIDMCS == NET_UNUSED_IDMCS)
    {
        //
        // We are not yet attached, so don't try to send data.
        //
        ERROR_OUT(("Sending data prior to attach indication"));
        rc = NET_RC_INVALID_STATE;
        DC_QUIT;
    }

    //
    // The <ppData> parameter points to a data buffer pointer.  This buffer
    // pointer should point to a buffer which the client acquired using
    // MG_GetBuffer.  MG_GetBuffer should have added a buffer CB to the
    // client's buffer list containing the same pointer.  Note that if the
    // NET_SEND_ALL_PRIORITIES flag is set then there will be four buffers
    // in the client's buffer list containing the same pointer.
    //
    // So, we search through the client's buffer list looking for a match
    // on the data buffer pointer.  Move to the first position in the list.
    //
    // NOTE(review): if the pointer is not found, pmgBuffer is not set by
    // a match and ValidateMGBuffer below is relied on to catch it.
    //
    COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pmgClient->buffers),
        (void**)&pmgBuffer, FIELD_OFFSET(MG_BUFFER, clientChain),
        FIELD_OFFSET(MG_BUFFER, pDataBuffer), (DWORD_PTR)*ppData,
        FIELD_SIZE(MG_BUFFER, pDataBuffer));
    ValidateMGBuffer(pmgBuffer);

    //
    // Check the NET_SEND_ALL_PRIORITIES flag to see if it is set
    //
    if (pmgBuffer->priority & NET_SEND_ALL_PRIORITIES)
    {
        //
        // Check that the priority and channel has not changed.  Changing
        // the priority between calling MG_GetBuffer and calling
        // MG_SendData is not allowed.
        //
        ASSERT(pmgBuffer->channelId == channel);
        ASSERT(priority & NET_SEND_ALL_PRIORITIES);

        //
        // The flag is set so there should be multiple control buffers
        // waiting to be sent.
        //
        numControlBlocks = MG_NUM_PRIORITIES;
    }
    else
    {
        //
        // Check that the priority and channel has not changed.
        //
        ASSERT(pmgBuffer->channelId == channel);
        ASSERT(pmgBuffer->priority == priority);

        //
        // The flag is not set so there should be only one control buffer
        // waiting.
        //
        numControlBlocks = 1;
    }

    //
    // Now send the control blocks
    //
    for (i = 0; i < numControlBlocks; i++)
    {
        ValidateMGBuffer(pmgBuffer);

        //
        // Clear the NET_SEND_ALL_PRIORITIES flag.
        //
        pmgBuffer->priority &= ~NET_SEND_ALL_PRIORITIES;

        //
        // Set up the packet length for the send (this may be different
        // from the length in the buffer header since the app may not have
        // used all the buffer).
        //
        ASSERT(length + sizeof(TSHR_NET_PKT_HEADER) <= pmgBuffer->length);
        pmgBuffer->pPktHeader->header.pktLength = (TSHR_UINT16)(length + sizeof(TSHR_NET_PKT_HEADER));

        //
        // If the length has changed then tell FC about it.
        //
        if ((length + sizeof(MG_INT_PKT_HEADER)) < pmgBuffer->length)
        {
            FLO_ReallocSend(pmgClient, pmgBuffer->pStr,
                pmgBuffer->length - (length + sizeof(MG_INT_PKT_HEADER)));
        }

        TRACE_OUT(("Inserting send 0x%08x into pend chain, pri %u, chan 0x%08x",
            pmgBuffer, pmgBuffer->priority, pmgBuffer->channelId));
        COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));

        //
        // If there is one or more control block left to find then search
        // the client's buffer list for it.
        //
        if ((numControlBlocks - (i + 1)) > 0)
        {
            COM_BasedListFind(LIST_FIND_FROM_NEXT, &(pmgClient->buffers),
                (void**)&pmgBuffer, FIELD_OFFSET(MG_BUFFER, clientChain),
                FIELD_OFFSET(MG_BUFFER, pDataBuffer),
                (DWORD_PTR)*ppData, FIELD_SIZE(MG_BUFFER, pDataBuffer));
        }
    }

    // Kick the scheduler to transmit the queued buffers.
    UT_PostEvent(pmgClient->putTask,
                 pmgClient->putTask,
                 NO_DELAY,
                 NET_MG_SCHEDULE,
                 0,
                 0);

    //
    // Everything went OK - set the ppData pointer to NULL to prevent
    // the caller from accessing the memory.
    //
    *ppData = NULL;
    rc = 0;

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_T120);

    DebugExitDWORD(MG_SendData, rc);
    return(rc);
}
  616. //
  617. // MG_TokenGrab(...)
  618. //
  619. UINT MG_TokenGrab
  620. (
  621. PMG_CLIENT pmgClient,
  622. NET_TOKEN_ID tokenID
  623. )
  624. {
  625. PMG_BUFFER pmgBuffer;
  626. UINT rc = 0;
  627. DebugEntry(MG_TokenGrab);
  628. UT_Lock(UTLOCK_T120);
  629. ValidateMGClient(pmgClient);
  630. if (!pmgClient->userAttached)
  631. {
  632. TRACE_OUT(("MG_TokenGrab: client 0x%08x not attached", pmgClient));
  633. rc = NET_RC_MGC_INVALID_USER_HANDLE;
  634. DC_QUIT;
  635. }
  636. //
  637. // MCAT may bounce this request, so instead of processing it straight
  638. // away, we put it on the user's request queue and kick off a process
  639. // queue loop:
  640. //
  641. rc = MGNewBuffer(pmgClient, MG_RQ_TOKEN_GRAB, &pmgBuffer);
  642. if (rc != 0)
  643. {
  644. WARNING_OUT(("MGNewBuffer failed in MG_TokenGrab"));
  645. DC_QUIT;
  646. }
  647. pmgBuffer->channelId = (ChannelID)tokenID;
  648. COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));
  649. UT_PostEvent(pmgClient->putTask,
  650. pmgClient->putTask,
  651. NO_DELAY,
  652. NET_MG_SCHEDULE,
  653. 0,
  654. 0);
  655. DC_EXIT_POINT:
  656. UT_Unlock(UTLOCK_T120);
  657. DebugExitDWORD(MG_TokenGrab, rc);
  658. return(rc);
  659. }
  660. //
  661. // MG_TokenInhibit(...)
  662. //
  663. UINT MG_TokenInhibit
  664. (
  665. PMG_CLIENT pmgClient,
  666. NET_TOKEN_ID tokenID
  667. )
  668. {
  669. PMG_BUFFER pmgBuffer;
  670. UINT rc = 0;
  671. DebugEntry(MG_TokenInhibit);
  672. UT_Lock(UTLOCK_T120);
  673. ValidateMGClient(pmgClient);
  674. if (!pmgClient->userAttached)
  675. {
  676. TRACE_OUT(("MG_TokenInhibit: client 0x%08x not attached", pmgClient));
  677. rc = NET_RC_MGC_INVALID_USER_HANDLE;
  678. DC_QUIT;
  679. }
  680. //
  681. // MCAT may bounce this request, so instead of processing it straight
  682. // away, we put it on the user's request queue and kick off a process
  683. // queue loop:
  684. //
  685. rc = MGNewBuffer(pmgClient, MG_RQ_TOKEN_INHIBIT, &pmgBuffer);
  686. if (rc != 0)
  687. {
  688. WARNING_OUT(("MGNewBuffer failed in MG_TokenInhibit"));
  689. DC_QUIT;
  690. }
  691. pmgBuffer->channelId = (ChannelID)tokenID;
  692. COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));
  693. UT_PostEvent(pmgClient->putTask,
  694. pmgClient->putTask,
  695. NO_DELAY,
  696. NET_MG_SCHEDULE,
  697. 0,
  698. 0);
  699. DC_EXIT_POINT:
  700. UT_Unlock(UTLOCK_T120);
  701. DebugExitDWORD(MG_TokenInhibit, rc);
  702. return(rc);
  703. }
  704. //
  705. // MG_GetBuffer(...)
  706. //
  707. UINT MG_GetBuffer
  708. (
  709. PMG_CLIENT pmgClient,
  710. UINT length,
  711. NET_PRIORITY priority,
  712. NET_CHANNEL_ID channel,
  713. void ** ppData
  714. )
  715. {
  716. PMG_BUFFER pmgBuffer;
  717. UINT rc;
  718. DebugEntry(MG_GetBuffer);
  719. UT_Lock(UTLOCK_T120);
  720. ValidateMGClient(pmgClient);
  721. if (!pmgClient->userAttached)
  722. {
  723. TRACE_OUT(("MG_GetBuffer: client 0x%08x not attached", pmgClient));
  724. rc = NET_RC_MGC_INVALID_USER_HANDLE;
  725. DC_QUIT;
  726. }
  727. //
  728. // Ensure we have a priority which is valid for our use of MCS.
  729. //
  730. priority = (NET_PRIORITY)(MG_VALID_PRIORITY(priority));
  731. //
  732. // Obtain a buffer and store the info in a buffer CB hung off the
  733. // client's list:
  734. //
  735. rc = MGNewTxBuffer(pmgClient, priority, channel, length,
  736. &pmgBuffer);
  737. if (rc != 0)
  738. {
  739. DC_QUIT;
  740. }
  741. //
  742. // We always return a pointer to the data buffer to an application.
  743. // The MG packet header is only used when giving data to MCS or
  744. // receiving data from MCS.
  745. //
  746. *ppData = pmgBuffer->pDataBuffer;
  747. DC_EXIT_POINT:
  748. UT_Unlock(UTLOCK_T120);
  749. DebugExitDWORD(MG_GetBuffer, rc);
  750. return(rc);
  751. }
  752. //
  753. // MG_FreeBuffer(...)
  754. //
  755. void MG_FreeBuffer
  756. (
  757. PMG_CLIENT pmgClient,
  758. void ** ppData
  759. )
  760. {
  761. PMG_BUFFER pmgBuffer;
  762. DebugEntry(MG_FreeBuffer);
  763. UT_Lock(UTLOCK_T120);
  764. ValidateMGClient(pmgClient);
  765. //
  766. // Find the buffer CB associated with the buffer - an application
  767. // always uses a pointer to the data buffer rather than the packet
  768. // header.
  769. //
  770. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pmgClient->buffers),
  771. (void**)&pmgBuffer, FIELD_OFFSET(MG_BUFFER, clientChain),
  772. FIELD_OFFSET(MG_BUFFER, pDataBuffer), (DWORD_PTR)*ppData,
  773. FIELD_SIZE(MG_BUFFER, pDataBuffer));
  774. ValidateMGBuffer(pmgBuffer);
  775. //
  776. // If the app is freeing a send buffer (e.g. because it decided not to
  777. // send it) then inform flow control:
  778. //
  779. if (pmgBuffer->type == MG_TX_BUFFER)
  780. {
  781. FLO_ReallocSend(pmgClient,
  782. pmgBuffer->pStr,
  783. pmgBuffer->length);
  784. }
  785. //
  786. // Now free the buffer CB and all associated data:
  787. //
  788. MGFreeBuffer(pmgClient, &pmgBuffer);
  789. //
  790. // Reset the client's pointer:
  791. //
  792. *ppData = NULL;
  793. UT_Unlock(UTLOCK_T120);
  794. DebugExitVOID(MG_FreeBuffer);
  795. }
//
// MG_FlowControlStart
//
// Starts flow control on the given channel/priority with the supplied
// backlog and byte-outstanding limits.  No-op (with a trace) if the
// client is not attached.
//
// NOTE(review): unlike the other MG_ entry points in this file, this
// function does not take UTLOCK_T120 — confirm FLO_StartControl performs
// its own serialization before relying on this from multiple tasks.
//
void MG_FlowControlStart
(
    PMG_CLIENT      pmgClient,
    NET_CHANNEL_ID  channel,
    NET_PRIORITY    priority,
    UINT            backlog,
    UINT            maxBytesOutstanding
)
{
    DebugEntry(MG_FlowControlStart);

    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MG_FlowControlStart: client 0x%08x not attached", pmgClient));
        DC_QUIT;
    }

    //
    // Ensure we have a priority which is valid for our use of MCS.
    //
    priority = (NET_PRIORITY)(MG_VALID_PRIORITY(priority));

    FLO_StartControl(pmgClient,
                     channel,
                     priority,
                     backlog,
                     maxBytesOutstanding);

DC_EXIT_POINT:
    DebugExitVOID(MG_FlowControlStart);
}
  827. //
  828. // MGLongStopHandler(...)
  829. //
  830. BOOL CALLBACK MGLongStopHandler
  831. (
  832. LPVOID pData,
  833. UINT event,
  834. UINT_PTR UNUSEDparam1,
  835. UINT_PTR param2
  836. )
  837. {
  838. PMG_CLIENT pmgClient;
  839. BOOL processed = FALSE;
  840. DebugEntry(MGLongStopHandler);
  841. pmgClient = (PMG_CLIENT)pData;
  842. ValidateMGClient(pmgClient);
  843. if (event == NET_EVENT_CHANNEL_JOIN)
  844. {
  845. WARNING_OUT(("Failed to process NET_EVENT_CHANNEL_JOIN; freeing buffer 0x%08x",
  846. param2));
  847. MG_FreeBuffer(pmgClient, (void **)&param2);
  848. processed = TRUE;
  849. }
  850. else if (event == NET_FLOW)
  851. {
  852. WARNING_OUT(("Failed to process NET_FLOW; freeing buffer 0x%08x",
  853. param2));
  854. processed = TRUE;
  855. }
  856. DebugExitBOOL(MGLongStopHandler, processed);
  857. return(processed);
  858. }
  859. //
  860. // MGEventHandler(...)
  861. //
  862. BOOL CALLBACK MGEventHandler
  863. (
  864. LPVOID pData,
  865. UINT event,
  866. UINT_PTR param1,
  867. UINT_PTR param2
  868. )
  869. {
  870. PMG_CLIENT pmgClient;
  871. PNET_JOIN_CNF_EVENT pNetJoinCnf = NULL;
  872. BOOL processed = TRUE;
  873. PMG_BUFFER pmgBuffer;
  874. BOOL joinComplete = FALSE;
  875. UINT result = NET_RESULT_USER_REJECTED;
  876. DebugEntry(MGEventHandler);
  877. pmgClient = (PMG_CLIENT)pData;
  878. ValidateMGClient(pmgClient);
  879. switch (event)
  880. {
  881. case NET_EVENT_CHANNEL_JOIN:
  882. {
  883. //
  884. // If there are no join requests queued off the client CB then
  885. // we have nothing more to do. The only NET events we are
  886. // interested in are NET_EV_JOIN_CONFIRM events - pass any others
  887. // on.
  888. //
  889. if (pmgClient->joinChain.next == 0)
  890. {
  891. //
  892. // Pass the event on...
  893. //
  894. processed = FALSE;
  895. DC_QUIT;
  896. }
  897. //
  898. // We must be careful not to process a completed channel join
  899. // which we intend to go to the client. The correlator is only
  900. // filled in on completed events and is always non-zero.
  901. //
  902. pNetJoinCnf = (PNET_JOIN_CNF_EVENT)param2;
  903. if (pNetJoinCnf->correlator != 0)
  904. {
  905. //
  906. // Pass the event on...
  907. //
  908. processed = FALSE;
  909. DC_QUIT;
  910. }
  911. //
  912. // There is only ever one join request outstanding per client,
  913. // so the join confirm is for the first join request in the
  914. // list.
  915. //
  916. pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->joinChain),
  917. FIELD_OFFSET(MG_BUFFER, pendChain));
  918. ValidateMGBuffer(pmgBuffer);
  919. //
  920. // We will post a join confirm to the application. Set up the
  921. // parameters which are needed.
  922. //
  923. result = pNetJoinCnf->result;
  924. //
  925. // Assume for now that we have completed the pending join
  926. // request.
  927. //
  928. joinComplete = TRUE;
  929. //
  930. // If the result is a failure, we've finished
  931. //
  932. if (result != NET_RESULT_OK)
  933. {
  934. WARNING_OUT(("Failed to join channel 0x%08x, result %u",
  935. pmgBuffer->channelId,
  936. pNetJoinCnf->result));
  937. DC_QUIT;
  938. }
  939. //
  940. // The join request was successful. There are three different
  941. // scenarios for issuing a join request...
  942. //
  943. // (a) A regular channel join.
  944. // (b) Stage 1 of a channel join by key (get MCS to assign a
  945. // channel number, which we will try to register).
  946. // (c) Stage 2 of a channel join by key (join the registered
  947. // channel).
  948. //
  949. if (pmgBuffer->type == MG_RQ_CHANNEL_JOIN)
  950. {
  951. //
  952. // This is the completion of a regular channel join. Copy
  953. // the channel Id from the join confirm to the bufferCB
  954. // (the join request may have been for channel 0).
  955. //
  956. pmgBuffer->channelId = (ChannelID)pNetJoinCnf->channel;
  957. TRACE_OUT(("Channel join complete, channel 0x%08x",
  958. pmgBuffer->channelId));
  959. DC_QUIT;
  960. }
  961. //
  962. // This is channel join by key
  963. //
  964. if (pmgBuffer->channelId != 0)
  965. {
  966. //
  967. // This is the completion of a channel join by key.
  968. //
  969. TRACE_OUT(("Channel join by key complete, channel 0x%08x, key %d",
  970. pmgBuffer->channelId,
  971. pmgBuffer->channelKey));
  972. DC_QUIT;
  973. }
  974. //
  975. // This is Stage 1 of a channel join by key. Fill in the
  976. // channel Id which MCS has assigned us into the bufferCB,
  977. // otherwise we'll lose track of the channel Id which we're
  978. // registering.
  979. //
  980. pmgBuffer->channelId = (ChannelID)pNetJoinCnf->channel;
  981. //
  982. // This must be completion of stage 1 of a join by key. We now
  983. // have to register the channel Id.
  984. //
  985. TRACE_OUT(("Registering channel 0x%08x, key %d",
  986. pmgBuffer->channelId,
  987. pmgBuffer->channelKey));
  988. if (!CMS_ChannelRegister(pmgClient->pcmClient,
  989. pmgBuffer->channelKey,
  990. pmgBuffer->channelId))
  991. {
  992. WARNING_OUT(("Failed to register channel, "
  993. "channel 0x%08x, key %d, result %u",
  994. pmgBuffer->channelId,
  995. pmgBuffer->channelKey,
  996. param1));
  997. //
  998. // This causes us to post an error notification
  999. //
  1000. result = NET_RESULT_USER_REJECTED;
  1001. DC_QUIT;
  1002. }
  1003. TRACE_OUT(("Waiting for CMS_CHANNEL_REGISTER_CONFIRM"));
  1004. //
  1005. // We're now waiting for a CMS_CHANNEL_REGISTER_CONFIRM, so we
  1006. // haven't finished processing the join request
  1007. //
  1008. joinComplete = FALSE;
  1009. break;
  1010. }
  1011. case CMS_CHANNEL_REGISTER_CONFIRM:
  1012. {
  1013. //
  1014. // If there are no join requests queued off the client CB then
  1015. // we have nothing more to do.
  1016. //
  1017. if (pmgClient->joinChain.next == 0)
  1018. {
  1019. processed = FALSE;
  1020. DC_QUIT;
  1021. }
  1022. TRACE_OUT(("CMS_CHANNEL_REGISTER rcvd, result %u, channel %u",
  1023. param1, param2));
  1024. //
  1025. // Assume for now that we have completed the pending join
  1026. // request.
  1027. //
  1028. joinComplete = TRUE;
  1029. //
  1030. // There is only ever one join request outstanding per client,
  1031. // so the channel register confirm is for the first join
  1032. // request in the list.
  1033. //
  1034. pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->joinChain),
  1035. FIELD_OFFSET(MG_BUFFER, pendChain));
  1036. ValidateMGBuffer(pmgBuffer);
  1037. //
  1038. // Param1 contains the result, LOWORD(param2) contains the
  1039. // channel number of the registered channel (NOT necessarily
  1040. // the same as the channel we tried to register).
  1041. //
  1042. if (!param1)
  1043. {
  1044. WARNING_OUT(("Failed to register channel, "
  1045. "channel 0x%08x, key %d, result %u",
  1046. pmgBuffer->channelId,
  1047. pmgBuffer->channelKey,
  1048. param1));
  1049. result = NET_RESULT_USER_REJECTED;
  1050. DC_QUIT;
  1051. }
  1052. //
  1053. // If the channel number returned in the confirm event is the
  1054. // same as the channel number which we tried to register, then
  1055. // we have finished. Otherwise we have to leave the channel we
  1056. // tried to register and join the channel returned instead.
  1057. //
  1058. if (LOWORD(param2) == pmgBuffer->channelId)
  1059. {
  1060. TRACE_OUT(("Channel join by key complete, "
  1061. "channel 0x%08x, key %d",
  1062. pmgBuffer->channelId,
  1063. pmgBuffer->channelKey));
  1064. result = NET_RESULT_OK;
  1065. DC_QUIT;
  1066. }
  1067. MG_ChannelLeave(pmgClient, pmgBuffer->channelId);
  1068. pmgBuffer->channelId = (ChannelID)LOWORD(param2);
  1069. //
  1070. // Now we simply requeue the request onto the pending execution
  1071. // chain, but now with a set channel id to join
  1072. //
  1073. TRACE_OUT(("Inserting 0x%08x into pending chain",pmgBuffer));
  1074. COM_BasedListRemove(&(pmgBuffer->pendChain));
  1075. COM_BasedListInsertBefore(&(pmgClient->pendChain),
  1076. &(pmgBuffer->pendChain));
  1077. //
  1078. // We are now waiting for a join confirm (we've not finished
  1079. // yet !). However, we've requeued the bufferCB, so we can now
  1080. // process another join request (or the one we've requeued if
  1081. // its the only one).
  1082. //
  1083. joinComplete = FALSE;
  1084. pmgClient->joinPending = FALSE;
  1085. MGProcessPendingQueue(pmgClient);
  1086. break;
  1087. }
  1088. case NET_MG_SCHEDULE:
  1089. {
  1090. MGProcessPendingQueue(pmgClient);
  1091. break;
  1092. }
  1093. case NET_MG_WATCHDOG:
  1094. {
  1095. MGProcessDomainWatchdog(pmgClient);
  1096. break;
  1097. }
  1098. default:
  1099. {
  1100. //
  1101. // Don't do anything - we want to pass this event on.
  1102. //
  1103. processed = FALSE;
  1104. break;
  1105. }
  1106. }
  1107. DC_EXIT_POINT:
  1108. if (processed && pNetJoinCnf)
  1109. {
  1110. //
  1111. // Call MG_FreeBuffer to free up the event memory (we know that
  1112. // MG_FreeBuffer doesn't use the hUser so we pass in zero):
  1113. //
  1114. MG_FreeBuffer(pmgClient, (void **)&pNetJoinCnf);
  1115. }
  1116. if (joinComplete)
  1117. {
  1118. //
  1119. // We have either completed the channel join, or failed -
  1120. // either way we have finished processing the join request.
  1121. //
  1122. // We have to:
  1123. // - post a NET_EVENT_CHANNEL_JOIN event to the client
  1124. // - free up the bufferCB
  1125. // - reset the client's joinPending state
  1126. //
  1127. MGPostJoinConfirm(pmgClient,
  1128. (NET_RESULT)result,
  1129. pmgBuffer->channelId,
  1130. (NET_CHANNEL_ID)pmgBuffer->work);
  1131. MGFreeBuffer(pmgClient, &pmgBuffer);
  1132. pmgClient->joinPending = FALSE;
  1133. }
  1134. DebugExitBOOL(MGEventHandler, processed);
  1135. return(processed);
  1136. }
  1137. //
  1138. // MGCallback(...)
  1139. //
  1140. #ifdef _DEBUG
//
// Debug-only lookup table mapping the core MCS callback message types
// (values 0 through 21) to their symbolic names for trace output.
// Indexed directly by message type; used by DbgGetMCSMsgStr below.
//
const char * c_szMCSMsgTbl[22] =
{
    "MCS_CONNECT_PROVIDER_INDICATION",          // 0
    "MCS_CONNECT_PROVIDER_CONFIRM",             // 1
    "MCS_DISCONNECT_PROVIDER_INDICATION",       // 2
    "MCS_ATTACH_USER_CONFIRM",                  // 3
    "MCS_DETACH_USER_INDICATION",               // 4
    "MCS_CHANNEL_JOIN_CONFIRM",                 // 5
    "MCS_CHANNEL_LEAVE_INDICATION",             // 6
    "MCS_CHANNEL_CONVENE_CONFIRM",              // 7
    "MCS_CHANNEL_DISBAND_INDICATION",           // 8
    "MCS_CHANNEL_ADMIT_INDICATION",             // 9
    "MCS_CHANNEL_EXPEL_INDICATION",             // 10
    "MCS_SEND_DATA_INDICATION",                 // 11
    "MCS_UNIFORM_SEND_DATA_INDICATION",         // 12
    "MCS_TOKEN_GRAB_CONFIRM",                   // 13
    "MCS_TOKEN_INHIBIT_CONFIRM",                // 14
    "MCS_TOKEN_GIVE_INDICATION",                // 15
    "MCS_TOKEN_GIVE_CONFIRM",                   // 16
    "MCS_TOKEN_PLEASE_INDICATION",              // 17
    "MCS_TOKEN_RELEASE_CONFIRM",                // 18
    "MCS_TOKEN_TEST_CONFIRM",                   // 19
    "MCS_TOKEN_RELEASE_INDICATION",             // 20
    "MCS_TRANSMIT_BUFFER_AVAILABLE_INDICATION", // 21
};
  1166. // MCS_MERGE_DOMAIN_INDICATION 200
  1167. // MCS_TRANSPORT_STATUS_INDICATION 101
  1168. char * DbgGetMCSMsgStr(unsigned short mcsMessageType)
  1169. {
  1170. if (mcsMessageType <= 21)
  1171. {
  1172. return (char *) c_szMCSMsgTbl[mcsMessageType];
  1173. }
  1174. #ifdef USE_MERGE_DOMAIN_CODE
  1175. else if (mcsMessageType == MCS_MERGE_DOMAIN_INDICATION)
  1176. {
  1177. return "MCS_MERGE_DOMAIN_INDICATION";
  1178. }
  1179. #endif // USE_MERGE_DOMAIN_CODE
  1180. else if (mcsMessageType == MCS_TRANSPORT_STATUS_INDICATION)
  1181. {
  1182. return "MCS_TRANSPORT_STATUS_INDICATION";
  1183. }
  1184. return "Unknown";
  1185. }
  1186. #endif // _DEBUG
//
// MGCallback()
//
// Callback registered with the MCS SAP.  Runs under the T.120 lock and
// translates raw MCS indications/confirms into the glue layer's
// NET_EVENT_* events, which are posted to the owning client task.
// pData is the PMG_CLIENT passed at attach time; eventData packs a
// (user/channel, result) pair for most confirms via LOWUSHORT/HIGHUSHORT.
//
// NOTE(review): eventData/pData are declared unsigned long; presumably
// this interface predates 64-bit pointer cleanliness - confirm before
// reusing the pattern.
//
void CALLBACK MGCallback
(
    unsigned int mcsMessageType,
    unsigned long eventData,
    unsigned long pData
)
{
    PMG_CLIENT pmgClient;
    PMG_BUFFER pmgBuffer;
    UINT rc = 0;

    DebugEntry(MGCallback);

    UT_Lock(UTLOCK_T120);

    pmgClient = (PMG_CLIENT)pData;
    ValidateMGClient(pmgClient);

    //
    // If the client has already detached, there is nobody to deliver
    // events to - bail out immediately.
    //
    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MGCallback: client 0x%08x not attached", pmgClient));
        DC_QUIT;
    }

    ValidateCMP(g_pcmPrimary);

    switch (mcsMessageType)
    {
        case MCS_UNIFORM_SEND_DATA_INDICATION:
        case MCS_SEND_DATA_INDICATION:
        {
            //
            // The processing for a SEND_DATA_INDICATION is complicated
            // significantly by MCS segmenting packets, so we call
            // MGHandleSendInd to do all the work , then quit out of the
            // function rather than special casing throughout.
            //
            rc = MGHandleSendInd(pmgClient, (PSendData)eventData);
            DC_QUIT;
            break;
        }

        case MCS_ATTACH_USER_CONFIRM:
        {
            NET_UID user;
            NET_RESULT result;

            // eventData packs (user ID, MCS result)
            user = LOWUSHORT(eventData);
            result = TranslateResult(HIGHUSHORT(eventData));

            //
            // If the attach did not succeed, clean up:
            //
            if (HIGHUSHORT(eventData) != RESULT_SUCCESSFUL)
            {
                WARNING_OUT(("MG_Attach failed; cleaning up"));
                MGDetach(pmgClient);
            }
            else
            {
                pmgClient->userIDMCS = user;

                //
                // Now initialize flow control for this user attachment
                //
                ZeroMemory(&(pmgClient->flo), sizeof(pmgClient->flo));
                pmgClient->flo.callBack = MGFLOCallBack;
            }

            // Notify the client task whether the attach succeeded or not.
            UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
                NET_EVENT_USER_ATTACH, MAKELONG(user, result),
                g_pcmPrimary->callID);
            break;
        }

        case MCS_DETACH_USER_INDICATION:
        {
            NET_UID user;

            user = LOWUSHORT(eventData);

            //
            // If the detach is for the local user, then clean up
            // the user CB:
            //
            if (user == pmgClient->userIDMCS)
            {
                //
                // First terminate flow control
                //
                FLO_UserTerm(pmgClient);
                MGDetach(pmgClient);
            }
            else
            {
                //
                // Just remove the offending user from flow control
                //
                FLO_RemoveUser(pmgClient, user);
            }

            UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
                NET_EVENT_USER_DETACH, user, g_pcmPrimary->callID);
            break;
        }

        case MCS_CHANNEL_JOIN_CONFIRM:
        {
            PNET_JOIN_CNF_EVENT pNetEvent;
            UINT i;

            //
            // Allocate a buffer for the event.  The event memory lives in
            // the buffer CB's data area and is freed when the client
            // returns the event buffer.
            //
            rc = MGNewDataBuffer(pmgClient, MG_EV_BUFFER,
                sizeof(MG_INT_PKT_HEADER) + sizeof(NET_JOIN_CNF_EVENT), &pmgBuffer);
            if (rc != 0)
            {
                WARNING_OUT(("MGNewDataBuffer failed in MGCallback"));
                DC_QUIT;
            }

            pNetEvent = (PNET_JOIN_CNF_EVENT)pmgBuffer->pDataBuffer;

            //
            // Fill in the call ID:
            //
            pNetEvent->callID = g_pcmPrimary->callID;
            pNetEvent->channel = LOWUSHORT(eventData);
            pNetEvent->result = TranslateResult(HIGHUSHORT(eventData));

            //
            // Now establish flow control for the newly joined channel
            // Only control priorities that have a non-zero latency
            // And remember to ignore our own user channel! And top priority.
            //
            if (HIGHUSHORT(eventData) == RESULT_SUCCESSFUL)
            {
                if (pNetEvent->channel != pmgClient->userIDMCS)
                {
                    for (i = 0; i < NET_NUM_PRIORITIES; i++)
                    {
                        // Only valid, latency-controlled priorities get
                        // flow control started.
                        if ((i == MG_VALID_PRIORITY(i)) &&
                            (pmgClient->flowControl.latency[i] != 0))
                        {
                            FLO_StartControl(pmgClient, pNetEvent->channel,
                                i, pmgClient->flowControl.latency[i],
                                pmgClient->flowControl.streamSize[i]);
                        }
                    }
                }
            }

            //
            // OK, we've built the DCG event so now post it to our client:
            //
            UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
                NET_EVENT_CHANNEL_JOIN, 0, (UINT_PTR)pNetEvent);
            pmgBuffer->eventPosted = TRUE;
            break;
        }

        case MCS_CHANNEL_LEAVE_INDICATION:
        {
            NET_CHANNEL_ID channel;

            channel = LOWUSHORT(eventData);

            // Stop flow control on the channel before telling the client.
            MGProcessEndFlow(pmgClient, channel);

            UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
                NET_EVENT_CHANNEL_LEAVE, channel, g_pcmPrimary->callID);
            break;
        }

        case MCS_TOKEN_GRAB_CONFIRM:
        {
            NET_RESULT result;

            result = TranslateResult(HIGHUSHORT(eventData));
            UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
                NET_EVENT_TOKEN_GRAB, result, g_pcmPrimary->callID);
            break;
        }

        case MCS_TOKEN_INHIBIT_CONFIRM:
        {
            NET_RESULT result;

            result = TranslateResult(HIGHUSHORT(eventData));
            UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
                NET_EVENT_TOKEN_INHIBIT, result, g_pcmPrimary->callID);
            break;
        }

        default:
            break;
    }

    //
    // Kick the scheduler so any pending sends/joins get another chance
    // to make progress.  (Cases that DC_QUIT above skip this.)
    //
    UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
        NET_MG_SCHEDULE, 0, 0);

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // We hit an error, but must return OK to MCS - otherwise it will
        // keep sending us the callback forever!
        //
        WARNING_OUT(("MGCallback: Error 0x%08x processing MCS message %u",
            rc, mcsMessageType));
    }

    UT_Unlock(UTLOCK_T120);

    DebugExitDWORD(MGCallback, MCS_NO_ERROR);
}
//
// MGProcessEndFlow(...)
//
  1373. void MGProcessEndFlow
  1374. (
  1375. PMG_CLIENT pmgClient,
  1376. ChannelID channel
  1377. )
  1378. {
  1379. UINT i;
  1380. DebugEntry(MGProcessEndFlow);
  1381. ValidateMGClient(pmgClient);
  1382. ASSERT(pmgClient->userAttached);
  1383. //
  1384. // Terminate flow control for the newly left channel
  1385. //
  1386. if (channel != pmgClient->userIDMCS)
  1387. {
  1388. for (i = 0; i < NET_NUM_PRIORITIES; i++)
  1389. {
  1390. if ((i == MG_VALID_PRIORITY(i)) &&
  1391. (pmgClient->flowControl.latency[i] != 0))
  1392. {
  1393. TRACE_OUT(("Ending flow control on channel 0x%08x priority %u",
  1394. channel, i));
  1395. FLO_EndControl(pmgClient, channel, i);
  1396. }
  1397. }
  1398. }
  1399. DebugExitVOID(MGProcessEndFlow);
  1400. }
  1401. //
  1402. // MGHandleSendInd(...)
  1403. //
//
// MGHandleSendInd()
//
// Process an MCS (uniform) send-data indication.  Flow-control packets
// are consumed internally; everything else is wrapped in a receive
// buffer CB and posted to the client as NET_EVENT_DATA_RECEIVED.
//
// Buffer ownership: the MCS-supplied data buffer is freed here on the
// flow-control and failure paths; on the success path ownership passes
// to the RX buffer CB (MGFreeBuffer returns it to MCS later).
//
// Returns 0 on success or a NET_RC_* error code.
//
UINT MGHandleSendInd
(
    PMG_CLIENT pmgClient,
    PSendData pSendData
)
{
    PMG_BUFFER pmgBuffer;
    PNET_SEND_IND_EVENT pEvent;
    NET_PRIORITY priority;
    LPBYTE pData;
    UINT cbData;
    UINT rc = 0;
    TSHR_NET_PKT_HEADER pktHeader;

    DebugEntry(MGHandleSendInd);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    // Clamp the MCS priority into the glue layer's valid range.
    priority = (NET_PRIORITY)MG_VALID_PRIORITY(
        (NET_PRIORITY)pSendData->data_priority);

    pData = pSendData->user_data.value;
    ASSERT(pData != NULL);
    cbData = pSendData->user_data.length;
    ASSERT(cbData > sizeof(TSHR_NET_PKT_HEADER));

    TRACE_OUT(("MCS Data Indication:  flags 0x%08x, size %u, first dword 0x%08x",
        pSendData->segmentation, pSendData->user_data.length,
        *((DWORD *)pData)));

    // This layer expects MCS to deliver unsegmented packets only.
    ASSERT (pSendData->segmentation == (SEGMENTATION_BEGIN | SEGMENTATION_END));

    TRACE_OUT(("Only segment:  channel %u, priority %u, length %u",
        pSendData->channel_id, pSendData->data_priority, cbData));

    //
    // Look at the header
    //
    memcpy(&pktHeader, pData, sizeof(TSHR_NET_PKT_HEADER));

    //
    // Trace out the MG header word
    //
    TRACE_OUT(("Got 1st MG segment (header=%X)", pktHeader.pktLength));

    //
    // First of all try for a flow control packet
    //
    if (pktHeader.pktLength & TSHR_PKT_FLOW)
    {
        TRACE_OUT(("Flow control packet"));
        if (pktHeader.pktLength == TSHR_PKT_FLOW)
        {
            // Hand the flow control payload (after the header) to FLO.
            FLO_ReceivedPacket(pmgClient,
                (PTSHR_FLO_CONTROL)(pData + sizeof(TSHR_NET_PKT_HEADER)));
        }
        else
        {
            WARNING_OUT(("Received obsolete throughput packet size 0x%04x", pktHeader.pktLength));
        }

        // Flow control packets never reach the client - return the MCS
        // buffer immediately.
        pmgClient->m_piMCSSap->FreeBuffer((PVoid) pData);
        DC_QUIT;
    }

    //
    // Allocate headers for the incoming buffer.
    //
    //
    ASSERT((sizeof(NET_SEND_IND_EVENT) + pktHeader.pktLength) <= 0xFFFF);
    ASSERT(pktHeader.pktLength == cbData);

    rc = MGNewRxBuffer(pmgClient,
        priority,
        pSendData->channel_id,
        pSendData->initiator,
        &pmgBuffer);
    if (rc != 0)
    {
        WARNING_OUT(("MGNewRxBuffer of size %u failed",
            sizeof(NET_SEND_IND_EVENT) + sizeof(MG_INT_PKT_HEADER)));

        // No CB was created, so we still own the MCS buffer - free it.
        pmgClient->m_piMCSSap->FreeBuffer((PVoid) pData);
        DC_QUIT;
    }

    pEvent = (PNET_SEND_IND_EVENT) pmgBuffer->pDataBuffer;

    ValidateCMP(g_pcmPrimary);
    pEvent->callID = g_pcmPrimary->callID;
    pEvent->priority = priority;
    pEvent->channel = pSendData->channel_id;

    //
    // Copy the length into the data buffer header.
    //
    pmgBuffer->pPktHeader->header = pktHeader;

    //
    // We want to skip past the packet header to the user data
    //
    pData += sizeof(TSHR_NET_PKT_HEADER);
    cbData -= sizeof(TSHR_NET_PKT_HEADER);

    //
    // Set the pointer in the buffer header to point to the received data.
    //
    // pEvent->lengthOfData contains the number of bytes received in this
    // event so far.
    //
    ASSERT(pData);
    pEvent->data_ptr = pData;
    pEvent->lengthOfData = cbData;

    TRACE_OUT(("New RX pmgBuffer 0x%08x pDataBuffer 0x%08x",
        pmgBuffer, pEvent));

    //
    // OK, we've got all the segments, so post it to our client:
    //
    UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
        NET_EVENT_DATA_RECEIVED, 0, (UINT_PTR)pEvent);
    pmgBuffer->eventPosted = TRUE;

DC_EXIT_POINT:
    DebugExitDWORD(MGHandleSendInd, rc);
    return(rc);
}
  1511. //
  1512. // MGNewBuffer(...)
  1513. //
  1514. UINT MGNewBuffer
  1515. (
  1516. PMG_CLIENT pmgClient,
  1517. UINT bufferType,
  1518. PMG_BUFFER * ppmgBuffer
  1519. )
  1520. {
  1521. PMG_BUFFER pmgBuffer;
  1522. void * pBuffer = NULL;
  1523. UINT rc = 0;
  1524. DebugEntry(MGNewBuffer);
  1525. ValidateMGClient(pmgClient);
  1526. ASSERT(pmgClient->userAttached);
  1527. pmgBuffer = new MG_BUFFER;
  1528. if (!pmgBuffer)
  1529. {
  1530. WARNING_OUT(("MGNewBuffer failed; out of memory"));
  1531. rc = NET_RC_NO_MEMORY;
  1532. DC_QUIT;
  1533. }
  1534. ZeroMemory(pmgBuffer, sizeof(*pmgBuffer));
  1535. SET_STAMP(pmgBuffer, MGBUFFER);
  1536. pmgBuffer->type = bufferType;
  1537. //
  1538. // Insert it at the head of this client's list of allocated buffers:
  1539. //
  1540. COM_BasedListInsertAfter(&(pmgClient->buffers), &(pmgBuffer->clientChain));
  1541. //
  1542. // return the pointer
  1543. //
  1544. *ppmgBuffer = pmgBuffer;
  1545. DC_EXIT_POINT:
  1546. DebugExitDWORD(MGNewBuffer, rc);
  1547. return(rc);
  1548. }
  1549. //
  1550. // MGNewDataBuffer(...)
  1551. //
  1552. UINT MGNewDataBuffer
  1553. (
  1554. PMG_CLIENT pmgClient,
  1555. UINT bufferType,
  1556. UINT bufferSize,
  1557. PMG_BUFFER * ppmgBuffer
  1558. )
  1559. {
  1560. void * pBuffer = NULL;
  1561. PMG_BUFFER pmgBuffer;
  1562. UINT rc = 0;
  1563. DebugEntry(MGNewDataBuffer);
  1564. //
  1565. // Buffers include an MG internal packet header that has a length field
  1566. // which we add to the start of all user data passed to/received from
  1567. // MCS. This is four byte aligned, and since the data buffer starts
  1568. // immediately after this, the data buffer will be aligned.
  1569. //
  1570. pBuffer = new BYTE[bufferSize];
  1571. if (!pBuffer)
  1572. {
  1573. WARNING_OUT(("MGNewDataBuffer allocation of size %u failed", bufferSize));
  1574. rc = NET_RC_NO_MEMORY;
  1575. DC_QUIT;
  1576. }
  1577. ZeroMemory(pBuffer, bufferSize);
  1578. //
  1579. // Now we allocate the buffer CB which we will use to track the use of
  1580. // the buffer.
  1581. //
  1582. rc = MGNewBuffer(pmgClient, bufferType, ppmgBuffer);
  1583. if (rc != 0)
  1584. {
  1585. WARNING_OUT(("MGNewBuffer failed"));
  1586. DC_QUIT;
  1587. }
  1588. //
  1589. // Initialise the buffer entry
  1590. //
  1591. pmgBuffer = *ppmgBuffer;
  1592. pmgBuffer->length = bufferSize;
  1593. pmgBuffer->pPktHeader = (PMG_INT_PKT_HEADER)pBuffer;
  1594. pmgBuffer->pDataBuffer = (LPBYTE)pBuffer + sizeof(MG_INT_PKT_HEADER);
  1595. //
  1596. // Initialize the use count of the data buffer
  1597. //
  1598. pmgBuffer->pPktHeader->useCount = 1;
  1599. DC_EXIT_POINT:
  1600. if (rc != 0)
  1601. {
  1602. //
  1603. // Cleanup:
  1604. //
  1605. if (pBuffer != NULL)
  1606. {
  1607. WARNING_OUT(("Freeing MG_BUFFER data 0x%08x; MGNewBuffer failed", pBuffer));
  1608. delete[] pBuffer;
  1609. }
  1610. }
  1611. DebugExitDWORD(MGNewDataBuffer, rc);
  1612. return(rc);
  1613. }
  1614. //
  1615. // MGNewTxBuffer(...)
  1616. //
//
// MGNewTxBuffer()
//
// Allocate a transmit buffer, honoring flow control.  Normally one CB
// and one data block are created; if priority carries the
// NET_SEND_ALL_PRIORITIES flag, one CB per priority is created, all
// sharing the single data block (its use count is bumped per extra CB).
//
// Flow control is consulted (FLO_AllocSend) for every priority before
// any buffer is allocated; on back pressure all FLO allocations made so
// far are returned (FLO_ReallocSend) and the error is propagated.
//
// Returns 0 on success and stores the first CB in *ppmgBuffer; on any
// failure all CBs allocated so far are freed.
//
UINT MGNewTxBuffer
(
    PMG_CLIENT pmgClient,
    NET_PRIORITY priority,
    NET_CHANNEL_ID channel,
    UINT bufferSize,
    PMG_BUFFER * ppmgBuffer
)
{
    int i;
    UINT numPrioritiesToUse;
    UINT rc = 0;
    UINT nextPriority;
    PMG_BUFFER pmgBufferArray[MG_NUM_PRIORITIES];
    PFLO_STREAM_DATA pStr[MG_NUM_PRIORITIES];
    NET_PRIORITY priorities[MG_NUM_PRIORITIES];

    DebugEntry(MGNewTxBuffer);

    ValidateMGClient(pmgClient);
    ASSERT(priority != NET_TOP_PRIORITY);

    //
    // Initialise the control buffer pointer array.  The first member of
    // this array is the normal buffer which is allocated regardless of the
    // NET_SEND_ALL_PRIORITIES flag.  The remaining members are used for
    // duplicate control buffer pointers needed for sending data on all
    // priorities.
    //
    ZeroMemory(pmgBufferArray, sizeof(pmgBufferArray));
    ZeroMemory(pStr, sizeof(pStr));

    //
    // SFR6025: Check for the NET_SEND_ALL_PRIORITIES flag.  This means
    //          that the data will be sent at all four priorities.  If it
    //          is not set then we just need to send data at one priority.
    //          In either case we need to:
    //
    //          Check with flow control that it is possible to send data on
    //          all channels
    //
    //          Allocate an additional three control blocks which all point
    //          to the same data block and bump up the usage count.
    //
    //
    // NOTE:  Previously this function just checked with flow control for
    //        a single channel.
    //
    if (priority & NET_SEND_ALL_PRIORITIES)
    {
        numPrioritiesToUse = MG_NUM_PRIORITIES;
    }
    else
    {
        numPrioritiesToUse = 1;
    }

    //
    // Disable the flag to prevent FLO_AllocSend being sent an invalid
    // priority.
    //
    priority &= ~NET_SEND_ALL_PRIORITIES;
    nextPriority = priority;

    for (i = 0; i < (int) numPrioritiesToUse; i++)
    {
        //
        // Check with flow control to ensure that send space is available.
        // Start with the requested priority level and continue for the
        // other priority levels.
        //
        priorities[i] = (NET_PRIORITY)nextPriority;
        rc = FLO_AllocSend(pmgClient,
            nextPriority,
            channel,
            bufferSize + sizeof(MG_INT_PKT_HEADER),
            &(pStr[i]));

        //
        // If we have got back pressure then just return.
        //
        if (rc != 0)
        {
            TRACE_OUT(("Received back pressure"));

            //
            // Free any buffer space allocated by FLO_AllocSend.
            //
            for ( --i; i >= 0; i--)
            {
                FLO_ReallocSend(pmgClient,
                    pStr[i],
                    bufferSize + sizeof(MG_INT_PKT_HEADER));
            }
            DC_QUIT;
        }
        ValidateFLOStr(pStr[i]);

        //
        // Move on to the next priority level.  There are MG_NUM_PRIORITY
        // levels, numbered contiguously from MG_PRIORITY_HIGHEST.  The
        // first priority processed can be any level in the valid range so
        // rather than simply add 1 to get to the next level, we need to
        // cope with the wrap-around back to MG_PRIORITY_HIGHEST when we
        // have just processed the last priority, ie MG_PRIORITY_HIGHEST +
        // MG_NUM_PRIORITIES - 1.  This is achieved by rebasing the priority
        // level to zero (the - MG_PRIORITY_HIGHEST, below), incrementing
        // the rebased priority (+1), taking the modulus of the number of
        // priorities to avoid exceeding the limit (% MG_NUM_PRIORITIES)
        // and then restoring the base by adding back the first priority
        // level (+ MG_PRIORITY_HIGHEST).
        //
        nextPriority = (((nextPriority + 1 - MG_PRIORITY_HIGHEST) %
            MG_NUM_PRIORITIES) + MG_PRIORITY_HIGHEST);
    }

    //
    // Use MGNewDataBuffer to allocate the buffer
    //
    rc = MGNewDataBuffer(pmgClient,
        MG_TX_BUFFER,
        bufferSize + sizeof(MG_INT_PKT_HEADER),
        &pmgBufferArray[0]);
    if (rc != 0)
    {
        WARNING_OUT(("MGNewDataBuffer failed in MGNewTxBuffer"));
        DC_QUIT;
    }

    //
    // Add the fields required for doing the send
    //
    pmgBufferArray[0]->priority = priority;
    pmgBufferArray[0]->channelId = (ChannelID) channel;
    pmgBufferArray[0]->senderId = pmgClient->userIDMCS;
    ValidateFLOStr(pStr[0]);
    pmgBufferArray[0]->pStr = pStr[0];

    //
    // Now allocate an additional three control blocks which are identical
    // to the first one if required.
    //
    if (numPrioritiesToUse > 1)
    {
        //
        // Firstly re-enable the NET_SEND_ALL_PRIORITIES flag.  This is to
        // ensure that traversing the linked list in MG_SendData is
        // efficient.
        //
        pmgBufferArray[0]->priority |= NET_SEND_ALL_PRIORITIES;

        //
        // Create the duplicate buffers and initialise them.
        //
        for (i = 1; i < MG_NUM_PRIORITIES; i++)
        {
            TRACE_OUT(("Task allocating extra CB, priority %u",
                priorities[i]));

            //
            // Allocate a new control buffer.
            //
            rc = MGNewBuffer(pmgClient,
                MG_TX_BUFFER,
                &pmgBufferArray[i]);
            if (rc != 0)
            {
                WARNING_OUT(("MGNewBuffer failed"));
                DC_QUIT;
            }

            //
            // Initialise the buffer control block.  The priority values of
            // these control blocks are in increasing order from that of
            // pmgBuffer.
            //
            pmgBufferArray[i]->priority = priorities[i];
            pmgBufferArray[i]->channelId = pmgBufferArray[0]->channelId;
            pmgBufferArray[i]->senderId = pmgBufferArray[0]->senderId;
            pmgBufferArray[i]->length = pmgBufferArray[0]->length;
            pmgBufferArray[i]->pPktHeader = pmgBufferArray[0]->pPktHeader;
            pmgBufferArray[i]->pDataBuffer = pmgBufferArray[0]->pDataBuffer;
            ValidateFLOStr(pStr[i]);
            pmgBufferArray[i]->pStr = pStr[i];

            //
            // Set the NET_SEND_ALL_PRIORITIES flag.
            //
            pmgBufferArray[i]->priority |= NET_SEND_ALL_PRIORITIES;

            //
            // Now bump up the usage count of the data block.
            //
            pmgBufferArray[i]->pPktHeader->useCount++;
            TRACE_OUT(("Use count of data buffer %#.8lx now %d",
                pmgBufferArray[i]->pPktHeader,
                pmgBufferArray[i]->pPktHeader->useCount));
        }
    }

    //
    // Assign the passed first control buffer allocated to the passed
    // control buffer parameter.
    //
    *ppmgBuffer = pmgBufferArray[0];

DC_EXIT_POINT:
    //
    // In the event of a problem we free any buffers that we have already
    // allocated.
    //
    if (rc != 0)
    {
        for (i = 0; i < MG_NUM_PRIORITIES; i++)
        {
            if (pmgBufferArray[i] != NULL)
            {
                TRACE_OUT(("About to free control buffer %u", i));
                MGFreeBuffer(pmgClient, &pmgBufferArray[i]);
            }
        }
    }

    DebugExitDWORD(MGNewTxBuffer, rc);
    return(rc);
}
  1823. //
  1824. // MGNewRxBuffer(...)
  1825. //
  1826. UINT MGNewRxBuffer
  1827. (
  1828. PMG_CLIENT pmgClient,
  1829. NET_PRIORITY priority,
  1830. NET_CHANNEL_ID channel,
  1831. NET_CHANNEL_ID senderID,
  1832. PMG_BUFFER * ppmgBuffer
  1833. )
  1834. {
  1835. UINT rc = 0;
  1836. DebugEntry(MGNewRxBuffer);
  1837. ValidateMGClient(pmgClient);
  1838. //
  1839. // First tell flow control we need a buffer.
  1840. // No back pressure may be applied here, but flow control uses this
  1841. // notification to control responses to the sender.
  1842. //
  1843. // Note that we always use the sizes including the internal packet
  1844. // header for flow control purposes.
  1845. //
  1846. FLO_AllocReceive(pmgClient,
  1847. priority,
  1848. channel,
  1849. senderID);
  1850. //
  1851. // Use MGNewDataBuffer to allocate the buffer. bufferSize includes the
  1852. // size of the network packet header (this comes over the wire), but
  1853. // not the remainder of the internal packet header.
  1854. //
  1855. rc = MGNewDataBuffer(pmgClient,
  1856. MG_RX_BUFFER,
  1857. sizeof(NET_SEND_IND_EVENT) + sizeof(MG_INT_PKT_HEADER),
  1858. ppmgBuffer);
  1859. //
  1860. // Add the fields required for a receive buffer
  1861. //
  1862. if (rc == 0)
  1863. {
  1864. (*ppmgBuffer)->priority = priority;
  1865. (*ppmgBuffer)->channelId = (ChannelID)channel;
  1866. (*ppmgBuffer)->senderId = (ChannelID)senderID;
  1867. }
  1868. else
  1869. {
  1870. WARNING_OUT(("MGNewDataBuffer failed in MGNewRxBuffer"));
  1871. }
  1872. DebugExitDWORD(MGNewRxBuffer, rc);
  1873. return(rc);
  1874. }
  1875. //
  1876. // MGFreeBuffer(...)
  1877. //
  1878. void MGFreeBuffer
  1879. (
  1880. PMG_CLIENT pmgClient,
  1881. PMG_BUFFER * ppmgBuffer
  1882. )
  1883. {
  1884. PMG_BUFFER pmgBuffer;
  1885. void * pBuffer;
  1886. DebugEntry(MGFreeBuffer);
  1887. pmgBuffer = *ppmgBuffer;
  1888. ValidateMGBuffer(pmgBuffer);
  1889. //
  1890. // If this is a receive buffer then we must first tell flow control
  1891. // about the space available
  1892. // This may trigger a pong, if we are waiting for the app to free up
  1893. // some space
  1894. //
  1895. if (pmgBuffer->type == MG_RX_BUFFER)
  1896. {
  1897. ASSERT (pmgBuffer->pPktHeader->useCount == 1);
  1898. TRACE_OUT(("Free RX pmgBuffer 0x%08x", pmgBuffer));
  1899. //
  1900. // Do a sanity check on the client (there is a window where this
  1901. // may have been freed).
  1902. //
  1903. if (!pmgClient->userAttached)
  1904. {
  1905. TRACE_OUT(("MGFreeBuffer: client 0x%08x not attached", pmgClient));
  1906. }
  1907. else
  1908. {
  1909. FLO_FreeReceive(pmgClient,
  1910. pmgBuffer->priority,
  1911. pmgBuffer->channelId,
  1912. pmgBuffer->senderId);
  1913. // Free the MCS buffer
  1914. if ((pmgBuffer->pPktHeader != NULL) && (pmgClient->m_piMCSSap != NULL))
  1915. {
  1916. ASSERT(pmgBuffer->pDataBuffer != NULL);
  1917. ASSERT(((PNET_SEND_IND_EVENT)pmgBuffer->pDataBuffer)->data_ptr != NULL);
  1918. pmgClient->m_piMCSSap->FreeBuffer (
  1919. (PVoid) (((PNET_SEND_IND_EVENT) pmgBuffer->pDataBuffer)
  1920. ->data_ptr - sizeof(TSHR_NET_PKT_HEADER)));
  1921. TRACE_OUT(("MGFreeBuffer: Freed data_ptr for pmgBuffer 0x%08x pDataBuffer 0x%08x",
  1922. pmgBuffer, pmgBuffer->pDataBuffer));
  1923. ((PNET_SEND_IND_EVENT)pmgBuffer->pDataBuffer)->data_ptr = NULL;
  1924. }
  1925. }
  1926. }
  1927. //
  1928. // Free the data buffer, if there is one present. Note that this can
  1929. // be referenced by more than one bufferCB, and so has a use count
  1930. // which must reach zero before the buffer is freed.
  1931. //
  1932. if (pmgBuffer->pPktHeader != NULL)
  1933. {
  1934. ASSERT(pmgBuffer->pPktHeader->useCount != 0);
  1935. pmgBuffer->pPktHeader->useCount--;
  1936. TRACE_OUT(("Data buffer 0x%08x use count %d",
  1937. pmgBuffer->pPktHeader,
  1938. pmgBuffer->pPktHeader->useCount));
  1939. if (pmgBuffer->pPktHeader->useCount == 0)
  1940. {
  1941. TRACE_OUT(("Freeing MG_BUFFER data 0x%08x; use count is zero", pmgBuffer->pPktHeader));
  1942. delete[] pmgBuffer->pPktHeader;
  1943. pmgBuffer->pPktHeader = NULL;
  1944. }
  1945. }
  1946. //
  1947. // If the buffer CB is in the pending queue then remove it first!
  1948. //
  1949. if (pmgBuffer->pendChain.next != 0)
  1950. {
  1951. COM_BasedListRemove(&(pmgBuffer->pendChain));
  1952. }
  1953. //
  1954. // Now remove the buffer CB itself from the list and free it up:
  1955. //
  1956. COM_BasedListRemove(&(pmgBuffer->clientChain));
  1957. delete pmgBuffer;
  1958. *ppmgBuffer = NULL;
  1959. DebugExitVOID(MGFreeBuffer);
  1960. }
//
// MGDetach(...)
//
// Tears down this client's MCS user attachment:
//   1. frees every channel-join request still pending for the client,
//   2. frees (or neutralizes, if already posted) unsent receive buffers,
//   3. clears the attachment state, and
//   4. releases the MCS SAP interface and drops the global attach count.
//
void MGDetach
(
    PMG_CLIENT          pmgClient
)
{
    PMG_BUFFER      pmgBuffer;
    PMG_BUFFER      pmgT;
    PIMCSSap        pMCSSap;
#ifdef _DEBUG
    UINT            rc;
#endif // _DEBUG

    DebugEntry(MGDetach);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    // Cache the SAP pointer; it is needed below even after the client's
    // attachment fields are cleared.
    pMCSSap = pmgClient->m_piMCSSap;

    //
    // Remove any entries for this user from the channel join pending list.
    //
    pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->joinChain),
        FIELD_OFFSET(MG_BUFFER, pendChain));
    while (pmgBuffer != NULL)
    {
        ValidateMGBuffer(pmgBuffer);

        //
        // Get a pointer to the next bufferCB in the list - we have to do
        // this before we free the current bufferCB (freeing it NULLs it,
        // so we won't be able to step along to the next entry in the
        // list).
        //
        pmgT = (PMG_BUFFER)COM_BasedListNext(&(pmgClient->joinChain), pmgBuffer,
            FIELD_OFFSET(MG_BUFFER, pendChain));

        MGFreeBuffer(pmgClient, &pmgBuffer);

        //
        // We won't get a match on a join request now, so we don't have
        // a join pending.
        //
        pmgClient->joinPending = FALSE;

        pmgBuffer = pmgT;
    }

    //
    // Remove any unsent receive buffers for this user from the buffer list
    //
    pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->buffers),
        FIELD_OFFSET(MG_BUFFER, clientChain));
    while (pmgBuffer != NULL)
    {
        ValidateMGBuffer(pmgBuffer);

        //
        // Get a pointer to the next bufferCB in the list - we have to do
        // this before we free the current bufferCB (freeing it NULLs it,
        // so we won't be able to step along to the next entry in the
        // list).
        //
        pmgT = (PMG_BUFFER)COM_BasedListNext(&(pmgClient->buffers), pmgBuffer,
            FIELD_OFFSET(MG_BUFFER, clientChain));

        if (pmgBuffer->type == MG_RX_BUFFER)
        {
            if (pmgBuffer->eventPosted)
            {
                //
                // The event is already with the client, so we cannot free
                // the bufferCB itself; instead give the underlying MCS
                // data buffer back to the SAP.  The MCS allocation starts
                // one TSHR_NET_PKT_HEADER before the data pointer handed
                // to the client, hence the subtraction.
                //
                if ((pmgBuffer->pPktHeader != NULL) && (pMCSSap != NULL))
                {
                    ASSERT(pmgBuffer->pDataBuffer != NULL);
                    ASSERT(((PNET_SEND_IND_EVENT)pmgBuffer->pDataBuffer)->data_ptr != NULL);

                    pMCSSap->FreeBuffer (
                        (PVoid) (((PNET_SEND_IND_EVENT) pmgBuffer->pDataBuffer)
                            ->data_ptr - sizeof(TSHR_NET_PKT_HEADER)));

                    TRACE_OUT(("MGDetach: Freed data_ptr for pmgBuffer 0x%08x pDataBuffer 0x%08x",
                        pmgBuffer, pmgBuffer->pDataBuffer));

                    ((PNET_SEND_IND_EVENT) pmgBuffer->pDataBuffer)->data_ptr = NULL;
                }
            }
            else
            {
                //
                // The bufferCB's user matches the user we are freeing up,
                // and we haven't posted the event to the user, so free it.
                // MGFreeBuffer removes it from the pending list, so we don't
                // have to do that.
                //
                MGFreeBuffer(pmgClient, &pmgBuffer);
            }
        }

        pmgBuffer = pmgT;
    }

    //
    // Clear out the attachment info
    //
    pmgClient->userAttached = FALSE;
    pmgClient->userIDMCS    = 0;

    //
    // We can safely do an MCS DetachRequest without adding a requestCB
    // - MCS will not bounce the request due to congestion, domain merging
    // etc.
    //
    if (pMCSSap != NULL)
    {
#ifdef _DEBUG
        rc = pMCSSap->ReleaseInterface();
        if (rc != 0) {
            //
            // No quit - we need to do our own cleanup.
            //
            // lonchanc: what cleanup needs to be done???
            //
            rc = McsErrToNetErr(rc);
            switch (rc)
            {
                case 0:
                case NET_RC_MGC_INVALID_USER_HANDLE:
                case NET_RC_MGC_TOO_MUCH_IN_USE:
                    // These are normal.
                    TRACE_OUT(("MCSDetachUser normal error %d", rc));
                    break;

                default:
                    ERROR_OUT(("MCSDetachUser abnormal error %d", rc));
                    break;
            }
        }
#else
        pMCSSap->ReleaseInterface();
#endif //_DEBUG

        pmgClient->m_piMCSSap = NULL;
    }

    // One fewer attached client in this process.
    --g_mgAttachCount;

    DebugExitVOID(MGDetach);
}
//
// MGProcessPendingQueue(...)
//
// Drains the client's pending-request queue, issuing each queued MCS
// operation (channel join/leave, token operations, data sends).  Stops
// early on MCS back pressure (NET_RC_MGC_TOO_MUCH_IN_USE) so the
// remaining requests are retried later.
//
// Returns: the (translated) result of the last request attempted, or 0.
//
UINT MGProcessPendingQueue(PMG_CLIENT pmgClient)
{
    PMG_BUFFER      pmgBuffer;
    PMG_BUFFER      pNextBuffer;
    UINT            rc = 0;

    DebugEntry(MGProcessPendingQueue);

    ValidateMGClient(pmgClient);

    pNextBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->pendChain),
        FIELD_OFFSET(MG_BUFFER, pendChain));

    //
    // Try and clear all the pending request queue
    //
    for ( ; (pmgBuffer = pNextBuffer) != NULL; )
    {
        ValidateMGBuffer(pmgBuffer);

        // Step to the successor now - the current buffer may be freed or
        // moved to another queue below.
        pNextBuffer = (PMG_BUFFER)COM_BasedListNext(&(pmgClient->pendChain),
            pNextBuffer, FIELD_OFFSET(MG_BUFFER, pendChain));

        TRACE_OUT(("Got request 0x%08x from queue, type %u",
            pmgBuffer, pmgBuffer->type));

        //
        // Check that the buffer is still valid.  There is a race at
        // conference termination where we can arrive here, but our user
        // has actually already detached.  In this case, free the buffer
        // and continue.
        //
        if (!pmgClient->userAttached)
        {
            TRACE_OUT(("MGProcessPendingQueue: client 0x%08x not attached", pmgClient));
            MGFreeBuffer(pmgClient, &pmgBuffer);
            continue;
        }

        switch (pmgBuffer->type)
        {
            case MG_RQ_CHANNEL_JOIN:
            case MG_RQ_CHANNEL_JOIN_BY_KEY:
            {
                //
                // If this client already has a join outstanding, then skip
                // this request.
                //
                if (pmgClient->joinPending)
                {
                    //
                    // Break out of switch and goto next iteration of for()
                    // (the request stays queued for a later pass).
                    //
                    continue;
                }

                pmgClient->joinPending = TRUE;

                //
                // Attempt the join
                //
                rc = pmgClient->m_piMCSSap->ChannelJoin(
                    (unsigned short) pmgBuffer->channelId);

                //
                // If the join failed then post an error back immediately
                //
                if (rc != 0)
                {
                    if ((rc != MCS_TRANSMIT_BUFFER_FULL) &&
                        (rc != MCS_DOMAIN_MERGING))
                    {
                        //
                        // Something terminal went wrong - post a
                        // NET_EV_JOIN_CONFIRM (failed) to the client
                        //
                        MGPostJoinConfirm(pmgClient,
                                          NET_RESULT_USER_REJECTED,
                                          pmgBuffer->channelId,
                                          (NET_CHANNEL_ID)(pmgBuffer->work));
                    }

                    pmgClient->joinPending = FALSE;
                }
                else
                {
                    //
                    // If the request worked then we must move it to the
                    // join queue for completion
                    //
                    TRACE_OUT(("Inserting 0x%08x into join queue",pmgBuffer));
                    COM_BasedListRemove(&(pmgBuffer->pendChain));
                    COM_BasedListInsertBefore(&(pmgClient->joinChain),
                        &(pmgBuffer->pendChain));

                    //
                    // Do not free this buffer - continue processing the
                    // pending queue
                    //
                    continue;
                }
            }
            break;

            case MG_RQ_CHANNEL_LEAVE:
            {
                //
                // Try to leave the channel:
                //
                rc = pmgClient->m_piMCSSap->ChannelLeave(
                    (unsigned short) pmgBuffer->channelId);
                if (rc == 0)
                {
                    // Leaving succeeded - stop flow control on the channel.
                    MGProcessEndFlow(pmgClient,
                                     pmgBuffer->channelId);
                }
            }
            break;

            case MG_RQ_TOKEN_GRAB:
            {
                rc = pmgClient->m_piMCSSap->TokenGrab(pmgBuffer->channelId);
            }
            break;

            case MG_RQ_TOKEN_INHIBIT:
            {
                rc = pmgClient->m_piMCSSap->TokenInhibit(pmgBuffer->channelId);
            }
            break;

            case MG_RQ_TOKEN_RELEASE:
            {
                rc = pmgClient->m_piMCSSap->TokenRelease(pmgBuffer->channelId);
            }
            break;

            case MG_TX_BUFFER:
            {
                ASSERT(!(pmgBuffer->pPktHeader->header.pktLength & TSHR_PKT_FLOW));

                //
                // Send the data.  Remember that we don't send all of the
                // packet header, only from the length...
                //
                ASSERT((pmgBuffer->priority != NET_TOP_PRIORITY));
                rc = pmgClient->m_piMCSSap->SendData(NORMAL_SEND_DATA,
                                                 pmgBuffer->channelId,
                                                 (Priority)(pmgBuffer->priority),
                                                 (unsigned char *) &(pmgBuffer->pPktHeader->header),
                                                 pmgBuffer->pPktHeader->header.pktLength,
                                                 APP_ALLOCATION);

                //
                // Check the return code.
                //
                if (rc == 0)
                {
                    //
                    // Update the allocation.  FLO_DecrementAlloc will
                    // check that the stream pointer is not null for us.
                    // (It will be null if flow control has ended on this
                    // channel since this buffer was allocated or if this
                    // is an uncontrolled channel).
                    //
                    // Note that for flow control purposes, we always use
                    // packet sizes including the internal packet header.
                    //
                    FLO_DecrementAlloc(pmgBuffer->pStr,
                        (pmgBuffer->pPktHeader->header.pktLength
                        - sizeof(TSHR_NET_PKT_HEADER) + sizeof(MG_INT_PKT_HEADER)));
                }
            }
            break;

            case MG_TX_PING:
            case MG_TX_PONG:
            case MG_TX_PANG:
            {
                //
                // This is the length of a ping/pong message:
                //
                ASSERT(pmgBuffer->priority != NET_TOP_PRIORITY);
                rc = pmgClient->m_piMCSSap->SendData(NORMAL_SEND_DATA,
                                                 pmgBuffer->channelId,
                                                 (Priority)(pmgBuffer->priority),
                                                 (unsigned char *) &(pmgBuffer->pPktHeader->header),
                                                 sizeof(TSHR_NET_PKT_HEADER) + sizeof(TSHR_FLO_CONTROL),
                                                 APP_ALLOCATION);
            }
            break;
        }

        // Translate the MCS result into a NET_RC_ code.
        rc = McsErrToNetErr(rc);

        //
        // If the request failed due to back pressure then just get out
        // now.  We will try again later.
        //
        if (rc == NET_RC_MGC_TOO_MUCH_IN_USE)
        {
            TRACE_OUT(("MCS Back pressure"));
            break;
        }

        //
        // Only for obman...
        //
        if (pmgClient == &g_amgClients[MGTASK_OM])
        {
            ValidateCMP(g_pcmPrimary);

            //
            // For any other error or if everything worked so far
            // then tell the user to keep going
            //
            TRACE_OUT(("Posting NET_FEEDBACK"));
            UT_PostEvent(pmgClient->putTask,
                         pmgClient->putTask,
                         NO_DELAY,
                         NET_FEEDBACK,
                         0,
                         g_pcmPrimary->callID);
        }

        //
        // All is OK, or the request failed fatally.  In either case we
        // should free this request and attempt to continue.
        //
        MGFreeBuffer(pmgClient, &pmgBuffer);
    }

    DebugExitDWORD(MGProcessPendingQueue, rc);
    return(rc);
}
  2301. //
  2302. // MGPostJoinConfirm(...)
  2303. //
  2304. UINT MGPostJoinConfirm
  2305. (
  2306. PMG_CLIENT pmgClient,
  2307. NET_RESULT result,
  2308. NET_CHANNEL_ID channel,
  2309. NET_CHANNEL_ID correlator
  2310. )
  2311. {
  2312. PNET_JOIN_CNF_EVENT pNetJoinCnf;
  2313. PMG_BUFFER pmgBuffer;
  2314. UINT rc;
  2315. DebugEntry(MGPostJoinConfirm);
  2316. ValidateMGClient(pmgClient);
  2317. //
  2318. // Allocate a buffer to send the event in - this should only fail if we
  2319. // really are out of virtual memory.
  2320. //
  2321. rc = MGNewDataBuffer(pmgClient, MG_EV_BUFFER,
  2322. sizeof(MG_INT_PKT_HEADER) + sizeof(NET_JOIN_CNF_EVENT), &pmgBuffer);
  2323. if (rc != 0)
  2324. {
  2325. WARNING_OUT(("Failed to alloc NET_JOIN_CNF_EVENT"));
  2326. DC_QUIT;
  2327. }
  2328. pNetJoinCnf = (PNET_JOIN_CNF_EVENT) pmgBuffer->pDataBuffer;
  2329. ValidateCMP(g_pcmPrimary);
  2330. if (!g_pcmPrimary->callID)
  2331. {
  2332. WARNING_OUT(("MGPostJoinConfirm failed; not in call"));
  2333. rc = NET_RC_MGC_NOT_CONNECTED;
  2334. DC_QUIT;
  2335. }
  2336. //
  2337. // Fill in the fields.
  2338. //
  2339. pNetJoinCnf->callID = g_pcmPrimary->callID;
  2340. pNetJoinCnf->result = result;
  2341. pNetJoinCnf->channel = channel;
  2342. pNetJoinCnf->correlator = correlator;
  2343. //
  2344. // OK, we've built the event so now post it to our client:
  2345. //
  2346. UT_PostEvent(pmgClient->putTask,
  2347. pmgClient->putTask,
  2348. NO_DELAY,
  2349. NET_EVENT_CHANNEL_JOIN,
  2350. 0,
  2351. (UINT_PTR) pNetJoinCnf);
  2352. pmgBuffer->eventPosted = TRUE;
  2353. DC_EXIT_POINT:
  2354. DebugExitDWORD(MGPostJoinConfirm, rc);
  2355. return(rc);
  2356. }
  2357. //
  2358. // MCSErrToNetErr()
  2359. //
  2360. UINT McsErrToNetErr ( UINT rcMCS )
  2361. {
  2362. UINT rc = NET_RC_MGC_NOT_SUPPORTED;
  2363. //
  2364. // We use a static array of values to map the return code:
  2365. //
  2366. if (rcMCS < sizeof(c_RetCodeMap1) / sizeof(c_RetCodeMap1[0]))
  2367. {
  2368. rc = c_RetCodeMap1[rcMCS];
  2369. }
  2370. else
  2371. {
  2372. UINT nNewIndex = rcMCS - MCS_DOMAIN_ALREADY_EXISTS;
  2373. if (nNewIndex < sizeof(c_RetCodeMap2) / sizeof(c_RetCodeMap2[0]))
  2374. {
  2375. rc = c_RetCodeMap2[nNewIndex];
  2376. }
  2377. }
  2378. #ifdef _DEBUG
  2379. if (MCS_TRANSMIT_BUFFER_FULL == rcMCS)
  2380. {
  2381. ASSERT(NET_RC_MGC_TOO_MUCH_IN_USE == rc);
  2382. }
  2383. #endif
  2384. return rc;
  2385. }
  2386. //
  2387. // TranslateResult(...)
  2388. //
  2389. NET_RESULT TranslateResult(WORD resultMCS)
  2390. {
  2391. //
  2392. // We use a static array of values to map the result code:
  2393. //
  2394. if (resultMCS >= MG_NUM_OF_MCS_RESULTS)
  2395. resultMCS = MG_INVALID_MCS_RESULT;
  2396. return(c_ResultMap[resultMCS]);
  2397. }
  2398. //
  2399. // MGFLOCallback(...)
  2400. //
  2401. void MGFLOCallBack
  2402. (
  2403. PMG_CLIENT pmgClient,
  2404. UINT callbackType,
  2405. UINT priority,
  2406. UINT newBufferSize
  2407. )
  2408. {
  2409. PMG_BUFFER pmgBuffer;
  2410. DebugEntry(MGFLOCallBack);
  2411. ASSERT(priority != NET_TOP_PRIORITY);
  2412. ValidateMGClient(pmgClient);
  2413. ASSERT(pmgClient->userAttached);
  2414. //
  2415. // If this is a buffermod callback then tell the app
  2416. //
  2417. if (pmgClient == &g_amgClients[MGTASK_DCS])
  2418. {
  2419. if (callbackType == FLO_BUFFERMOD)
  2420. {
  2421. UT_PostEvent(pmgClient->putTask,
  2422. pmgClient->putTask,
  2423. NO_DELAY,
  2424. NET_FLOW,
  2425. priority,
  2426. newBufferSize);
  2427. }
  2428. }
  2429. else
  2430. {
  2431. ASSERT(pmgClient == &g_amgClients[MGTASK_OM]);
  2432. //
  2433. // Wake up the app in case we have applied back pressure.
  2434. //
  2435. TRACE_OUT(("Posting NET_FEEDBACK"));
  2436. UT_PostEvent(pmgClient->putTask,
  2437. pmgClient->putTask,
  2438. NO_DELAY,
  2439. NET_FEEDBACK,
  2440. 0,
  2441. g_pcmPrimary->callID);
  2442. }
  2443. DebugExitVOID(MGFLOCallback);
  2444. }
  2445. //
  2446. // MGProcessDomainWatchdog()
  2447. //
  2448. void MGProcessDomainWatchdog
  2449. (
  2450. PMG_CLIENT pmgClient
  2451. )
  2452. {
  2453. int task;
  2454. DebugEntry(MGProcessDomainWatchdog);
  2455. ValidateMGClient(pmgClient);
  2456. //
  2457. // Call FLO to check each user attachment for delinquency
  2458. //
  2459. if (g_mgAttachCount > 0)
  2460. {
  2461. for (task = MGTASK_FIRST; task < MGTASK_MAX; task++)
  2462. {
  2463. if (g_amgClients[task].userAttached)
  2464. {
  2465. FLO_CheckUsers(&(g_amgClients[task]));
  2466. }
  2467. }
  2468. //
  2469. // Continue periodic messages - but only if there are some users.
  2470. //
  2471. // TRACE_OUT(("Continue watchdog"));
  2472. UT_PostEvent(pmgClient->putTask,
  2473. pmgClient->putTask,
  2474. MG_TIMER_PERIOD,
  2475. NET_MG_WATCHDOG,
  2476. 0, 0);
  2477. }
  2478. else
  2479. {
  2480. TRACE_OUT(("Don't continue Watchdog timer"));
  2481. }
  2482. DebugExitVOID(MGProcessDomainWatchdog);
  2483. }
  2484. //
  2485. // FLO_UserTerm
  2486. //
  2487. void FLO_UserTerm(PMG_CLIENT pmgClient)
  2488. {
  2489. UINT i;
  2490. UINT cStreams;
  2491. DebugEntry(FLO_UserTerm);
  2492. ValidateMGClient(pmgClient);
  2493. ASSERT(pmgClient->userAttached);
  2494. cStreams = pmgClient->flo.numStreams;
  2495. //
  2496. // Stop flow control on all channels. We scan the list of streams and
  2497. // if flow control is active on a stream then we stop it.
  2498. //
  2499. for (i = 0; i < cStreams; i++)
  2500. {
  2501. //
  2502. // Check that the stream is flow controlled.
  2503. //
  2504. if (pmgClient->flo.pStrData[i] != NULL)
  2505. {
  2506. //
  2507. // End control on this controlled stream.
  2508. //
  2509. FLOStreamEndControl(pmgClient, i);
  2510. }
  2511. }
  2512. DebugExitVOID(FLO_UserTerm);
  2513. }
//
// FLO_StartControl
//
// Starts flow control on a (channel, priority) pair: finds a free
// stream slot, allocates and initializes a FLO_STREAM_DATA for it, and
// schedules an immediate ping.  No-op if the pair is already
// controlled, if all FLO_MAX_STREAMS slots are in use, or on
// allocation failure.
//
// maxBytesOutstanding == 0 means "use the default ceiling"
// (FLO_MAX_STREAMSIZE).
//
void FLO_StartControl
(
    PMG_CLIENT      pmgClient,
    NET_CHANNEL_ID  channel,
    UINT            priority,
    UINT            backlog,
    UINT            maxBytesOutstanding
)
{
    UINT                rc = 0;
    PFLO_STREAM_DATA    pStr;
    UINT                i;
    UINT                stream;

    DebugEntry(FLO_StartControl);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);
    ASSERT(priority != NET_TOP_PRIORITY);

    //
    // Flow control is on by default.
    //

    //
    // Check to see if the channel is already flow controlled.  If it is
    // then we just exit.
    //
    stream = FLOGetStream(pmgClient, channel, priority, &pStr);
    if (stream != FLO_NOT_CONTROLLED)
    {
        ValidateFLOStr(pStr);
        TRACE_OUT(("Stream %u is already controlled (0x%08x:%u)",
            stream, channel, priority));
        DC_QUIT;
    }

    //
    // If we already have hit the stream limit for this app then give up.
    // (Freed slots are reused: the first NULL entry wins.)
    //
    for (i = 0; i < FLO_MAX_STREAMS; i++)
    {
        if ((pmgClient->flo.pStrData[i]) == NULL)
        {
            break;
        }
    }
    if (i == FLO_MAX_STREAMS)
    {
        ERROR_OUT(("Too many streams defined already"));
        DC_QUIT;
    }
    TRACE_OUT(("This is stream %u", i));

    //
    // Allocate memory for our stream data.  Hang the pointer off floHandle
    // - this should be returned to us on all subsequent API calls.
    //
    pStr = new FLO_STREAM_DATA;
    if (!pStr)
    {
        WARNING_OUT(("FLO_StartControl failed; out of memory"));
        DC_QUIT;
    }
    ZeroMemory(pStr, sizeof(*pStr));

    //
    // Store the channel and priorities for this stream.
    //
    SET_STAMP(pStr, FLOSTR);
    pStr->channel  = channel;
    pStr->priority = priority;
    pStr->backlog  = backlog;

    // A zero ceiling means "no explicit limit" - use the maximum.
    if (maxBytesOutstanding == 0)
    {
        maxBytesOutstanding = FLO_MAX_STREAMSIZE;
    }
    pStr->DC_ABSMaxBytesInPipe = maxBytesOutstanding;

    // Start from the initial pipe size, clamped to the ceiling.
    pStr->maxBytesInPipe = FLO_INIT_STREAMSIZE;
    if (pStr->maxBytesInPipe > maxBytesOutstanding)
    {
        pStr->maxBytesInPipe = maxBytesOutstanding;
    }

    //
    // Set the initial stream bytesAllocated to 0.
    //
    pStr->bytesAllocated = 0;

    //
    // Ping needed immediately.
    //
    pStr->pingNeeded   = TRUE;
    pStr->pingTime     = FLO_INIT_PINGTIME;
    pStr->nextPingTime = GetTickCount();

    //
    // Initialize the users base pointers.
    //
    COM_BasedListInit(&(pStr->users));

    //
    // Hang the stream CB off the base control block.
    //
    pmgClient->flo.pStrData[i] = pStr;

    // Only grow numStreams when a brand new slot is used (reused slots
    // are already below the count).
    if (i >= pmgClient->flo.numStreams)
    {
        pmgClient->flo.numStreams++;
    }

    TRACE_OUT(("Flow control started, stream %u, (0x%08x:%u)",
        i, channel, priority));

DC_EXIT_POINT:
    DebugExitVOID(FLO_StartControl);
}
  2620. //
  2621. // FLO_EndControl
  2622. //
  2623. void FLO_EndControl
  2624. (
  2625. PMG_CLIENT pmgClient,
  2626. NET_CHANNEL_ID channel,
  2627. UINT priority
  2628. )
  2629. {
  2630. UINT stream;
  2631. PFLO_STREAM_DATA pStr;
  2632. DebugEntry(FLO_EndControl);
  2633. ValidateMGClient(pmgClient);
  2634. ASSERT(pmgClient->userAttached);
  2635. ASSERT(priority != NET_TOP_PRIORITY);
  2636. //
  2637. // Convert channel and stream into priority.
  2638. //
  2639. stream = FLOGetStream(pmgClient, channel, priority, &pStr);
  2640. //
  2641. // The stream is not controlled so we just trace and quit.
  2642. //
  2643. if (stream == FLO_NOT_CONTROLLED)
  2644. {
  2645. WARNING_OUT(("Uncontrolled stream channel 0x%08x priority %u",
  2646. channel, priority));
  2647. DC_QUIT;
  2648. }
  2649. //
  2650. // Call the internal FLOStreamEndControl to end flow control on a
  2651. // given stream.
  2652. //
  2653. ValidateFLOStr(pStr);
  2654. FLOStreamEndControl(pmgClient, stream);
  2655. DC_EXIT_POINT:
  2656. DebugExitVOID(FLO_EndControl);
  2657. }
//
// FLO_AllocSend
//
// Asks flow control for permission to send 'size' bytes on the given
// channel/priority.  On success the stream's in-pipe/allocated counts
// are charged and *ppStr receives the stream (NULL for uncontrolled
// channels).  Returns 0 to allow the send, or
// NET_RC_MGC_TOO_MUCH_IN_USE to simulate back pressure.
//
// May also send a ping as a side effect, to solicit the pong that will
// later relieve the pressure.
//
UINT FLO_AllocSend
(
    PMG_CLIENT          pmgClient,
    UINT                priority,
    NET_CHANNEL_ID      channel,
    UINT                size,
    PFLO_STREAM_DATA *  ppStr
)
{
    UINT                stream;
    UINT                curtime;
    PFLO_STREAM_DATA    pStr;
    BOOL                denyAlloc = FALSE;
    BOOL                doPing    = FALSE;
    UINT                rc        = 0;

    DebugEntry(FLO_AllocSend);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);
    ASSERT(priority != NET_TOP_PRIORITY);

    //
    // Convert channel and stream into priority
    //
    stream = FLOGetStream(pmgClient, channel, priority, ppStr);
    pStr = *ppStr;

    //
    // For non-controlled streams just send the data
    //
    if (stream == FLO_NOT_CONTROLLED)
    {
        TRACE_OUT(("Send %u bytes on uncontrolled channel/pri (0x%08x:%u)",
            size, channel, priority));
        DC_QUIT;
    }

    //
    // Get the current tick count.
    // (NOTE(review): comparisons below assume GetTickCount has not
    // wrapped - the 49.7-day wraparound is not handled here.)
    //
    curtime = GetTickCount();

    //
    // Check whether this request is permitted.  We must allow one packet
    // beyond the specified limit to avoid problems determining when we
    // have started rejecting requests and also to avoid situations where a
    // single request exceeds the total pipe size.
    //
    // If we have not yet received a pong then we limit the amount of
    // allocated buffer space to below FLO_MAX_PRE_FC_ALLOC.  However this
    // data can be sent immediately so the overall throughput is still
    // relatively high.  In this way we minimize the amount of data held in
    // the glue layer to a maximum of FLO_MAX_PRE_FC_ALLOC if there are no
    // remote users.
    //
    ValidateFLOStr(pStr);
    if (!pStr->gotPong)
    {
        //
        // Flag that a ping is required.
        //
        pStr->pingNeeded = TRUE;
        if (curtime > pStr->nextPingTime)
        {
            doPing = TRUE;
        }

        //
        // We haven't got a pong yet (i.e. FC is non-operational) so we
        // need to limit the maximum amount of data held in flow control to
        // FLO_MAX_PRE_FC_ALLOC.
        //
        if (pStr->bytesAllocated > FLO_MAX_PRE_FC_ALLOC)
        {
            denyAlloc = TRUE;
            TRACE_OUT(("Max allocation of %u bytes exceeded (currently %u)",
                FLO_MAX_PRE_FC_ALLOC,
                pStr->bytesAllocated));
            DC_QUIT;
        }

        // Charge the stream for this packet and allow the send.
        pStr->bytesInPipe    += size;
        pStr->bytesAllocated += size;

        TRACE_OUT((
            "Alloc of %u succeeded: bytesAlloc %u, bytesInPipe %u"
            " (0x%08x:%u)",
            size,
            pStr->bytesAllocated,
            pStr->bytesInPipe,
            pStr->channel,
            pStr->priority));
        DC_QUIT;
    }

    //
    // Flow control is operational: allow the send while the pipe is
    // below its current limit.
    //
    if (pStr->bytesInPipe < pStr->maxBytesInPipe)
    {
        //
        // Check to see if a ping is required and if so send it now.
        //
        if ((pStr->pingNeeded) && (curtime > pStr->nextPingTime))
        {
            doPing = TRUE;
        }

        pStr->bytesInPipe    += size;
        pStr->bytesAllocated += size;

        TRACE_OUT(("Stream %u - alloc %u (InPipe:MaxInPipe %u:%u)",
            stream,
            size,
            pStr->bytesInPipe,
            pStr->maxBytesInPipe));
        DC_QUIT;
    }

    //
    // If we get here then we cannot currently allocate any buffers so deny
    // the allocation.  Simulate back pressure with NET_OUT_OF_RESOURCE.
    // We also flag that a "wake up" event is required to get the app to
    // send more data.
    //
    denyAlloc = TRUE;
    pStr->eventNeeded   = TRUE;
    pStr->curDenialTime = pStr->lastPingTime;

    //
    // We are not allowed to apply back pressure unless we can guarantee
    // that we will wake up the app later on.  This is dependent upon our
    // receiving a pong later.  But if there is no ping outstanding
    // (because we have allocated all our buffer allowance within the ping
    // delay time) then we should first send a ping to trigger the wake up.
    // If this fails then our watchdog will finally wake us up.
    //
    if (pStr->pingNeeded)
    {
        doPing = TRUE;
    }

DC_EXIT_POINT:
    //
    // Check to see if we should deny the buffer allocation.
    // (Only reachable with a valid pStr - denyAlloc is never set on the
    // uncontrolled path.)
    //
    if (denyAlloc)
    {
        rc = NET_RC_MGC_TOO_MUCH_IN_USE;
        TRACE_OUT(("Denying buffer request on stream %u InPipe %u Alloc %u",
            stream,
            pStr->bytesInPipe,
            pStr->bytesAllocated));
    }

    if (doPing)
    {
        //
        // A ping is required so send it now.
        //
        FLOPing(pmgClient, stream, curtime);
    }

    DebugExitDWORD(FLO_AllocSend, rc);
    return(rc);
}
//
// FLO_ReallocSend
//
// Returns 'size' bytes to the stream's in-pipe pool (data that was
// allocated via FLO_AllocSend but not actually sent).  Always falls
// through to FLO_DecrementAlloc, which also releases the bytes from
// the stream's overall allocation count and safely handles a NULL
// (uncontrolled) stream.
//
void FLO_ReallocSend
(
    PMG_CLIENT          pmgClient,
    PFLO_STREAM_DATA    pStr,
    UINT                size
)
{
    DebugEntry(FLO_ReallocSend);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    //
    // For non-controlled streams there is nothing to do so just exit.
    // (DC_QUIT jumps to the common exit, which still calls
    // FLO_DecrementAlloc - a no-op for NULL streams.)
    //
    if (pStr == NULL)
    {
        TRACE_OUT(("Realloc data on uncontrolled channel"));
        DC_QUIT;
    }

    //
    // Perform a quick sanity check.
    //
    ValidateFLOStr(pStr);
    if (size > pStr->bytesInPipe)
    {
        ERROR_OUT(("Realloc of %u makes bytesInPipe (%u) neg (0x%08x:%u)",
            size,
            pStr->bytesInPipe,
            pStr->channel,
            pStr->priority));
        DC_QUIT;
    }

    //
    // Add the length not sent back into the pool.
    //
    pStr->bytesInPipe -= size;
    TRACE_OUT(("Realloc %u FC bytes (bytesInPipe is now %u) (0x%08x:%u)",
        size,
        pStr->bytesInPipe,
        pStr->channel,
        pStr->priority));

DC_EXIT_POINT:
    //
    // Every time that we call FLO_ReallocSend we also want to call
    // FLO_DecrementAlloc (but not vice-versa) so call it now.
    //
    FLO_DecrementAlloc(pStr, size);

    DebugExitVOID(FLO_ReallocSend);
}
  2859. //
  2860. // FLO_DecrementAlloc
  2861. //
  2862. void FLO_DecrementAlloc
  2863. (
  2864. PFLO_STREAM_DATA pStr,
  2865. UINT size
  2866. )
  2867. {
  2868. DebugEntry(FLO_DecrementAlloc);
  2869. //
  2870. // For non-controlled streams there is nothing to do so just exit.
  2871. //
  2872. if (pStr == NULL)
  2873. {
  2874. TRACE_OUT(("Decrement bytesAllocated on uncontrolled channel"));
  2875. DC_QUIT;
  2876. }
  2877. //
  2878. // Perform a quick sanity check.
  2879. //
  2880. ValidateFLOStr(pStr);
  2881. if (size > pStr->bytesAllocated)
  2882. {
  2883. ERROR_OUT(("Dec of %u makes bytesAllocated (%u) neg (0x%08x:%u)",
  2884. size,
  2885. pStr->bytesAllocated,
  2886. pStr->channel,
  2887. pStr->priority));
  2888. DC_QUIT;
  2889. }
  2890. //
  2891. // Update the count of the data held in the glue for this stream.
  2892. //
  2893. pStr->bytesAllocated -= size;
  2894. TRACE_OUT(("Clearing %u alloc bytes (bytesAlloc is now %u) (0x%08x:%u)",
  2895. size,
  2896. pStr->bytesAllocated,
  2897. pStr->channel,
  2898. pStr->priority));
  2899. DC_EXIT_POINT:
  2900. DebugExitVOID(FLO_DecrementAlloc);
  2901. }
//
// FLO_CheckUsers
//
// Watchdog sweep over every flow-controlled stream of this client.
// If a stream has back pressure applied (eventNeeded) and its ping has
// been answered (pingNeeded clear) but no pong has arrived within
// FLO_MAX_WAIT_TIME, the stream's counters are reset, users that never
// answered the last ping are discarded, and the client's wakeup
// callback is fired so the app resumes sending.
//
void FLO_CheckUsers(PMG_CLIENT pmgClient)
{
    PFLO_USER           pFloUser;
    PBASEDLIST          nextUser;
    int                 waited;
    BYTE                stream;
    UINT                curtime;
    PFLO_STREAM_DATA    pStr;

    DebugEntry(FLO_CheckUsers);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    curtime = GetTickCount();

    //
    // Check users of each stream
    //
    for (stream = 0; stream < pmgClient->flo.numStreams; stream++)
    {
        // Skip slots whose streams have been freed.
        if (pmgClient->flo.pStrData[stream] == NULL)
        {
            continue;
        }
        pStr = pmgClient->flo.pStrData[stream];
        ValidateFLOStr(pStr);

        //
        // Check whether we have waited long enough and need to reset the
        // wait counters.  We only wait a certain time before resetting all
        // our counts.  What has happened is that someone has left the call
        // and we have been waiting for their pong.
        //
        // We detect the outage by checking against nextPingTime which, as
        // well as being set to the earliest time we can send a ping is
        // also updated to the current time as each pong comes in so we can
        // use it as a measure of the time since the last repsonse from any
        // user of the stream.
        //
        // To avoid false outages caused by new joiners or transient large
        // buffer situations each user is required to send a pong at the
        // rate of MAX_WAIT_TIME/2.  They do this by just sending a
        // duplicate pong if they have not yet got the ping they need to
        // to pong.
        //
        if ((pStr->eventNeeded) &&
            (!pStr->pingNeeded))
        {
            TRACE_OUT(("Checking for valid back pressure on stream %u",
                stream));

            //
            // Note that if there are no remote users then we should reset
            // the flags regardless.  We get into this state when we first
            // start an app because OBMAN sends data before the app has
            // joined the channel at the other end.
            //
            waited = curtime - pStr->nextPingTime;
            if (waited > FLO_MAX_WAIT_TIME)
            {
                TRACE_OUT(("Stream %u - Waited for %d, resetting counter",
                    stream, waited));

                // Reset the stream to its pre-flow-control state.
                pStr->bytesInPipe  = 0;
                pStr->pingNeeded   = TRUE;
                pStr->nextPingTime = curtime;
                pStr->gotPong      = FALSE;

                //
                // Remove outdated records from our user queue
                // (intrusive list walk - grab the successor before a
                // node may be deleted).
                //
                pFloUser = (PFLO_USER)COM_BasedNextListField(&(pStr->users));
                while (&(pFloUser->list) != &(pStr->users))
                {
                    ValidateFLOUser(pFloUser);

                    //
                    // Address the follow on record before we free the
                    // current
                    //
                    nextUser = COM_BasedNextListField(&(pFloUser->list));

                    //
                    // Free the current record, if necessary
                    //
                    if (pFloUser->lastPongRcvd != pStr->pingValue)
                    {
                        //
                        // Remove from the list
                        //
                        TRACE_OUT(("Freeing FLO_USER 0x%08x ID 0x%08x", pFloUser, pFloUser->userID));
                        COM_BasedListRemove(&(pFloUser->list));
                        delete pFloUser;
                    }
                    else
                    {
                        //
                        // At least one user still out there so keep flow
                        // control active or else we would suddenly send
                        // out a burst of data that might flood them
                        //
                        pStr->gotPong = TRUE;
                    }

                    //
                    // Move on to the next record in the list
                    //
                    pFloUser = (PFLO_USER)nextUser;
                }

                //
                // We have previously rejected an application request so we
                // had better call back now
                //
                if (pmgClient->flo.callBack != NULL)
                {
                    (*(pmgClient->flo.callBack))(pmgClient,
                                                 FLO_WAKEUP,
                                                 pStr->priority,
                                                 pStr->maxBytesInPipe);
                }
                pStr->eventNeeded = FALSE;
            }
        }
    }

    DebugExitVOID(FLO_CheckUsers);
}
  3021. //
  3022. // FLO_ReceivedPacket
  3023. //
  3024. void FLO_ReceivedPacket
  3025. (
  3026. PMG_CLIENT pmgClient,
  3027. PTSHR_FLO_CONTROL pPkt
  3028. )
  3029. {
  3030. BOOL canPing = TRUE;
  3031. PFLO_USER pFloUser;
  3032. BOOL userFound = FALSE;
  3033. UINT stream;
  3034. UINT curtime;
  3035. PFLO_STREAM_DATA pStr;
  3036. UINT callbackType = 0;
  3037. int latency;
  3038. UINT throughput;
  3039. DebugEntry(FLO_ReceivedPacket);
  3040. ValidateMGClient(pmgClient);
  3041. ASSERT(pmgClient->userAttached);
  3042. stream = pPkt->stream;
  3043. ASSERT(stream < FLO_MAX_STREAMS);
  3044. pStr = pmgClient->flo.pStrData[stream];
  3045. //
  3046. // If the stream CB has been freed up already then we can ignore any
  3047. // flow information pertaining to it.
  3048. //
  3049. if (pStr == NULL)
  3050. {
  3051. TRACE_OUT(("Found a null stream pointer for stream %u", stream));
  3052. DC_QUIT;
  3053. }
  3054. ValidateFLOStr(pStr);
  3055. curtime = GetTickCount();
  3056. //
  3057. // First we must locate the user for this ping/pong/pang
  3058. // Also, while we are doing it we can check to see if it is a pong and
  3059. // if so whether it is the last pong we need
  3060. //
  3061. pFloUser = (PFLO_USER)COM_BasedNextListField(&(pStr->users));
  3062. while (&(pFloUser->list) != &(pStr->users))
  3063. {
  3064. ValidateFLOUser(pFloUser);
  3065. if (pFloUser->userID == pPkt->userID)
  3066. {
  3067. userFound = TRUE;
  3068. //
  3069. // We have got a match so set up the last pong value
  3070. // Accumulate pong stats for query
  3071. //
  3072. if (pPkt->packetType == PACKET_TYPE_PONG)
  3073. {
  3074. pFloUser->lastPongRcvd = pPkt->pingPongID;
  3075. pFloUser->gotPong = TRUE;
  3076. pFloUser->numPongs++;
  3077. pFloUser->pongDelay += curtime - pStr->lastPingTime;
  3078. }
  3079. else
  3080. {
  3081. break;
  3082. }
  3083. }
  3084. //
  3085. // So, is it the final pong - are there any users with different
  3086. // pong required entries?
  3087. // Note that if the user has never sent us a pong then we don't
  3088. // reference their lastPongRcvd field at this stage.
  3089. //
  3090. if (pPkt->packetType == PACKET_TYPE_PONG)
  3091. {
  3092. if (pFloUser->gotPong &&
  3093. (pFloUser->lastPongRcvd != pStr->pingValue))
  3094. {
  3095. TRACE_OUT(("%u,%u - Entry 0x%08x has different ping id %u",
  3096. stream,
  3097. pFloUser->userID,
  3098. pFloUser,
  3099. pFloUser->lastPongRcvd));
  3100. canPing = FALSE;
  3101. }
  3102. }
  3103. pFloUser = (PFLO_USER)COM_BasedNextListField(&(pFloUser->list));
  3104. }
  3105. //
  3106. // If this is a new User then add them to the list
  3107. //
  3108. if (!userFound)
  3109. {
  3110. pFloUser = FLOAddUser(pPkt->userID, pStr);
  3111. //
  3112. // If this is a pong then we can set up the lastpong as well
  3113. //
  3114. if ((pFloUser != NULL) &&
  3115. (pPkt->packetType == PACKET_TYPE_PONG))
  3116. {
  3117. pFloUser->lastPongRcvd = pPkt->pingPongID;
  3118. }
  3119. }
  3120. //
  3121. // Now perform the actual packet specific processing
  3122. //
  3123. switch (pPkt->packetType)
  3124. {
  3125. //
  3126. // PING
  3127. //
  3128. // If this is a ping packet then just flag we must send a pong. If
  3129. // we failed to alloc a user CB then just ignore the ping and they
  3130. // will continue in blissful ignorance of our presence
  3131. //
  3132. case PACKET_TYPE_PING:
  3133. {
  3134. TRACE_OUT(("%u,%u - PING %u received",
  3135. stream, pPkt->userID, pPkt->pingPongID));
  3136. ValidateFLOUser(pFloUser);
  3137. pFloUser->sendPongID = pPkt->pingPongID;
  3138. if (pFloUser->rxPackets < FLO_MAX_RCV_PACKETS)
  3139. {
  3140. FLOPong(pmgClient, stream, pFloUser->userID, pPkt->pingPongID);
  3141. pFloUser->sentPongTime = curtime;
  3142. }
  3143. else
  3144. {
  3145. TRACE_OUT(("Receive backlog - just flagging pong needed"));
  3146. pFloUser->pongNeeded = TRUE;
  3147. }
  3148. }
  3149. break;
  3150. //
  3151. // PONG
  3152. //
  3153. // Flag we have got a pong from any user so we should start
  3154. // applying send flow control to this stream now (Within the stream
  3155. // we achieve per user granularity by ignoring those users that
  3156. // have never ponged when we inspect the stream byte count.)
  3157. //
  3158. case PACKET_TYPE_PONG:
  3159. {
  3160. pStr->gotPong = TRUE;
  3161. //
  3162. // Keep a note that we are receiving messages on this stream by
  3163. // moving nextPing on (but only if we have passed it)
  3164. //
  3165. if (curtime > pStr->nextPingTime)
  3166. {
  3167. pStr->nextPingTime = curtime;
  3168. }
  3169. //
  3170. // Update the user entry and schedule a ping if necessary
  3171. //
  3172. TRACE_OUT(("%u,%u - PONG %u received",
  3173. stream, pPkt->userID, pPkt->pingPongID));
  3174. //
  3175. // Check for readiness to send another ping This may be because
  3176. // this is the first users pong, in which case we should also send
  3177. // another ping when ready
  3178. //
  3179. if (canPing)
  3180. {
  3181. TRACE_OUT(("%u - PING scheduled, pipe was %d",
  3182. stream,
  3183. pStr->bytesInPipe));
  3184. //
  3185. // Reset byte count and ping readiness flag
  3186. //
  3187. pStr->bytesInPipe = 0;
  3188. pStr->pingNeeded = TRUE;
  3189. //
  3190. // Adjust the buffer size limit based on our current throughput
  3191. //
  3192. // If we hit the back pressure point and yet we are ahead of
  3193. // the target backlog then we should increase the buffer size
  3194. // to avoid constraining the pipe. If we have already
  3195. // increased the buffer size to our maximum value then try
  3196. // decreasing the tick delay. If we are already ticking at the
  3197. // max rate then we are going as fast as we can. If we make
  3198. // either of these adjustments then allow the next ping to flow
  3199. // immediately so that we can ramp up as fast as possible to
  3200. // LAN bandwidths.
  3201. //
  3202. // We dont need to do the decrease buffer checks if we have not
  3203. // gone into back pressure during the last pong cycle
  3204. //
  3205. if (pStr->eventNeeded)
  3206. {
  3207. TRACE_OUT(("We were in a back pressure situation"));
  3208. callbackType = FLO_WAKEUP;
  3209. TRACE_OUT(("Backlog %u denial delta %d ping delta %d",
  3210. pStr->backlog, curtime-pStr->lastDenialTime,
  3211. curtime-pStr->lastPingTime));
  3212. //
  3213. // The next is a little complex.
  3214. //
  3215. // If the turnaround of this ping pong is significantly
  3216. // less than our target then open the pipe up. But we must
  3217. // adjust to allow for the ping being sent at a quiet
  3218. // period, which we do by remembering when each ping is
  3219. // sent and, if we encounter a backlog situation, storing
  3220. // that ping time for future reference
  3221. //
  3222. // So the equation for latency is
  3223. //
  3224. // Pongtime-previous backlogged ping time
  3225. //
  3226. // The previous ping time is the that we sent prior to the
  3227. // last back pressure situation so there are two times in
  3228. // the control block, one for the last Ping time and one
  3229. // for the last but one ping time.
  3230. //
  3231. if ((int)(pStr->backlog/2 - curtime +
  3232. pStr->lastDenialTime) > 0)
  3233. {
  3234. //
  3235. // We are coping easily so increase the buffer to pump
  3236. // more data through. Predict the new buffer size
  3237. // based on the latency for the current backlog so that
  3238. // we don't artificially constrain the app. We do this
  3239. // by taking the observed latency, decrementing by a
  3240. // small factor to allow for the latency we might
  3241. // observe over the fastest possible link and then
  3242. // calculating the connection throughput.
  3243. //
  3244. // latency = curtime - lastDenialTime - fudge(100mS)
  3245. // amount sent = maxBytesInPipe (because we we were
  3246. // backed up)
  3247. // throughput = amount sent/latency (bytes/millisec)
  3248. // New buffer = throughput * target latency
  3249. //
  3250. if (pStr->maxBytesInPipe < pStr->DC_ABSMaxBytesInPipe)
  3251. {
  3252. latency = (curtime -
  3253. pStr->lastDenialTime -
  3254. 30);
  3255. if (latency <= 0)
  3256. {
  3257. latency = 1;
  3258. }
  3259. throughput = (pStr->maxBytesInPipe*8)/latency;
  3260. pStr->maxBytesInPipe = (throughput * pStr->backlog)/8;
  3261. TRACE_OUT(("Potential maxbytes of %d",
  3262. pStr->maxBytesInPipe));
  3263. if (pStr->maxBytesInPipe > pStr->DC_ABSMaxBytesInPipe)
  3264. {
  3265. pStr->maxBytesInPipe = pStr->DC_ABSMaxBytesInPipe;
  3266. }
  3267. TRACE_OUT((
  3268. "Modified buffer maxBytesInPipe up to %u "
  3269. "(0x%08x:%u)",
  3270. pStr->maxBytesInPipe,
  3271. pStr->channel,
  3272. pStr->priority));
  3273. callbackType = FLO_BUFFERMOD;
  3274. }
  3275. else
  3276. {
  3277. //
  3278. // We have hit our maximum allowed pipe size but
  3279. // are still backlogged and yet pings are going
  3280. // through acceptably.
  3281. //
  3282. // Our first action is to try reducing the ping
  3283. // time thus increasing out throughput.
  3284. //
  3285. // If we have already decreased the ping time to
  3286. // its minimum then we cannot do anything else. It
  3287. // is possible that the application parameters
  3288. // should be changed to increase the permissible
  3289. // throughput so log an alert to suggest this.
  3290. // however there are situations (input management)
  3291. // where we want some back pressure in order to
  3292. // prevent excessive cpu loading at the recipient.
  3293. //
  3294. // To increase the throughput either
  3295. //
  3296. // - Increase the maximum size of the stream. The
  3297. // disadvantage of this is that a low badwidth
  3298. // joiner may suddenly see a lot of high
  3299. // bandwidth data in the pipe. However this
  3300. // is the preferred solution in general, as
  3301. // it avoids having the pipe flooded with pings.
  3302. //
  3303. // - Reduce the target latency. This is a little
  3304. // dangerous because the latency is composed of
  3305. // the pre-queued data and the network turnaround
  3306. // time and if the network turnaround time
  3307. // approaches the target latency then the flow
  3308. // control will simply close the pipe right down
  3309. // irrespective of the achievable throughput.
  3310. //
  3311. pStr->maxBytesInPipe = pStr->DC_ABSMaxBytesInPipe;
  3312. pStr->pingTime = pStr->pingTime/2;
  3313. if (pStr->pingTime < FLO_MIN_PINGTIME)
  3314. {
  3315. pStr->pingTime = FLO_MIN_PINGTIME;
  3316. }
  3317. TRACE_OUT((
  3318. "Hit DC_ABS max - reduce ping time to %u",
  3319. pStr->pingTime));
  3320. }
  3321. //
  3322. // Allow the ping just scheduled to flow immediately
  3323. //
  3324. pStr->nextPingTime = curtime;
  3325. }
  3326. pStr->eventNeeded = FALSE;
  3327. }
  3328. //
  3329. // If we have exceeded our target latency at all then throttle
  3330. // back
  3331. //
  3332. if ((int)(pStr->backlog - curtime + pStr->lastPingTime) < 0)
  3333. {
  3334. pStr->maxBytesInPipe /= 2;
  3335. if (pStr->maxBytesInPipe < FLO_MIN_STREAMSIZE)
  3336. {
  3337. pStr->maxBytesInPipe = FLO_MIN_STREAMSIZE;
  3338. }
  3339. pStr->pingTime = pStr->pingTime * 2;
  3340. if (pStr->pingTime > FLO_INIT_PINGTIME)
  3341. {
  3342. pStr->pingTime = FLO_INIT_PINGTIME;
  3343. }
  3344. TRACE_OUT((
  3345. "Mod buffer maxBytesInPipe down to %u, ping to %u "
  3346. "(0x%08x:%u)",
  3347. pStr->maxBytesInPipe,
  3348. pStr->pingTime,
  3349. pStr->channel,
  3350. pStr->priority));
  3351. callbackType = FLO_BUFFERMOD;
  3352. }
  3353. //
  3354. // Now make athe callback if callbackType has been set
  3355. //
  3356. if ((callbackType != 0) &&
  3357. (pmgClient->flo.callBack != NULL))
  3358. {
  3359. (pmgClient->flo.callBack)(pmgClient,
  3360. callbackType,
  3361. pStr->priority,
  3362. pStr->maxBytesInPipe);
  3363. }
  3364. }
  3365. }
  3366. break;
  3367. //
  3368. // PANG
  3369. //
  3370. // Remove the user and continue
  3371. //
  3372. case PACKET_TYPE_PANG:
  3373. {
  3374. TRACE_OUT(("%u,%u - PANG received, removing user",
  3375. stream, pPkt->userID));
  3376. //
  3377. // Remove from the list
  3378. //
  3379. ValidateFLOUser(pFloUser);
  3380. TRACE_OUT(("Freeing FLO_USER 0x%08x ID 0x%08x", pFloUser, pFloUser->userID));
  3381. COM_BasedListRemove(&(pFloUser->list));
  3382. delete pFloUser;
  3383. //
  3384. // If we are currently waiting then generate an event for the
  3385. // app to get it moving again
  3386. //
  3387. if ((pStr->eventNeeded) &&
  3388. (pmgClient->flo.callBack != NULL))
  3389. {
  3390. TRACE_OUT(("Waking up the app because user has left"));
  3391. (*(pmgClient->flo.callBack))(pmgClient,
  3392. FLO_WAKEUP,
  3393. pStr->priority,
  3394. pStr->maxBytesInPipe);
  3395. pStr->eventNeeded = FALSE;
  3396. }
  3397. }
  3398. break;
  3399. //
  3400. // UNKNOWN
  3401. //
  3402. // Just trace alert and press on
  3403. //
  3404. default:
  3405. {
  3406. WARNING_OUT(("Invalid packet type 0x%08x", pPkt->packetType));
  3407. }
  3408. break;
  3409. }
  3410. DC_EXIT_POINT:
  3411. DebugExitVOID(FLO_ReceivedPacket);
  3412. }
  3413. //
  3414. // FLO_AllocReceive
  3415. //
  3416. void FLO_AllocReceive
  3417. (
  3418. PMG_CLIENT pmgClient,
  3419. UINT priority,
  3420. NET_CHANNEL_ID channel,
  3421. UINT userID
  3422. )
  3423. {
  3424. UINT stream;
  3425. PFLO_USER pFloUser;
  3426. BOOL userFound = FALSE;
  3427. PFLO_STREAM_DATA pStr;
  3428. UINT curtime;
  3429. DebugEntry(FLO_AllocReceive);
  3430. ValidateMGClient(pmgClient);
  3431. ASSERT(pmgClient->userAttached);
  3432. ASSERT(priority != NET_TOP_PRIORITY);
  3433. //
  3434. // Convert channel and priority into stream
  3435. //
  3436. stream = FLOGetStream(pmgClient, channel, priority, &pStr);
  3437. //
  3438. // Only process controlled streams
  3439. //
  3440. if (stream == FLO_NOT_CONTROLLED)
  3441. {
  3442. DC_QUIT;
  3443. }
  3444. //
  3445. // First we must locate the user
  3446. //
  3447. ValidateFLOStr(pStr);
  3448. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pStr->users),
  3449. (void**)&pFloUser, FIELD_OFFSET(FLO_USER, list), FIELD_OFFSET(FLO_USER, userID),
  3450. (DWORD)userID, FIELD_SIZE(FLO_USER, userID));
  3451. //
  3452. // SFR6101: If this is a new User then add them to the list
  3453. //
  3454. if (pFloUser == NULL)
  3455. {
  3456. TRACE_OUT(("Message from user 0x%08x who is not flow controlled", userID));
  3457. pFloUser = FLOAddUser(userID, pStr);
  3458. }
  3459. //
  3460. // If we failed to allocate a usr CB then just ignore for now
  3461. //
  3462. if (pFloUser != NULL)
  3463. {
  3464. ValidateFLOUser(pFloUser);
  3465. //
  3466. // Add in the new receive packet usage
  3467. //
  3468. pFloUser->rxPackets++;
  3469. TRACE_OUT(("Num outstanding receives on stream %u now %u",
  3470. stream, pFloUser->rxPackets));
  3471. //
  3472. // Now check that we have not got some kind of creep
  3473. //
  3474. if (pFloUser->rxPackets > FLO_MAX_RCV_PKTS_CREEP)
  3475. {
  3476. WARNING_OUT(("Creep? Stream %u has %u unacked rcv pkts",
  3477. stream, pFloUser->rxPackets));
  3478. }
  3479. //
  3480. // Finally check to see that we are responding OK to this person
  3481. //
  3482. curtime = GetTickCount();
  3483. if ((pFloUser->pongNeeded) &&
  3484. (curtime - pFloUser->sentPongTime > (FLO_MAX_WAIT_TIME/4)))
  3485. {
  3486. TRACE_OUT(("Send keepalive pong"));
  3487. FLOPong(pmgClient, stream, pFloUser->userID, pFloUser->sendPongID);
  3488. pFloUser->sentPongTime = curtime;
  3489. }
  3490. }
  3491. DC_EXIT_POINT:
  3492. DebugExitVOID(FLO_AllocReceive);
  3493. }
  3494. //
  3495. // FLO_FreeReceive
  3496. //
  3497. void FLO_FreeReceive
  3498. (
  3499. PMG_CLIENT pmgClient,
  3500. NET_PRIORITY priority,
  3501. NET_CHANNEL_ID channel,
  3502. UINT userID
  3503. )
  3504. {
  3505. UINT stream;
  3506. PFLO_USER pFloUser;
  3507. PFLO_STREAM_DATA pStr;
  3508. BOOL userFound = FALSE;
  3509. DebugEntry(FLO_FreeReceive);
  3510. ValidateMGClient(pmgClient);
  3511. ASSERT(pmgClient->userAttached);
  3512. ASSERT(priority != NET_TOP_PRIORITY);
  3513. //
  3514. // Convert channel and priority into stream
  3515. //
  3516. stream = FLOGetStream(pmgClient, channel, priority, &pStr);
  3517. //
  3518. // Only process controlled streams
  3519. //
  3520. if (stream != FLO_NOT_CONTROLLED)
  3521. {
  3522. ValidateFLOStr(pStr);
  3523. //
  3524. // First we must locate the user
  3525. //
  3526. pFloUser = (PFLO_USER)COM_BasedNextListField(&(pStr->users));
  3527. while (&(pFloUser->list) != &(pStr->users))
  3528. {
  3529. ValidateFLOUser(pFloUser);
  3530. if (pFloUser->userID == userID)
  3531. {
  3532. userFound = TRUE;
  3533. break;
  3534. }
  3535. pFloUser = (PFLO_USER)COM_BasedNextListField(&(pFloUser->list));
  3536. }
  3537. //
  3538. // If we do not find the user record then two things may have
  3539. // happened.
  3540. // - They have joined the channel and immediately sent data
  3541. // - They were removed as being delinquent and are now sending
  3542. // data again
  3543. // - We failed to add them to our user list
  3544. // Try and allocate the user entry now
  3545. // (This will start tracking receive buffer space, but this user
  3546. // will not participate in our send flow control until we receive
  3547. // a pong from them and set "gotpong" in their FLO_USER CB.)
  3548. //
  3549. if (!userFound)
  3550. {
  3551. pFloUser = FLOAddUser(userID, pStr);
  3552. }
  3553. if (pFloUser != NULL)
  3554. {
  3555. ValidateFLOUser(pFloUser);
  3556. //
  3557. // Check that we have not got some kind of creep
  3558. //
  3559. if (pFloUser->rxPackets == 0)
  3560. {
  3561. WARNING_OUT(("Freed too many buffers for user 0x%08x on str %u",
  3562. userID, stream));
  3563. }
  3564. else
  3565. {
  3566. pFloUser->rxPackets--;
  3567. TRACE_OUT(("Num outstanding receives now %u",
  3568. pFloUser->rxPackets));
  3569. }
  3570. //
  3571. // Now we must Pong if there is a pong pending and we have
  3572. // moved below the high water mark
  3573. //
  3574. if ((pFloUser->pongNeeded) &&
  3575. (pFloUser->rxPackets < FLO_MAX_RCV_PACKETS))
  3576. {
  3577. FLOPong(pmgClient, stream, pFloUser->userID, pFloUser->sendPongID);
  3578. pFloUser->pongNeeded = FALSE;
  3579. pFloUser->sentPongTime = GetTickCount();
  3580. }
  3581. }
  3582. }
  3583. DebugExitVOID(FLO_FreeReceive);
  3584. }
  3585. //
  3586. // FLOPong()
  3587. //
  3588. void FLOPong
  3589. (
  3590. PMG_CLIENT pmgClient,
  3591. UINT stream,
  3592. UINT userID,
  3593. UINT pongID
  3594. )
  3595. {
  3596. PTSHR_FLO_CONTROL pFlo;
  3597. PMG_BUFFER pmgBuffer;
  3598. UINT rc;
  3599. DebugEntry(FLOPong);
  3600. ValidateMGClient(pmgClient);
  3601. ASSERT(pmgClient->userAttached);
  3602. rc = MGNewDataBuffer(pmgClient,
  3603. MG_TX_PONG,
  3604. sizeof(TSHR_FLO_CONTROL) + sizeof(MG_INT_PKT_HEADER),
  3605. &pmgBuffer);
  3606. if (rc != 0)
  3607. {
  3608. WARNING_OUT(("MGNewDataBuffer failed in FLOPong"));
  3609. DC_QUIT;
  3610. }
  3611. pFlo = (PTSHR_FLO_CONTROL)pmgBuffer->pDataBuffer;
  3612. pmgBuffer->pPktHeader->header.pktLength = TSHR_PKT_FLOW;
  3613. //
  3614. // Set up pong contents
  3615. //
  3616. pFlo->packetType = PACKET_TYPE_PONG;
  3617. pFlo->userID = pmgClient->userIDMCS;
  3618. pFlo->stream = (BYTE)stream;
  3619. pFlo->pingPongID = (BYTE)pongID;
  3620. pmgBuffer->channelId = (ChannelID)userID;
  3621. pmgBuffer->priority = MG_PRIORITY_HIGHEST;
  3622. //
  3623. // Now decouple the send request. Note that we must put the pong at
  3624. // the back of the request queue even though we want it to flow at
  3625. // high priority because otherwise there are certain circumstances
  3626. // where we get pong reversal due to receipt of multiple pings
  3627. //
  3628. TRACE_OUT(("Inserting pong message 0x%08x at head of pending chain", pmgBuffer));
  3629. COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));
  3630. UT_PostEvent(pmgClient->putTask,
  3631. pmgClient->putTask,
  3632. NO_DELAY,
  3633. NET_MG_SCHEDULE,
  3634. 0,
  3635. 0);
  3636. TRACE_OUT(("%u,0x%08x - PONG %u scheduled",
  3637. pFlo->stream, pmgBuffer->channelId, pFlo->pingPongID));
  3638. DC_EXIT_POINT:
  3639. DebugExitVOID(FLOPong);
  3640. }
  3641. //
  3642. // FLOPing()
  3643. //
  3644. void FLOPing
  3645. (
  3646. PMG_CLIENT pmgClient,
  3647. UINT stream,
  3648. UINT curtime
  3649. )
  3650. {
  3651. PFLO_STREAM_DATA pStr;
  3652. PMG_BUFFER pmgBuffer;
  3653. PTSHR_FLO_CONTROL pFlo;
  3654. UINT rc;
  3655. DebugEntry(FLOPing);
  3656. ValidateMGClient(pmgClient);
  3657. ASSERT(pmgClient->userAttached);
  3658. ASSERT(stream < FLO_MAX_STREAMS);
  3659. pStr = pmgClient->flo.pStrData[stream];
  3660. ValidateFLOStr(pStr);
  3661. rc = MGNewDataBuffer(pmgClient,
  3662. MG_TX_PING,
  3663. sizeof(TSHR_FLO_CONTROL)+sizeof(MG_INT_PKT_HEADER),
  3664. &pmgBuffer);
  3665. if (rc != 0)
  3666. {
  3667. WARNING_OUT(("MGNewDataBuffer failed in FLOPing"));
  3668. DC_QUIT;
  3669. }
  3670. //
  3671. // Flag ping not needed to avoid serialization problems across the
  3672. // sendmessage!
  3673. //
  3674. pStr->pingNeeded = FALSE;
  3675. pFlo = (PTSHR_FLO_CONTROL)pmgBuffer->pDataBuffer;
  3676. pmgBuffer->pPktHeader->header.pktLength = TSHR_PKT_FLOW;
  3677. //
  3678. // Set up ping contents
  3679. //
  3680. pFlo->packetType = PACKET_TYPE_PING;
  3681. pFlo->userID = pmgClient->userIDMCS;
  3682. pFlo->stream = (BYTE)stream;
  3683. pmgBuffer->channelId = (ChannelID)pStr->channel;
  3684. pmgBuffer->priority = (NET_PRIORITY)pStr->priority;
  3685. //
  3686. // Generate the next ping value to be used
  3687. //
  3688. pFlo->pingPongID = (BYTE)(pStr->pingValue + 1);
  3689. //
  3690. // Now decouple the send request
  3691. //
  3692. TRACE_OUT(("Inserting ping message 0x%08x into pending chain", pmgBuffer));
  3693. COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));
  3694. UT_PostEvent(pmgClient->putTask,
  3695. pmgClient->putTask,
  3696. NO_DELAY,
  3697. NET_MG_SCHEDULE,
  3698. 0,
  3699. 0);
  3700. //
  3701. // Update flow control variables
  3702. //
  3703. pStr->pingValue = ((pStr->pingValue + 1) & 0xFF);
  3704. pStr->lastPingTime = curtime;
  3705. pStr->nextPingTime = curtime + pStr->pingTime;
  3706. pStr->lastDenialTime = pStr->curDenialTime;
  3707. TRACE_OUT(("%u - PING %u sched, next in %u mS (0x%08x:%u)",
  3708. pFlo->stream,
  3709. pStr->pingValue,
  3710. pStr->pingTime,
  3711. pStr->channel,
  3712. pStr->priority));
  3713. DC_EXIT_POINT:
  3714. DebugExitVOID(FLOPing);
  3715. }
  3716. //
  3717. // FLOPang()
  3718. //
  3719. void FLOPang
  3720. (
  3721. PMG_CLIENT pmgClient,
  3722. UINT stream,
  3723. UINT userID
  3724. )
  3725. {
  3726. PMG_BUFFER pmgBuffer;
  3727. PTSHR_FLO_CONTROL pFlo;
  3728. UINT rc;
  3729. DebugEntry(FLOPang);
  3730. ValidateMGClient(pmgClient);
  3731. ASSERT(pmgClient->userAttached);
  3732. rc = MGNewDataBuffer(pmgClient,
  3733. MG_TX_PANG,
  3734. sizeof(TSHR_FLO_CONTROL) + sizeof(MG_INT_PKT_HEADER),
  3735. &pmgBuffer);
  3736. if (rc != 0)
  3737. {
  3738. WARNING_OUT(("MGNewDataBuffer failed in FLOPang"));
  3739. DC_QUIT;
  3740. }
  3741. pFlo = (PTSHR_FLO_CONTROL)pmgBuffer->pDataBuffer;
  3742. pmgBuffer->pPktHeader->header.pktLength = TSHR_PKT_FLOW;
  3743. //
  3744. // Set up pang contents
  3745. //
  3746. pFlo->packetType = PACKET_TYPE_PANG;
  3747. pFlo->userID = pmgClient->userIDMCS;
  3748. pFlo->stream = (BYTE)stream;
  3749. pFlo->pingPongID = 0;
  3750. pmgBuffer->channelId = (ChannelID)userID;
  3751. pmgBuffer->priority = MG_PRIORITY_HIGHEST;
  3752. //
  3753. // Now decouple the send request
  3754. //
  3755. TRACE_OUT(("Inserting pang message 0x%08x into pending chain", pmgBuffer));
  3756. COM_BasedListInsertBefore(&(pmgClient->pendChain),
  3757. &(pmgBuffer->pendChain));
  3758. UT_PostEvent(pmgClient->putTask,
  3759. pmgClient->putTask,
  3760. NO_DELAY,
  3761. NET_MG_SCHEDULE,
  3762. 0,
  3763. 0);
  3764. DC_EXIT_POINT:
  3765. DebugExitVOID(FLOPang);
  3766. }
  3767. //
  3768. // FLOGetStream()
  3769. //
  3770. UINT FLOGetStream
  3771. (
  3772. PMG_CLIENT pmgClient,
  3773. NET_CHANNEL_ID channel,
  3774. UINT priority,
  3775. PFLO_STREAM_DATA * ppStr
  3776. )
  3777. {
  3778. UINT i;
  3779. UINT cStreams;
  3780. DebugEntry(FLOGetStream);
  3781. ValidateMGClient(pmgClient);
  3782. ASSERT(pmgClient->userAttached);
  3783. ASSERT(priority != NET_TOP_PRIORITY);
  3784. cStreams = pmgClient->flo.numStreams;
  3785. ASSERT(cStreams <= FLO_MAX_STREAMS);
  3786. //
  3787. // Scan the list of streams for a match.
  3788. //
  3789. for (i = 0; i < cStreams; i++)
  3790. {
  3791. //
  3792. // Check to ensure that this is a valid stream.
  3793. //
  3794. if (pmgClient->flo.pStrData[i] == NULL)
  3795. {
  3796. continue;
  3797. }
  3798. ValidateFLOStr(pmgClient->flo.pStrData[i]);
  3799. //
  3800. // If the channel and priority match then we have found the stream.
  3801. //
  3802. if ((pmgClient->flo.pStrData[i]->channel == channel) &&
  3803. (pmgClient->flo.pStrData[i]->priority == priority))
  3804. {
  3805. break;
  3806. }
  3807. }
  3808. //
  3809. // If we hit the end of the list then return FLO_NOT_CONTROLLED.
  3810. //
  3811. if (i == cStreams)
  3812. {
  3813. i = FLO_NOT_CONTROLLED;
  3814. *ppStr = NULL;
  3815. TRACE_OUT(("Uncontrolled stream (0x%08x:%u)",
  3816. channel,
  3817. priority));
  3818. }
  3819. else
  3820. {
  3821. *ppStr = pmgClient->flo.pStrData[i];
  3822. TRACE_OUT(("Controlled stream %u (0x%08x:%u)",
  3823. i,
  3824. channel,
  3825. priority));
  3826. }
  3827. DebugExitDWORD(FLOGetStream, i);
  3828. return(i);
  3829. }
  3830. //
  3831. // FUNCTION: FLOAddUser
  3832. //
  3833. // DESCRIPTION:
  3834. //
  3835. // Add a new remote user entry for a stream.
  3836. //
  3837. // PARAMETERS:
  3838. //
  3839. // userID - ID of the new user (single member channel ID)
  3840. // pStr - pointer to the stream to receive the new user.
  3841. //
  3842. // RETURNS: Nothing
  3843. //
  3844. //
  3845. PFLO_USER FLOAddUser
  3846. (
  3847. UINT userID,
  3848. PFLO_STREAM_DATA pStr
  3849. )
  3850. {
  3851. PFLO_USER pFloUser;
  3852. DebugEntry(FLOAddUser);
  3853. ValidateFLOStr(pStr);
  3854. //
  3855. // Allocate memory for the new user entry
  3856. //
  3857. pFloUser = new FLO_USER;
  3858. if (!pFloUser)
  3859. {
  3860. WARNING_OUT(("FLOAddUser failed; out of memory"));
  3861. }
  3862. else
  3863. {
  3864. ZeroMemory(pFloUser, sizeof(*pFloUser));
  3865. SET_STAMP(pFloUser, FLOUSER);
  3866. //
  3867. // Set up the new record
  3868. //
  3869. TRACE_OUT(("UserID %u - New user, CB = 0x%08x", userID, pFloUser));
  3870. pFloUser->userID = (TSHR_UINT16)userID;
  3871. //
  3872. // Add the new User to the end of the list
  3873. //
  3874. COM_BasedListInsertBefore(&(pStr->users), &(pFloUser->list));
  3875. }
  3876. DebugExitVOID(FLOAddUser);
  3877. return(pFloUser);
  3878. }
  3879. //
  3880. // FLO_RemoveUser()
  3881. //
  3882. void FLO_RemoveUser
  3883. (
  3884. PMG_CLIENT pmgClient,
  3885. UINT userID
  3886. )
  3887. {
  3888. PFLO_USER pFloUser;
  3889. PBASEDLIST nextUser;
  3890. UINT stream;
  3891. UINT cStreams;
  3892. PFLO_STREAM_DATA pStr;
  3893. DebugEntry(FLO_RemoveUser);
  3894. ValidateMGClient(pmgClient);
  3895. ASSERT(pmgClient->userAttached);
  3896. cStreams = pmgClient->flo.numStreams;
  3897. ASSERT(cStreams <= FLO_MAX_STREAMS);
  3898. //
  3899. // Check each stream
  3900. //
  3901. for (stream = 0; stream < cStreams; stream++)
  3902. {
  3903. if (pmgClient->flo.pStrData[stream] == NULL)
  3904. {
  3905. continue;
  3906. }
  3907. pStr = pmgClient->flo.pStrData[stream];
  3908. ValidateFLOStr(pStr);
  3909. //
  3910. // Remove this user from the queue, if present
  3911. //
  3912. pFloUser = (PFLO_USER)COM_BasedNextListField(&(pStr->users));
  3913. while (&(pFloUser->list) != &(pStr->users))
  3914. {
  3915. ValidateFLOUser(pFloUser);
  3916. //
  3917. // Address the follow on record before we free the current
  3918. //
  3919. nextUser = COM_BasedNextListField(&(pFloUser->list));
  3920. //
  3921. // Free the current record, if necessary
  3922. //
  3923. if (pFloUser->userID == userID)
  3924. {
  3925. //
  3926. // Remove from the list
  3927. //
  3928. TRACE_OUT(("Freeing FLO_USER 0x%08x ID 0x%08x", pFloUser, pFloUser->userID));
  3929. COM_BasedListRemove(&(pFloUser->list));
  3930. delete pFloUser;
  3931. TRACE_OUT(("Stream %u - resetting due to user disappearance",
  3932. stream));
  3933. ValidateFLOStr(pStr);
  3934. pStr->bytesInPipe = 0;
  3935. pStr->pingNeeded = TRUE;
  3936. pStr->nextPingTime = GetTickCount();
  3937. pStr->gotPong = FALSE;
  3938. pStr->eventNeeded = FALSE;
  3939. break;
  3940. }
  3941. //
  3942. // Move on to the next record in the list
  3943. //
  3944. pFloUser = (PFLO_USER)nextUser;
  3945. }
  3946. //
  3947. // Now wake the app again for this stream
  3948. //
  3949. if (pmgClient->flo.callBack != NULL)
  3950. {
  3951. (*(pmgClient->flo.callBack))(pmgClient,
  3952. FLO_WAKEUP,
  3953. pStr->priority,
  3954. pStr->maxBytesInPipe);
  3955. }
  3956. }
  3957. DebugExitVOID(FLO_RemoveUser);
  3958. }
//
// FLOStreamEndControl()
//
// End flow control on one stream: detach any pending send buffers from
// the stream, pang every remote user, free all user records, free the
// stream data itself, and shrink flo.numStreams if the highest slots
// are now empty.
//
void FLOStreamEndControl
(
    PMG_CLIENT pmgClient,
    UINT stream
)
{
    PFLO_USER pFloUser;
    PFLO_STREAM_DATA pStr;
    PMG_BUFFER pmgBuffer;

    DebugEntry(FLOStreamEndControl);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    //
    // Convert the stream id into a stream pointer.
    //
    ASSERT(stream < FLO_MAX_STREAMS);
    pStr = pmgClient->flo.pStrData[stream];
    ValidateFLOStr(pStr);

    //
    // Trace out that we are about to end flow control.
    //
    TRACE_OUT(("Flow control about to end, stream %u, (0x%08x:%u)",
        stream,
        pStr->channel,
        pStr->priority));

    //
    // First check to see if there are any outstanding buffer CBs with
    // pStr set to this stream and reset pStr to null. We need to do this
    // as we may then try to dereference pStr when we come to send these
    // buffers.
    //
    // NOTE(review): this loop clears pStr on EVERY MG_TX_BUFFER in the
    // pending chain, not just buffers whose pStr matches this stream -
    // confirm whether that is intentional or should be filtered on
    // (pmgBuffer->pStr == pStr).
    //
    pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->pendChain),
        FIELD_OFFSET(MG_BUFFER, pendChain));
    while (pmgBuffer != NULL)
    {
        ValidateMGBuffer(pmgBuffer);

        if (pmgBuffer->type == MG_TX_BUFFER)
        {
            //
            // Set the stream pointer to NULL.
            //
            pmgBuffer->pStr = NULL;
            TRACE_OUT(("Nulling stream pointer in bufferCB: (0x%08x:%u)",
                pStr->channel, pStr->priority));
        }

        pmgBuffer = (PMG_BUFFER)COM_BasedListNext(&(pmgClient->pendChain),
            pmgBuffer, FIELD_OFFSET(MG_BUFFER, pendChain));
    }

    //
    // Now free up the list of users.
    //
    // Each iteration re-reads the list head, because the current record
    // is removed and freed inside the loop.
    //
    pFloUser = (PFLO_USER)COM_BasedListFirst(&(pStr->users), FIELD_OFFSET(FLO_USER, list));
    while (pFloUser != NULL)
    {
        ValidateFLOUser(pFloUser);

        //
        // First send the remote user a "pang" to tell them we are not
        // interested in their data any more.
        //
        FLOPang(pmgClient, stream, pFloUser->userID);

        //
        // Remove the remote user from the list.
        //
        TRACE_OUT(("Freeing FLO_USER 0x%08x ID 0x%08x", pFloUser, pFloUser->userID));
        COM_BasedListRemove(&(pFloUser->list));
        delete pFloUser;

        //
        // Now get the next user in the list.
        //
        ValidateFLOStr(pStr);
        pFloUser = (PFLO_USER)COM_BasedListFirst(&(pStr->users), FIELD_OFFSET(FLO_USER, list));
    }

    //
    // Free the stream data.
    //
    ASSERT(pStr == pmgClient->flo.pStrData[stream]);
    TRACE_OUT(("Freeing FLO_STREAM_DATA 0x%08x", pStr));
    delete pStr;
    pmgClient->flo.pStrData[stream] = NULL;

    //
    // Adjust numStreams (if required)
    //
    // Only when the freed stream was the highest-numbered one: shrink
    // numStreams past any trailing NULL slots.
    //
    if (stream == (pmgClient->flo.numStreams - 1))
    {
        while ((pmgClient->flo.numStreams > 0) &&
               (pmgClient->flo.pStrData[pmgClient->flo.numStreams - 1] == NULL))
        {
            pmgClient->flo.numStreams--;
        }
        TRACE_OUT(("numStreams %u", pmgClient->flo.numStreams));
    }

    DebugExitVOID(FLOStreamEndControl);
}
  4055. //
  4056. // MGNewCorrelator()
  4057. //
  4058. // Gets a new correlator for events to a particular MGC client
  4059. //
  4060. void MGNewCorrelator
  4061. (
  4062. PMG_CLIENT pmgClient,
  4063. WORD * pCorrelator
  4064. )
  4065. {
  4066. ValidateMGClient(pmgClient);
  4067. pmgClient->joinNextCorr++;
  4068. if (pmgClient->joinNextCorr == 0)
  4069. {
  4070. pmgClient->joinNextCorr++;
  4071. }
  4072. *pCorrelator = pmgClient->joinNextCorr;
  4073. }