Leaked source code of Windows Server 2003


#include "precomp.h"

//
// MGC.CPP
// MCS Glue Layer, legacy from simultaneous R.11 and T.120 support
//
// Copyright(c) Microsoft 1997-
//

#define MLZ_FILE_ZONE  ZONE_NET

//
//
// CONSTANT DATA
//
// These arrays map between MCAT and DC-Groupware constants. They are not
// in a separate data file since they are only referenced from this source
// file.
//
//
UINT McsErrToNetErr(UINT rcMCS);

const UINT c_RetCodeMap1[] =
{
    0,
    NET_RC_MGC_NOT_SUPPORTED,
    NET_RC_MGC_NOT_INITIALIZED,
    NET_RC_MGC_ALREADY_INITIALIZED,
    NET_RC_MGC_INIT_FAIL,
    NET_RC_MGC_INVALID_REMOTE_ADDRESS,
    NET_RC_NO_MEMORY,
    NET_RC_MGC_CALL_FAILED,
    NET_RC_MGC_NOT_SUPPORTED,
    NET_RC_MGC_NOT_SUPPORTED,
    NET_RC_MGC_NOT_SUPPORTED,   // security failed
};

const UINT c_RetCodeMap2[] =
{
    NET_RC_MGC_DOMAIN_IN_USE,
    NET_RC_MGC_INVALID_DOMAIN,
    NET_RC_MGC_NOT_ATTACHED,
    NET_RC_MGC_INVALID_USER_HANDLE,
    NET_RC_MGC_TOO_MUCH_IN_USE,
    NET_RC_MGC_INVALID_CONN_HANDLE,
    NET_RC_MGC_INVALID_UP_DOWN_PARM,
    NET_RC_MGC_NOT_SUPPORTED,
    NET_RC_MGC_TOO_MUCH_IN_USE
};

#define MG_NUM_OF_MCS_RESULTS   15
#define MG_INVALID_MCS_RESULT   MG_NUM_OF_MCS_RESULTS

NET_RESULT c_ResultMap[MG_NUM_OF_MCS_RESULTS+1] =
{
    NET_RESULT_OK,
    NET_RESULT_NOK,
    NET_RESULT_NOK,
    NET_RESULT_CHANNEL_UNAVAILABLE,
    NET_RESULT_DOMAIN_UNAVAILABLE,
    NET_RESULT_NOK,
    NET_RESULT_REJECTED,
    NET_RESULT_NOK,
    NET_RESULT_NOK,
    NET_RESULT_TOKEN_ALREADY_GRABBED,
    NET_RESULT_TOKEN_NOT_OWNED,
    NET_RESULT_NOK,
    NET_RESULT_NOK,
    NET_RESULT_NOT_SPECIFIED,
    NET_RESULT_USER_REJECTED,
    NET_RESULT_UNKNOWN
};
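
//
// Illustrative sketch (not part of the original file): TranslateResult(),
// used by MGCallback below, presumably consults this table, clamping
// out-of-range MCS results to MG_INVALID_MCS_RESULT so they map to
// NET_RESULT_UNKNOWN.  Its likely shape, stated as an assumption:
//
//     NET_RESULT TranslateResult(UINT mcsResult)
//     {
//         if (mcsResult > MG_NUM_OF_MCS_RESULTS)
//         {
//             mcsResult = MG_INVALID_MCS_RESULT;
//         }
//         return c_ResultMap[mcsResult];
//     }
//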
//
// MG_Register()
//
BOOL MG_Register
(
    MGTASK task,
    PMG_CLIENT * ppmgClient,
    PUT_CLIENT putTask
)
{
    PMG_CLIENT pmgClient = NULL;
    CMTASK cmTask;
    BOOL rc = FALSE;

    DebugEntry(MG_Register);

    UT_Lock(UTLOCK_T120);

    ASSERT(task >= MGTASK_FIRST);
    ASSERT(task < MGTASK_MAX);

    //
    // Check the putTask passed in:
    //
    ValidateUTClient(putTask);

    //
    // Does this already exist?
    //
    if (g_amgClients[task].putTask != NULL)
    {
        ERROR_OUT(("MG task %d already exists", task));
        DC_QUIT;
    }

    pmgClient = &(g_amgClients[task]);
    ZeroMemory(pmgClient, sizeof(MG_CLIENT));
    pmgClient->putTask = putTask;

    //
    // Register an exit procedure
    //
    UT_RegisterExit(putTask, MGExitProc, pmgClient);
    pmgClient->exitProcReg = TRUE;

    //
    // We register a high priority event handler (the join by key handler)
    // to intercept various events which are generated as part of the join
    // by key processing. We register it now, before the call to
    // MG_ChannelJoin below, to prevent events which we can't stop from
    // going to the client if UT_RegisterEvent fails. This high priority
    // handler also looks after our internal scheduling of pending
    // requests.
    //
    UT_RegisterEvent(putTask, MGEventHandler, pmgClient, UT_PRIORITY_OBMAN);
    pmgClient->eventProcReg = TRUE;

    //
    // Register our hidden event handler for the client (the parameter to
    // be passed to the event handler is the pointer to the client CB):
    //
    UT_RegisterEvent(putTask, MGLongStopHandler, pmgClient, UT_PRIORITY_NETWORK);
    pmgClient->lowEventProcReg = TRUE;

    //
    // Register as a call manager secondary.
    //
    switch (task)
    {
        case MGTASK_OM:
            cmTask = CMTASK_OM;
            break;

        case MGTASK_DCS:
            cmTask = CMTASK_DCS;
            break;

        default:
            ASSERT(FALSE);
            ERROR_OUT(("Invalid task %d", task));
            DC_QUIT;
    }

    if (!CMS_Register(putTask, cmTask, &(pmgClient->pcmClient)))
    {
        ERROR_OUT(("CMS_Register failed"));
        DC_QUIT;
    }

    rc = TRUE;

DC_EXIT_POINT:
    *ppmgClient = pmgClient;

    UT_Unlock(UTLOCK_T120);

    DebugExitBOOL(MG_Register, rc);
    return(rc);
}
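
//
// Usage sketch (illustrative, not part of the original file): each task
// registers once at startup, e.g.
//
//     PMG_CLIENT pmgClient;
//     if (!MG_Register(MGTASK_OM, &pmgClient, putTask))
//     {
//         // registration failed; no MG services are available
//     }
//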
//
// MG_Deregister(...)
//
void MG_Deregister(PMG_CLIENT * ppmgClient)
{
    PMG_CLIENT pmgClient;

    DebugEntry(MG_Deregister);

    UT_Lock(UTLOCK_T120);

    ASSERT(ppmgClient);
    pmgClient = *ppmgClient;
    ValidateMGClient(pmgClient);

    MGExitProc(pmgClient);

    //
    // Dereg CMS handler. In abnormal situations, the CMS exit proc will
    // clean it up for us.
    //
    if (pmgClient->pcmClient)
    {
        CMS_Deregister(&pmgClient->pcmClient);
    }

    *ppmgClient = NULL;

    UT_Unlock(UTLOCK_T120);
    DebugExitVOID(MG_Deregister);
}

//
// MGExitProc()
//
void CALLBACK MGExitProc(LPVOID uData)
{
    PMG_CLIENT pmgClient = (PMG_CLIENT)uData;
    PMG_BUFFER pmgBuffer;

    DebugEntry(MGExitProc);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    //
    // If the client has attached, detach it
    //
    if (pmgClient->userAttached)
    {
        MG_Detach(pmgClient);
    }

    //
    // Free all buffers the client may be using:
    //
    pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->buffers),
        FIELD_OFFSET(MG_BUFFER, clientChain));
    while (pmgBuffer != NULL)
    {
        ValidateMGBuffer(pmgBuffer);

        //
        // This implicitly frees any user memory or MCS memory associated
        // with the buffer CB.
        //
        MGFreeBuffer(pmgClient, &pmgBuffer);

        //
        // MGFreeBuffer removed this CB from the list, so we get the first
        // one in what's left of the list - if the list is now empty, this
        // will give us NULL and we will break out of the while loop:
        //
        pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->buffers),
            FIELD_OFFSET(MG_BUFFER, clientChain));
    }

    //
    // Deregister our event handler and exit procedure:
    //
    if (pmgClient->exitProcReg)
    {
        UT_DeregisterExit(pmgClient->putTask, MGExitProc, pmgClient);
        pmgClient->exitProcReg = FALSE;
    }

    if (pmgClient->lowEventProcReg)
    {
        UT_DeregisterEvent(pmgClient->putTask, MGLongStopHandler, pmgClient);
        pmgClient->lowEventProcReg = FALSE;
    }

    if (pmgClient->eventProcReg)
    {
        UT_DeregisterEvent(pmgClient->putTask, MGEventHandler, pmgClient);
        pmgClient->eventProcReg = FALSE;
    }

    //
    // We should only ever be asked to free a client CB which has had all
    // of its child resources already freed, so do a quick sanity check:
    //
    ASSERT(pmgClient->buffers.next == 0);

    //
    // Set the putTask to NULL; that's how we know if a client is in use or
    // not.
    //
    pmgClient->putTask = NULL;

    UT_Unlock(UTLOCK_T120);
    DebugExitVOID(MGExitProc);
}

//
// MG_Attach(...)
//
UINT MG_Attach
(
    PMG_CLIENT pmgClient,
    UINT_PTR callID,
    PNET_FLOW_CONTROL pFlowControl
)
{
    UINT rc = 0;

    DebugEntry(MG_Attach);

    UT_Lock(UTLOCK_T120);

    ValidateCMP(g_pcmPrimary);
    ValidateMGClient(pmgClient);

    if (!g_pcmPrimary->callID)
    {
        //
        // We aren't in a call yet/anymore.
        //
        WARNING_OUT(("MG_Attach failing; not in T.120 call"));
        rc = NET_RC_MGC_NOT_CONNECTED;
        DC_QUIT;
    }

    ASSERT(callID == g_pcmPrimary->callID);
    ASSERT(!pmgClient->userAttached);

    pmgClient->userIDMCS = NET_UNUSED_IDMCS;
    ZeroMemory(&pmgClient->flo, sizeof(FLO_STATIC_DATA));
    pmgClient->userAttached = TRUE;

    //
    // Call through to the underlying MCS layer (normally, we need our
    // callbacks to happen with a task switch but since this is Windows it
    // doesn't really matter anyway):
    //
    rc = MCS_AttachRequest(&(pmgClient->m_piMCSSap),
                           (DomainSelector) &g_pcmPrimary->callID,
                           sizeof(g_pcmPrimary->callID),
                           (MCSCallBack) MGCallback,
                           (void *) pmgClient,
                           ATTACHMENT_DISCONNECT_IN_DATA_LOSS);
    if (rc != 0)
    {
        WARNING_OUT(("MCSAttachUserRequest failed with error %x", rc));
        MGDetach(pmgClient);
        rc = McsErrToNetErr(rc);
        DC_QUIT;
    }

    if (++g_mgAttachCount == 1)
    {
        UT_PostEvent(pmgClient->putTask,
                     pmgClient->putTask,
                     MG_TIMER_PERIOD,
                     NET_MG_WATCHDOG,
                     0, 0);
    }
    ASSERT(g_mgAttachCount <= MGTASK_MAX);

    //
    // It is assumed that the client will use the same latencies for every
    // attachment, so we keep them at the client level.
    //
    pmgClient->flowControl = *pFlowControl;

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_T120);

    DebugExitDWORD(MG_Attach, rc);
    return(rc);
}
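
//
// Usage sketch (illustrative, not part of the original file): a client
// attaches once a T.120 call is up, supplying the per-priority latencies
// and stream sizes that later channel joins inherit (the field values
// here are placeholders):
//
//     NET_FLOW_CONTROL flo;
//     ZeroMemory(&flo, sizeof(flo));
//     flo.latency[MG_PRIORITY_HIGHEST]    = ...;
//     flo.streamSize[MG_PRIORITY_HIGHEST] = ...;
//     rc = MG_Attach(pmgClient, callID, &flo);
//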
//
// MG_Detach(...)
//
void MG_Detach
(
    PMG_CLIENT pmgClient
)
{
    DebugEntry(MG_Detach);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MG_Detach: client %x not attached", pmgClient));
        DC_QUIT;
    }

    //
    // Call FLO_UserTerm to ensure that flow control is stopped on all the
    // channels that have been flow controlled on our behalf.
    //
    FLO_UserTerm(pmgClient);

    //
    // Clear out the buffers and variables.
    //
    MGDetach(pmgClient);

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_T120);
    DebugExitVOID(MG_Detach);
}

//
// MG_ChannelJoin(...)
//
UINT MG_ChannelJoin
(
    PMG_CLIENT pmgClient,
    NET_CHANNEL_ID * pCorrelator,
    NET_CHANNEL_ID channel
)
{
    PMG_BUFFER pmgBuffer;
    UINT rc = 0;

    DebugEntry(MG_ChannelJoin);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MG_ChannelJoin: client %x not attached", pmgClient));
        rc = NET_RC_MGC_INVALID_USER_HANDLE;
        DC_QUIT;
    }

    //
    // MCAT may bounce this request, so we must queue the request
    //
    rc = MGNewBuffer(pmgClient, MG_RQ_CHANNEL_JOIN, &pmgBuffer);
    if (rc != 0)
    {
        DC_QUIT;
    }

    MGNewCorrelator(pmgClient, pCorrelator);
    pmgBuffer->work = *pCorrelator;
    pmgBuffer->channelId = (ChannelID)channel;

    TRACE_OUT(("Inserting join message 0x%08x into pending chain", pmgBuffer));
    COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));

    UT_PostEvent(pmgClient->putTask,
                 pmgClient->putTask,
                 NO_DELAY,
                 NET_MG_SCHEDULE,
                 0,
                 0);

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_T120);

    DebugExitDWORD(MG_ChannelJoin, rc);
    return(rc);
}
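
//
// Illustrative note (not part of the original file): the join completes
// asynchronously. The caller holds on to the correlator and matches it
// against the NET_EVENT_CHANNEL_JOIN event posted when the queued request
// finally completes:
//
//     NET_CHANNEL_ID correlator;
//     if (MG_ChannelJoin(pmgClient, &correlator, channel) == 0)
//     {
//         // wait for the NET_EVENT_CHANNEL_JOIN carrying this correlator
//     }
//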
//
// MG_ChannelJoinByKey(...)
//
UINT MG_ChannelJoinByKey
(
    PMG_CLIENT pmgClient,
    NET_CHANNEL_ID * pCorrelator,
    WORD channelKey
)
{
    PMG_BUFFER pmgBuffer;
    UINT rc = 0;

    DebugEntry(MG_ChannelJoinByKey);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MG_ChannelJoinByKey: client %x not attached", pmgClient));
        rc = NET_RC_MGC_INVALID_USER_HANDLE;
        DC_QUIT;
    }

    //
    // MCAT may bounce this request, so we must queue the request
    //
    rc = MGNewBuffer(pmgClient, MG_RQ_CHANNEL_JOIN_BY_KEY, &pmgBuffer);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Store the various pieces of information in the joinByKeyInfo
    // structure of the client CB
    //
    MGNewCorrelator(pmgClient, pCorrelator);
    pmgBuffer->work = *pCorrelator;
    pmgBuffer->channelKey = (ChannelID)channelKey;
    pmgBuffer->channelId = 0;

    TRACE_OUT(("Inserting join message 0x%08x into pending chain", pmgBuffer));
    COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));

    UT_PostEvent(pmgClient->putTask,
                 pmgClient->putTask,
                 NO_DELAY,
                 NET_MG_SCHEDULE,
                 0,
                 0);

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_T120);

    DebugExitDWORD(MG_ChannelJoinByKey, rc);
    return(rc);
}

//
// MG_ChannelLeave(...)
//
void MG_ChannelLeave
(
    PMG_CLIENT pmgClient,
    NET_CHANNEL_ID channel
)
{
    PMG_BUFFER pmgBuffer;

    DebugEntry(MG_ChannelLeave);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MG_ChannelLeave: client %x not attached", pmgClient));
        DC_QUIT;
    }

    //
    // MCAT may bounce this request, so instead of processing it straight
    // away, we put it on the user's request queue and kick off a process
    // queue loop. This is a request CB, but we don't need any data buffer.
    //
    if (MGNewBuffer(pmgClient, MG_RQ_CHANNEL_LEAVE, &pmgBuffer) != 0)
    {
        DC_QUIT;
    }

    //
    // Fill in the specific data fields in the request CB:
    //
    pmgBuffer->channelId = (ChannelID)channel;
    COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));

    UT_PostEvent(pmgClient->putTask,
                 pmgClient->putTask,
                 NO_DELAY,
                 NET_MG_SCHEDULE,
                 0,
                 0);

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_T120);
    DebugExitVOID(MG_ChannelLeave);
}

//
// MG_SendData(...)
//
UINT MG_SendData
(
    PMG_CLIENT pmgClient,
    NET_PRIORITY priority,
    NET_CHANNEL_ID channel,
    UINT length,
    void ** ppData
)
{
    PMG_BUFFER pmgBuffer;
    UINT numControlBlocks;
    UINT i;
    UINT rc;

    DebugEntry(MG_SendData);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MG_SendData: client %x not attached", pmgClient));
        rc = NET_RC_MGC_INVALID_USER_HANDLE;
        DC_QUIT;
    }

    //
    // Check for a packet greater than the permitted size.
    // It must not cause the length to wrap into the flow flag.
    //
    ASSERT(TSHR_MAX_SEND_PKT + sizeof(TSHR_NET_PKT_HEADER) < TSHR_PKT_FLOW);
    ASSERT(length <= TSHR_MAX_SEND_PKT);

    //
    // Ensure we have a priority which is valid for our use of MCS.
    //
    priority = (NET_PRIORITY)(MG_VALID_PRIORITY(priority));

    if (pmgClient->userIDMCS == NET_UNUSED_IDMCS)
    {
        //
        // We are not yet attached, so don't try to send data.
        //
        ERROR_OUT(("Sending data prior to attach indication"));
        rc = NET_RC_INVALID_STATE;
        DC_QUIT;
    }

    //
    // The <ppData> parameter points to a data buffer pointer. This buffer
    // pointer should point to a buffer which the client acquired using
    // MG_GetBuffer. MG_GetBuffer should have added a buffer CB to the
    // client's buffer list containing the same pointer. Note that if the
    // NET_SEND_ALL_PRIORITIES flag is set then there will be four buffers
    // in the client's buffer list containing the same pointer.
    //
    // So, we search through the client's buffer list looking for a match
    // on the data buffer pointer. Move to the first position in the list.
    //
    COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pmgClient->buffers),
        (void**)&pmgBuffer, FIELD_OFFSET(MG_BUFFER, clientChain),
        FIELD_OFFSET(MG_BUFFER, pDataBuffer), (DWORD_PTR)*ppData,
        FIELD_SIZE(MG_BUFFER, pDataBuffer));
    ValidateMGBuffer(pmgBuffer);

    //
    // Check the NET_SEND_ALL_PRIORITIES flag to see if it is set
    //
    if (pmgBuffer->priority & NET_SEND_ALL_PRIORITIES)
    {
        //
        // Check that the priority and channel have not changed. Changing
        // the priority between calling MG_GetBuffer and calling
        // MG_SendData is not allowed.
        //
        ASSERT(pmgBuffer->channelId == channel);
        ASSERT(priority & NET_SEND_ALL_PRIORITIES);

        //
        // The flag is set so there should be multiple control buffers
        // waiting to be sent.
        //
        numControlBlocks = MG_NUM_PRIORITIES;
    }
    else
    {
        //
        // Check that the priority and channel have not changed.
        //
        ASSERT(pmgBuffer->channelId == channel);
        ASSERT(pmgBuffer->priority == priority);

        //
        // The flag is not set so there should be only one control buffer
        // waiting.
        //
        numControlBlocks = 1;
    }

    //
    // Now send the control blocks
    //
    for (i = 0; i < numControlBlocks; i++)
    {
        ValidateMGBuffer(pmgBuffer);

        //
        // Clear the NET_SEND_ALL_PRIORITIES flag.
        //
        pmgBuffer->priority &= ~NET_SEND_ALL_PRIORITIES;

        //
        // Set up the packet length for the send (this may be different
        // from the length in the buffer header since the app may not have
        // used all the buffer).
        //
        ASSERT(length + sizeof(TSHR_NET_PKT_HEADER) <= pmgBuffer->length);
        pmgBuffer->pPktHeader->header.pktLength =
            (TSHR_UINT16)(length + sizeof(TSHR_NET_PKT_HEADER));

        //
        // If the length has changed then tell FC about it.
        //
        if ((length + sizeof(MG_INT_PKT_HEADER)) < pmgBuffer->length)
        {
            FLO_ReallocSend(pmgClient, pmgBuffer->pStr,
                pmgBuffer->length - (length + sizeof(MG_INT_PKT_HEADER)));
        }

        TRACE_OUT(("Inserting send 0x%08x into pend chain, pri %u, chan 0x%08x",
            pmgBuffer, pmgBuffer->priority, pmgBuffer->channelId));
        COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));

        //
        // If there are one or more control blocks left to find then search
        // the client's buffer list for them.
        //
        if ((numControlBlocks - (i + 1)) > 0)
        {
            COM_BasedListFind(LIST_FIND_FROM_NEXT, &(pmgClient->buffers),
                (void**)&pmgBuffer, FIELD_OFFSET(MG_BUFFER, clientChain),
                FIELD_OFFSET(MG_BUFFER, pDataBuffer),
                (DWORD_PTR)*ppData, FIELD_SIZE(MG_BUFFER, pDataBuffer));
        }
    }

    UT_PostEvent(pmgClient->putTask,
                 pmgClient->putTask,
                 NO_DELAY,
                 NET_MG_SCHEDULE,
                 0,
                 0);

    //
    // Everything went OK - set the ppData pointer to NULL to prevent
    // the caller from accessing the memory.
    //
    *ppData = NULL;
    rc = 0;

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_T120);

    DebugExitDWORD(MG_SendData, rc);
    return(rc);
}

//
// MG_TokenGrab(...)
//
UINT MG_TokenGrab
(
    PMG_CLIENT pmgClient,
    NET_TOKEN_ID tokenID
)
{
    PMG_BUFFER pmgBuffer;
    UINT rc = 0;

    DebugEntry(MG_TokenGrab);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MG_TokenGrab: client 0x%08x not attached", pmgClient));
        rc = NET_RC_MGC_INVALID_USER_HANDLE;
        DC_QUIT;
    }

    //
    // MCAT may bounce this request, so instead of processing it straight
    // away, we put it on the user's request queue and kick off a process
    // queue loop:
    //
    rc = MGNewBuffer(pmgClient, MG_RQ_TOKEN_GRAB, &pmgBuffer);
    if (rc != 0)
    {
        WARNING_OUT(("MGNewBuffer failed in MG_TokenGrab"));
        DC_QUIT;
    }

    pmgBuffer->channelId = (ChannelID)tokenID;
    COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));

    UT_PostEvent(pmgClient->putTask,
                 pmgClient->putTask,
                 NO_DELAY,
                 NET_MG_SCHEDULE,
                 0,
                 0);

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_T120);

    DebugExitDWORD(MG_TokenGrab, rc);
    return(rc);
}

//
// MG_TokenInhibit(...)
//
UINT MG_TokenInhibit
(
    PMG_CLIENT pmgClient,
    NET_TOKEN_ID tokenID
)
{
    PMG_BUFFER pmgBuffer;
    UINT rc = 0;

    DebugEntry(MG_TokenInhibit);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MG_TokenInhibit: client 0x%08x not attached", pmgClient));
        rc = NET_RC_MGC_INVALID_USER_HANDLE;
        DC_QUIT;
    }

    //
    // MCAT may bounce this request, so instead of processing it straight
    // away, we put it on the user's request queue and kick off a process
    // queue loop:
    //
    rc = MGNewBuffer(pmgClient, MG_RQ_TOKEN_INHIBIT, &pmgBuffer);
    if (rc != 0)
    {
        WARNING_OUT(("MGNewBuffer failed in MG_TokenInhibit"));
        DC_QUIT;
    }

    pmgBuffer->channelId = (ChannelID)tokenID;
    COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));

    UT_PostEvent(pmgClient->putTask,
                 pmgClient->putTask,
                 NO_DELAY,
                 NET_MG_SCHEDULE,
                 0,
                 0);

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_T120);

    DebugExitDWORD(MG_TokenInhibit, rc);
    return(rc);
}

//
// MG_GetBuffer(...)
//
UINT MG_GetBuffer
(
    PMG_CLIENT pmgClient,
    UINT length,
    NET_PRIORITY priority,
    NET_CHANNEL_ID channel,
    void ** ppData
)
{
    PMG_BUFFER pmgBuffer;
    UINT rc;

    DebugEntry(MG_GetBuffer);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MG_GetBuffer: client 0x%08x not attached", pmgClient));
        rc = NET_RC_MGC_INVALID_USER_HANDLE;
        DC_QUIT;
    }

    //
    // Ensure we have a priority which is valid for our use of MCS.
    //
    priority = (NET_PRIORITY)(MG_VALID_PRIORITY(priority));

    //
    // Obtain a buffer and store the info in a buffer CB hung off the
    // client's list:
    //
    rc = MGNewTxBuffer(pmgClient, priority, channel, length, &pmgBuffer);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // We always return a pointer to the data buffer to an application.
    // The MG packet header is only used when giving data to MCS or
    // receiving data from MCS.
    //
    *ppData = pmgBuffer->pDataBuffer;

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_T120);

    DebugExitDWORD(MG_GetBuffer, rc);
    return(rc);
}
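
//
// Usage sketch (illustrative, not part of the original file): send buffers
// are always acquired via MG_GetBuffer and handed back via MG_SendData,
// which NULLs the caller's pointer on success:
//
//     void * pData;
//     if (MG_GetBuffer(pmgClient, cbPkt, priority, channel, &pData) == 0)
//     {
//         memcpy(pData, pPkt, cbPkt);     // cbPkt/pPkt are hypothetical
//         if (MG_SendData(pmgClient, priority, channel, cbPkt, &pData) != 0)
//         {
//             MG_FreeBuffer(pmgClient, &pData);   // send refused; return it
//         }
//     }
//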
//
// MG_FreeBuffer(...)
//
void MG_FreeBuffer
(
    PMG_CLIENT pmgClient,
    void ** ppData
)
{
    PMG_BUFFER pmgBuffer;

    DebugEntry(MG_FreeBuffer);

    UT_Lock(UTLOCK_T120);

    ValidateMGClient(pmgClient);

    //
    // Find the buffer CB associated with the buffer - an application
    // always uses a pointer to the data buffer rather than the packet
    // header.
    //
    COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pmgClient->buffers),
        (void**)&pmgBuffer, FIELD_OFFSET(MG_BUFFER, clientChain),
        FIELD_OFFSET(MG_BUFFER, pDataBuffer), (DWORD_PTR)*ppData,
        FIELD_SIZE(MG_BUFFER, pDataBuffer));
    ValidateMGBuffer(pmgBuffer);

    //
    // If the app is freeing a send buffer (e.g. because it decided not to
    // send it) then inform flow control:
    //
    if (pmgBuffer->type == MG_TX_BUFFER)
    {
        FLO_ReallocSend(pmgClient, pmgBuffer->pStr, pmgBuffer->length);
    }

    //
    // Now free the buffer CB and all associated data:
    //
    MGFreeBuffer(pmgClient, &pmgBuffer);

    //
    // Reset the client's pointer:
    //
    *ppData = NULL;

    UT_Unlock(UTLOCK_T120);
    DebugExitVOID(MG_FreeBuffer);
}
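
//
// Illustrative note (not part of the original file): freeing an unsent
// MG_TX_BUFFER hands its entire length back to flow control, so a client
// that changes its mind after MG_GetBuffer simply does:
//
//     MG_FreeBuffer(pmgClient, &pData);   // pData as returned by MG_GetBuffer
//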
//
// MG_FlowControlStart
//
void MG_FlowControlStart
(
    PMG_CLIENT pmgClient,
    NET_CHANNEL_ID channel,
    NET_PRIORITY priority,
    UINT backlog,
    UINT maxBytesOutstanding
)
{
    DebugEntry(MG_FlowControlStart);

    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MG_FlowControlStart: client 0x%08x not attached", pmgClient));
        DC_QUIT;
    }

    //
    // Ensure we have a priority which is valid for our use of MCS.
    //
    priority = (NET_PRIORITY)(MG_VALID_PRIORITY(priority));

    FLO_StartControl(pmgClient,
                     channel,
                     priority,
                     backlog,
                     maxBytesOutstanding);

DC_EXIT_POINT:
    DebugExitVOID(MG_FlowControlStart);
}

//
// MGLongStopHandler(...)
//
BOOL CALLBACK MGLongStopHandler
(
    LPVOID pData,
    UINT event,
    UINT_PTR UNUSEDparam1,
    UINT_PTR param2
)
{
    PMG_CLIENT pmgClient;
    BOOL processed = FALSE;

    DebugEntry(MGLongStopHandler);

    pmgClient = (PMG_CLIENT)pData;
    ValidateMGClient(pmgClient);

    if (event == NET_EVENT_CHANNEL_JOIN)
    {
        WARNING_OUT(("Failed to process NET_EVENT_CHANNEL_JOIN; freeing buffer 0x%08x",
            param2));
        MG_FreeBuffer(pmgClient, (void **)&param2);
        processed = TRUE;
    }
    else if (event == NET_FLOW)
    {
        WARNING_OUT(("Failed to process NET_FLOW; freeing buffer 0x%08x",
            param2));
        processed = TRUE;
    }

    DebugExitBOOL(MGLongStopHandler, processed);
    return(processed);
}

//
// MGEventHandler(...)
//
BOOL CALLBACK MGEventHandler
(
    LPVOID pData,
    UINT event,
    UINT_PTR param1,
    UINT_PTR param2
)
{
    PMG_CLIENT pmgClient;
    PNET_JOIN_CNF_EVENT pNetJoinCnf = NULL;
    BOOL processed = TRUE;
    PMG_BUFFER pmgBuffer;
    BOOL joinComplete = FALSE;
    UINT result = NET_RESULT_USER_REJECTED;

    DebugEntry(MGEventHandler);

    pmgClient = (PMG_CLIENT)pData;
    ValidateMGClient(pmgClient);

    switch (event)
    {
        case NET_EVENT_CHANNEL_JOIN:
        {
            //
            // If there are no join requests queued off the client CB then
            // we have nothing more to do. The only NET events we are
            // interested in are NET_EV_JOIN_CONFIRM events - pass any
            // others on.
            //
            if (pmgClient->joinChain.next == 0)
            {
                //
                // Pass the event on...
                //
                processed = FALSE;
                DC_QUIT;
            }

            //
            // We must be careful not to process a completed channel join
            // which we intend to go to the client. The correlator is only
            // filled in on completed events and is always non-zero.
            //
            pNetJoinCnf = (PNET_JOIN_CNF_EVENT)param2;
            if (pNetJoinCnf->correlator != 0)
            {
                //
                // Pass the event on...
                //
                processed = FALSE;
                DC_QUIT;
            }

            //
            // There is only ever one join request outstanding per client,
            // so the join confirm is for the first join request in the
            // list.
            //
            pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->joinChain),
                FIELD_OFFSET(MG_BUFFER, pendChain));
            ValidateMGBuffer(pmgBuffer);

            //
            // We will post a join confirm to the application. Set up the
            // parameters which are needed.
            //
            result = pNetJoinCnf->result;

            //
            // Assume for now that we have completed the pending join
            // request.
            //
            joinComplete = TRUE;

            //
            // If the result is a failure, we've finished
            //
            if (result != NET_RESULT_OK)
            {
                WARNING_OUT(("Failed to join channel 0x%08x, result %u",
                    pmgBuffer->channelId,
                    pNetJoinCnf->result));
                DC_QUIT;
            }

            //
            // The join request was successful. There are three different
            // scenarios for issuing a join request...
            //
            // (a) A regular channel join.
            // (b) Stage 1 of a channel join by key (get MCS to assign a
            //     channel number, which we will try to register).
            // (c) Stage 2 of a channel join by key (join the registered
            //     channel).
            //
            if (pmgBuffer->type == MG_RQ_CHANNEL_JOIN)
            {
                //
                // This is the completion of a regular channel join. Copy
                // the channel Id from the join confirm to the bufferCB
                // (the join request may have been for channel 0).
                //
                pmgBuffer->channelId = (ChannelID)pNetJoinCnf->channel;
                TRACE_OUT(("Channel join complete, channel 0x%08x",
                    pmgBuffer->channelId));
                DC_QUIT;
            }

            //
            // This is a channel join by key
            //
            if (pmgBuffer->channelId != 0)
            {
                //
                // This is the completion of a channel join by key.
                //
                TRACE_OUT(("Channel join by key complete, channel 0x%08x, key %d",
                    pmgBuffer->channelId,
                    pmgBuffer->channelKey));
                DC_QUIT;
            }

            //
            // This is Stage 1 of a channel join by key. Fill in the
            // channel Id which MCS has assigned us into the bufferCB,
            // otherwise we'll lose track of the channel Id which we're
            // registering.
            //
            pmgBuffer->channelId = (ChannelID)pNetJoinCnf->channel;

            //
            // This must be completion of stage 1 of a join by key. We now
            // have to register the channel Id.
            //
            TRACE_OUT(("Registering channel 0x%08x, key %d",
                pmgBuffer->channelId,
                pmgBuffer->channelKey));
            if (!CMS_ChannelRegister(pmgClient->pcmClient,
                                     pmgBuffer->channelKey,
                                     pmgBuffer->channelId))
            {
                WARNING_OUT(("Failed to register channel, "
                    "channel 0x%08x, key %d, result %u",
                    pmgBuffer->channelId,
                    pmgBuffer->channelKey,
                    param1));

                //
                // This causes us to post an error notification
                //
                result = NET_RESULT_USER_REJECTED;
                DC_QUIT;
            }

            TRACE_OUT(("Waiting for CMS_CHANNEL_REGISTER_CONFIRM"));

            //
            // We're now waiting for a CMS_CHANNEL_REGISTER_CONFIRM, so we
            // haven't finished processing the join request
            //
            joinComplete = FALSE;
            break;
        }

        case CMS_CHANNEL_REGISTER_CONFIRM:
        {
            //
            // If there are no join requests queued off the client CB then
            // we have nothing more to do.
            //
            if (pmgClient->joinChain.next == 0)
            {
                processed = FALSE;
                DC_QUIT;
            }

            TRACE_OUT(("CMS_CHANNEL_REGISTER rcvd, result %u, channel %u",
                param1, param2));

            //
            // Assume for now that we have completed the pending join
            // request.
            //
            joinComplete = TRUE;

            //
            // There is only ever one join request outstanding per client,
            // so the channel register confirm is for the first join
            // request in the list.
            //
            pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->joinChain),
                FIELD_OFFSET(MG_BUFFER, pendChain));
            ValidateMGBuffer(pmgBuffer);

            //
            // Param1 contains the result, LOWORD(param2) contains the
            // channel number of the registered channel (NOT necessarily
            // the same as the channel we tried to register).
            //
            if (!param1)
            {
                WARNING_OUT(("Failed to register channel, "
                    "channel 0x%08x, key %d, result %u",
                    pmgBuffer->channelId,
                    pmgBuffer->channelKey,
                    param1));
                result = NET_RESULT_USER_REJECTED;
                DC_QUIT;
            }

            //
            // If the channel number returned in the confirm event is the
            // same as the channel number which we tried to register, then
            // we have finished. Otherwise we have to leave the channel we
            // tried to register and join the channel returned instead.
            //
            if (LOWORD(param2) == pmgBuffer->channelId)
            {
                TRACE_OUT(("Channel join by key complete, "
                    "channel 0x%08x, key %d",
                    pmgBuffer->channelId,
                    pmgBuffer->channelKey));
                result = NET_RESULT_OK;
                DC_QUIT;
            }

            MG_ChannelLeave(pmgClient, pmgBuffer->channelId);
            pmgBuffer->channelId = (ChannelID)LOWORD(param2);

            //
            // Now we simply requeue the request onto the pending execution
            // chain, but now with a set channel id to join
            //
            TRACE_OUT(("Inserting 0x%08x into pending chain", pmgBuffer));
            COM_BasedListRemove(&(pmgBuffer->pendChain));
            COM_BasedListInsertBefore(&(pmgClient->pendChain),
                &(pmgBuffer->pendChain));

            //
            // We are now waiting for a join confirm (we've not finished
            // yet!). However, we've requeued the bufferCB, so we can now
            // process another join request (or the one we've requeued if
            // it's the only one).
            //
            joinComplete = FALSE;
            pmgClient->joinPending = FALSE;
            MGProcessPendingQueue(pmgClient);
            break;
        }

        case NET_MG_SCHEDULE:
        {
            MGProcessPendingQueue(pmgClient);
            break;
        }

        case NET_MG_WATCHDOG:
        {
            MGProcessDomainWatchdog(pmgClient);
            break;
        }

        default:
        {
            //
            // Don't do anything - we want to pass this event on.
            //
            processed = FALSE;
            break;
        }
    }

DC_EXIT_POINT:
    if (processed && pNetJoinCnf)
    {
        //
        // Call MG_FreeBuffer to free up the event memory (we know that
        // MG_FreeBuffer doesn't use the hUser so we pass in zero):
        //
        MG_FreeBuffer(pmgClient, (void **)&pNetJoinCnf);
    }

    if (joinComplete)
    {
        //
        // We have either completed the channel join, or failed -
        // either way we have finished processing the join request.
        //
        // We have to:
        //  - post a NET_EVENT_CHANNEL_JOIN event to the client
        //  - free up the bufferCB
        //  - reset the client's joinPending state
        //
        MGPostJoinConfirm(pmgClient,
                          (NET_RESULT)result,
                          pmgBuffer->channelId,
                          (NET_CHANNEL_ID)pmgBuffer->work);
        MGFreeBuffer(pmgClient, &pmgBuffer);
        pmgClient->joinPending = FALSE;
    }

    DebugExitBOOL(MGEventHandler, processed);
    return(processed);
}
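
//
// Illustrative summary (not part of the original file): a join by key
// therefore bounces through this handler two or three times:
//
//     MG_ChannelJoinByKey(key)               // bufferCB has channelId == 0
//         -> NET_EVENT_CHANNEL_JOIN          // stage 1: MCS assigns a channel
//         -> CMS_ChannelRegister(key, chan)
//         -> CMS_CHANNEL_REGISTER_CONFIRM    // stage 2: registry answers
//         -> (leave + rejoin if the registry returned a different channel)
//         -> MGPostJoinConfirm to the client
//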
//
// MGCallback(...)
//
#ifdef _DEBUG
const char * c_szMCSMsgTbl[22] =
{
    "MCS_CONNECT_PROVIDER_INDICATION",          // 0
    "MCS_CONNECT_PROVIDER_CONFIRM",             // 1
    "MCS_DISCONNECT_PROVIDER_INDICATION",       // 2
    "MCS_ATTACH_USER_CONFIRM",                  // 3
    "MCS_DETACH_USER_INDICATION",               // 4
    "MCS_CHANNEL_JOIN_CONFIRM",                 // 5
    "MCS_CHANNEL_LEAVE_INDICATION",             // 6
    "MCS_CHANNEL_CONVENE_CONFIRM",              // 7
    "MCS_CHANNEL_DISBAND_INDICATION",           // 8
    "MCS_CHANNEL_ADMIT_INDICATION",             // 9
    "MCS_CHANNEL_EXPEL_INDICATION",             // 10
    "MCS_SEND_DATA_INDICATION",                 // 11
    "MCS_UNIFORM_SEND_DATA_INDICATION",         // 12
    "MCS_TOKEN_GRAB_CONFIRM",                   // 13
    "MCS_TOKEN_INHIBIT_CONFIRM",                // 14
    "MCS_TOKEN_GIVE_INDICATION",                // 15
    "MCS_TOKEN_GIVE_CONFIRM",                   // 16
    "MCS_TOKEN_PLEASE_INDICATION",              // 17
    "MCS_TOKEN_RELEASE_CONFIRM",                // 18
    "MCS_TOKEN_TEST_CONFIRM",                   // 19
    "MCS_TOKEN_RELEASE_INDICATION",             // 20
    "MCS_TRANSMIT_BUFFER_AVAILABLE_INDICATION", // 21
};
// MCS_MERGE_DOMAIN_INDICATION      200
// MCS_TRANSPORT_STATUS_INDICATION  101

char * DbgGetMCSMsgStr(unsigned short mcsMessageType)
{
    if (mcsMessageType <= 21)
    {
        return (char *) c_szMCSMsgTbl[mcsMessageType];
    }
#ifdef USE_MERGE_DOMAIN_CODE
    else if (mcsMessageType == MCS_MERGE_DOMAIN_INDICATION)
    {
        return "MCS_MERGE_DOMAIN_INDICATION";
    }
#endif // USE_MERGE_DOMAIN_CODE
    else if (mcsMessageType == MCS_TRANSPORT_STATUS_INDICATION)
    {
        return "MCS_TRANSPORT_STATUS_INDICATION";
    }

    return "Unknown";
}
#endif // _DEBUG

void CALLBACK MGCallback
(
    unsigned int mcsMessageType,
    UINT_PTR eventData,
    UINT_PTR pData
)
{
    PMG_CLIENT pmgClient;
    PMG_BUFFER pmgBuffer;
    UINT rc = 0;

    DebugEntry(MGCallback);

    UT_Lock(UTLOCK_T120);

    pmgClient = (PMG_CLIENT)pData;
    ValidateMGClient(pmgClient);

    if (!pmgClient->userAttached)
    {
        TRACE_OUT(("MGCallback: client 0x%08x not attached", pmgClient));
        DC_QUIT;
    }

    ValidateCMP(g_pcmPrimary);

    switch (mcsMessageType)
    {
        case MCS_UNIFORM_SEND_DATA_INDICATION:
        case MCS_SEND_DATA_INDICATION:
        {
            //
            // The processing for a SEND_DATA_INDICATION is complicated
            // significantly by MCS segmenting packets, so we call
            // MGHandleSendInd to do all the work, then quit out of the
            // function rather than special casing throughout.
            //
            rc = MGHandleSendInd(pmgClient, (PSendData)eventData);
            DC_QUIT;
            break;
        }

        case MCS_ATTACH_USER_CONFIRM:
        {
            NET_UID user;
            NET_RESULT result;

            user = LOWUSHORT(eventData);
            result = TranslateResult(HIGHUSHORT(eventData));

            //
            // If the attach did not succeed, clean up:
            //
            if (HIGHUSHORT(eventData) != RESULT_SUCCESSFUL)
            {
                WARNING_OUT(("MG_Attach failed; cleaning up"));
                MGDetach(pmgClient);
            }
            else
            {
                pmgClient->userIDMCS = user;

                //
                // Now initialize flow control for this user attachment
                //
                ZeroMemory(&(pmgClient->flo), sizeof(pmgClient->flo));
                pmgClient->flo.callBack = MGFLOCallBack;
            }

            UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
                NET_EVENT_USER_ATTACH, MAKELONG(user, result),
                g_pcmPrimary->callID);
            break;
        }

        case MCS_DETACH_USER_INDICATION:
        {
            NET_UID user;

            user = LOWUSHORT(eventData);

            //
            // If the detach is for the local user, then clean up
            // the user CB:
            //
            if (user == pmgClient->userIDMCS)
            {
                //
                // First terminate flow control
                //
                FLO_UserTerm(pmgClient);
                MGDetach(pmgClient);
            }
            else
            {
                //
                // Just remove the offending user from flow control
                //
                FLO_RemoveUser(pmgClient, user);
            }

            UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
                NET_EVENT_USER_DETACH, user, g_pcmPrimary->callID);
            break;
        }

        case MCS_CHANNEL_JOIN_CONFIRM:
        {
            PNET_JOIN_CNF_EVENT pNetEvent;
            UINT i;

            //
            // Allocate a buffer for the event
            //
            rc = MGNewDataBuffer(pmgClient, MG_EV_BUFFER,
                sizeof(MG_INT_PKT_HEADER) + sizeof(NET_JOIN_CNF_EVENT),
                &pmgBuffer);
            if (rc != 0)
            {
                WARNING_OUT(("MGNewDataBuffer failed in MGCallback"));
                DC_QUIT;
            }

            pNetEvent = (PNET_JOIN_CNF_EVENT)pmgBuffer->pDataBuffer;

            //
            // Fill in the call ID:
            //
            pNetEvent->callID = g_pcmPrimary->callID;
            pNetEvent->channel = LOWUSHORT(eventData);
            pNetEvent->result = TranslateResult(HIGHUSHORT(eventData));

            //
            // Now establish flow control for the newly joined channel.
            // Only control priorities that have a non-zero latency.
            // And remember to ignore our own user channel! And top
            // priority.
            //
            if (HIGHUSHORT(eventData) == RESULT_SUCCESSFUL)
            {
                if (pNetEvent->channel != pmgClient->userIDMCS)
                {
                    for (i = 0; i < NET_NUM_PRIORITIES; i++)
                    {
                        if ((i == MG_VALID_PRIORITY(i)) &&
                            (pmgClient->flowControl.latency[i] != 0))
                        {
                            FLO_StartControl(pmgClient, pNetEvent->channel,
                                i, pmgClient->flowControl.latency[i],
                                pmgClient->flowControl.streamSize[i]);
                        }
                    }
                }
            }

            //
            // OK, we've built the DCG event so now post it to our client:
            //
            UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
                NET_EVENT_CHANNEL_JOIN, 0, (UINT_PTR)pNetEvent);
            pmgBuffer->eventPosted = TRUE;
            break;
        }

        case MCS_CHANNEL_LEAVE_INDICATION:
        {
            NET_CHANNEL_ID channel;

            channel = LOWUSHORT(eventData);
            MGProcessEndFlow(pmgClient, channel);

            UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
                NET_EVENT_CHANNEL_LEAVE, channel, g_pcmPrimary->callID);
            break;
        }

        case MCS_TOKEN_GRAB_CONFIRM:
        {
            NET_RESULT result;

            result = TranslateResult(HIGHUSHORT(eventData));

            UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
                NET_EVENT_TOKEN_GRAB, result, g_pcmPrimary->callID);
            break;
        }

        case MCS_TOKEN_INHIBIT_CONFIRM:
        {
            NET_RESULT result;

            result = TranslateResult(HIGHUSHORT(eventData));

            UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
                NET_EVENT_TOKEN_INHIBIT, result, g_pcmPrimary->callID);
            break;
        }

        default:
            break;
    }

    UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
        NET_MG_SCHEDULE, 0, 0);

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // We hit an error, but must return OK to MCS - otherwise it will
        // keep sending us the callback forever!
        //
        WARNING_OUT(("MGCallback: Error 0x%08x processing MCS message %u",
            rc, mcsMessageType));
    }

    UT_Unlock(UTLOCK_T120);
    DebugExitDWORD(MGCallback, MCS_NO_ERROR);
}
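
//
// Illustrative note (not part of the original file): MCS packs two 16-bit
// values into eventData for most confirms and indications, which
// MGCallback unpacks as above, e.g.
//
//     NET_UID    user   = LOWUSHORT(eventData);    // low 16 bits
//     NET_RESULT result = TranslateResult(HIGHUSHORT(eventData));
//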
//
// MGProcessEndFlow(...)
//
void MGProcessEndFlow
(
    PMG_CLIENT pmgClient,
    ChannelID channel
)
{
    UINT i;

    DebugEntry(MGProcessEndFlow);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    //
    // Terminate flow control for the newly left channel
    //
    if (channel != pmgClient->userIDMCS)
    {
        for (i = 0; i < NET_NUM_PRIORITIES; i++)
        {
            if ((i == MG_VALID_PRIORITY(i)) &&
                (pmgClient->flowControl.latency[i] != 0))
            {
                TRACE_OUT(("Ending flow control on channel 0x%08x priority %u",
                    channel, i));
                FLO_EndControl(pmgClient, channel, i);
            }
        }
    }

    DebugExitVOID(MGProcessEndFlow);
}

//
// MGHandleSendInd(...)
//
UINT MGHandleSendInd
(
    PMG_CLIENT pmgClient,
    PSendData pSendData
)
{
    PMG_BUFFER pmgBuffer;
    PNET_SEND_IND_EVENT pEvent;
    NET_PRIORITY priority;
    LPBYTE pData;
    UINT cbData;
    UINT rc = 0;
    TSHR_NET_PKT_HEADER pktHeader;

    DebugEntry(MGHandleSendInd);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    priority = (NET_PRIORITY)MG_VALID_PRIORITY(
        (NET_PRIORITY)pSendData->data_priority);

    pData = pSendData->user_data.value;
    ASSERT(pData != NULL);

    cbData = pSendData->user_data.length;
    ASSERT(cbData > sizeof(TSHR_NET_PKT_HEADER));

    TRACE_OUT(("MCS Data Indication: flags 0x%08x, size %u, first dword 0x%08x",
        pSendData->segmentation, pSendData->user_data.length,
        *((DWORD *)pData)));

    ASSERT(pSendData->segmentation == (SEGMENTATION_BEGIN | SEGMENTATION_END));
    TRACE_OUT(("Only segment: channel %u, priority %u, length %u",
        pSendData->channel_id, pSendData->data_priority, cbData));

    //
    // Look at the header
    //
    memcpy(&pktHeader, pData, sizeof(TSHR_NET_PKT_HEADER));

    //
    // Trace out the MG header word
    //
    TRACE_OUT(("Got 1st MG segment (header=%X)", pktHeader.pktLength));

    //
    // First of all try for a flow control packet
    //
    if (pktHeader.pktLength & TSHR_PKT_FLOW)
    {
        TRACE_OUT(("Flow control packet"));
        if (pktHeader.pktLength == TSHR_PKT_FLOW)
        {
            FLO_ReceivedPacket(pmgClient,
                (PTSHR_FLO_CONTROL)(pData + sizeof(TSHR_NET_PKT_HEADER)));
        }
        else
        {
            WARNING_OUT(("Received obsolete throughput packet size 0x%04x",
                pktHeader.pktLength));
        }

        pmgClient->m_piMCSSap->FreeBuffer((PVoid) pData);
        DC_QUIT;
    }

    //
    // Allocate headers for the incoming buffer.
    //
    ASSERT((sizeof(NET_SEND_IND_EVENT) + pktHeader.pktLength) <= 0xFFFF);
    ASSERT(pktHeader.pktLength == cbData);
    rc = MGNewRxBuffer(pmgClient,
                       priority,
                       pSendData->channel_id,
                       pSendData->initiator,
                       &pmgBuffer);
    if (rc != 0)
    {
        WARNING_OUT(("MGNewRxBuffer of size %u failed",
            sizeof(NET_SEND_IND_EVENT) + sizeof(MG_INT_PKT_HEADER)));
        pmgClient->m_piMCSSap->FreeBuffer((PVoid) pData);
        DC_QUIT;
    }

    pEvent = (PNET_SEND_IND_EVENT) pmgBuffer->pDataBuffer;

    ValidateCMP(g_pcmPrimary);
    pEvent->callID = g_pcmPrimary->callID;
    pEvent->priority = priority;
    pEvent->channel = pSendData->channel_id;

    //
    // Copy the length into the data buffer header.
    //
    pmgBuffer->pPktHeader->header = pktHeader;

    //
    // We want to skip past the packet header to the user data
    //
    pData += sizeof(TSHR_NET_PKT_HEADER);
    cbData -= sizeof(TSHR_NET_PKT_HEADER);

    //
    // Set the pointer in the buffer header to point to the received data.
    //
    // pEvent->lengthOfData contains the number of bytes received in this
    // event so far.
    //
    ASSERT(pData);
    pEvent->data_ptr = pData;
    pEvent->lengthOfData = cbData;

    TRACE_OUT(("New RX pmgBuffer 0x%08x pDataBuffer 0x%08x",
        pmgBuffer, pEvent));

    //
    // OK, we've got all the segments, so post it to our client:
    //
    UT_PostEvent(pmgClient->putTask, pmgClient->putTask, NO_DELAY,
        NET_EVENT_DATA_RECEIVED, 0, (UINT_PTR)pEvent);
    pmgBuffer->eventPosted = TRUE;

DC_EXIT_POINT:
    DebugExitDWORD(MGHandleSendInd, rc);
    return(rc);
}
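
//
// Illustrative note (not part of the original file): the length word that
// travels on the wire doubles as a packet discriminator. A receiver can
// classify an incoming MCS data indication with a simple test on the
// TSHR_PKT_FLOW bit, as MGHandleSendInd does above:
//
//     memcpy(&pktHeader, pData, sizeof(TSHR_NET_PKT_HEADER));
//     if (pktHeader.pktLength & TSHR_PKT_FLOW)
//     {
//         // flow control message (or an obsolete throughput packet)
//     }
//     else
//     {
//         // user data; pktLength covers header plus payload
//     }
//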
//
// MGNewBuffer(...)
//
UINT MGNewBuffer
(
    PMG_CLIENT pmgClient,
    UINT bufferType,
    PMG_BUFFER * ppmgBuffer
)
{
    PMG_BUFFER pmgBuffer;
    void * pBuffer = NULL;
    UINT rc = 0;

    DebugEntry(MGNewBuffer);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    pmgBuffer = new MG_BUFFER;
    if (!pmgBuffer)
    {
        WARNING_OUT(("MGNewBuffer failed; out of memory"));
        rc = NET_RC_NO_MEMORY;
        DC_QUIT;
    }

    ZeroMemory(pmgBuffer, sizeof(*pmgBuffer));
    SET_STAMP(pmgBuffer, MGBUFFER);
    pmgBuffer->type = bufferType;

    //
    // Insert it at the head of this client's list of allocated buffers:
    //
    COM_BasedListInsertAfter(&(pmgClient->buffers), &(pmgBuffer->clientChain));

    //
    // Return the pointer
    //
    *ppmgBuffer = pmgBuffer;

DC_EXIT_POINT:
    DebugExitDWORD(MGNewBuffer, rc);
    return(rc);
}

//
// MGNewDataBuffer(...)
//
UINT MGNewDataBuffer
(
    PMG_CLIENT pmgClient,
    UINT bufferType,
    UINT bufferSize,
    PMG_BUFFER * ppmgBuffer
)
{
    void * pBuffer = NULL;
    PMG_BUFFER pmgBuffer;
    UINT rc = 0;

    DebugEntry(MGNewDataBuffer);

    //
    // Buffers include an MG internal packet header that has a length field
    // which we add to the start of all user data passed to/received from
    // MCS. This is four byte aligned, and since the data buffer starts
    // immediately after this, the data buffer will be aligned.
    //
    pBuffer = new BYTE[bufferSize];
    if (!pBuffer)
    {
        WARNING_OUT(("MGNewDataBuffer allocation of size %u failed", bufferSize));
        rc = NET_RC_NO_MEMORY;
        DC_QUIT;
    }
    ZeroMemory(pBuffer, bufferSize);

    //
    // Now we allocate the buffer CB which we will use to track the use of
    // the buffer.
    //
    rc = MGNewBuffer(pmgClient, bufferType, ppmgBuffer);
    if (rc != 0)
    {
        WARNING_OUT(("MGNewBuffer failed"));
        DC_QUIT;
    }

    //
    // Initialise the buffer entry
    //
    pmgBuffer = *ppmgBuffer;
    pmgBuffer->length = bufferSize;
    pmgBuffer->pPktHeader = (PMG_INT_PKT_HEADER)pBuffer;
    pmgBuffer->pDataBuffer = (LPBYTE)pBuffer + sizeof(MG_INT_PKT_HEADER);

    //
    // Initialize the use count of the data buffer
    //
    pmgBuffer->pPktHeader->useCount = 1;

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // Cleanup:
        //
        if (pBuffer != NULL)
        {
            WARNING_OUT(("Freeing MG_BUFFER data 0x%08x; MGNewBuffer failed", pBuffer));
            delete[] pBuffer;
        }
    }

    DebugExitDWORD(MGNewDataBuffer, rc);
    return(rc);
}

//
// MGNewTxBuffer(...)
//
UINT MGNewTxBuffer
(
    PMG_CLIENT pmgClient,
    NET_PRIORITY priority,
    NET_CHANNEL_ID channel,
    UINT bufferSize,
    PMG_BUFFER * ppmgBuffer
)
{
    int i;
    UINT numPrioritiesToUse;
    UINT rc = 0;
    UINT nextPriority;
    PMG_BUFFER pmgBufferArray[MG_NUM_PRIORITIES];
    PFLO_STREAM_DATA pStr[MG_NUM_PRIORITIES];
    NET_PRIORITY priorities[MG_NUM_PRIORITIES];

    DebugEntry(MGNewTxBuffer);

    ValidateMGClient(pmgClient);
    ASSERT(priority != NET_TOP_PRIORITY);

    //
    // Initialise the control buffer pointer array. The first member of
    // this array is the normal buffer which is allocated regardless of the
    // NET_SEND_ALL_PRIORITIES flag. The remaining members are used for
    // duplicate control buffer pointers needed for sending data on all
    // priorities.
    //
    ZeroMemory(pmgBufferArray, sizeof(pmgBufferArray));
    ZeroMemory(pStr, sizeof(pStr));

    //
    // SFR6025: Check for the NET_SEND_ALL_PRIORITIES flag. This means
    // that the data will be sent at all four priorities. If it is not set
    // then we just need to send data at one priority. In either case we
    // need to:
    //
    //   Check with flow control that it is possible to send data on
    //   all channels.
    //
    //   Allocate an additional three control blocks which all point
    //   to the same data block and bump up the usage count.
    //
    // NOTE: Previously this function just checked with flow control for
    // a single channel.
    //
    if (priority & NET_SEND_ALL_PRIORITIES)
    {
        numPrioritiesToUse = MG_NUM_PRIORITIES;
    }
    else
    {
        numPrioritiesToUse = 1;
    }

    //
    // Disable the flag to prevent FLO_AllocSend being sent an invalid
    // priority.
    //
    priority &= ~NET_SEND_ALL_PRIORITIES;

    nextPriority = priority;
    for (i = 0; i < (int) numPrioritiesToUse; i++)
    {
        //
        // Check with flow control to ensure that send space is available.
        // Start with the requested priority level and continue for the
        // other priority levels.
        //
        priorities[i] = (NET_PRIORITY)nextPriority;
        rc = FLO_AllocSend(pmgClient,
                           nextPriority,
                           channel,
                           bufferSize + sizeof(MG_INT_PKT_HEADER),
                           &(pStr[i]));

        //
        // If we have got back pressure then just return.
        //
        if (rc != 0)
        {
            TRACE_OUT(("Received back pressure"));

            //
            // Free any buffer space allocated by FLO_AllocSend.
            //
            for (--i; i >= 0; i--)
            {
                FLO_ReallocSend(pmgClient,
                    pStr[i],
                    bufferSize + sizeof(MG_INT_PKT_HEADER));
            }
            DC_QUIT;
        }
        ValidateFLOStr(pStr[i]);

        //
        // Move on to the next priority level. There are MG_NUM_PRIORITIES
        // levels, numbered contiguously from MG_PRIORITY_HIGHEST. The
        // first priority processed can be any level in the valid range so
        // rather than simply add 1 to get to the next level, we need to
        // cope with the wrap-around back to MG_PRIORITY_HIGHEST when we
        // have just processed the last priority, ie MG_PRIORITY_HIGHEST +
        // MG_NUM_PRIORITIES - 1. This is achieved by rebasing the priority
        // level to zero (the - MG_PRIORITY_HIGHEST, below), incrementing
        // the rebased priority (+1), taking the modulus of the number of
        // priorities to avoid exceeding the limit (% MG_NUM_PRIORITIES)
        // and then restoring the base by adding back the first priority
        // level (+ MG_PRIORITY_HIGHEST).
        //
        nextPriority = (((nextPriority + 1 - MG_PRIORITY_HIGHEST) %
            MG_NUM_PRIORITIES) + MG_PRIORITY_HIGHEST);
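
        //
        // Worked example (illustrative, not part of the original file):
        // assuming, purely for illustration, MG_PRIORITY_HIGHEST == 1 and
        // MG_NUM_PRIORITIES == 3, a request starting at priority 2 cycles
        // 2 -> 3 -> 1:
        //
        //     ((2 + 1 - 1) % 3) + 1 == 3
        //     ((3 + 1 - 1) % 3) + 1 == 1
        //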
  1724. }
  1725. //
  1726. // Use MGNewDataBuffer to allocate the buffer
  1727. //
  1728. rc = MGNewDataBuffer(pmgClient,
  1729. MG_TX_BUFFER,
  1730. bufferSize + sizeof(MG_INT_PKT_HEADER),
  1731. &pmgBufferArray[0]);
  1732. if (rc != 0)
  1733. {
  1734. WARNING_OUT(("MGNewDataBuffer failed in MGNewTxBuffer"));
  1735. DC_QUIT;
  1736. }
  1737. //
  1738. // Add the fields required for doing the send
  1739. //
  1740. pmgBufferArray[0]->priority = priority;
  1741. pmgBufferArray[0]->channelId = (ChannelID) channel;
  1742. pmgBufferArray[0]->senderId = pmgClient->userIDMCS;
  1743. ValidateFLOStr(pStr[0]);
  1744. pmgBufferArray[0]->pStr = pStr[0];
  1745. //
  1746. // Now allocate an additional three control blocks which are identical
  1747. // to the first one if required.
  1748. //
  1749. if (numPrioritiesToUse > 1)
  1750. {
  1751. //
  1752. // Firstly re-enable the NET_SEND_ALL_PRIORITIES flag. This is to
  1753. // ensure that traversing the linked list in MG_SendData is
  1754. // efficient.
  1755. //
  1756. pmgBufferArray[0]->priority |= NET_SEND_ALL_PRIORITIES;
  1757. //
  1758. // Create the duplicate buffers and initialise them.
  1759. //
  1760. for (i = 1; i < MG_NUM_PRIORITIES; i++)
  1761. {
  1762. TRACE_OUT(("Task allocating extra CB, priority %u",
  1763. priorities[i]));
  1764. //
  1765. // Allocate a new control buffer.
  1766. //
  1767. rc = MGNewBuffer(pmgClient,
  1768. MG_TX_BUFFER,
  1769. &pmgBufferArray[i]);
  1770. if (rc != 0)
  1771. {
  1772. WARNING_OUT(("MGNewBuffer failed"));
  1773. DC_QUIT;
  1774. }
  1775. //
  1776. // Initialise the buffer control block. The priority values of
  1777. // these control blocks are in increasing order from that of
  1778. // pmgBuffer.
  1779. //
  1780. pmgBufferArray[i]->priority = priorities[i];
  1781. pmgBufferArray[i]->channelId = pmgBufferArray[0]->channelId;
  1782. pmgBufferArray[i]->senderId = pmgBufferArray[0]->senderId;
  1783. pmgBufferArray[i]->length = pmgBufferArray[0]->length;
  1784. pmgBufferArray[i]->pPktHeader = pmgBufferArray[0]->pPktHeader;
  1785. pmgBufferArray[i]->pDataBuffer = pmgBufferArray[0]->pDataBuffer;
  1786. ValidateFLOStr(pStr[i]);
  1787. pmgBufferArray[i]->pStr = pStr[i];
  1788. //
  1789. // Set the NET_SEND_ALL_PRIORITIES flag.
  1790. //
  1791. pmgBufferArray[i]->priority |= NET_SEND_ALL_PRIORITIES;
  1792. //
  1793. // Now bump up the usage count of the data block.
  1794. //
  1795. pmgBufferArray[i]->pPktHeader->useCount++;
  1796. TRACE_OUT(("Use count of data buffer %#.8lx now %d",
  1797. pmgBufferArray[i]->pPktHeader,
  1798. pmgBufferArray[i]->pPktHeader->useCount));
  1799. }
  1800. }
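//
// Illustrative summary of the state built above: all the control
// blocks in pmgBufferArray share a single data block, so the shared
// pPktHeader->useCount now equals the number of control blocks and
// the data is deleted only when the last of them goes through
// MGFreeBuffer.
//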
  1801. //
1802. // Hand the first control buffer allocated back to the caller via
1803. // the output parameter.
  1804. //
  1805. *ppmgBuffer = pmgBufferArray[0];
  1806. DC_EXIT_POINT:
  1807. //
  1808. // In the event of a problem we free any buffers that we have already
  1809. // allocated.
  1810. //
  1811. if (rc != 0)
  1812. {
  1813. for (i = 0; i < MG_NUM_PRIORITIES; i++)
  1814. {
  1815. if (pmgBufferArray[i] != NULL)
  1816. {
  1817. TRACE_OUT(("About to free control buffer %u", i));
  1818. MGFreeBuffer(pmgClient, &pmgBufferArray[i]);
  1819. }
  1820. }
  1821. }
  1822. DebugExitDWORD(MGNewTxBuffer, rc);
  1823. return(rc);
  1824. }
  1825. //
  1826. // MGNewRxBuffer(...)
  1827. //
  1828. UINT MGNewRxBuffer
  1829. (
  1830. PMG_CLIENT pmgClient,
  1831. NET_PRIORITY priority,
  1832. NET_CHANNEL_ID channel,
  1833. NET_CHANNEL_ID senderID,
  1834. PMG_BUFFER * ppmgBuffer
  1835. )
  1836. {
  1837. UINT rc = 0;
  1838. DebugEntry(MGNewRxBuffer);
  1839. ValidateMGClient(pmgClient);
  1840. //
  1841. // First tell flow control we need a buffer.
1842. // Back pressure cannot be applied here, but flow control uses this
  1843. // notification to control responses to the sender.
  1844. //
  1845. // Note that we always use the sizes including the internal packet
  1846. // header for flow control purposes.
  1847. //
  1848. FLO_AllocReceive(pmgClient,
  1849. priority,
  1850. channel,
  1851. senderID);
  1852. //
  1853. // Use MGNewDataBuffer to allocate the buffer. bufferSize includes the
  1854. // size of the network packet header (this comes over the wire), but
  1855. // not the remainder of the internal packet header.
  1856. //
  1857. rc = MGNewDataBuffer(pmgClient,
  1858. MG_RX_BUFFER,
  1859. sizeof(NET_SEND_IND_EVENT) + sizeof(MG_INT_PKT_HEADER),
  1860. ppmgBuffer);
  1861. //
  1862. // Add the fields required for a receive buffer
  1863. //
  1864. if (rc == 0)
  1865. {
  1866. (*ppmgBuffer)->priority = priority;
  1867. (*ppmgBuffer)->channelId = (ChannelID)channel;
  1868. (*ppmgBuffer)->senderId = (ChannelID)senderID;
  1869. }
  1870. else
  1871. {
  1872. WARNING_OUT(("MGNewDataBuffer failed in MGNewRxBuffer"));
  1873. }
  1874. DebugExitDWORD(MGNewRxBuffer, rc);
  1875. return(rc);
  1876. }
  1877. //
  1878. // MGFreeBuffer(...)
  1879. //
  1880. void MGFreeBuffer
  1881. (
  1882. PMG_CLIENT pmgClient,
  1883. PMG_BUFFER * ppmgBuffer
  1884. )
  1885. {
  1886. PMG_BUFFER pmgBuffer;
  1887. void * pBuffer;
  1888. DebugEntry(MGFreeBuffer);
  1889. pmgBuffer = *ppmgBuffer;
  1890. ValidateMGBuffer(pmgBuffer);
  1891. //
  1892. // If this is a receive buffer then we must first tell flow control
  1893. // about the space available
  1894. // This may trigger a pong, if we are waiting for the app to free up
  1895. // some space
  1896. //
  1897. if (pmgBuffer->type == MG_RX_BUFFER)
  1898. {
1899. ASSERT(pmgBuffer->pPktHeader->useCount == 1);
  1900. TRACE_OUT(("Free RX pmgBuffer 0x%08x", pmgBuffer));
  1901. //
  1902. // Do a sanity check on the client (there is a window where this
  1903. // may have been freed).
  1904. //
  1905. if (!pmgClient->userAttached)
  1906. {
  1907. TRACE_OUT(("MGFreeBuffer: client 0x%08x not attached", pmgClient));
  1908. }
  1909. else
  1910. {
  1911. FLO_FreeReceive(pmgClient,
  1912. pmgBuffer->priority,
  1913. pmgBuffer->channelId,
  1914. pmgBuffer->senderId);
  1915. // Free the MCS buffer
  1916. if ((pmgBuffer->pPktHeader != NULL) && (pmgClient->m_piMCSSap != NULL))
  1917. {
  1918. ASSERT(pmgBuffer->pDataBuffer != NULL);
  1919. ASSERT(((PNET_SEND_IND_EVENT)pmgBuffer->pDataBuffer)->data_ptr != NULL);
  1920. pmgClient->m_piMCSSap->FreeBuffer (
  1921. (PVoid) (((PNET_SEND_IND_EVENT) pmgBuffer->pDataBuffer)
  1922. ->data_ptr - sizeof(TSHR_NET_PKT_HEADER)));
  1923. TRACE_OUT(("MGFreeBuffer: Freed data_ptr for pmgBuffer 0x%08x pDataBuffer 0x%08x",
  1924. pmgBuffer, pmgBuffer->pDataBuffer));
  1925. ((PNET_SEND_IND_EVENT)pmgBuffer->pDataBuffer)->data_ptr = NULL;
  1926. }
  1927. }
  1928. }
  1929. //
  1930. // Free the data buffer, if there is one present. Note that this can
  1931. // be referenced by more than one bufferCB, and so has a use count
  1932. // which must reach zero before the buffer is freed.
  1933. //
  1934. if (pmgBuffer->pPktHeader != NULL)
  1935. {
  1936. ASSERT(pmgBuffer->pPktHeader->useCount != 0);
  1937. pmgBuffer->pPktHeader->useCount--;
  1938. TRACE_OUT(("Data buffer 0x%08x use count %d",
  1939. pmgBuffer->pPktHeader,
  1940. pmgBuffer->pPktHeader->useCount));
  1941. if (pmgBuffer->pPktHeader->useCount == 0)
  1942. {
  1943. TRACE_OUT(("Freeing MG_BUFFER data 0x%08x; use count is zero", pmgBuffer->pPktHeader));
  1944. delete[] pmgBuffer->pPktHeader;
  1945. pmgBuffer->pPktHeader = NULL;
  1946. }
  1947. }
  1948. //
  1949. // If the buffer CB is in the pending queue then remove it first!
  1950. //
  1951. if (pmgBuffer->pendChain.next != 0)
  1952. {
  1953. COM_BasedListRemove(&(pmgBuffer->pendChain));
  1954. }
  1955. //
  1956. // Now remove the buffer CB itself from the list and free it up:
  1957. //
  1958. COM_BasedListRemove(&(pmgBuffer->clientChain));
  1959. delete pmgBuffer;
  1960. *ppmgBuffer = NULL;
  1961. DebugExitVOID(MGFreeBuffer);
  1962. }
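//
// Lifecycle sketch (illustrative): a send across all priorities in
// MGNewTxBuffer creates N control blocks referencing one data block
// with useCount == N; each MGFreeBuffer call decrements the count and
// the final call performs the actual delete[], so the freeing order
// across priorities does not matter.
//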
  1963. //
  1964. // MGDetach(...)
  1965. //
  1966. void MGDetach
  1967. (
  1968. PMG_CLIENT pmgClient
  1969. )
  1970. {
  1971. PMG_BUFFER pmgBuffer;
  1972. PMG_BUFFER pmgT;
  1973. PIMCSSap pMCSSap;
  1974. #ifdef _DEBUG
  1975. UINT rc;
  1976. #endif // _DEBUG
  1977. DebugEntry(MGDetach);
  1978. ValidateMGClient(pmgClient);
  1979. ASSERT(pmgClient->userAttached);
  1980. pMCSSap = pmgClient->m_piMCSSap;
  1981. //
  1982. // Remove any entries for this user from the channel join pending list.
  1983. //
  1984. pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->joinChain),
  1985. FIELD_OFFSET(MG_BUFFER, pendChain));
  1986. while (pmgBuffer != NULL)
  1987. {
  1988. ValidateMGBuffer(pmgBuffer);
  1989. //
  1990. // Get a pointer to the next bufferCB in the list - we have to do
  1991. // this before we free the current bufferCB (freeing it NULLs it,
  1992. // so we won't be able to step along to the next entry in the
  1993. // list).
  1994. //
  1995. pmgT = (PMG_BUFFER)COM_BasedListNext(&(pmgClient->joinChain), pmgBuffer,
  1996. FIELD_OFFSET(MG_BUFFER, pendChain));
  1997. MGFreeBuffer(pmgClient, &pmgBuffer);
  1998. //
  1999. // We won't get a match on a join request now, so we don't have
  2000. // a join pending.
  2001. //
  2002. pmgClient->joinPending = FALSE;
  2003. pmgBuffer = pmgT;
  2004. }
  2005. //
  2006. // Remove any unsent receive buffers for this user from the buffer list
  2007. //
  2008. pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->buffers),
  2009. FIELD_OFFSET(MG_BUFFER, clientChain));
  2010. while (pmgBuffer != NULL)
  2011. {
  2012. ValidateMGBuffer(pmgBuffer);
  2013. //
  2014. // Get a pointer to the next bufferCB in the list - we have to do
  2015. // this before we free the current bufferCB (freeing it NULLs it,
  2016. // so we won't be able to step along to the next entry in the
  2017. // list).
  2018. //
  2019. pmgT = (PMG_BUFFER)COM_BasedListNext(&(pmgClient->buffers), pmgBuffer,
  2020. FIELD_OFFSET(MG_BUFFER, clientChain));
  2021. if (pmgBuffer->type == MG_RX_BUFFER)
  2022. {
  2023. if (pmgBuffer->eventPosted)
  2024. {
  2025. if ((pmgBuffer->pPktHeader != NULL) && (pMCSSap != NULL))
  2026. {
  2027. ASSERT(pmgBuffer->pDataBuffer != NULL);
  2028. ASSERT(((PNET_SEND_IND_EVENT)pmgBuffer->pDataBuffer)->data_ptr != NULL);
  2029. pMCSSap->FreeBuffer (
  2030. (PVoid) (((PNET_SEND_IND_EVENT) pmgBuffer->pDataBuffer)
  2031. ->data_ptr - sizeof(TSHR_NET_PKT_HEADER)));
  2032. TRACE_OUT(("MGDetach: Freed data_ptr for pmgBuffer 0x%08x pDataBuffer 0x%08x",
  2033. pmgBuffer, pmgBuffer->pDataBuffer));
  2034. ((PNET_SEND_IND_EVENT) pmgBuffer->pDataBuffer)->data_ptr = NULL;
  2035. }
  2036. }
  2037. else
  2038. {
  2039. //
  2040. // The bufferCB's user matches the user we are freeing up,
  2041. // and we haven't posted the event to the user, so free it.
  2042. // MGFreeBuffer removes it from the pending list, so we don't
  2043. // have to do that.
  2044. //
  2045. MGFreeBuffer(pmgClient, &pmgBuffer);
  2046. }
  2047. }
  2048. pmgBuffer = pmgT;
  2049. }
  2050. //
  2051. // Clear out the attachment info
  2052. //
  2053. pmgClient->userAttached = FALSE;
  2054. pmgClient->userIDMCS = 0;
  2055. //
  2056. // We can safely do an MCS DetachRequest without adding a requestCB
  2057. // - MCS will not bounce the request due to congestion, domain merging
  2058. // etc.
  2059. //
  2060. if (pMCSSap != NULL)
  2061. {
  2062. #ifdef _DEBUG
  2063. rc = pMCSSap->ReleaseInterface();
  2064. if (rc != 0) {
  2065. //
  2066. // No quit - we need to do our own cleanup.
  2067. //
  2068. // lonchanc: what cleanup needs to be done???
  2069. //
  2070. rc = McsErrToNetErr(rc);
  2071. switch (rc)
  2072. {
  2073. case 0:
  2074. case NET_RC_MGC_INVALID_USER_HANDLE:
  2075. case NET_RC_MGC_TOO_MUCH_IN_USE:
  2076. // These are normal.
  2077. TRACE_OUT(("MCSDetachUser normal error %d", rc));
  2078. break;
  2079. default:
  2080. ERROR_OUT(("MCSDetachUser abnormal error %d", rc));
  2081. break;
  2082. }
  2083. }
  2084. #else
  2085. pMCSSap->ReleaseInterface();
  2086. #endif //_DEBUG
  2087. pmgClient->m_piMCSSap = NULL;
  2088. }
  2089. --g_mgAttachCount;
  2090. DebugExitVOID(MGDetach);
  2091. }
  2092. //
  2093. // MGProcessPendingQueue(...)
  2094. //
  2095. UINT MGProcessPendingQueue(PMG_CLIENT pmgClient)
  2096. {
  2097. PMG_BUFFER pmgBuffer;
  2098. PMG_BUFFER pNextBuffer;
  2099. UINT rc = 0;
  2100. DebugEntry(MGProcessPendingQueue);
  2101. ValidateMGClient(pmgClient);
  2102. pNextBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->pendChain),
  2103. FIELD_OFFSET(MG_BUFFER, pendChain));
  2104. //
2105. // Try to clear the whole pending request queue
  2106. //
  2107. for ( ; (pmgBuffer = pNextBuffer) != NULL; )
  2108. {
  2109. ValidateMGBuffer(pmgBuffer);
  2110. pNextBuffer = (PMG_BUFFER)COM_BasedListNext(&(pmgClient->pendChain),
  2111. pNextBuffer, FIELD_OFFSET(MG_BUFFER, pendChain));
  2112. TRACE_OUT(("Got request 0x%08x from queue, type %u",
  2113. pmgBuffer, pmgBuffer->type));
  2114. //
  2115. // Check that the buffer is still valid. There is a race at
  2116. // conference termination where we can arrive here, but our user
  2117. // has actually already detached. In this case, free the buffer
  2118. // and continue.
  2119. //
  2120. if (!pmgClient->userAttached)
  2121. {
  2122. TRACE_OUT(("MGProcessPendingQueue: client 0x%08x not attached", pmgClient));
  2123. MGFreeBuffer(pmgClient, &pmgBuffer);
  2124. continue;
  2125. }
  2126. switch (pmgBuffer->type)
  2127. {
  2128. case MG_RQ_CHANNEL_JOIN:
  2129. case MG_RQ_CHANNEL_JOIN_BY_KEY:
  2130. {
  2131. //
  2132. // If this client already has a join outstanding, then skip
  2133. // this request.
  2134. //
  2135. if (pmgClient->joinPending)
  2136. {
  2137. //
  2138. // Break out of switch and goto next iteration of for()
  2139. //
  2140. continue;
  2141. }
  2142. pmgClient->joinPending = TRUE;
  2143. //
  2144. // Attempt the join
  2145. //
  2146. rc = pmgClient->m_piMCSSap->ChannelJoin(
  2147. (unsigned short) pmgBuffer->channelId);
  2148. //
  2149. // If the join failed then post an error back immediately
  2150. //
  2151. if (rc != 0)
  2152. {
  2153. if ((rc != MCS_TRANSMIT_BUFFER_FULL) &&
  2154. (rc != MCS_DOMAIN_MERGING))
  2155. {
  2156. //
  2157. // Something terminal went wrong - post a
  2158. // NET_EV_JOIN_CONFIRM (failed) to the client
  2159. //
  2160. MGPostJoinConfirm(pmgClient,
  2161. NET_RESULT_USER_REJECTED,
  2162. pmgBuffer->channelId,
  2163. (NET_CHANNEL_ID)(pmgBuffer->work));
  2164. }
  2165. pmgClient->joinPending = FALSE;
  2166. }
  2167. else
  2168. {
  2169. //
  2170. // If the request worked then we must move it to the
  2171. // join queue for completion
  2172. //
  2173. TRACE_OUT(("Inserting 0x%08x into join queue",pmgBuffer));
  2174. COM_BasedListRemove(&(pmgBuffer->pendChain));
  2175. COM_BasedListInsertBefore(&(pmgClient->joinChain),
  2176. &(pmgBuffer->pendChain));
  2177. //
  2178. // Do not free this buffer - continue processing the
  2179. // pending queue
  2180. //
  2181. continue;
  2182. }
  2183. }
  2184. break;
  2185. case MG_RQ_CHANNEL_LEAVE:
  2186. {
  2187. //
  2188. // Try to leave the channel:
  2189. //
  2190. rc = pmgClient->m_piMCSSap->ChannelLeave(
  2191. (unsigned short) pmgBuffer->channelId);
  2192. if (rc == 0)
  2193. {
  2194. MGProcessEndFlow(pmgClient,
  2195. pmgBuffer->channelId);
  2196. }
  2197. }
  2198. break;
  2199. case MG_RQ_TOKEN_GRAB:
  2200. {
  2201. rc = pmgClient->m_piMCSSap->TokenGrab(pmgBuffer->channelId);
  2202. }
  2203. break;
  2204. case MG_RQ_TOKEN_INHIBIT:
  2205. {
  2206. rc = pmgClient->m_piMCSSap->TokenInhibit(pmgBuffer->channelId);
  2207. }
  2208. break;
  2209. case MG_RQ_TOKEN_RELEASE:
  2210. {
  2211. rc = pmgClient->m_piMCSSap->TokenRelease(pmgBuffer->channelId);
  2212. }
  2213. break;
  2214. case MG_TX_BUFFER:
  2215. {
  2216. ASSERT(!(pmgBuffer->pPktHeader->header.pktLength & TSHR_PKT_FLOW));
  2217. //
  2218. // Send the data. Remember that we don't send all of the
  2219. // packet header, only from the length...
  2220. //
2221. ASSERT(pmgBuffer->priority != NET_TOP_PRIORITY);
  2222. rc = pmgClient->m_piMCSSap->SendData(NORMAL_SEND_DATA,
  2223. pmgBuffer->channelId,
  2224. (Priority)(pmgBuffer->priority),
  2225. (unsigned char *) &(pmgBuffer->pPktHeader->header),
  2226. pmgBuffer->pPktHeader->header.pktLength,
  2227. APP_ALLOCATION);
  2228. //
  2229. // Check the return code.
  2230. //
  2231. if (rc == 0)
  2232. {
  2233. //
  2234. // Update the allocation. FLO_DecrementAlloc will
  2235. // check that the stream pointer is not null for us.
  2236. // (It will be null if flow control has ended on this
  2237. // channel since this buffer was allocated or if this
  2238. // is an uncontrolled channel).
  2239. //
  2240. // Note that for flow control purposes, we always use
  2241. // packet sizes including the internal packet header.
  2242. //
  2243. FLO_DecrementAlloc(pmgBuffer->pStr,
  2244. (pmgBuffer->pPktHeader->header.pktLength
  2245. - sizeof(TSHR_NET_PKT_HEADER) + sizeof(MG_INT_PKT_HEADER)));
  2246. }
  2247. }
  2248. break;
  2249. case MG_TX_PING:
  2250. case MG_TX_PONG:
  2251. case MG_TX_PANG:
  2252. {
  2253. //
  2254. // This is the length of a ping/pong message:
  2255. //
  2256. ASSERT(pmgBuffer->priority != NET_TOP_PRIORITY);
  2257. rc = pmgClient->m_piMCSSap->SendData(NORMAL_SEND_DATA,
  2258. pmgBuffer->channelId,
  2259. (Priority)(pmgBuffer->priority),
  2260. (unsigned char *) &(pmgBuffer->pPktHeader->header),
  2261. sizeof(TSHR_NET_PKT_HEADER) + sizeof(TSHR_FLO_CONTROL),
  2262. APP_ALLOCATION);
  2263. }
  2264. break;
  2265. }
  2266. rc = McsErrToNetErr(rc);
  2267. //
  2268. // If the request failed due to back pressure then just get out
  2269. // now. We will try again later.
  2270. //
  2271. if (rc == NET_RC_MGC_TOO_MUCH_IN_USE)
  2272. {
  2273. TRACE_OUT(("MCS Back pressure"));
  2274. break;
  2275. }
  2276. //
  2277. // Only for obman...
  2278. //
  2279. if (pmgClient == &g_amgClients[MGTASK_OM])
  2280. {
  2281. ValidateCMP(g_pcmPrimary);
  2282. //
  2283. // For any other error or if everything worked so far
  2284. // then tell the user to keep going
  2285. //
  2286. TRACE_OUT(("Posting NET_FEEDBACK"));
  2287. UT_PostEvent(pmgClient->putTask,
  2288. pmgClient->putTask,
  2289. NO_DELAY,
  2290. NET_FEEDBACK,
  2291. 0,
  2292. g_pcmPrimary->callID);
  2293. }
  2294. //
  2295. // All is OK, or the request failed fatally. In either case we
  2296. // should free this request and attempt to continue.
  2297. //
  2298. MGFreeBuffer(pmgClient, &pmgBuffer);
  2299. }
  2300. DebugExitDWORD(MGProcessPendingQueue, rc);
  2301. return(rc);
  2302. }
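//
// Decoupled request sketch (based on FLOPing/FLOPong later in this
// file): producers append a control block to the pending chain and
// wake the task, e.g.
//
//   COM_BasedListInsertBefore(&(pmgClient->pendChain),
//                             &(pmgBuffer->pendChain));
//   UT_PostEvent(pmgClient->putTask, pmgClient->putTask,
//                NO_DELAY, NET_MG_SCHEDULE, 0, 0);
//
// MGProcessPendingQueue then drains the chain in order, stopping
// early on NET_RC_MGC_TOO_MUCH_IN_USE so the request is retried on
// the next schedule event.
//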
  2303. //
  2304. // MGPostJoinConfirm(...)
  2305. //
  2306. UINT MGPostJoinConfirm
  2307. (
  2308. PMG_CLIENT pmgClient,
  2309. NET_RESULT result,
  2310. NET_CHANNEL_ID channel,
  2311. NET_CHANNEL_ID correlator
  2312. )
  2313. {
  2314. PNET_JOIN_CNF_EVENT pNetJoinCnf;
  2315. PMG_BUFFER pmgBuffer;
  2316. UINT rc;
  2317. DebugEntry(MGPostJoinConfirm);
  2318. ValidateMGClient(pmgClient);
  2319. //
  2320. // Allocate a buffer to send the event in - this should only fail if we
  2321. // really are out of virtual memory.
  2322. //
  2323. rc = MGNewDataBuffer(pmgClient, MG_EV_BUFFER,
  2324. sizeof(MG_INT_PKT_HEADER) + sizeof(NET_JOIN_CNF_EVENT), &pmgBuffer);
  2325. if (rc != 0)
  2326. {
  2327. WARNING_OUT(("Failed to alloc NET_JOIN_CNF_EVENT"));
  2328. DC_QUIT;
  2329. }
  2330. pNetJoinCnf = (PNET_JOIN_CNF_EVENT) pmgBuffer->pDataBuffer;
  2331. ValidateCMP(g_pcmPrimary);
  2332. if (!g_pcmPrimary->callID)
  2333. {
  2334. WARNING_OUT(("MGPostJoinConfirm failed; not in call"));
  2335. rc = NET_RC_MGC_NOT_CONNECTED;
  2336. DC_QUIT;
  2337. }
  2338. //
  2339. // Fill in the fields.
  2340. //
  2341. pNetJoinCnf->callID = g_pcmPrimary->callID;
  2342. pNetJoinCnf->result = result;
  2343. pNetJoinCnf->channel = channel;
  2344. pNetJoinCnf->correlator = correlator;
  2345. //
  2346. // OK, we've built the event so now post it to our client:
  2347. //
  2348. UT_PostEvent(pmgClient->putTask,
  2349. pmgClient->putTask,
  2350. NO_DELAY,
  2351. NET_EVENT_CHANNEL_JOIN,
  2352. 0,
  2353. (UINT_PTR) pNetJoinCnf);
  2354. pmgBuffer->eventPosted = TRUE;
  2355. DC_EXIT_POINT:
  2356. DebugExitDWORD(MGPostJoinConfirm, rc);
  2357. return(rc);
  2358. }
  2359. //
2360. // McsErrToNetErr()
  2361. //
  2362. UINT McsErrToNetErr ( UINT rcMCS )
  2363. {
  2364. UINT rc = NET_RC_MGC_NOT_SUPPORTED;
  2365. //
  2366. // We use a static array of values to map the return code:
  2367. //
  2368. if (rcMCS < sizeof(c_RetCodeMap1) / sizeof(c_RetCodeMap1[0]))
  2369. {
  2370. rc = c_RetCodeMap1[rcMCS];
  2371. }
  2372. else
  2373. {
  2374. UINT nNewIndex = rcMCS - MCS_DOMAIN_ALREADY_EXISTS;
  2375. if (nNewIndex < sizeof(c_RetCodeMap2) / sizeof(c_RetCodeMap2[0]))
  2376. {
  2377. rc = c_RetCodeMap2[nNewIndex];
  2378. }
  2379. }
  2380. #ifdef _DEBUG
  2381. if (MCS_TRANSMIT_BUFFER_FULL == rcMCS)
  2382. {
  2383. ASSERT(NET_RC_MGC_TOO_MUCH_IN_USE == rc);
  2384. }
  2385. #endif
  2386. return rc;
  2387. }
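//
// Example of the mapping (from the tables at the top of this file):
// small MCS return codes index c_RetCodeMap1 directly, larger ones
// are rebased by MCS_DOMAIN_ALREADY_EXISTS into c_RetCodeMap2, and
// anything outside both tables falls back to the default of
// NET_RC_MGC_NOT_SUPPORTED. The debug check above relies on
// MCS_TRANSMIT_BUFFER_FULL landing on NET_RC_MGC_TOO_MUCH_IN_USE.
//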
  2388. //
  2389. // TranslateResult(...)
  2390. //
  2391. NET_RESULT TranslateResult(WORD resultMCS)
  2392. {
  2393. //
  2394. // We use a static array of values to map the result code:
  2395. //
  2396. if (resultMCS >= MG_NUM_OF_MCS_RESULTS)
  2397. resultMCS = MG_INVALID_MCS_RESULT;
  2398. return(c_ResultMap[resultMCS]);
  2399. }
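//
// Example: any resultMCS at or beyond MG_NUM_OF_MCS_RESULTS is
// clamped to MG_INVALID_MCS_RESULT, the sentinel slot of c_ResultMap
// (NET_RESULT_UNKNOWN), so out-of-range MCS results translate to the
// "unknown" entry rather than indexing past the table.
//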
  2400. //
2401. // MGFLOCallBack(...)
  2402. //
  2403. void MGFLOCallBack
  2404. (
  2405. PMG_CLIENT pmgClient,
  2406. UINT callbackType,
  2407. UINT priority,
  2408. UINT newBufferSize
  2409. )
  2410. {
  2411. PMG_BUFFER pmgBuffer;
  2412. DebugEntry(MGFLOCallBack);
  2413. ASSERT(priority != NET_TOP_PRIORITY);
  2414. ValidateMGClient(pmgClient);
  2415. ASSERT(pmgClient->userAttached);
  2416. //
  2417. // If this is a buffermod callback then tell the app
  2418. //
  2419. if (pmgClient == &g_amgClients[MGTASK_DCS])
  2420. {
  2421. if (callbackType == FLO_BUFFERMOD)
  2422. {
  2423. UT_PostEvent(pmgClient->putTask,
  2424. pmgClient->putTask,
  2425. NO_DELAY,
  2426. NET_FLOW,
  2427. priority,
  2428. newBufferSize);
  2429. }
  2430. }
  2431. else
  2432. {
  2433. ASSERT(pmgClient == &g_amgClients[MGTASK_OM]);
  2434. //
  2435. // Wake up the app in case we have applied back pressure.
  2436. //
  2437. TRACE_OUT(("Posting NET_FEEDBACK"));
  2438. UT_PostEvent(pmgClient->putTask,
  2439. pmgClient->putTask,
  2440. NO_DELAY,
  2441. NET_FEEDBACK,
  2442. 0,
  2443. g_pcmPrimary->callID);
  2444. }
2445. DebugExitVOID(MGFLOCallBack);
  2446. }
  2447. //
  2448. // MGProcessDomainWatchdog()
  2449. //
  2450. void MGProcessDomainWatchdog
  2451. (
  2452. PMG_CLIENT pmgClient
  2453. )
  2454. {
  2455. int task;
  2456. DebugEntry(MGProcessDomainWatchdog);
  2457. ValidateMGClient(pmgClient);
  2458. //
  2459. // Call FLO to check each user attachment for delinquency
  2460. //
  2461. if (g_mgAttachCount > 0)
  2462. {
  2463. for (task = MGTASK_FIRST; task < MGTASK_MAX; task++)
  2464. {
  2465. if (g_amgClients[task].userAttached)
  2466. {
  2467. FLO_CheckUsers(&(g_amgClients[task]));
  2468. }
  2469. }
  2470. //
  2471. // Continue periodic messages - but only if there are some users.
  2472. //
  2473. // TRACE_OUT(("Continue watchdog"));
  2474. UT_PostEvent(pmgClient->putTask,
  2475. pmgClient->putTask,
  2476. MG_TIMER_PERIOD,
  2477. NET_MG_WATCHDOG,
  2478. 0, 0);
  2479. }
  2480. else
  2481. {
  2482. TRACE_OUT(("Don't continue Watchdog timer"));
  2483. }
  2484. DebugExitVOID(MGProcessDomainWatchdog);
  2485. }
  2486. //
  2487. // FLO_UserTerm
  2488. //
  2489. void FLO_UserTerm(PMG_CLIENT pmgClient)
  2490. {
  2491. UINT i;
  2492. UINT cStreams;
  2493. DebugEntry(FLO_UserTerm);
  2494. ValidateMGClient(pmgClient);
  2495. ASSERT(pmgClient->userAttached);
  2496. cStreams = pmgClient->flo.numStreams;
  2497. //
  2498. // Stop flow control on all channels. We scan the list of streams and
  2499. // if flow control is active on a stream then we stop it.
  2500. //
  2501. for (i = 0; i < cStreams; i++)
  2502. {
  2503. //
  2504. // Check that the stream is flow controlled.
  2505. //
  2506. if (pmgClient->flo.pStrData[i] != NULL)
  2507. {
  2508. //
  2509. // End control on this controlled stream.
  2510. //
  2511. FLOStreamEndControl(pmgClient, i);
  2512. }
  2513. }
  2514. DebugExitVOID(FLO_UserTerm);
  2515. }
  2516. //
  2517. // FLO_StartControl
  2518. //
  2519. void FLO_StartControl
  2520. (
  2521. PMG_CLIENT pmgClient,
  2522. NET_CHANNEL_ID channel,
  2523. UINT priority,
  2524. UINT backlog,
  2525. UINT maxBytesOutstanding
  2526. )
  2527. {
  2528. UINT rc = 0;
  2529. PFLO_STREAM_DATA pStr;
  2530. UINT i;
  2531. UINT stream;
  2532. DebugEntry(FLO_StartControl);
  2533. ValidateMGClient(pmgClient);
  2534. ASSERT(pmgClient->userAttached);
  2535. ASSERT(priority != NET_TOP_PRIORITY);
  2536. //
  2537. // Flow control is on by default.
  2538. //
  2539. //
  2540. // Check to see if the channel is already flow controlled. If it is
  2541. // then we just exit.
  2542. //
  2543. stream = FLOGetStream(pmgClient, channel, priority, &pStr);
  2544. if (stream != FLO_NOT_CONTROLLED)
  2545. {
  2546. ValidateFLOStr(pStr);
  2547. TRACE_OUT(("Stream %u is already controlled (0x%08x:%u)",
  2548. stream, channel, priority));
  2549. DC_QUIT;
  2550. }
  2551. //
  2552. // If we already have hit the stream limit for this app then give up.
  2553. //
  2554. for (i = 0; i < FLO_MAX_STREAMS; i++)
  2555. {
  2556. if ((pmgClient->flo.pStrData[i]) == NULL)
  2557. {
  2558. break;
  2559. }
  2560. }
  2561. if (i == FLO_MAX_STREAMS)
  2562. {
  2563. ERROR_OUT(("Too many streams defined already"));
  2564. DC_QUIT;
  2565. }
  2566. TRACE_OUT(("This is stream %u", i));
  2567. //
  2568. // Allocate memory for our stream data. Hang the pointer off floHandle
  2569. // - this should be returned to us on all subsequent API calls.
  2570. //
  2571. pStr = new FLO_STREAM_DATA;
  2572. if (!pStr)
  2573. {
  2574. WARNING_OUT(("FLO_StartControl failed; out of memory"));
  2575. DC_QUIT;
  2576. }
  2577. ZeroMemory(pStr, sizeof(*pStr));
  2578. //
  2579. // Store the channel and priorities for this stream.
  2580. //
  2581. SET_STAMP(pStr, FLOSTR);
  2582. pStr->channel = channel;
  2583. pStr->priority = priority;
  2584. pStr->backlog = backlog;
  2585. if (maxBytesOutstanding == 0)
  2586. {
  2587. maxBytesOutstanding = FLO_MAX_STREAMSIZE;
  2588. }
  2589. pStr->DC_ABSMaxBytesInPipe = maxBytesOutstanding;
  2590. pStr->maxBytesInPipe = FLO_INIT_STREAMSIZE;
  2591. if (pStr->maxBytesInPipe > maxBytesOutstanding)
  2592. {
  2593. pStr->maxBytesInPipe = maxBytesOutstanding;
  2594. }
  2595. //
  2596. // Set the initial stream bytesAllocated to 0.
  2597. //
  2598. pStr->bytesAllocated = 0;
  2599. //
  2600. // Ping needed immediately.
  2601. //
  2602. pStr->pingNeeded = TRUE;
  2603. pStr->pingTime = FLO_INIT_PINGTIME;
  2604. pStr->nextPingTime = GetTickCount();
  2605. //
  2606. // Initialize the users base pointers.
  2607. //
  2608. COM_BasedListInit(&(pStr->users));
  2609. //
  2610. // Hang the stream CB off the base control block.
  2611. //
  2612. pmgClient->flo.pStrData[i] = pStr;
  2613. if (i >= pmgClient->flo.numStreams)
  2614. {
  2615. pmgClient->flo.numStreams++;
  2616. }
  2617. TRACE_OUT(("Flow control started, stream %u, (0x%08x:%u)",
  2618. i, channel, priority));
  2619. DC_EXIT_POINT:
  2620. DebugExitVOID(FLO_StartControl);
  2621. }
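//
// Usage sketch (parameter values illustrative): a caller that does
// not care about the pipe ceiling can pass zero and take the
// default, e.g.
//
//   FLO_StartControl(pmgClient, channel, priority, backlog, 0);
//
// which selects FLO_MAX_STREAMSIZE as the absolute maximum while the
// pipe still opens at FLO_INIT_STREAMSIZE (clamped to that maximum).
//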
  2622. //
  2623. // FLO_EndControl
  2624. //
  2625. void FLO_EndControl
  2626. (
  2627. PMG_CLIENT pmgClient,
  2628. NET_CHANNEL_ID channel,
  2629. UINT priority
  2630. )
  2631. {
  2632. UINT stream;
  2633. PFLO_STREAM_DATA pStr;
  2634. DebugEntry(FLO_EndControl);
  2635. ValidateMGClient(pmgClient);
  2636. ASSERT(pmgClient->userAttached);
  2637. ASSERT(priority != NET_TOP_PRIORITY);
  2638. //
  2639. // Convert channel and stream into priority.
  2640. //
  2641. stream = FLOGetStream(pmgClient, channel, priority, &pStr);
  2642. //
  2643. // The stream is not controlled so we just trace and quit.
  2644. //
  2645. if (stream == FLO_NOT_CONTROLLED)
  2646. {
  2647. WARNING_OUT(("Uncontrolled stream channel 0x%08x priority %u",
  2648. channel, priority));
  2649. DC_QUIT;
  2650. }
  2651. //
  2652. // Call the internal FLOStreamEndControl to end flow control on a
  2653. // given stream.
  2654. //
  2655. ValidateFLOStr(pStr);
  2656. FLOStreamEndControl(pmgClient, stream);
  2657. DC_EXIT_POINT:
  2658. DebugExitVOID(FLO_EndControl);
  2659. }
  2660. //
  2661. // FLO_AllocSend
  2662. //
  2663. UINT FLO_AllocSend
  2664. (
  2665. PMG_CLIENT pmgClient,
  2666. UINT priority,
  2667. NET_CHANNEL_ID channel,
  2668. UINT size,
  2669. PFLO_STREAM_DATA * ppStr
  2670. )
  2671. {
  2672. UINT stream;
  2673. UINT curtime;
  2674. PFLO_STREAM_DATA pStr;
  2675. BOOL denyAlloc = FALSE;
  2676. BOOL doPing = FALSE;
  2677. UINT rc = 0;
  2678. DebugEntry(FLO_AllocSend);
  2679. ValidateMGClient(pmgClient);
  2680. ASSERT(pmgClient->userAttached);
  2681. ASSERT(priority != NET_TOP_PRIORITY);
  2682. //
  2683. // Convert channel and stream into priority
  2684. //
  2685. stream = FLOGetStream(pmgClient, channel, priority, ppStr);
  2686. pStr = *ppStr;
  2687. //
  2688. // For non-controlled streams just send the data
  2689. //
  2690. if (stream == FLO_NOT_CONTROLLED)
  2691. {
  2692. TRACE_OUT(("Send %u bytes on uncontrolled channel/pri (0x%08x:%u)",
  2693. size, channel, priority));
  2694. DC_QUIT;
  2695. }
  2696. //
  2697. // Get the current tick count.
  2698. //
  2699. curtime = GetTickCount();
  2700. //
  2701. // Check whether this request is permitted. We must allow one packet
  2702. // beyond the specified limit to avoid problems determining when we
  2703. // have started rejecting requests and also to avoid situations where a
  2704. // single request exceeds the total pipe size.
  2705. //
  2706. // If we have not yet received a pong then we limit the amount of
  2707. // allocated buffer space to below FLO_MAX_PRE_FC_ALLOC. However this
  2708. // data can be sent immediately so the overall throughput is still
  2709. // relatively high. In this way we minimize the amount of data held in
  2710. // the glue layer to a maximum of FLO_MAX_PRE_FC_ALLOC if there are no
  2711. // remote users.
  2712. //
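//
// Worked example (packet size illustrative; the real bound is
// FLO_MAX_PRE_FC_ALLOC): before the first pong arrives, a sender
// pushing 2KB packets is granted roughly FLO_MAX_PRE_FC_ALLOC / 2048
// allocations before seeing NET_RC_MGC_TOO_MUCH_IN_USE, so startup
// throughput stays high without letting unacknowledged data grow
// without bound.
//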
  2713. ValidateFLOStr(pStr);
  2714. if (!pStr->gotPong)
  2715. {
  2716. //
  2717. // Flag that a ping is required.
  2718. //
  2719. pStr->pingNeeded = TRUE;
  2720. if (curtime > pStr->nextPingTime)
  2721. {
  2722. doPing = TRUE;
  2723. }
  2724. //
  2725. // We haven't got a pong yet (i.e. FC is non-operational) so we
  2726. // need to limit the maximum amount of data held in flow control to
  2727. // FLO_MAX_PRE_FC_ALLOC.
  2728. //
  2729. if (pStr->bytesAllocated > FLO_MAX_PRE_FC_ALLOC)
  2730. {
  2731. denyAlloc = TRUE;
  2732. TRACE_OUT(("Max allocation of %u bytes exceeded (currently %u)",
  2733. FLO_MAX_PRE_FC_ALLOC,
  2734. pStr->bytesAllocated));
  2735. DC_QUIT;
  2736. }
  2737. pStr->bytesInPipe += size;
  2738. pStr->bytesAllocated += size;
  2739. TRACE_OUT((
  2740. "Alloc of %u succeeded: bytesAlloc %u, bytesInPipe %u"
  2741. " (0x%08x:%u)",
  2742. size,
  2743. pStr->bytesAllocated,
  2744. pStr->bytesInPipe,
  2745. pStr->channel,
  2746. pStr->priority));
  2747. DC_QUIT;
  2748. }
  2749. if (pStr->bytesInPipe < pStr->maxBytesInPipe)
  2750. {
  2751. //
  2752. // Check to see if a ping is required and if so send it now.
  2753. //
  2754. if ((pStr->pingNeeded) && (curtime > pStr->nextPingTime))
  2755. {
  2756. doPing = TRUE;
  2757. }
  2758. pStr->bytesInPipe += size;
  2759. pStr->bytesAllocated += size;
  2760. TRACE_OUT(("Stream %u - alloc %u (InPipe:MaxInPipe %u:%u)",
  2761. stream,
  2762. size,
  2763. pStr->bytesInPipe,
  2764. pStr->maxBytesInPipe));
  2765. DC_QUIT;
  2766. }
  2767. //
  2768. // If we get here then we cannot currently allocate any buffers so deny
2769. // the allocation. Simulate back pressure with NET_RC_MGC_TOO_MUCH_IN_USE.
  2770. // We also flag that a "wake up" event is required to get the app to
  2771. // send more data.
  2772. //
  2773. denyAlloc = TRUE;
  2774. pStr->eventNeeded = TRUE;
  2775. pStr->curDenialTime = pStr->lastPingTime;
  2776. //
  2777. // We are not allowed to apply back pressure unless we can guarantee
  2778. // that we will wake up the app later on. This is dependent upon our
  2779. // receiving a pong later. But if there is no ping outstanding
  2780. // (because we have allocated all our buffer allowance within the ping
  2781. // delay time) then we should first send a ping to trigger the wake up.
  2782. // If this fails then our watchdog will finally wake us up.
  2783. //
  2784. if (pStr->pingNeeded)
  2785. {
  2786. doPing = TRUE;
  2787. }
  2788. DC_EXIT_POINT:
  2789. //
  2790. // Check to see if we should deny the buffer allocation.
  2791. //
  2792. if (denyAlloc)
  2793. {
  2794. rc = NET_RC_MGC_TOO_MUCH_IN_USE;
  2795. TRACE_OUT(("Denying buffer request on stream %u InPipe %u Alloc %u",
  2796. stream,
  2797. pStr->bytesInPipe,
  2798. pStr->bytesAllocated));
  2799. }
  2800. if (doPing)
  2801. {
  2802. //
  2803. // A ping is required so send it now.
  2804. //
  2805. FLOPing(pmgClient, stream, curtime);
  2806. }
  2807. DebugExitDWORD(FLO_AllocSend, rc);
  2808. return(rc);
  2809. }
  2810. //
  2811. // FLO_ReallocSend
  2812. //
  2813. void FLO_ReallocSend
  2814. (
  2815. PMG_CLIENT pmgClient,
  2816. PFLO_STREAM_DATA pStr,
  2817. UINT size
  2818. )
  2819. {
  2820. DebugEntry(FLO_ReallocSend);
  2821. ValidateMGClient(pmgClient);
  2822. ASSERT(pmgClient->userAttached);
  2823. //
  2824. // For non-controlled streams there is nothing to do so just exit.
  2825. //
  2826. if (pStr == NULL)
  2827. {
  2828. TRACE_OUT(("Realloc data on uncontrolled channel"));
  2829. DC_QUIT;
  2830. }
  2831. //
  2832. // Perform a quick sanity check.
  2833. //
  2834. ValidateFLOStr(pStr);
  2835. if (size > pStr->bytesInPipe)
  2836. {
  2837. ERROR_OUT(("Realloc of %u makes bytesInPipe (%u) neg (0x%08x:%u)",
  2838. size,
  2839. pStr->bytesInPipe,
  2840. pStr->channel,
  2841. pStr->priority));
  2842. DC_QUIT;
  2843. }
  2844. //
  2845. // Add the length not sent back into the pool.
  2846. //
  2847. pStr->bytesInPipe -= size;
  2848. TRACE_OUT(("Realloc %u FC bytes (bytesInPipe is now %u) (0x%08x:%u)",
  2849. size,
  2850. pStr->bytesInPipe,
  2851. pStr->channel,
  2852. pStr->priority));
  2853. DC_EXIT_POINT:
  2854. //
  2855. // Every time that we call FLO_ReallocSend we also want to call
  2856. // FLO_DecrementAlloc (but not vice-versa) so call it now.
  2857. //
  2858. FLO_DecrementAlloc(pStr, size);
  2859. DebugExitVOID(FLO_ReallocSend);
  2860. }
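//
// Invariant sketch: FLO_AllocSend raises both bytesInPipe and
// bytesAllocated by size. FLO_ReallocSend (data not sent) undoes
// both, via the subtraction above plus the FLO_DecrementAlloc call;
// a successful send undoes only bytesAllocated through
// FLO_DecrementAlloc, and bytesInPipe is later reset by the pong
// handling in FLO_ReceivedPacket.
//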
  2861. //
  2862. // FLO_DecrementAlloc
  2863. //
  2864. void FLO_DecrementAlloc
  2865. (
  2866. PFLO_STREAM_DATA pStr,
  2867. UINT size
  2868. )
  2869. {
  2870. DebugEntry(FLO_DecrementAlloc);
  2871. //
  2872. // For non-controlled streams there is nothing to do so just exit.
  2873. //
  2874. if (pStr == NULL)
  2875. {
  2876. TRACE_OUT(("Decrement bytesAllocated on uncontrolled channel"));
  2877. DC_QUIT;
  2878. }
  2879. //
  2880. // Perform a quick sanity check.
  2881. //
  2882. ValidateFLOStr(pStr);
  2883. if (size > pStr->bytesAllocated)
  2884. {
  2885. ERROR_OUT(("Dec of %u makes bytesAllocated (%u) neg (0x%08x:%u)",
  2886. size,
  2887. pStr->bytesAllocated,
  2888. pStr->channel,
  2889. pStr->priority));
  2890. DC_QUIT;
  2891. }
  2892. //
  2893. // Update the count of the data held in the glue for this stream.
  2894. //
  2895. pStr->bytesAllocated -= size;
  2896. TRACE_OUT(("Clearing %u alloc bytes (bytesAlloc is now %u) (0x%08x:%u)",
  2897. size,
  2898. pStr->bytesAllocated,
  2899. pStr->channel,
  2900. pStr->priority));
  2901. DC_EXIT_POINT:
  2902. DebugExitVOID(FLO_DecrementAlloc);
  2903. }
  2904. //
  2905. // FLO_CheckUsers
  2906. //
  2907. void FLO_CheckUsers(PMG_CLIENT pmgClient)
  2908. {
  2909. PFLO_USER pFloUser;
  2910. PBASEDLIST nextUser;
  2911. int waited;
  2912. BYTE stream;
  2913. UINT curtime;
  2914. PFLO_STREAM_DATA pStr;
  2915. DebugEntry(FLO_CheckUsers);
  2916. ValidateMGClient(pmgClient);
  2917. ASSERT(pmgClient->userAttached);
  2918. curtime = GetTickCount();
  2919. //
  2920. // Check users of each stream
  2921. //
  2922. for (stream = 0; stream < pmgClient->flo.numStreams; stream++)
  2923. {
  2924. if (pmgClient->flo.pStrData[stream] == NULL)
  2925. {
  2926. continue;
  2927. }
  2928. pStr = pmgClient->flo.pStrData[stream];
  2929. ValidateFLOStr(pStr);
  2930. //
  2931. // Check whether we have waited long enough and need to reset the
  2932. // wait counters. We only wait a certain time before resetting all
  2933. // our counts. What has happened is that someone has left the call
  2934. // and we have been waiting for their pong.
  2935. //
  2936. // We detect the outage by checking against nextPingTime which, as
  2937. // well as being set to the earliest time we can send a ping is
  2938. // also updated to the current time as each pong comes in so we can
2939. // use it as a measure of the time since the last response from any
  2940. // user of the stream.
  2941. //
  2942. // To avoid false outages caused by new joiners or transient large
  2943. // buffer situations each user is required to send a pong at the
2944. // rate of FLO_MAX_WAIT_TIME/2. They do this by just sending a
2945. // duplicate pong if they have not yet got the ping they need
2946. // to pong.
  2947. //
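//
// Timeline sketch (actual FLO_MAX_WAIT_TIME value not shown here):
// remote users pong at least every FLO_MAX_WAIT_TIME/2, so if
// nextPingTime has not moved for more than FLO_MAX_WAIT_TIME the
// stream is treated as orphaned - counters are reset, users whose
// lastPongRcvd is stale are purged, and any blocked sender is woken
// via the FLO_WAKEUP callback below.
//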
  2948. if ((pStr->eventNeeded) &&
  2949. (!pStr->pingNeeded))
  2950. {
  2951. TRACE_OUT(("Checking for valid back pressure on stream %u",
  2952. stream));
  2953. //
  2954. // Note that if there are no remote users then we should reset
  2955. // the flags regardless. We get into this state when we first
  2956. // start an app because OBMAN sends data before the app has
  2957. // joined the channel at the other end.
  2958. //
  2959. waited = curtime - pStr->nextPingTime;
  2960. if (waited > FLO_MAX_WAIT_TIME)
  2961. {
  2962. TRACE_OUT(("Stream %u - Waited for %d, resetting counter",
  2963. stream, waited));
  2964. pStr->bytesInPipe = 0;
  2965. pStr->pingNeeded = TRUE;
  2966. pStr->nextPingTime = curtime;
  2967. pStr->gotPong = FALSE;
  2968. //
  2969. // Remove outdated records from our user queue
  2970. //
  2971. pFloUser = (PFLO_USER)COM_BasedNextListField(&(pStr->users));
  2972. while (&(pFloUser->list) != &(pStr->users))
  2973. {
  2974. ValidateFLOUser(pFloUser);
  2975. //
  2976. // Address the follow on record before we free the
  2977. // current
  2978. //
  2979. nextUser = COM_BasedNextListField(&(pFloUser->list));
  2980. //
  2981. // Free the current record, if necessary
  2982. //
  2983. if (pFloUser->lastPongRcvd != pStr->pingValue)
  2984. {
  2985. //
  2986. // Remove from the list
  2987. //
  2988. TRACE_OUT(("Freeing FLO_USER 0x%08x ID 0x%08x", pFloUser, pFloUser->userID));
  2989. COM_BasedListRemove(&(pFloUser->list));
  2990. delete pFloUser;
  2991. }
  2992. else
  2993. {
  2994. //
  2995. // At least one user still out there so keep flow
  2996. // control active or else we would suddenly send
  2997. // out a burst of data that might flood them
  2998. //
  2999. pStr->gotPong = TRUE;
  3000. }
  3001. //
  3002. // Move on to the next record in the list
  3003. //
  3004. pFloUser = (PFLO_USER)nextUser;
  3005. }
  3006. //
  3007. // We have previously rejected an application request so we
  3008. // had better call back now
  3009. //
  3010. if (pmgClient->flo.callBack != NULL)
  3011. {
  3012. (*(pmgClient->flo.callBack))(pmgClient,
  3013. FLO_WAKEUP,
  3014. pStr->priority,
  3015. pStr->maxBytesInPipe);
  3016. }
  3017. pStr->eventNeeded = FALSE;
  3018. }
  3019. }
  3020. }
  3021. DebugExitVOID(FLO_CheckUsers);
  3022. }
  3023. //
  3024. // FLO_ReceivedPacket
  3025. //
  3026. void FLO_ReceivedPacket
  3027. (
  3028. PMG_CLIENT pmgClient,
  3029. PTSHR_FLO_CONTROL pPkt
  3030. )
  3031. {
  3032. BOOL canPing = TRUE;
  3033. PFLO_USER pFloUser;
  3034. BOOL userFound = FALSE;
  3035. UINT stream;
  3036. UINT curtime;
  3037. PFLO_STREAM_DATA pStr;
  3038. UINT callbackType = 0;
  3039. int latency;
  3040. UINT throughput;
  3041. DebugEntry(FLO_ReceivedPacket);
  3042. ValidateMGClient(pmgClient);
  3043. ASSERT(pmgClient->userAttached);
  3044. stream = pPkt->stream;
  3045. ASSERT(stream < FLO_MAX_STREAMS);
  3046. pStr = pmgClient->flo.pStrData[stream];
  3047. //
  3048. // If the stream CB has been freed up already then we can ignore any
  3049. // flow information pertaining to it.
  3050. //
  3051. if (pStr == NULL)
  3052. {
  3053. TRACE_OUT(("Found a null stream pointer for stream %u", stream));
  3054. DC_QUIT;
  3055. }
  3056. ValidateFLOStr(pStr);
  3057. curtime = GetTickCount();
  3058. //
  3059. // First we must locate the user for this ping/pong/pang
  3060. // Also, while we are doing it we can check to see if it is a pong and
  3061. // if so whether it is the last pong we need
  3062. //
  3063. pFloUser = (PFLO_USER)COM_BasedNextListField(&(pStr->users));
  3064. while (&(pFloUser->list) != &(pStr->users))
  3065. {
  3066. ValidateFLOUser(pFloUser);
  3067. if (pFloUser->userID == pPkt->userID)
  3068. {
  3069. userFound = TRUE;
  3070. //
  3071. // We have got a match so set up the last pong value
  3072. // Accumulate pong stats for query
  3073. //
  3074. if (pPkt->packetType == PACKET_TYPE_PONG)
  3075. {
  3076. pFloUser->lastPongRcvd = pPkt->pingPongID;
  3077. pFloUser->gotPong = TRUE;
  3078. pFloUser->numPongs++;
  3079. pFloUser->pongDelay += curtime - pStr->lastPingTime;
  3080. }
  3081. else
  3082. {
  3083. break;
  3084. }
  3085. }
  3086. //
  3087. // So, is it the final pong - are there any users with different
  3088. // pong required entries?
  3089. // Note that if the user has never sent us a pong then we don't
  3090. // reference their lastPongRcvd field at this stage.
  3091. //
  3092. if (pPkt->packetType == PACKET_TYPE_PONG)
  3093. {
  3094. if (pFloUser->gotPong &&
  3095. (pFloUser->lastPongRcvd != pStr->pingValue))
  3096. {
  3097. TRACE_OUT(("%u,%u - Entry 0x%08x has different ping id %u",
  3098. stream,
  3099. pFloUser->userID,
  3100. pFloUser,
  3101. pFloUser->lastPongRcvd));
  3102. canPing = FALSE;
  3103. }
  3104. }
  3105. pFloUser = (PFLO_USER)COM_BasedNextListField(&(pFloUser->list));
  3106. }
  3107. //
  3108. // If this is a new User then add them to the list
  3109. //
  3110. if (!userFound)
  3111. {
  3112. pFloUser = FLOAddUser(pPkt->userID, pStr);
  3113. //
  3114. // If this is a pong then we can set up the lastpong as well
  3115. //
  3116. if ((pFloUser != NULL) &&
  3117. (pPkt->packetType == PACKET_TYPE_PONG))
  3118. {
  3119. pFloUser->lastPongRcvd = pPkt->pingPongID;
  3120. }
  3121. }
  3122. //
  3123. // Now perform the actual packet specific processing
  3124. //
  3125. switch (pPkt->packetType)
  3126. {
  3127. //
  3128. // PING
  3129. //
  3130. // If this is a ping packet then just flag we must send a pong. If
  3131. // we failed to alloc a user CB then just ignore the ping and they
  3132. // will continue in blissful ignorance of our presence
  3133. //
  3134. case PACKET_TYPE_PING:
  3135. {
  3136. TRACE_OUT(("%u,%u - PING %u received",
  3137. stream, pPkt->userID, pPkt->pingPongID));
  3138. ValidateFLOUser(pFloUser);
  3139. pFloUser->sendPongID = pPkt->pingPongID;
  3140. if (pFloUser->rxPackets < FLO_MAX_RCV_PACKETS)
  3141. {
  3142. FLOPong(pmgClient, stream, pFloUser->userID, pPkt->pingPongID);
  3143. pFloUser->sentPongTime = curtime;
  3144. }
  3145. else
  3146. {
  3147. TRACE_OUT(("Receive backlog - just flagging pong needed"));
  3148. pFloUser->pongNeeded = TRUE;
  3149. }
  3150. }
  3151. break;
  3152. //
  3153. // PONG
  3154. //
  3155. // Flag we have got a pong from any user so we should start
  3156. // applying send flow control to this stream now (Within the stream
  3157. // we achieve per user granularity by ignoring those users that
  3158. // have never ponged when we inspect the stream byte count.)
  3159. //
  3160. case PACKET_TYPE_PONG:
  3161. {
  3162. pStr->gotPong = TRUE;
  3163. //
  3164. // Keep a note that we are receiving messages on this stream by
  3165. // moving nextPing on (but only if we have passed it)
  3166. //
  3167. if (curtime > pStr->nextPingTime)
  3168. {
  3169. pStr->nextPingTime = curtime;
  3170. }
  3171. //
  3172. // Update the user entry and schedule a ping if necessary
  3173. //
  3174. TRACE_OUT(("%u,%u - PONG %u received",
  3175. stream, pPkt->userID, pPkt->pingPongID));
  3176. //
3177. // Check for readiness to send another ping. This may be because
3178. // this is the first user's pong, in which case we should also send
  3179. // another ping when ready
  3180. //
  3181. if (canPing)
  3182. {
  3183. TRACE_OUT(("%u - PING scheduled, pipe was %d",
  3184. stream,
  3185. pStr->bytesInPipe));
  3186. //
  3187. // Reset byte count and ping readiness flag
  3188. //
  3189. pStr->bytesInPipe = 0;
  3190. pStr->pingNeeded = TRUE;
  3191. //
  3192. // Adjust the buffer size limit based on our current throughput
  3193. //
  3194. // If we hit the back pressure point and yet we are ahead of
  3195. // the target backlog then we should increase the buffer size
  3196. // to avoid constraining the pipe. If we have already
  3197. // increased the buffer size to our maximum value then try
  3198. // decreasing the tick delay. If we are already ticking at the
  3199. // max rate then we are going as fast as we can. If we make
  3200. // either of these adjustments then allow the next ping to flow
  3201. // immediately so that we can ramp up as fast as possible to
  3202. // LAN bandwidths.
  3203. //
3204. // We don't need to do the decrease buffer checks if we have not
  3205. // gone into back pressure during the last pong cycle
  3206. //
  3207. if (pStr->eventNeeded)
  3208. {
  3209. TRACE_OUT(("We were in a back pressure situation"));
  3210. callbackType = FLO_WAKEUP;
  3211. TRACE_OUT(("Backlog %u denial delta %d ping delta %d",
  3212. pStr->backlog, curtime-pStr->lastDenialTime,
  3213. curtime-pStr->lastPingTime));
  3214. //
  3215. // The next is a little complex.
  3216. //
  3217. // If the turnaround of this ping pong is significantly
  3218. // less than our target then open the pipe up. But we must
  3219. // adjust to allow for the ping being sent at a quiet
  3220. // period, which we do by remembering when each ping is
  3221. // sent and, if we encounter a backlog situation, storing
  3222. // that ping time for future reference
  3223. //
  3224. // So the equation for latency is
  3225. //
  3226. // Pongtime-previous backlogged ping time
  3227. //
3228. // The previous ping time is the one that we sent prior to the
3229. // last back pressure situation so there are two times in
3230. // the control block, one for the last ping time and one
  3231. // for the last but one ping time.
  3232. //
  3233. if ((int)(pStr->backlog/2 - curtime +
  3234. pStr->lastDenialTime) > 0)
  3235. {
  3236. //
  3237. // We are coping easily so increase the buffer to pump
  3238. // more data through. Predict the new buffer size
  3239. // based on the latency for the current backlog so that
  3240. // we don't artificially constrain the app. We do this
  3241. // by taking the observed latency, decrementing by a
  3242. // small factor to allow for the latency we might
  3243. // observe over the fastest possible link and then
  3244. // calculating the connection throughput.
  3245. //
3246. // latency = curtime - lastDenialTime - fudge (30ms in the code)
3247. // amount sent = maxBytesInPipe (because we were
  3248. // backed up)
  3249. // throughput = amount sent/latency (bytes/millisec)
  3250. // New buffer = throughput * target latency
  3251. //
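//
// Worked example (numbers illustrative): with maxBytesInPipe 16384,
// a target backlog of 1000ms and a ping->pong turnaround of 230ms,
// latency = 230 - 30 = 200ms, throughput = (16384 * 8) / 200 = 655,
// and the new pipe size is (655 * 1000) / 8 = 81875 bytes before the
// DC_ABSMaxBytesInPipe clamp. The paired *8 and /8 simply preserve
// precision through the integer divisions.
//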
  3252. if (pStr->maxBytesInPipe < pStr->DC_ABSMaxBytesInPipe)
  3253. {
  3254. latency = (curtime -
  3255. pStr->lastDenialTime -
  3256. 30);
  3257. if (latency <= 0)
  3258. {
  3259. latency = 1;
  3260. }
  3261. throughput = (pStr->maxBytesInPipe*8)/latency;
  3262. pStr->maxBytesInPipe = (throughput * pStr->backlog)/8;
  3263. TRACE_OUT(("Potential maxbytes of %d",
  3264. pStr->maxBytesInPipe));
  3265. if (pStr->maxBytesInPipe > pStr->DC_ABSMaxBytesInPipe)
  3266. {
  3267. pStr->maxBytesInPipe = pStr->DC_ABSMaxBytesInPipe;
  3268. }
  3269. TRACE_OUT((
  3270. "Modified buffer maxBytesInPipe up to %u "
  3271. "(0x%08x:%u)",
  3272. pStr->maxBytesInPipe,
  3273. pStr->channel,
  3274. pStr->priority));
  3275. callbackType = FLO_BUFFERMOD;
  3276. }
  3277. else
  3278. {
  3279. //
  3280. // We have hit our maximum allowed pipe size but
  3281. // are still backlogged and yet pings are going
  3282. // through acceptably.
  3283. //
  3284. // Our first action is to try reducing the ping
  3285. // time thus increasing out throughput.
  3286. //
  3287. // If we have already decreased the ping time to
  3288. // its minimum then we cannot do anything else. It
  3289. // is possible that the application parameters
  3290. // should be changed to increase the permissible
  3291. // throughput so log an alert to suggest this.
  3292. // however there are situations (input management)
  3293. // where we want some back pressure in order to
  3294. // prevent excessive cpu loading at the recipient.
  3295. //
  3296. // To increase the throughput either
  3297. //
  3298. // - Increase the maximum size of the stream. The
3299. // disadvantage of this is that a low bandwidth
  3300. // joiner may suddenly see a lot of high
  3301. // bandwidth data in the pipe. However this
  3302. // is the preferred solution in general, as
  3303. // it avoids having the pipe flooded with pings.
  3304. //
  3305. // - Reduce the target latency. This is a little
  3306. // dangerous because the latency is composed of
  3307. // the pre-queued data and the network turnaround
  3308. // time and if the network turnaround time
  3309. // approaches the target latency then the flow
  3310. // control will simply close the pipe right down
  3311. // irrespective of the achievable throughput.
  3312. //
  3313. pStr->maxBytesInPipe = pStr->DC_ABSMaxBytesInPipe;
  3314. pStr->pingTime = pStr->pingTime/2;
  3315. if (pStr->pingTime < FLO_MIN_PINGTIME)
  3316. {
  3317. pStr->pingTime = FLO_MIN_PINGTIME;
  3318. }
  3319. TRACE_OUT((
  3320. "Hit DC_ABS max - reduce ping time to %u",
  3321. pStr->pingTime));
  3322. }
  3323. //
  3324. // Allow the ping just scheduled to flow immediately
  3325. //
  3326. pStr->nextPingTime = curtime;
  3327. }
  3328. pStr->eventNeeded = FALSE;
  3329. }
  3330. //
  3331. // If we have exceeded our target latency at all then throttle
  3332. // back
  3333. //
  3334. if ((int)(pStr->backlog - curtime + pStr->lastPingTime) < 0)
  3335. {
  3336. pStr->maxBytesInPipe /= 2;
  3337. if (pStr->maxBytesInPipe < FLO_MIN_STREAMSIZE)
  3338. {
  3339. pStr->maxBytesInPipe = FLO_MIN_STREAMSIZE;
  3340. }
  3341. pStr->pingTime = pStr->pingTime * 2;
  3342. if (pStr->pingTime > FLO_INIT_PINGTIME)
  3343. {
  3344. pStr->pingTime = FLO_INIT_PINGTIME;
  3345. }
  3346. TRACE_OUT((
  3347. "Mod buffer maxBytesInPipe down to %u, ping to %u "
  3348. "(0x%08x:%u)",
  3349. pStr->maxBytesInPipe,
  3350. pStr->pingTime,
  3351. pStr->channel,
  3352. pStr->priority));
  3353. callbackType = FLO_BUFFERMOD;
  3354. }
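//
// Worked example (numbers illustrative): with a target backlog of
// 1000ms, a pong arriving 1200ms after its ping fails the test above
// by 200ms, so maxBytesInPipe halves (floored at FLO_MIN_STREAMSIZE)
// and pingTime doubles (capped at FLO_INIT_PINGTIME), squeezing the
// pipe until the measured latency recovers.
//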
  3355. //
3356. // Now make the callback if callbackType has been set
  3357. //
  3358. if ((callbackType != 0) &&
  3359. (pmgClient->flo.callBack != NULL))
  3360. {
  3361. (pmgClient->flo.callBack)(pmgClient,
  3362. callbackType,
  3363. pStr->priority,
  3364. pStr->maxBytesInPipe);
  3365. }
  3366. }
  3367. }
  3368. break;
  3369. //
  3370. // PANG
  3371. //
  3372. // Remove the user and continue
  3373. //
  3374. case PACKET_TYPE_PANG:
  3375. {
  3376. TRACE_OUT(("%u,%u - PANG received, removing user",
  3377. stream, pPkt->userID));
  3378. //
  3379. // Remove from the list
  3380. //
  3381. ValidateFLOUser(pFloUser);
  3382. TRACE_OUT(("Freeing FLO_USER 0x%08x ID 0x%08x", pFloUser, pFloUser->userID));
  3383. COM_BasedListRemove(&(pFloUser->list));
  3384. delete pFloUser;
  3385. //
  3386. // If we are currently waiting then generate an event for the
  3387. // app to get it moving again
  3388. //
  3389. if ((pStr->eventNeeded) &&
  3390. (pmgClient->flo.callBack != NULL))
  3391. {
  3392. TRACE_OUT(("Waking up the app because user has left"));
  3393. (*(pmgClient->flo.callBack))(pmgClient,
  3394. FLO_WAKEUP,
  3395. pStr->priority,
  3396. pStr->maxBytesInPipe);
  3397. pStr->eventNeeded = FALSE;
  3398. }
  3399. }
  3400. break;
  3401. //
  3402. // UNKNOWN
  3403. //
  3404. // Just trace alert and press on
  3405. //
  3406. default:
  3407. {
  3408. WARNING_OUT(("Invalid packet type 0x%08x", pPkt->packetType));
  3409. }
  3410. break;
  3411. }
  3412. DC_EXIT_POINT:
  3413. DebugExitVOID(FLO_ReceivedPacket);
  3414. }
  3415. //
  3416. // FLO_AllocReceive
  3417. //
  3418. void FLO_AllocReceive
  3419. (
  3420. PMG_CLIENT pmgClient,
  3421. UINT priority,
  3422. NET_CHANNEL_ID channel,
  3423. UINT userID
  3424. )
  3425. {
  3426. UINT stream;
  3427. PFLO_USER pFloUser;
  3428. BOOL userFound = FALSE;
  3429. PFLO_STREAM_DATA pStr;
  3430. UINT curtime;
  3431. DebugEntry(FLO_AllocReceive);
  3432. ValidateMGClient(pmgClient);
  3433. ASSERT(pmgClient->userAttached);
  3434. ASSERT(priority != NET_TOP_PRIORITY);
  3435. //
  3436. // Convert channel and priority into stream
  3437. //
  3438. stream = FLOGetStream(pmgClient, channel, priority, &pStr);
  3439. //
  3440. // Only process controlled streams
  3441. //
  3442. if (stream == FLO_NOT_CONTROLLED)
  3443. {
  3444. DC_QUIT;
  3445. }
  3446. //
  3447. // First we must locate the user
  3448. //
  3449. ValidateFLOStr(pStr);
  3450. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pStr->users),
  3451. (void**)&pFloUser, FIELD_OFFSET(FLO_USER, list), FIELD_OFFSET(FLO_USER, userID),
  3452. (DWORD)userID, FIELD_SIZE(FLO_USER, userID));
  3453. //
  3454. // SFR6101: If this is a new User then add them to the list
  3455. //
  3456. if (pFloUser == NULL)
  3457. {
  3458. TRACE_OUT(("Message from user 0x%08x who is not flow controlled", userID));
  3459. pFloUser = FLOAddUser(userID, pStr);
  3460. }
  3461. //
3462. // If we failed to allocate a user CB then just ignore for now
  3463. //
  3464. if (pFloUser != NULL)
  3465. {
  3466. ValidateFLOUser(pFloUser);
  3467. //
  3468. // Add in the new receive packet usage
  3469. //
  3470. pFloUser->rxPackets++;
  3471. TRACE_OUT(("Num outstanding receives on stream %u now %u",
  3472. stream, pFloUser->rxPackets));
  3473. //
  3474. // Now check that we have not got some kind of creep
  3475. //
  3476. if (pFloUser->rxPackets > FLO_MAX_RCV_PKTS_CREEP)
  3477. {
  3478. WARNING_OUT(("Creep? Stream %u has %u unacked rcv pkts",
  3479. stream, pFloUser->rxPackets));
  3480. }
  3481. //
  3482. // Finally check to see that we are responding OK to this person
  3483. //
  3484. curtime = GetTickCount();
  3485. if ((pFloUser->pongNeeded) &&
  3486. (curtime - pFloUser->sentPongTime > (FLO_MAX_WAIT_TIME/4)))
  3487. {
  3488. TRACE_OUT(("Send keepalive pong"));
  3489. FLOPong(pmgClient, stream, pFloUser->userID, pFloUser->sendPongID);
  3490. pFloUser->sentPongTime = curtime;
  3491. }
  3492. }
  3493. DC_EXIT_POINT:
  3494. DebugExitVOID(FLO_AllocReceive);
  3495. }
  3496. //
  3497. // FLO_FreeReceive
  3498. //
  3499. void FLO_FreeReceive
  3500. (
  3501. PMG_CLIENT pmgClient,
  3502. NET_PRIORITY priority,
  3503. NET_CHANNEL_ID channel,
  3504. UINT userID
  3505. )
  3506. {
  3507. UINT stream;
  3508. PFLO_USER pFloUser;
  3509. PFLO_STREAM_DATA pStr;
  3510. BOOL userFound = FALSE;
  3511. DebugEntry(FLO_FreeReceive);
  3512. ValidateMGClient(pmgClient);
  3513. ASSERT(pmgClient->userAttached);
  3514. ASSERT(priority != NET_TOP_PRIORITY);
  3515. //
  3516. // Convert channel and priority into stream
  3517. //
  3518. stream = FLOGetStream(pmgClient, channel, priority, &pStr);
  3519. //
  3520. // Only process controlled streams
  3521. //
  3522. if (stream != FLO_NOT_CONTROLLED)
  3523. {
  3524. ValidateFLOStr(pStr);
  3525. //
  3526. // First we must locate the user
  3527. //
  3528. pFloUser = (PFLO_USER)COM_BasedNextListField(&(pStr->users));
  3529. while (&(pFloUser->list) != &(pStr->users))
  3530. {
  3531. ValidateFLOUser(pFloUser);
  3532. if (pFloUser->userID == userID)
  3533. {
  3534. userFound = TRUE;
  3535. break;
  3536. }
  3537. pFloUser = (PFLO_USER)COM_BasedNextListField(&(pFloUser->list));
  3538. }
  3539. //
3540. // If we do not find the user record then one of several things may
3541. // have happened:
  3542. // - They have joined the channel and immediately sent data
  3543. // - They were removed as being delinquent and are now sending
  3544. // data again
  3545. // - We failed to add them to our user list
  3546. // Try and allocate the user entry now
  3547. // (This will start tracking receive buffer space, but this user
  3548. // will not participate in our send flow control until we receive
  3549. // a pong from them and set "gotpong" in their FLO_USER CB.)
  3550. //
  3551. if (!userFound)
  3552. {
  3553. pFloUser = FLOAddUser(userID, pStr);
  3554. }
  3555. if (pFloUser != NULL)
  3556. {
  3557. ValidateFLOUser(pFloUser);
  3558. //
  3559. // Check that we have not got some kind of creep
  3560. //
  3561. if (pFloUser->rxPackets == 0)
  3562. {
  3563. WARNING_OUT(("Freed too many buffers for user 0x%08x on str %u",
  3564. userID, stream));
  3565. }
  3566. else
  3567. {
  3568. pFloUser->rxPackets--;
  3569. TRACE_OUT(("Num outstanding receives now %u",
  3570. pFloUser->rxPackets));
  3571. }
  3572. //
  3573. // Now we must Pong if there is a pong pending and we have
  3574. // moved below the high water mark
  3575. //
  3576. if ((pFloUser->pongNeeded) &&
  3577. (pFloUser->rxPackets < FLO_MAX_RCV_PACKETS))
  3578. {
  3579. FLOPong(pmgClient, stream, pFloUser->userID, pFloUser->sendPongID);
  3580. pFloUser->pongNeeded = FALSE;
  3581. pFloUser->sentPongTime = GetTickCount();
  3582. }
  3583. }
  3584. }
  3585. DebugExitVOID(FLO_FreeReceive);
  3586. }

//
// FLOPong()
//
void FLOPong
(
    PMG_CLIENT  pmgClient,
    UINT        stream,
    UINT        userID,
    UINT        pongID
)
{
    PTSHR_FLO_CONTROL   pFlo;
    PMG_BUFFER          pmgBuffer;
    UINT                rc;

    DebugEntry(FLOPong);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    rc = MGNewDataBuffer(pmgClient,
                         MG_TX_PONG,
                         sizeof(TSHR_FLO_CONTROL) + sizeof(MG_INT_PKT_HEADER),
                         &pmgBuffer);
    if (rc != 0)
    {
        WARNING_OUT(("MGNewDataBuffer failed in FLOPong"));
        DC_QUIT;
    }

    pFlo = (PTSHR_FLO_CONTROL)pmgBuffer->pDataBuffer;
    pmgBuffer->pPktHeader->header.pktLength = TSHR_PKT_FLOW;

    //
    // Set up pong contents
    //
    pFlo->packetType = PACKET_TYPE_PONG;
    pFlo->userID = pmgClient->userIDMCS;
    pFlo->stream = (BYTE)stream;
    pFlo->pingPongID = (BYTE)pongID;

    pmgBuffer->channelId = (ChannelID)userID;
    pmgBuffer->priority = MG_PRIORITY_HIGHEST;

    //
    // Now decouple the send request. Note that we must put the pong at
    // the back of the request queue even though we want it to flow at
    // high priority because otherwise there are certain circumstances
    // where we get pong reversal due to receipt of multiple pings
    //
    TRACE_OUT(("Inserting pong message 0x%08x at back of pending chain", pmgBuffer));
    COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));

    UT_PostEvent(pmgClient->putTask,
                 pmgClient->putTask,
                 NO_DELAY,
                 NET_MG_SCHEDULE,
                 0,
                 0);

    TRACE_OUT(("%u,0x%08x - PONG %u scheduled",
        pFlo->stream, pmgBuffer->channelId, pFlo->pingPongID));

DC_EXIT_POINT:
    DebugExitVOID(FLOPong);
}
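
//
// A minimal worked example of the pong-reversal hazard described above,
// with hypothetical IDs.  Suppose pings 7 and 8 arrive before the
// scheduler next runs:
//
//   back-of-queue insert:  pending = [pong 7, pong 8]  -> sent 7, then 8
//   head-of-queue insert:  pending = [pong 8, pong 7]  -> sent 8, then 7
//
// The peer matches each pong ID against its outstanding pings, so seeing
// pong 8 answered before pong 7 would make ping 7's round trip look
// longer than it really was and skew the throttling estimate.  Appending
// to the pending chain keeps pong order identical to ping arrival order.
//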

//
// FLOPing()
//
void FLOPing
(
    PMG_CLIENT  pmgClient,
    UINT        stream,
    UINT        curtime
)
{
    PFLO_STREAM_DATA    pStr;
    PMG_BUFFER          pmgBuffer;
    PTSHR_FLO_CONTROL   pFlo;
    UINT                rc;

    DebugEntry(FLOPing);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    ASSERT(stream < FLO_MAX_STREAMS);
    pStr = pmgClient->flo.pStrData[stream];
    ValidateFLOStr(pStr);

    rc = MGNewDataBuffer(pmgClient,
                         MG_TX_PING,
                         sizeof(TSHR_FLO_CONTROL) + sizeof(MG_INT_PKT_HEADER),
                         &pmgBuffer);
    if (rc != 0)
    {
        WARNING_OUT(("MGNewDataBuffer failed in FLOPing"));
        DC_QUIT;
    }

    //
    // Flag ping not needed to avoid serialization problems across the
    // sendmessage!
    //
    pStr->pingNeeded = FALSE;

    pFlo = (PTSHR_FLO_CONTROL)pmgBuffer->pDataBuffer;
    pmgBuffer->pPktHeader->header.pktLength = TSHR_PKT_FLOW;

    //
    // Set up ping contents
    //
    pFlo->packetType = PACKET_TYPE_PING;
    pFlo->userID = pmgClient->userIDMCS;
    pFlo->stream = (BYTE)stream;

    pmgBuffer->channelId = (ChannelID)pStr->channel;
    pmgBuffer->priority = (NET_PRIORITY)pStr->priority;

    //
    // Generate the next ping value to be used
    //
    pFlo->pingPongID = (BYTE)(pStr->pingValue + 1);

    //
    // Now decouple the send request
    //
    TRACE_OUT(("Inserting ping message 0x%08x into pending chain", pmgBuffer));
    COM_BasedListInsertBefore(&(pmgClient->pendChain), &(pmgBuffer->pendChain));

    UT_PostEvent(pmgClient->putTask,
                 pmgClient->putTask,
                 NO_DELAY,
                 NET_MG_SCHEDULE,
                 0,
                 0);

    //
    // Update flow control variables
    //
    pStr->pingValue = ((pStr->pingValue + 1) & 0xFF);
    pStr->lastPingTime = curtime;
    pStr->nextPingTime = curtime + pStr->pingTime;
    pStr->lastDenialTime = pStr->curDenialTime;

    TRACE_OUT(("%u - PING %u sched, next in %u mS (0x%08x:%u)",
        pFlo->stream,
        pStr->pingValue,
        pStr->pingTime,
        pStr->channel,
        pStr->priority));

DC_EXIT_POINT:
    DebugExitVOID(FLOPing);
}
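
//
// A minimal sketch of the ping ID arithmetic above: the ID travels in a
// single byte, so the increment is masked to 8 bits and wraps 255 -> 0.
// The helper is hypothetical; the real counter lives in FLO_STREAM_DATA.
//
static unsigned NextPingIDSketch(unsigned pingValue)
{
    // 0x00..0xFF, then back around: (255 + 1) & 0xFF == 0.
    return ((pingValue + 1) & 0xFF);
}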

//
// FLOPang()
//
void FLOPang
(
    PMG_CLIENT  pmgClient,
    UINT        stream,
    UINT        userID
)
{
    PMG_BUFFER          pmgBuffer;
    PTSHR_FLO_CONTROL   pFlo;
    UINT                rc;

    DebugEntry(FLOPang);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    rc = MGNewDataBuffer(pmgClient,
                         MG_TX_PANG,
                         sizeof(TSHR_FLO_CONTROL) + sizeof(MG_INT_PKT_HEADER),
                         &pmgBuffer);
    if (rc != 0)
    {
        WARNING_OUT(("MGNewDataBuffer failed in FLOPang"));
        DC_QUIT;
    }

    pFlo = (PTSHR_FLO_CONTROL)pmgBuffer->pDataBuffer;
    pmgBuffer->pPktHeader->header.pktLength = TSHR_PKT_FLOW;

    //
    // Set up pang contents
    //
    pFlo->packetType = PACKET_TYPE_PANG;
    pFlo->userID = pmgClient->userIDMCS;
    pFlo->stream = (BYTE)stream;
    pFlo->pingPongID = 0;

    pmgBuffer->channelId = (ChannelID)userID;
    pmgBuffer->priority = MG_PRIORITY_HIGHEST;

    //
    // Now decouple the send request
    //
    TRACE_OUT(("Inserting pang message 0x%08x into pending chain", pmgBuffer));
    COM_BasedListInsertBefore(&(pmgClient->pendChain),
                              &(pmgBuffer->pendChain));

    UT_PostEvent(pmgClient->putTask,
                 pmgClient->putTask,
                 NO_DELAY,
                 NET_MG_SCHEDULE,
                 0,
                 0);

DC_EXIT_POINT:
    DebugExitVOID(FLOPang);
}

//
// FLOGetStream()
//
UINT FLOGetStream
(
    PMG_CLIENT          pmgClient,
    NET_CHANNEL_ID      channel,
    UINT                priority,
    PFLO_STREAM_DATA *  ppStr
)
{
    UINT i;
    UINT cStreams;

    DebugEntry(FLOGetStream);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);
    ASSERT(priority != NET_TOP_PRIORITY);

    cStreams = pmgClient->flo.numStreams;
    ASSERT(cStreams <= FLO_MAX_STREAMS);

    //
    // Scan the list of streams for a match.
    //
    for (i = 0; i < cStreams; i++)
    {
        //
        // Check to ensure that this is a valid stream.
        //
        if (pmgClient->flo.pStrData[i] == NULL)
        {
            continue;
        }
        ValidateFLOStr(pmgClient->flo.pStrData[i]);

        //
        // If the channel and priority match then we have found the stream.
        //
        if ((pmgClient->flo.pStrData[i]->channel == channel) &&
            (pmgClient->flo.pStrData[i]->priority == priority))
        {
            break;
        }
    }

    //
    // If we hit the end of the list then return FLO_NOT_CONTROLLED.
    //
    if (i == cStreams)
    {
        i = FLO_NOT_CONTROLLED;
        *ppStr = NULL;
        TRACE_OUT(("Uncontrolled stream (0x%08x:%u)",
            channel,
            priority));
    }
    else
    {
        *ppStr = pmgClient->flo.pStrData[i];
        TRACE_OUT(("Controlled stream %u (0x%08x:%u)",
            i,
            channel,
            priority));
    }

    DebugExitDWORD(FLOGetStream, i);
    return(i);
}
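
//
// A minimal usage sketch for FLOGetStream: callers treat the returned
// index and out-pointer as a pair, and must check for FLO_NOT_CONTROLLED
// before touching the stream.  This wrapper is hypothetical and exists
// only to show the calling pattern used throughout this module.
//
static void ExampleLookupSketch
(
    PMG_CLIENT      pmgClient,
    NET_CHANNEL_ID  channel,
    UINT            priority
)
{
    PFLO_STREAM_DATA pStr;
    UINT stream = FLOGetStream(pmgClient, channel, priority, &pStr);

    if (stream == FLO_NOT_CONTROLLED)
    {
        // Uncontrolled traffic: pStr is NULL and flow control is bypassed.
        return;
    }

    // Controlled: the index and the stream pointer are both valid here.
    ValidateFLOStr(pStr);
}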

//
// FUNCTION: FLOAddUser
//
// DESCRIPTION:
//
// Add a new remote user entry for a stream.
//
// PARAMETERS:
//
// userID - ID of the new user (single member channel ID)
// pStr - pointer to the stream to receive the new user.
//
// RETURNS: Pointer to the new FLO_USER entry, or NULL if allocation failed.
//
//
PFLO_USER FLOAddUser
(
    UINT                userID,
    PFLO_STREAM_DATA    pStr
)
{
    PFLO_USER pFloUser;

    DebugEntry(FLOAddUser);

    ValidateFLOStr(pStr);

    //
    // Allocate memory for the new user entry
    //
    pFloUser = new FLO_USER;
    if (!pFloUser)
    {
        WARNING_OUT(("FLOAddUser failed; out of memory"));
    }
    else
    {
        ZeroMemory(pFloUser, sizeof(*pFloUser));
        SET_STAMP(pFloUser, FLOUSER);

        //
        // Set up the new record
        //
        TRACE_OUT(("UserID %u - New user, CB = 0x%08x", userID, pFloUser));
        pFloUser->userID = (TSHR_UINT16)userID;

        //
        // Add the new User to the end of the list
        //
        COM_BasedListInsertBefore(&(pStr->users), &(pFloUser->list));
    }

    DebugExitVOID(FLOAddUser);
    return(pFloUser);
}
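
//
// A minimal sketch of the intrusive circular-list idiom used throughout
// this module: each FLO_USER embeds a link field, the stream's list head
// acts as a sentinel, and "insert before the head" appends to the tail.
// The types below are hypothetical stand-ins for the COM_BasedList*
// based-pointer machinery, shown with plain pointers for clarity.
//
struct LinkSketch
{
    LinkSketch * next;
    LinkSketch * prev;
};

static void InsertBeforeSketch(LinkSketch * head, LinkSketch * item)
{
    // In a circular list, inserting just before the sentinel head is
    // the same as appending at the tail, preserving arrival order.
    item->next = head;
    item->prev = head->prev;
    head->prev->next = item;
    head->prev = item;
}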

//
// FLO_RemoveUser()
//
void FLO_RemoveUser
(
    PMG_CLIENT  pmgClient,
    UINT        userID
)
{
    PFLO_USER           pFloUser;
    PBASEDLIST          nextUser;
    UINT                stream;
    UINT                cStreams;
    PFLO_STREAM_DATA    pStr;

    DebugEntry(FLO_RemoveUser);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    cStreams = pmgClient->flo.numStreams;
    ASSERT(cStreams <= FLO_MAX_STREAMS);

    //
    // Check each stream
    //
    for (stream = 0; stream < cStreams; stream++)
    {
        if (pmgClient->flo.pStrData[stream] == NULL)
        {
            continue;
        }

        pStr = pmgClient->flo.pStrData[stream];
        ValidateFLOStr(pStr);

        //
        // Remove this user from the queue, if present
        //
        pFloUser = (PFLO_USER)COM_BasedNextListField(&(pStr->users));
        while (&(pFloUser->list) != &(pStr->users))
        {
            ValidateFLOUser(pFloUser);

            //
            // Address the follow on record before we free the current
            //
            nextUser = COM_BasedNextListField(&(pFloUser->list));

            //
            // Free the current record, if necessary
            //
            if (pFloUser->userID == userID)
            {
                //
                // Remove from the list
                //
                TRACE_OUT(("Freeing FLO_USER 0x%08x ID 0x%08x", pFloUser, pFloUser->userID));
                COM_BasedListRemove(&(pFloUser->list));
                delete pFloUser;

                TRACE_OUT(("Stream %u - resetting due to user disappearance",
                    stream));
                ValidateFLOStr(pStr);
                pStr->bytesInPipe = 0;
                pStr->pingNeeded = TRUE;
                pStr->nextPingTime = GetTickCount();
                pStr->gotPong = FALSE;
                pStr->eventNeeded = FALSE;
                break;
            }

            //
            // Move on to the next record in the list
            //
            pFloUser = (PFLO_USER)nextUser;
        }

        //
        // Now wake the app again for this stream
        //
        if (pmgClient->flo.callBack != NULL)
        {
            (*(pmgClient->flo.callBack))(pmgClient,
                                         FLO_WAKEUP,
                                         pStr->priority,
                                         pStr->maxBytesInPipe);
        }
    }

    DebugExitVOID(FLO_RemoveUser);
}
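
//
// A minimal sketch of the deletion pattern above: capture the successor
// link before freeing the current node, so the walk can continue (or
// break) without touching freed memory.  NodeSketch is a hypothetical
// plain-pointer analogue of the based-list node used by this module.
//
struct NodeSketch
{
    NodeSketch * next;
    NodeSketch * prev;
    unsigned     userID;
};

static void RemoveUserSketch(NodeSketch * head, unsigned userID)
{
    NodeSketch * cur = head->next;
    while (cur != head)
    {
        NodeSketch * nextNode = cur->next;  // saved before any delete

        if (cur->userID == userID)
        {
            cur->prev->next = cur->next;
            cur->next->prev = cur->prev;
            delete cur;                     // cur is dead; saved link is not
            break;
        }

        cur = nextNode;
    }
}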

//
// FLOStreamEndControl()
//
void FLOStreamEndControl
(
    PMG_CLIENT  pmgClient,
    UINT        stream
)
{
    PFLO_USER           pFloUser;
    PFLO_STREAM_DATA    pStr;
    PMG_BUFFER          pmgBuffer;

    DebugEntry(FLOStreamEndControl);

    ValidateMGClient(pmgClient);
    ASSERT(pmgClient->userAttached);

    //
    // Convert the stream id into a stream pointer.
    //
    ASSERT(stream < FLO_MAX_STREAMS);
    pStr = pmgClient->flo.pStrData[stream];
    ValidateFLOStr(pStr);

    //
    // Trace out that we are about to end flow control.
    //
    TRACE_OUT(("Flow control about to end, stream %u, (0x%08x:%u)",
        stream,
        pStr->channel,
        pStr->priority));

    //
    // First check to see if there are any outstanding buffer CBs with
    // pStr set to this stream and reset pStr to null. We need to do this
    // as we may then try to dereference pStr when we come to send these
    // buffers.
    //
    pmgBuffer = (PMG_BUFFER)COM_BasedListFirst(&(pmgClient->pendChain),
        FIELD_OFFSET(MG_BUFFER, pendChain));
    while (pmgBuffer != NULL)
    {
        ValidateMGBuffer(pmgBuffer);

        if (pmgBuffer->type == MG_TX_BUFFER)
        {
            //
            // Set the stream pointer to NULL.
            //
            pmgBuffer->pStr = NULL;
            TRACE_OUT(("Nulling stream pointer in bufferCB: (0x%08x:%u)",
                pStr->channel, pStr->priority));
        }

        pmgBuffer = (PMG_BUFFER)COM_BasedListNext(&(pmgClient->pendChain),
            pmgBuffer, FIELD_OFFSET(MG_BUFFER, pendChain));
    }

    //
    // Now free up the list of users.
    //
    pFloUser = (PFLO_USER)COM_BasedListFirst(&(pStr->users), FIELD_OFFSET(FLO_USER, list));
    while (pFloUser != NULL)
    {
        ValidateFLOUser(pFloUser);

        //
        // First send the remote user a "pang" to tell them we are not
        // interested in their data any more.
        //
        FLOPang(pmgClient, stream, pFloUser->userID);

        //
        // Remove the remote user from the list.
        //
        TRACE_OUT(("Freeing FLO_USER 0x%08x ID 0x%08x", pFloUser, pFloUser->userID));
        COM_BasedListRemove(&(pFloUser->list));
        delete pFloUser;

        //
        // Now get the next user in the list.
        //
        ValidateFLOStr(pStr);
        pFloUser = (PFLO_USER)COM_BasedListFirst(&(pStr->users), FIELD_OFFSET(FLO_USER, list));
    }

    //
    // Free the stream data.
    //
    ASSERT(pStr == pmgClient->flo.pStrData[stream]);
    TRACE_OUT(("Freeing FLO_STREAM_DATA 0x%08x", pStr));
    delete pStr;
    pmgClient->flo.pStrData[stream] = NULL;

    //
    // Adjust numStreams (if required)
    //
    if (stream == (pmgClient->flo.numStreams - 1))
    {
        while ((pmgClient->flo.numStreams > 0) &&
               (pmgClient->flo.pStrData[pmgClient->flo.numStreams - 1] == NULL))
        {
            pmgClient->flo.numStreams--;
        }

        TRACE_OUT(("numStreams %u", pmgClient->flo.numStreams));
    }

    DebugExitVOID(FLOStreamEndControl);
}
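
//
// A minimal sketch of the numStreams adjustment above: streams live in a
// fixed array of slots, and the live count only shrinks when the highest
// occupied slot is freed, at which point any run of trailing empty slots
// is trimmed in one backward pass.  The array parameter is a hypothetical
// stand-in for flo.pStrData.
//
static unsigned TrimTrailingSlotsSketch(void * slots[], unsigned count, unsigned freedSlot)
{
    if (freedSlot == count - 1)
    {
        // Walk back over empty slots at the top of the table.
        while ((count > 0) && (slots[count - 1] == NULL))
        {
            count--;
        }
    }
    return count;
}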

//
// MGNewCorrelator()
//
// Gets a new correlator for events to a particular MGC client
//
void MGNewCorrelator
(
    PMG_CLIENT  pmgClient,
    WORD *      pCorrelator
)
{
    ValidateMGClient(pmgClient);

    pmgClient->joinNextCorr++;
    if (pmgClient->joinNextCorr == 0)
    {
        pmgClient->joinNextCorr++;
    }

    *pCorrelator = pmgClient->joinNextCorr;
}
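
//
// A minimal sketch of the correlator rule above: the counter wraps at its
// WORD width, and zero is skipped on wrap so that it can serve as a
// "no correlator" value when callers match requests to events.  The
// helper is illustrative; the real counter lives in the MG_CLIENT.
//
static WORD NextCorrelatorSketch(WORD current)
{
    current++;
    if (current == 0)   // wrapped past 0xFFFF: 0 is reserved, skip it
    {
        current++;
    }
    return current;     // never returns 0
}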