Leaked source code of windows server 2003
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

19105 lines
538 KiB

  1. #include "precomp.h"
  2. //
  3. // OM.CPP
  4. // Object Manager
  5. //
  6. // Copyright(c) Microsoft 1997-
  7. //
  8. #define MLZ_FILE_ZONE ZONE_OM
//
// Function profile ID <--> name mapping
//
// Table of function-profile name strings, indexed by function profile ID
// (OMFP_*).  NOTE(review): entry order must match the OMFP_* enumeration
// (AL, OM, WB) — confirm against the enum declaration if it changes.
//
typedef struct tagOMFP_MAP
{
    char szName[16];    // NUL-terminated profile name (max 15 chars + NUL)
}
OMFP_MAP;

const OMFP_MAP c_aFpMap[OMFP_MAX] =
{
    { AL_FP_NAME },
    { OM_FP_NAME },
    { WB_FP_NAME }
};
//
// Workset Group ID <--> name mapping
//
// Table of workset-group name strings, indexed by workset group ID
// (OMWSG_*).  NOTE(review): entry order must match the OMWSG_* enumeration
// (ObManControl, AL, WB) — confirm against the enum declaration if it
// changes.
//
typedef struct tagOMWSG_MAP
{
    char szName[16];    // NUL-terminated workset group name
}
OMWSG_MAP;

const OMWSG_MAP c_aWsgMap[OMWSG_MAX] =
{
    { OMC_WSG_NAME },
    { AL_WSG_NAME },
    { WB_WSG_NAME }
};
  37. //
  38. // OMP_Init()
  39. //
  40. BOOL OMP_Init(BOOL * pfCleanup)
  41. {
  42. BOOL fInit = FALSE;
  43. DebugEntry(OMP_Init);
  44. UT_Lock(UTLOCK_OM);
  45. //
  46. // Register the OM service
  47. //
  48. if (g_putOM || g_pomPrimary)
  49. {
  50. *pfCleanup = FALSE;
  51. ERROR_OUT(("Can't start OM primary task; already running"));
  52. DC_QUIT;
  53. }
  54. *pfCleanup = TRUE;
  55. if (!UT_InitTask(UTTASK_OM, &g_putOM))
  56. {
  57. ERROR_OUT(("Failed to start OM task"));
  58. DC_QUIT;
  59. }
  60. g_pomPrimary = (POM_PRIMARY)UT_MallocRefCount(sizeof(OM_PRIMARY), TRUE);
  61. if (!g_pomPrimary)
  62. {
  63. ERROR_OUT(("Failed to allocate OM memory block"));
  64. DC_QUIT;
  65. }
  66. SET_STAMP(g_pomPrimary, OPRIMARY);
  67. g_pomPrimary->putTask = g_putOM;
  68. g_pomPrimary->correlator = 1;
  69. COM_BasedListInit(&(g_pomPrimary->domains));
  70. UT_RegisterExit(g_putOM, OMPExitProc, g_pomPrimary);
  71. g_pomPrimary->exitProcReg = TRUE;
  72. UT_RegisterEvent(g_putOM, OMPEventsHandler, g_pomPrimary, UT_PRIORITY_NORMAL);
  73. g_pomPrimary->eventProcReg = TRUE;
  74. if (!MG_Register(MGTASK_OM, &(g_pomPrimary->pmgClient), g_putOM))
  75. {
  76. ERROR_OUT(("Couldn't register OM with the MG layer"));
  77. DC_QUIT;
  78. }
  79. if (!CMS_Register(g_putOM, CMTASK_OM, &(g_pomPrimary->pcmClient)))
  80. {
  81. ERROR_OUT(("Couldn't register OM as call secondary"));
  82. DC_QUIT;
  83. }
  84. //
  85. // Allocate our GDC buffer.
  86. //
  87. g_pomPrimary->pgdcWorkBuf = new BYTE[GDC_WORKBUF_SIZE];
  88. if (!g_pomPrimary->pgdcWorkBuf)
  89. {
  90. ERROR_OUT(("SendMessagePkt: can't allocate GDC work buf, not compressing"));
  91. DC_QUIT;
  92. }
  93. fInit = TRUE;
  94. DC_EXIT_POINT:
  95. UT_Unlock(UTLOCK_OM);
  96. DebugExitBOOL(OMP_Init, fInit);
  97. return(fInit);
  98. }
  99. //
  100. // OMP_Term()
  101. //
  102. void OMP_Term(void)
  103. {
  104. DebugEntry(OMP_Term);
  105. UT_Lock(UTLOCK_OM);
  106. if (g_pomPrimary)
  107. {
  108. ValidateOMP(g_pomPrimary);
  109. //
  110. // Deregister from Call Manager
  111. //
  112. if (g_pomPrimary->pcmClient)
  113. {
  114. CMS_Deregister(&g_pomPrimary->pcmClient);
  115. }
  116. //
  117. // Deregister from MG
  118. //
  119. if (g_pomPrimary->pmgClient)
  120. {
  121. MG_Deregister(&g_pomPrimary->pmgClient);
  122. }
  123. OMPExitProc(g_pomPrimary);
  124. }
  125. UT_TermTask(&g_putOM);
  126. UT_Unlock(UTLOCK_OM);
  127. DebugExitVOID(OMP_Term);
  128. }
//
// OMPExitProc()
//
// Exit procedure for the OM task: tears down everything owned by the
// primary block.  Deregisters the exit/event procs, frees every domain
// (including each domain's workset groups and their client lists), frees
// the GDC work buffer, and finally releases the primary block itself.
// uData is the POM_PRIMARY passed at registration time.
//
void CALLBACK OMPExitProc(LPVOID uData)
{
    POM_PRIMARY pomPrimary = (POM_PRIMARY)uData;
    POM_DOMAIN pDomain;
    POM_WSGROUP pWSGroup;
    POM_CLIENT_LIST pClient;

    DebugEntry(OMPExitProc);

    UT_Lock(UTLOCK_OM);

    ValidateOMP(pomPrimary);
    ASSERT(pomPrimary == g_pomPrimary);

    // Deregister our exit proc and event handler, if still registered.
    if (pomPrimary->exitProcReg)
    {
        UT_DeregisterExit(pomPrimary->putTask, OMPExitProc, pomPrimary);
        pomPrimary->exitProcReg = FALSE;
    }

    if (pomPrimary->eventProcReg)
    {
        UT_DeregisterEvent(pomPrimary->putTask, OMPEventsHandler, pomPrimary);
        pomPrimary->eventProcReg = FALSE;
    }

    //
    // Free domains
    //
    // Repeatedly take the FIRST domain off the list until it is empty
    // (assignment-in-condition is intentional).
    //
    while (pDomain = (POM_DOMAIN)COM_BasedListFirst(&(pomPrimary->domains),
        FIELD_OFFSET(OM_DOMAIN, chain)))
    {
        TRACE_OUT(("OMPExitProc: Freeing domain 0x%08x call ID 0x%08x",
            pDomain, pDomain->callID));

        //
        // Free workset groups
        // NOTE:
        // WSGDiscard() may destroy the domain, hence the weird
        // loop: we only discard ONE workset group per outer iteration,
        // then re-fetch the first domain.  When a domain has no workset
        // groups left, we free the domain record itself instead.
        //
        if (pWSGroup = (POM_WSGROUP)COM_BasedListFirst(&(pDomain->wsGroups),
            FIELD_OFFSET(OM_WSGROUP, chain)))
        {
            TRACE_OUT(("OMPExitProc: Freeing wsg 0x%08x domain 0x%08x",
                pWSGroup, pDomain));

            //
            // Free clients registered with this workset group first.
            //
            while (pClient = (POM_CLIENT_LIST)COM_BasedListFirst(&(pWSGroup->clients),
                FIELD_OFFSET(OM_CLIENT_LIST, chain)))
            {
                TRACE_OUT(("OMPExitProc: Freeing client 0x%08x wsg 0x%08x",
                    pClient, pWSGroup));

                COM_BasedListRemove(&(pClient->chain));
                UT_FreeRefCount((void**)&pClient, FALSE);
            }

            WSGDiscard(pomPrimary, pDomain, pWSGroup, TRUE);
        }
        else
        {
            FreeDomainRecord(&pDomain);
        }
    }

    // Free the GDC compression buffer allocated in OMP_Init.
    if (pomPrimary->pgdcWorkBuf)
    {
        delete[] pomPrimary->pgdcWorkBuf;
        pomPrimary->pgdcWorkBuf = NULL;
    }

    // Release the primary block; g_pomPrimary is NULLed by the free.
    UT_FreeRefCount((void**)&g_pomPrimary, TRUE);

    UT_Unlock(UTLOCK_OM);

    DebugExitVOID(OMPExitProc);
}
//
// OMPEventsHandler(...)
//
// Central event handler for the OM primary task.  Events arrive from
// three layers and are filtered by range in order:
//   1. Call Manager events (CM_BASE_EVENT..CM_LAST_EVENT)
//   2. ObMan-internal / workset events (OM_BASE_EVENT..OM_LAST_EVENT)
//   3. Network events (NET_BASE_EVENT..NET_LAST_EVENT)
// An event outside all three ranges is not processed (returns FALSE).
// param1/param2 meanings depend on the event; see each case.  Runs under
// the OM lock.
//
// Returns TRUE if the event was recognized and processed.
//
BOOL CALLBACK OMPEventsHandler
(
    LPVOID uData,
    UINT event,
    UINT_PTR param1,
    UINT_PTR param2
)
{
    POM_PRIMARY pomPrimary = (POM_PRIMARY)uData;
    POM_DOMAIN pDomain = NULL;
    BOOL fProcessed = TRUE;

    DebugEntry(OMPEventsHandler);

    UT_Lock(UTLOCK_OM);

    ValidateOMP(pomPrimary);

    //
    // Check event is in the range we deal with:
    //
    if ((event < CM_BASE_EVENT) || (event > CM_LAST_EVENT))
    {
        goto CHECK_OM_EVENTS;
    }

    switch (event)
    {
        case CMS_NEW_CALL:
        {
            TRACE_OUT(( "CMS_NEW_CALL"));

            //
            // We ignore the return code - it will have been handled lower
            // down.  param2 is the call ID.
            //
            DomainRecordFindOrCreate(pomPrimary, (UINT)param2, &pDomain);
        }
        break;

        case CMS_END_CALL:
        {
            TRACE_OUT(( "CMS_END_CALL"));

            // Look up the domain by call ID (param2).
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID), (DWORD)param2,
                FIELD_SIZE(OM_DOMAIN, callID));

            if (pDomain == NULL)
            {
                //
                // We don't have a record for this Domain so either we
                // never attached or we've already detached. Do nothing.
                //
                TRACE_OUT(( "No record for Domain %u found", param2));
            }
            else
            {
                ProcessOwnDetach(pomPrimary, pDomain);
            }
        }
        break;

        case CMS_TOKEN_ASSIGN_CONFIRM:
        {
            TRACE_OUT(( "CMS_TOKEN_ASSIGN_CONFIRM"));

            //
            // There is a flaw in the CMS_ASSIGN_TOKEN_CONFIRM API in that
            // it does not tell us which domain it refers to. So, we
            // operate under the assumption that this event relates to the
            // most recent domain we created i.e. the first one in the
            // list (they go in at the beginning).
            //
            pDomain = (POM_DOMAIN)COM_BasedListFirst(&(pomPrimary->domains),
                FIELD_OFFSET(OM_DOMAIN, chain));
            if (pDomain != NULL)
            {
                ProcessCMSTokenAssign(pomPrimary,
                                      pDomain,
                                      (param1 != 0),
                                      LOWORD(param2));
            }
            else
            {
                WARNING_OUT(( "No domain found for CMS_TOKEN_ASSIGN_CONFIRM"));
            }
        }
        break;
    }

    TRACE_OUT(( "Processed Call Manager event %#x", event));
    DC_QUIT;

CHECK_OM_EVENTS:

    //
    // Check event is in the range we deal with:
    //
    if ((event < OM_BASE_EVENT) || (event > OM_LAST_EVENT))
    {
        goto CHECK_NET_EVENTS;
    }

    switch (event)
    {
        case OMINT_EVENT_LOCK_TIMEOUT:
        {
            ProcessLockTimeout(pomPrimary, (UINT)param1, (UINT)param2);
        }
        break;

        case OMINT_EVENT_SEND_QUEUE:
        {
            //
            // Param2 is the domain record.
            //
            pDomain = (POM_DOMAIN)param2;

            ProcessSendQueue(pomPrimary, pDomain, TRUE);
        }
        break;

        case OMINT_EVENT_PROCESS_MESSAGE:
        {
            ProcessBouncedMessages(pomPrimary, (POM_DOMAIN) param2);
        }
        break;

        case OMINT_EVENT_WSGROUP_DISCARD:
        {
            ProcessWSGDiscard(pomPrimary, (POM_WSGROUP)param2);
        }
        break;

        case OMINT_EVENT_WSGROUP_MOVE:
        case OMINT_EVENT_WSGROUP_REGISTER:
        {
            ProcessWSGRegister(pomPrimary, (POM_WSGROUP_REG_CB)param2);
        }
        break;

        case OMINT_EVENT_WSGROUP_REGISTER_CONT:
        {
            WSGRegisterStage1(pomPrimary, (POM_WSGROUP_REG_CB) param2);
        }
        break;

        //
        // The remaining events are ones we get by virtue of being
        // considered as a client of the ObManControl workset group
        //
        case OM_WORKSET_LOCK_CON:
        {
            // param1 carries the workset ID; param2 carries the
            // correlator and result (reinterpreted via the OM_EVENT_DATA
            // structures).
            switch (((POM_EVENT_DATA16)&param1)->worksetID)
            {
                case OM_INFO_WORKSET:
                    ProcessOMCLockConfirm(pomPrimary,
                        ((POM_EVENT_DATA32) &param2)->correlator,
                        ((POM_EVENT_DATA32) &param2)->result);
                    break;

                case OM_CHECKPOINT_WORKSET:
                    ProcessCheckpoint(pomPrimary,
                        ((POM_EVENT_DATA32) &param2)->correlator,
                        ((POM_EVENT_DATA32) &param2)->result);
                    break;
            }
        }
        break;

        case OM_WORKSET_NEW_IND:
        {
            ProcessOMCWorksetNew(pomPrimary,
                ((POM_EVENT_DATA16) &param1)->hWSGroup,
                ((POM_EVENT_DATA16) &param1)->worksetID);
        }
        break;

        case OM_PERSON_JOINED_IND:
        case OM_PERSON_LEFT_IND:
        case OM_PERSON_DATA_CHANGED_IND:
        case OM_WSGROUP_MOVE_IND:
        case OM_WORKSET_UNLOCK_IND:
        {
            //
            // We ignore these events.
            //
        }
        break;

        case OM_OBJECT_ADD_IND:
        case OM_OBJECT_REPLACED_IND:
        case OM_OBJECT_UPDATED_IND:
        case OM_OBJECT_DELETED_IND:
        {
            ProcessOMCObjectEvents(pomPrimary,
                                   event,
                                   ((POM_EVENT_DATA16) &param1)->hWSGroup,
                                   ((POM_EVENT_DATA16) &param1)->worksetID,
                                   (POM_OBJECT) param2);
        }
        break;

        default:
        {
            ERROR_OUT(( "Unexpected ObMan event 0x%08x", event));
        }
    }

    TRACE_OUT(( "Processed ObMan event %x", event));
    DC_QUIT;

CHECK_NET_EVENTS:

    //
    // This function is only for network layer events so we quit if we've
    // got something else:
    //
    if ((event < NET_BASE_EVENT) || (event > NET_LAST_EVENT))
    {
        fProcessed = FALSE;
        DC_QUIT;
    }

    //
    // Now switch on the event type:
    //
    switch (event)
    {
        case NET_EVENT_USER_ATTACH:
        {
            //
            // Find the domain data for this call (param2 = call ID):
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                param2, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                // param1 packs the user ID (low word) and result (high
                // word).
                ProcessNetAttachUser(pomPrimary, pDomain, LOWORD(param1),
                    HIWORD(param1));
            }
            break;
        }

        case NET_EVENT_USER_DETACH:
        {
            //
            // Find the domain data for this call
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                param2, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                ProcessNetDetachUser(pomPrimary, pDomain, LOWORD(param1));
            }
            break;
        }

        case NET_EVENT_CHANNEL_LEAVE:
        {
            //
            // Find the domain data for this call
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                param2, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                ProcessNetLeaveChannel(pomPrimary, pDomain, LOWORD(param1));
            }
            break;
        }

        case NET_EVENT_TOKEN_GRAB:
        {
            //
            // Find the domain data for this call
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                param2, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                ProcessNetTokenGrab(pomPrimary, pDomain, LOWORD(param1));
            }
            break;
        }

        case NET_EVENT_TOKEN_INHIBIT:
        {
            //
            // Find the domain data for this call
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                param2, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                ProcessNetTokenInhibit(pomPrimary, pDomain, LOWORD(param1));
            }
            break;
        }

        case NET_EVENT_CHANNEL_JOIN:
        {
            PNET_JOIN_CNF_EVENT pEvent = (PNET_JOIN_CNF_EVENT)param2;

            //
            // Find the domain data for this call (the event carries its
            // own call ID):
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                pEvent->callID, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                ProcessNetJoinChannel(pomPrimary, pDomain, pEvent);
            }

            // The event buffer is ours to free, whether or not a domain
            // was found.
            MG_FreeBuffer(pomPrimary->pmgClient, (void **)&pEvent);
            break;
        }

        case NET_EVENT_DATA_RECEIVED:
        {
            PNET_SEND_IND_EVENT pEvent = (PNET_SEND_IND_EVENT)param2;

            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                pEvent->callID, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                ProcessNetData(pomPrimary, pDomain, pEvent);
            }

            // Free the indication buffer regardless.
            MG_FreeBuffer(pomPrimary->pmgClient, (void**)&pEvent);
            break;
        }

        case NET_FEEDBACK:
        {
            //
            // A NET_FEEDBACK event includes the pmgUser which identifies
            // the send pool from which the buffer has been freed. We use
            // it to find the Domain:
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID), (DWORD)param2,
                FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                //
                // Generating a FEEDBACK event doesn't cause the use count
                // of the Domain record to be bumped, so set the
                // <domainRecBumped> flag to FALSE on the call to
                // ProcessSendQueue:
                //
                ProcessSendQueue(pomPrimary, pDomain, FALSE);
            }
            break;
        }

        case NET_FLOW:
        {
            ERROR_OUT(("OMPEventsHandler received NET_FLOW; shouldn't have"));
            break;
        }
    }

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_OM);

    DebugExitBOOL(OMPEventsHandler, fProcessed);
    return(fProcessed);
}
  542. //
  543. // DomainRecordFindOrCreate(...)
  544. //
  545. UINT DomainRecordFindOrCreate
  546. (
  547. POM_PRIMARY pomPrimary,
  548. UINT callID,
  549. POM_DOMAIN * ppDomain
  550. )
  551. {
  552. POM_DOMAIN pDomain;
  553. UINT rc = 0;
  554. DebugEntry(DomainRecordFindOrCreate);
  555. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
  556. (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
  557. FIELD_OFFSET(OM_DOMAIN, callID),
  558. (DWORD)callID, FIELD_SIZE(OM_DOMAIN, callID));
  559. if (pDomain == NULL)
  560. {
  561. //
  562. // We don't have a record for this Domain so create one:
  563. //
  564. rc = DomainAttach(pomPrimary, callID, &pDomain);
  565. if (rc != 0)
  566. {
  567. DC_QUIT;
  568. }
  569. }
  570. *ppDomain = pDomain;
  571. DC_EXIT_POINT:
  572. DebugExitDWORD(DomainRecordFindOrCreate, rc);
  573. return(rc);
  574. }
//
// DomainAttach(...)
//
// Begins attaching to domain <callID>:
//   - for a real call, first checks the meeting's attendee permissions
//     allow the old whiteboard OM at all
//   - creates a new domain record (NewDomainRecord)
//   - for the local domain (OM_NO_CALL), fakes a successful token grab
//     and jumps straight into ProcessNetTokenGrab
//   - for a real call, sets flow-control latencies and calls MG_Attach;
//     the attach continues asynchronously when NET_ATTACH arrives
//     (ProcessNetAttachUser).
//
// On failure, any partially-created domain record is torn down via
// ProcessOwnDetach.  Returns 0 on success.
//
UINT DomainAttach
(
    POM_PRIMARY pomPrimary,
    UINT callID,
    POM_DOMAIN * ppDomain
)
{
    POM_DOMAIN pDomain = NULL;
    NET_FLOW_CONTROL netFlow;
    UINT rc = 0;

    DebugEntry(DomainAttach);

    TRACE_OUT(( "Attaching to Domain 0x%08x...", callID));

    if (callID != OM_NO_CALL)
    {
        CM_STATUS status;

        // Refuse to attach when the meeting settings don't permit the
        // old whiteboard at all.
        CMS_GetStatus(&status);
        if (!(status.attendeePermissions & NM_PERMIT_USEOLDWBATALL))
        {
            WARNING_OUT(("Joining Meeting with no OLDWB OM at all"));
            rc = NET_RC_MGC_NOT_CONNECTED;
            DC_QUIT;
        }
    }

    //
    // This function does the following:
    //
    // - create a new Domain record
    //
    // - if the Domain is our local Domain (OM_NO_CALL) call
    // ObManControlInit
    //
    // - else call MG_AttachUser to start attaching to the Domain.
    //
    rc = NewDomainRecord(pomPrimary,
                         callID,
                         &pDomain);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // What we do now depends on whether this is our "local" Domain (i.e.
    // callID == OM_NO_CALL):
    //
    if (callID == OM_NO_CALL)
    {
        TRACE_OUT(( "Is local domain - skipping forward"));

        //
        // This is our "local" Domain, so don't call MG_AttachUser.
        // Instead, we fake up a successful token grab event and rejoin the
        // domain attach processing there:
        //
        TRACE_OUT(( "Faking successful token grab for local domain"));
        pDomain->state = PENDING_TOKEN_GRAB;

        rc = ProcessNetTokenGrab(pomPrimary, pDomain, NET_RESULT_OK);
        if (rc != 0)
        {
            DC_QUIT;
        }
    }
    else
    {
        TRACE_OUT(( "Is real domain - attaching"));

        //
        // Set up our target latencies. Don't bother restricting the max
        // stream sizes.
        //
        ZeroMemory(&netFlow, sizeof(netFlow));
        netFlow.latency[NET_TOP_PRIORITY] = 0;
        netFlow.latency[NET_HIGH_PRIORITY] = 2000L;
        netFlow.latency[NET_MEDIUM_PRIORITY] = 5000L;
        netFlow.latency[NET_LOW_PRIORITY] = 10000L;

        rc = MG_Attach(pomPrimary->pmgClient, callID, &netFlow);
        if (rc != 0)
        {
            DC_QUIT;
        }

        //
        // Set up the remaining fields of the Domain record:
        //
        pDomain->state = PENDING_ATTACH;

        //
        // The <userID> field is set when the NET_ATTACH event arrives.
        //

        //
        // The next stage in the Domain attach process is when the
        // NET_ATTACH event arrives. This will cause the
        // ProcessNetAttachUser function to be called.
        //
    }

    //
    // Finally, set caller's pointer:
    //
    *ppDomain = pDomain;

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // Do not trace an error if we get NOT_CONNECTED - it is a valid
        // race condition (but we still must do the cleanup below).
        //
        if (rc != NET_RC_MGC_NOT_CONNECTED)
        {
            // lonchanc: rc=0x706 can happen here, bug #942.
            // this was ERROR_OUT
            WARNING_OUT(( "Error %d attaching to Domain %u", rc, callID));
        }

        // Tear down whatever part of the domain record was created.
        if (pDomain != NULL)
        {
            ProcessOwnDetach(pomPrimary, pDomain);
        }
    }

    DebugExitDWORD(DomainAttach, rc);
    return(rc);
}
  693. //
  694. // DomainDetach(...)
  695. //
  696. void DomainDetach
  697. (
  698. POM_PRIMARY pomPrimary,
  699. POM_DOMAIN * ppDomain,
  700. BOOL fExit
  701. )
  702. {
  703. POM_DOMAIN pDomain;
  704. DebugEntry(DomainDetach);
  705. ASSERT(ppDomain != NULL);
  706. pDomain = *ppDomain;
  707. //
  708. // This function does all the network cleanup required, then calls on
  709. // to discard the ObMan memory etc associated with the domain. Note
  710. // that we don't bother releasing tokens, leaving channels, etc since
  711. // the network layer will do this for us automatically.
  712. //
  713. if (!fExit &&
  714. (pDomain->callID != OM_NO_CALL) &&
  715. (pDomain->state >= PENDING_ATTACH))
  716. {
  717. MG_Detach(pomPrimary->pmgClient);
  718. }
  719. TRACE_OUT(( "Detached from Domain %u", pDomain->callID));
  720. FreeDomainRecord(ppDomain);
  721. DebugExitVOID(DomainDetach);
  722. }
  723. //
  724. // NewDomainRecord(...)
  725. //
  726. UINT NewDomainRecord
  727. (
  728. POM_PRIMARY pomPrimary,
  729. UINT callID,
  730. POM_DOMAIN* ppDomain
  731. )
  732. {
  733. POM_WSGROUP pOMCWSGroup = NULL;
  734. POM_DOMAIN pDomain;
  735. BOOL noCompression;
  736. BOOL inserted = FALSE;
  737. UINT rc = 0;
  738. DebugEntry(NewDomainRecord);
  739. //
  740. // Allocate Domain record:
  741. //
  742. pDomain = (POM_DOMAIN)UT_MallocRefCount(sizeof(OM_DOMAIN), TRUE);
  743. if (!pDomain)
  744. {
  745. rc = UT_RC_NO_MEM;
  746. DC_QUIT;
  747. }
  748. SET_STAMP(pDomain, DOMAIN);
  749. //
  750. // Fill in the fields:
  751. //
  752. pDomain->callID = callID;
  753. pDomain->valid = TRUE;
  754. //
  755. // Set up our maximum compression caps. They are subsequently
  756. // negotiated as follows:
  757. //
  758. // - if there are any other nodes out there, we will negotiate down
  759. // when we receive a WELCOME message from one of them
  760. //
  761. // - if any other nodes join subsequently, we will negotiate down when
  762. // we receive their HELLO message.
  763. //
  764. COM_ReadProfInt(DBG_INI_SECTION_NAME, OM_INI_NOCOMPRESSION, FALSE,
  765. &noCompression);
  766. if (noCompression)
  767. {
  768. WARNING_OUT(("NewDomainRecord: compression off"));
  769. pDomain->compressionCaps = OM_CAPS_NO_COMPRESSION;
  770. }
  771. else
  772. {
  773. pDomain->compressionCaps = OM_CAPS_PKW_COMPRESSION;
  774. }
  775. //
  776. // This will be ObMan's workset group handle for the ObManControl
  777. // workset group in this domain. Since we know that domain handles are
  778. // only ever -1 or 0, we just cast the domain handle down to 8 bits to
  779. // give the hWSGroup. If the way domain handles are allocated changes,
  780. // will need to do something cleverer here.
  781. //
  782. pDomain->omchWSGroup = (BYTE) callID;
  783. COM_BasedListInit(&(pDomain->wsGroups));
  784. COM_BasedListInit(&(pDomain->pendingRegs));
  785. COM_BasedListInit(&(pDomain->pendingLocks));
  786. COM_BasedListInit(&(pDomain->receiveList));
  787. COM_BasedListInit(&(pDomain->bounceList));
  788. COM_BasedListInit(&(pDomain->helperCBs));
  789. COM_BasedListInit(&(pDomain->sendQueue[ NET_TOP_PRIORITY ]));
  790. COM_BasedListInit(&(pDomain->sendQueue[ NET_HIGH_PRIORITY ]));
  791. COM_BasedListInit(&(pDomain->sendQueue[ NET_MEDIUM_PRIORITY ]));
  792. COM_BasedListInit(&(pDomain->sendQueue[ NET_LOW_PRIORITY ]));
  793. //
  794. // Insert the record for this new Domain in the list hung off the root
  795. // data structure:
  796. //
  797. TRACE_OUT((" Inserting record for Domain %u in global list", callID));
  798. COM_BasedListInsertAfter(&(pomPrimary->domains), &(pDomain->chain));
  799. inserted = TRUE;
  800. //
  801. // Here we create a record for the ObManControl workset group and cause
  802. // it to be inserted in the list hung off the Domain record:
  803. //
  804. // Note that this does not involve sending any data; it merely creates
  805. // the record locally.
  806. //
  807. rc = WSGRecordCreate(pomPrimary,
  808. pDomain,
  809. OMWSG_OM,
  810. OMFP_OM,
  811. &pOMCWSGroup);
  812. if (rc != 0)
  813. {
  814. DC_QUIT;
  815. }
  816. //
  817. // Create a single, empty workset (this function broadcasts the
  818. // creation throughout the Domain):
  819. //
  820. rc = WorksetCreate(pomPrimary->putTask,
  821. pOMCWSGroup,
  822. OM_INFO_WORKSET,
  823. FALSE,
  824. NET_TOP_PRIORITY);
  825. if (rc != 0)
  826. {
  827. DC_QUIT;
  828. }
  829. //
  830. // Fill in the fixed workset group ID (normally, we would call
  831. // WSGGetNewID to allocate an unused one).
  832. //
  833. pOMCWSGroup->wsGroupID = WSGROUPID_OMC;
  834. //
  835. // We fill in the channel ID when we get the result from JoinByKey
  836. //
  837. //
  838. // Add ObMan's putTask to the workset group's client list, so it will
  839. // get events posted to it.
  840. //
  841. rc = AddClientToWSGList(pomPrimary->putTask,
  842. pOMCWSGroup,
  843. pDomain->omchWSGroup,
  844. PRIMARY);
  845. if (rc != 0)
  846. {
  847. DC_QUIT;
  848. }
  849. *ppDomain = pDomain;
  850. DC_EXIT_POINT:
  851. if (rc != 0)
  852. {
  853. ERROR_OUT(( "Error %d creating record for domain %u", callID));
  854. if (pOMCWSGroup != NULL)
  855. {
  856. COM_BasedListRemove(&(pOMCWSGroup->chain));
  857. UT_FreeRefCount((void**)&pOMCWSGroup, FALSE);
  858. }
  859. if (inserted)
  860. {
  861. COM_BasedListRemove(&(pDomain->chain));
  862. }
  863. if (pDomain != NULL)
  864. {
  865. UT_FreeRefCount((void**)&pDomain, FALSE);
  866. }
  867. }
  868. DebugExitDWORD(NewDomainRecord, rc);
  869. return(rc);
  870. }
  871. //
  872. // FreeDomainRecord(...)
  873. //
  874. void FreeDomainRecord
  875. (
  876. POM_DOMAIN * ppDomain
  877. )
  878. {
  879. POM_DOMAIN pDomain;
  880. NET_PRIORITY priority;
  881. POM_SEND_INST pSendInst;
  882. DebugEntry(FreeDomainRecord);
  883. //
  884. // This function
  885. //
  886. // - frees any outstanding send requests (and their associated CBs)
  887. //
  888. // - invalidates, removes from the global list and frees the Domain
  889. // record.
  890. //
  891. pDomain = *ppDomain;
  892. //
  893. // Free all the send instructions queued in the domain:
  894. //
  895. for (priority = NET_TOP_PRIORITY;priority <= NET_LOW_PRIORITY;priority++)
  896. {
  897. for (; ; )
  898. {
  899. pSendInst = (POM_SEND_INST)COM_BasedListFirst(&(pDomain->sendQueue[priority]),
  900. FIELD_OFFSET(OM_SEND_INST, chain));
  901. if (pSendInst == NULL)
  902. {
  903. break;
  904. }
  905. TRACE_OUT(( "Freeing send instruction at priority %u", priority));
  906. FreeSendInst(pSendInst);
  907. }
  908. }
  909. pDomain->valid = FALSE;
  910. COM_BasedListRemove(&(pDomain->chain));
  911. UT_FreeRefCount((void**)ppDomain, FALSE);
  912. DebugExitVOID(FreeDomainRecord);
  913. }
  914. //
  915. // ProcessNetAttachUser(...)
  916. //
  917. void ProcessNetAttachUser
  918. (
  919. POM_PRIMARY pomPrimary,
  920. POM_DOMAIN pDomain,
  921. NET_UID userId,
  922. NET_RESULT result
  923. )
  924. {
  925. NET_CHANNEL_ID channelCorrelator;
  926. UINT rc = 0;
  927. DebugEntry(ProcessNetAttachUser);
  928. TRACE_OUT(( "Got NET_ATTACH for Domain %u (userID: %hu, result: %hu)",
  929. pDomain->callID, userId, result));
  930. //
  931. // Check that this Domain is in the pending attach state:
  932. //
  933. if (pDomain->state != PENDING_ATTACH)
  934. {
  935. WARNING_OUT(( "Unexpected NET_ATTACH - Domain %u is in state %hu)",
  936. pDomain->callID, pDomain->state));
  937. DC_QUIT;
  938. }
  939. //
  940. // If we failed to attach, set the retCode so we tidy up below:
  941. //
  942. if (result != NET_RESULT_OK)
  943. {
  944. ERROR_OUT(( "Failed to attach to Domain %u; cleaning up...",
  945. pDomain->callID));
  946. rc = result;
  947. DC_QUIT;
  948. }
  949. //
  950. // Otherwise, record our user ID for this Domain and then join our user
  951. // ID channel:
  952. //
  953. pDomain->userID = userId;
  954. TRACE_OUT(("Asking to join own channel %hu", pDomain->userID));
  955. rc = MG_ChannelJoin(pomPrimary->pmgClient,
  956. &channelCorrelator,
  957. pDomain->userID);
  958. if (rc != 0)
  959. {
  960. DC_QUIT;
  961. }
  962. //
  963. // Set the Domain <state>:
  964. //
  965. pDomain->state = PENDING_JOIN_OWN;
  966. //
  967. // The next step in the Domain attach process happens when the NET_JOIN
  968. // event arrives for the channel we've just joined. This event causes
  969. // the ProcessNetJoinChannel function to be called.
  970. //
  971. DC_EXIT_POINT:
  972. if (rc != 0)
  973. {
  974. WARNING_OUT(("Error %d joining own user channel %hu",
  975. rc, pDomain->userID));
  976. ProcessOwnDetach(pomPrimary, pDomain);
  977. }
  978. DebugExitVOID(ProcessNetAttachUser);
  979. }
  980. //
  981. // ProcessNetJoinChannel(...)
  982. //
  983. void ProcessNetJoinChannel
  984. (
  985. POM_PRIMARY pomPrimary,
  986. POM_DOMAIN pDomain,
  987. PNET_JOIN_CNF_EVENT pNetJoinCnf
  988. )
  989. {
  990. POM_WSGROUP pOMCWSGroup;
  991. NET_CHANNEL_ID channelCorrelator;
  992. POM_WSGROUP_REG_CB pRegistrationCB = NULL;
  993. BOOL success = TRUE;
  994. DebugEntry(ProcessNetJoinChannel);
  995. TRACE_OUT(( "JOIN_CON - channel %hu - result %hu",
  996. pNetJoinCnf->channel, pNetJoinCnf->result));
  997. switch (pDomain->state)
  998. {
  999. case PENDING_JOIN_OWN:
  1000. {
  1001. //
  1002. // This event is in response to us trying to join our own user
  1003. // channel, as part of the mutli-stage Domain attach process.
  1004. // The next step is to join the ObManControl channel.
  1005. //
  1006. //
  1007. // First check that the join was successful:
  1008. //
  1009. if (pNetJoinCnf->result != NET_RESULT_OK)
  1010. {
  1011. ERROR_OUT(("Failed to join own user ID channel (reason: %hu)",
  1012. pNetJoinCnf->result));
  1013. success = FALSE;
  1014. DC_QUIT;
  1015. }
  1016. //
  1017. // Verify that this is a join event for the correct channel
  1018. //
  1019. ASSERT(pNetJoinCnf->channel == pDomain->userID);
  1020. //
  1021. // The next step in the process of attaching to a Domain is to
  1022. // join the ObManControl channel; we set the state accordingly:
  1023. //
  1024. TRACE_OUT(( "Asking to join ObManControl channel using key"));
  1025. if (MG_ChannelJoinByKey(pomPrimary->pmgClient,
  1026. &channelCorrelator,
  1027. GCC_OBMAN_CHANNEL_KEY) != 0)
  1028. {
  1029. success = FALSE;
  1030. DC_QUIT;
  1031. }
  1032. pDomain->state = PENDING_JOIN_OMC;
  1033. //
  1034. // The next stage in the Domain attach process happens when the
  1035. // NET_JOIN event arrives for the ObManControl channel. This
  1036. // will cause this function to be executed again, but this time
  1037. // the next case statement will be executed.
  1038. //
  1039. }
  1040. break;
  1041. case PENDING_JOIN_OMC:
  1042. {
  1043. //
  1044. // This event is in response to us trying to join the
  1045. // ObManControl workset group channel, as part of the
  1046. // multi-stage Domain attach process.
  1047. //
  1048. //
  1049. // Check that the join was successful:
  1050. //
  1051. if (pNetJoinCnf->result != NET_RESULT_OK)
  1052. {
  1053. WARNING_OUT(( "Bad result %#hx joining ObManControl channel",
  1054. pNetJoinCnf->result));
  1055. success = FALSE;
  1056. DC_QUIT;
  1057. }
  1058. //
  1059. // If so, store the value returned in the domain record:
  1060. //
  1061. pDomain->omcChannel = pNetJoinCnf->channel;
  1062. pOMCWSGroup = GetOMCWsgroup(pDomain);
  1063. if( NULL == pOMCWSGroup )
  1064. {
  1065. TRACE_OUT(( "NULL pOMCWSGroup" ));
  1066. success = FALSE;
  1067. DC_QUIT;
  1068. }
  1069. pOMCWSGroup->channelID = pDomain->omcChannel;
  1070. //
  1071. // We need a token to determine which ObMan is going to
  1072. // initialise the ObManControl workset group. Get GCC to
  1073. // assign us one (this returns a static value for R1.1 calls).
  1074. //
  1075. if (!CMS_AssignTokenId(pomPrimary->pcmClient, GCC_OBMAN_TOKEN_KEY))
  1076. {
  1077. success = FALSE;
  1078. DC_QUIT;
  1079. }
  1080. pDomain->state = PENDING_TOKEN_ASSIGN;
  1081. }
  1082. break;
  1083. case DOMAIN_READY:
  1084. {
  1085. //
  1086. // This should be a join event for a regular workset group
  1087. // channel. We check that we have indeed set up a workset
  1088. // group registration CB containing the channel correlator
  1089. // associated with this event:
  1090. //
  1091. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->pendingRegs),
  1092. (void**)&pRegistrationCB, FIELD_OFFSET(OM_WSGROUP_REG_CB, chain),
  1093. FIELD_OFFSET(OM_WSGROUP_REG_CB, channelCorrelator),
  1094. pNetJoinCnf->correlator,
  1095. FIELD_SIZE(OM_WSGROUP_REG_CB, channelCorrelator));
  1096. if (pRegistrationCB == NULL)
  1097. {
  1098. ERROR_OUT((
  1099. "Unexpected JOIN for channel %hu - no reg CB found",
  1100. pNetJoinCnf->channel));
  1101. DC_QUIT;
  1102. }
  1103. //
  1104. // Check that the join was successful:
  1105. //
  1106. if (pNetJoinCnf->result != NET_RESULT_OK)
  1107. {
  1108. //
  1109. // If not, trace then try again:
  1110. //
  1111. WARNING_OUT(("Failure 0x%08x joining channel %hu for WSG %d, trying again",
  1112. pNetJoinCnf->result,
  1113. pNetJoinCnf->channel,
  1114. pRegistrationCB->wsg));
  1115. pRegistrationCB->pWSGroup->state = INITIAL;
  1116. WSGRegisterRetry(pomPrimary, pRegistrationCB);
  1117. DC_QUIT;
  1118. }
  1119. //
  1120. // Otherwise, call WSGRegisterStage3 to continue the
  1121. // registration process:
  1122. //
  1123. WSGRegisterStage3(pomPrimary,
  1124. pDomain,
  1125. pRegistrationCB,
  1126. pNetJoinCnf->channel);
  1127. }
  1128. break;
  1129. case PENDING_ATTACH:
  1130. case PENDING_WELCOME:
  1131. case GETTING_OMC:
  1132. {
  1133. //
  1134. // Shouldn't get any join indications in these states.
  1135. //
  1136. ERROR_OUT(( "Unexpected JOIN in domain state %hu",
  1137. pDomain->state));
  1138. }
  1139. break;
  1140. default:
  1141. {
  1142. //
  1143. // This is also an error:
  1144. //
  1145. ERROR_OUT(( "Invalid state %hu for domain %u",
  1146. pDomain->state, pDomain->callID));
  1147. }
  1148. }
  1149. DC_EXIT_POINT:
  1150. if (!success)
  1151. {
  1152. //
  1153. // For any error here, we react as if we've been kicked out of the
  1154. // domain:
  1155. //
  1156. ProcessOwnDetach(pomPrimary, pDomain);
  1157. }
  1158. DebugExitVOID(ProcessNetJoinChannel);
  1159. }
  1160. //
  1161. //
  1162. //
  1163. // ProcessCMSTokenAssign(...)
  1164. //
  1165. //
  1166. //
  1167. void ProcessCMSTokenAssign
  1168. (
  1169. POM_PRIMARY pomPrimary,
  1170. POM_DOMAIN pDomain,
  1171. BOOL success,
  1172. NET_TOKEN_ID tokenID
  1173. )
  1174. {
  1175. DebugEntry(ProcessCMSTokenAssign);
  1176. TRACE_OUT(( "TOKEN_ASSIGN_CONFIRM: result %hu, token ID %#hx",
  1177. success, tokenID));
  1178. if (pDomain->state != PENDING_TOKEN_ASSIGN)
  1179. {
  1180. WARNING_OUT(("Got TOKEN_ASSIGN_CONFIRM in state %hu",
  1181. pDomain->state));
  1182. DC_QUIT;
  1183. }
  1184. if (!success)
  1185. {
  1186. //
  1187. // Nothing to do - the domain attach process will time out.
  1188. //
  1189. ERROR_OUT(( "Failed to get token assigned"));
  1190. DC_QUIT;
  1191. }
  1192. pDomain->tokenID = tokenID;
  1193. //
  1194. // Now that we know what the token ID is, try to grab it:
  1195. //
  1196. if (MG_TokenGrab(pomPrimary->pmgClient,
  1197. pDomain->tokenID) != 0)
  1198. {
  1199. ERROR_OUT(( "Failed to grab token"));
  1200. DC_QUIT;
  1201. }
  1202. pDomain->state = PENDING_TOKEN_GRAB;
  1203. DC_EXIT_POINT:
  1204. DebugExitVOID(ProcessCMSTokenAssign);
  1205. }
  1206. //
  1207. // ProcessNetTokenGrab(...)
  1208. //
  1209. UINT ProcessNetTokenGrab
  1210. (
  1211. POM_PRIMARY pomPrimary,
  1212. POM_DOMAIN pDomain,
  1213. NET_RESULT result
  1214. )
  1215. {
  1216. POM_WSGROUP pOMCWSGroup = NULL;
  1217. UINT rc = 0;
  1218. DebugEntry(ProcessNetTokenGrab);
  1219. TRACE_OUT(( "Got token grab confirm - result = %hu", result));
  1220. if (pDomain->state != PENDING_TOKEN_GRAB)
  1221. {
  1222. ERROR_OUT(( "Got TOKEN_GRAB_CONFIRM in state %hu",
  1223. pDomain->state));
  1224. rc = OM_RC_NETWORK_ERROR;
  1225. DC_QUIT;
  1226. }
  1227. //
  1228. // What to do here depends on whether we've succeeded in grabbing the
  1229. // token:
  1230. //
  1231. if (result == NET_RESULT_OK)
  1232. {
  1233. //
  1234. // We're the "top ObMan" in the Domain, so it's up to us to
  1235. // initialise the ObManControl workset group and welcome any others
  1236. // into the Domain (the Welcome message is broadcast on the
  1237. // ObManControl channel):
  1238. //
  1239. rc = ObManControlInit(pomPrimary, pDomain);
  1240. if (rc != 0)
  1241. {
  1242. DC_QUIT;
  1243. }
  1244. //
  1245. // If we get here, then the Domain attach process has finished.
  1246. // Phew! Any workset group registration attempts in progress will
  1247. // be processed shortly, next time the bouncing
  1248. // OMINT_EVENT_WSG_REGISTER_CONT event is processed
  1249. //
  1250. }
  1251. else
  1252. {
  1253. //
  1254. // Someone else is in charge, so we need to get a copy of
  1255. // ObManControl from them (or anyone else who's prepared to give it
  1256. // to us). So, we need to discover the user ID of one of them so
  1257. // we can send our request there (if we just broadcasted our
  1258. // request, then each node would reply, flooding the Domain)
  1259. //
  1260. rc = SayHello(pomPrimary, pDomain);
  1261. if (rc != 0)
  1262. {
  1263. DC_QUIT;
  1264. }
  1265. //
  1266. // The next step in the Domain attach process happens when one of
  1267. // the other nodes out there replies to our HELLO with a WELCOME
  1268. // message. Execution continues in the ProcessWelcome function.
  1269. //
  1270. }
  1271. DC_EXIT_POINT:
  1272. if (rc != 0)
  1273. {
  1274. if (pOMCWSGroup != NULL)
  1275. {
  1276. //
  1277. // This will remove the ObManControl workset group from the
  1278. // Domain and subsequently call DomainDetach to detach from the
  1279. // Domain and free the Domain record:
  1280. //
  1281. DeregisterLocalClient(pomPrimary, &pDomain, pOMCWSGroup, FALSE);
  1282. UT_FreeRefCount((void**)&pOMCWSGroup, FALSE);
  1283. ASSERT((pDomain == NULL));
  1284. }
  1285. }
  1286. DebugExitDWORD(ProcessNetTokenGrab, rc);
  1287. return(rc);
  1288. }
  1289. //
  1290. //
  1291. //
  1292. // ProcessNetTokenInhibit(...)
  1293. //
  1294. //
  1295. //
  1296. UINT ProcessNetTokenInhibit(POM_PRIMARY pomPrimary,
  1297. POM_DOMAIN pDomain,
  1298. NET_RESULT result)
  1299. {
  1300. UINT rc = 0;
  1301. DebugEntry(ProcessNetTokenInhibit);
  1302. TRACE_OUT(( "Got token inhibit confirm - result = %hu", result));
  1303. if (result == NET_RESULT_OK)
  1304. {
  1305. //
  1306. // Now send a Welcome message on the ObManControl channel. It is
  1307. // crucial that this happens at the same time as we set the Domain
  1308. // state to READY, because if another node is joining the call at
  1309. // the same time it will send a Hello message:
  1310. //
  1311. // - if the message has already arrived, we will have thrown it
  1312. // away
  1313. // because the Domain state was not READY, so we must send it now
  1314. //
  1315. // - if it has yet to arrive, then setting the Domain state to
  1316. // READY
  1317. // now means we'll respond with another Welcome when it does
  1318. // arrive.
  1319. //
  1320. pDomain->state = DOMAIN_READY;
  1321. rc = SayWelcome(pomPrimary, pDomain, pDomain->omcChannel);
  1322. if (rc != 0)
  1323. {
  1324. DC_QUIT;
  1325. }
  1326. //
  1327. // OK, the domain attach process has finished. We need to take no
  1328. // further action other than setting the state. Any pending
  1329. // workset group registrations will continue back at the
  1330. // WSGRegisterStage1 function, where hopefully the bounced
  1331. // OMINT_EVENT_WSGROUP_REGISTER event is just about to arrive...
  1332. //
  1333. }
  1334. else
  1335. {
  1336. //
  1337. // Again, no action. We cannot join the domain, but the workset
  1338. // group registrations will time out in due course.
  1339. //
  1340. WARNING_OUT(( "Token inhibit failed!"));
  1341. }
  1342. DC_EXIT_POINT:
  1343. DebugExitDWORD(ProcessNetTokenInhibit, rc);
  1344. return(rc);
  1345. }
  1346. //
  1347. //
  1348. //
  1349. // ObManControlInit(...)
  1350. //
  1351. //
  1352. //
//
// ObManControlInit - seed the ObManControl workset group for a domain.
//
// Called only on the node which won the ObManControl token grab (the
// "top ObMan").  Announces the workset group, publishes our own
// registration object, then inhibits the token so no other node repeats
// the initialisation.  Returns 0 on success or an OM/UT error code.
//
UINT ObManControlInit(POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain)
{
    POM_WSGROUP pOMCWSGroup;
    UINT rc = 0;
    DebugEntry(ObManControlInit);
    //
    // First, set up a pointer to the ObManControl workset group, which
    // should already have been put in the Domain record.
    //
    // NOTE(review): unlike ProcessWelcome/ProcessOtherDetach, the result
    // is not checked for NULL before being dereferenced below - presumed
    // safe because the caller guarantees the group exists at this point
    // in the attach sequence; TODO confirm.
    //
    pOMCWSGroup = GetOMCWsgroup(pDomain);
    //
    // Initialising the ObManControl workset group involves
    //
    // - adding a WSGROUP_INFO object to it, which identifies ObManControl
    // itself.
    //
    TRACE_OUT(( "Initialising ObManControl in Domain %u",
        pDomain->callID));
    //
    // Now we must add a workset group identification object, identifying
    // ObManControl, to workset #0 in ObManControl.
    //
    // Slightly circular, but we try to treat ObManControl as a regular
    // workset group as much as possible; if we didn't add this
    // identification object then when a Client (e.g. AppLoader) tries to
    // register with ObManControl, we would look in workset #0 for a
    // reference to it, not find one and then create it again!
    //
    rc = CreateAnnounce(pomPrimary, pDomain, pOMCWSGroup);
    if (rc != 0)
    {
        DC_QUIT;
    }
    //
    // In addition, we add our registration object to ObManControl workset
    // #0 and update it immediately to status READY_TO_SEND:
    //
    rc = RegAnnounceBegin(pomPrimary,
        pDomain,
        pOMCWSGroup,
        pDomain->userID,
        &(pOMCWSGroup->pObjReg));
    if (rc != 0)
    {
        DC_QUIT;
    }
    rc = RegAnnounceComplete(pomPrimary, pDomain, pOMCWSGroup);
    if (rc != 0)
    {
        DC_QUIT;
    }
    //
    // OK, we've initialised ObManControl for this call - inhibit the token
    // so that no one else can do the same (if this is the local domain,
    // there is no network, so just fake up a successful inhibit confirm
    // by calling the handler directly):
    //
    if (pDomain->callID == OM_NO_CALL)
    {
        TRACE_OUT(( "Faking successful token inhibit for local domain"));
        rc = ProcessNetTokenInhibit(pomPrimary, pDomain, NET_RESULT_OK);
        if (rc != 0)
        {
            DC_QUIT;
        }
    }
    else
    {
        rc = MG_TokenInhibit(pomPrimary->pmgClient,
            pDomain->tokenID);
        if (rc != 0)
        {
            DC_QUIT;
        }
        // Attach continues in ProcessNetTokenInhibit when the confirm
        // arrives.
        pDomain->state = PENDING_TOKEN_INHIBIT;
    }
    DC_EXIT_POINT:
    if (rc != 0)
    {
        WARNING_OUT(("Error %d initialising ObManControl WSG for Domain %u",
            rc, pDomain->callID));
    }
    DebugExitDWORD(ObManControlInit, rc);
    return(rc);
}
  1438. //
  1439. //
  1440. //
  1441. // SayHello(...)
  1442. //
  1443. //
  1444. //
  1445. UINT SayHello(POM_PRIMARY pomPrimary,
  1446. POM_DOMAIN pDomain)
  1447. {
  1448. POMNET_JOINER_PKT pHelloPkt;
  1449. UINT rc = 0;
  1450. DebugEntry(SayHello);
  1451. //
  1452. // Generate and queue an OMNET_HELLO message:
  1453. //
  1454. TRACE_OUT(( "Saying hello in Domain %u", pDomain->callID));
  1455. pHelloPkt = (POMNET_JOINER_PKT)UT_MallocRefCount(sizeof(OMNET_JOINER_PKT), TRUE);
  1456. if (!pHelloPkt)
  1457. {
  1458. rc = UT_RC_NO_MEM;
  1459. DC_QUIT;
  1460. }
  1461. pHelloPkt->header.sender = pDomain->userID;
  1462. pHelloPkt->header.messageType = OMNET_HELLO;
  1463. //
  1464. // All fields in the joiner packet after <capsLen> are capabilities. To
  1465. // calculate the size of these capabilities, we use the offset and size
  1466. // of the caps len field itself to determine the amount of data after
  1467. // it.
  1468. //
  1469. pHelloPkt->capsLen = sizeof(OMNET_JOINER_PKT) -
  1470. (offsetof(OMNET_JOINER_PKT, capsLen) + sizeof(pHelloPkt->capsLen));
  1471. TRACE_OUT(( "Our caps len is 0x%08x", pHelloPkt->capsLen));
  1472. //
  1473. // Take our compression caps from the domain record:
  1474. //
  1475. pHelloPkt->compressionCaps = pDomain->compressionCaps;
  1476. TRACE_OUT(( "Broadcasting compression caps 0x%08x in HELLO",
  1477. pHelloPkt->compressionCaps));
  1478. rc = QueueMessage(pomPrimary->putTask,
  1479. pDomain,
  1480. pDomain->omcChannel,
  1481. NET_TOP_PRIORITY,
  1482. NULL, // no wsgroup
  1483. NULL, // no workset
  1484. NULL, // no object
  1485. (POMNET_PKT_HEADER) pHelloPkt,
  1486. NULL, // no associated object data
  1487. FALSE);
  1488. if (rc != 0)
  1489. {
  1490. DC_QUIT;
  1491. }
  1492. //
  1493. // When the associated response (OMNET_WELCOME) is received from another
  1494. // node, we will ask that node for a copy of the ObManControl workset
  1495. // group. In the meantime, there's nothing else to do.
  1496. //
  1497. pDomain->state = PENDING_WELCOME;
  1498. DC_EXIT_POINT:
  1499. if (rc != 0)
  1500. {
  1501. ERROR_OUT(( "Error %d saying hello in Domain %u", rc, pDomain->callID));
  1502. }
  1503. DebugExitDWORD(SayHello, rc);
  1504. return(rc);
  1505. }
  1506. //
  1507. //
  1508. //
  1509. // ProcessHello(...)
  1510. //
  1511. //
  1512. //
  1513. UINT ProcessHello(POM_PRIMARY pomPrimary,
  1514. POM_DOMAIN pDomain,
  1515. POMNET_JOINER_PKT pHelloPkt,
  1516. UINT lengthOfPkt)
  1517. {
  1518. NET_CHANNEL_ID lateJoiner;
  1519. UINT rc = 0;
  1520. DebugEntry(ProcessHello);
  1521. lateJoiner = pHelloPkt->header.sender;
  1522. //
  1523. // A late joiner has said hello. If we are not fully attached yet, we
  1524. // trace and quit:
  1525. //
  1526. if (pDomain->state != DOMAIN_READY)
  1527. {
  1528. WARNING_OUT(( "Can't process HELLO on channel %#hx - domain state %hu",
  1529. lateJoiner, pDomain->state));
  1530. DC_QUIT;
  1531. }
  1532. //
  1533. // Merge in the late joiner's capabilities with our view of the
  1534. // domain-wide caps.
  1535. //
  1536. MergeCaps(pDomain, pHelloPkt, lengthOfPkt);
  1537. //
  1538. // Now send a welcome message to the late joiner.
  1539. //
  1540. rc = SayWelcome(pomPrimary, pDomain, lateJoiner);
  1541. if (rc != 0)
  1542. {
  1543. DC_QUIT;
  1544. }
  1545. DC_EXIT_POINT:
  1546. if (rc != 0)
  1547. {
  1548. ERROR_OUT(( "Error %d processing hello from node %#hx in Domain %u",
  1549. rc, lateJoiner, pDomain->callID));
  1550. }
  1551. DebugExitDWORD(ProcessHello, rc);
  1552. return(rc);
  1553. } // ProcessHello
  1554. //
  1555. //
  1556. //
  1557. // MergeCaps(...)
  1558. //
  1559. //
  1560. //
  1561. void MergeCaps(POM_DOMAIN pDomain,
  1562. POMNET_JOINER_PKT pJoinerPkt,
  1563. UINT lengthOfPkt)
  1564. {
  1565. NET_CHANNEL_ID sender;
  1566. UINT compressionCaps;
  1567. DebugEntry(MergeCaps);
  1568. sender = pJoinerPkt->header.sender;
  1569. compressionCaps = 0;
  1570. //
  1571. // We have received a HELLO or WELCOME packet from another node.
  1572. //
  1573. // - For a HELLO packet, these caps will be the caps of a late joiner.
  1574. //
  1575. // - For a WELCOME packet, these caps will be the domain-wide caps as
  1576. // viewed by our helper node.
  1577. //
  1578. // Either way, we need to merge in the capabilities from the packet into
  1579. // our view of the domain-wide capabilities.
  1580. //
  1581. // Note that in some backlevel calls, the joiner packet will not contain
  1582. // capabilities - so check the length of the packet first
  1583. //
  1584. if (lengthOfPkt >= (offsetof(OMNET_JOINER_PKT, capsLen) +
  1585. sizeof(pJoinerPkt->capsLen)))
  1586. {
  1587. //
  1588. // OK, this packet contains a capsLen field. See if it contains
  1589. // compression capabilities (these immediately follow the capsLen
  1590. // field and are four bytes long).
  1591. //
  1592. TRACE_OUT(( "Caps len from node 0x%08x is 0x%08x",
  1593. sender, pJoinerPkt->capsLen));
  1594. if (pJoinerPkt->capsLen >= 4)
  1595. {
  1596. //
  1597. // Packet contains compression caps - record them:
  1598. //
  1599. compressionCaps = pJoinerPkt->compressionCaps;
  1600. TRACE_OUT(( "Compression caps in joiner packet from 0x%08x: 0x%08x",
  1601. sender, compressionCaps));
  1602. }
  1603. else
  1604. {
  1605. //
  1606. // If not specified, assume NO compression is supported. This
  1607. // should never happen in practice, because if someone supports
  1608. // any capabilities at all, they should support compression
  1609. // capabilities.
  1610. //
  1611. compressionCaps = OM_CAPS_NO_COMPRESSION;
  1612. ERROR_OUT(( "Party 0x%08x supports caps but not compression caps",
  1613. sender));
  1614. }
  1615. }
  1616. else
  1617. {
  1618. //
  1619. // If no capabilities specified at all, assume PKW compression plus
  1620. // no compression (since that is how LSP20 behaves).
  1621. //
  1622. compressionCaps = (OM_CAPS_PKW_COMPRESSION | OM_CAPS_NO_COMPRESSION);
  1623. TRACE_OUT(( "No caps in joiner pkt - assume PKW + NO compress (0x%08x)",
  1624. compressionCaps));
  1625. }
  1626. //
  1627. // OK, we've determined the capabilities from the packet. Now merge
  1628. // them into our view of the domain-wide caps:
  1629. //
  1630. pDomain->compressionCaps &= compressionCaps;
  1631. TRACE_OUT(( "Domain-wide compression caps now 0x%08x",
  1632. pDomain->compressionCaps));
  1633. DebugExitVOID(MergeCaps);
  1634. } // MergeCaps
  1635. //
  1636. //
  1637. //
  1638. // SayWelcome(...)
  1639. //
  1640. //
  1641. //
//
// SayWelcome - queue an OMNET_WELCOME packet on <channel>.
//
// Sent either in reply to a late joiner's HELLO (channel = that node's
// single-user channel) or broadcast on the ObManControl channel when we
// finish initialising the domain.  The packet carries our current view
// of the domain-wide compression caps.  Returns 0 or a UT/OM error code.
//
UINT SayWelcome(POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    NET_CHANNEL_ID channel)
{
    POMNET_JOINER_PKT pWelcomePkt;
    UINT rc = 0;
    DebugEntry(SayWelcome);
    //
    // The <channel> passed in is one of the following:
    //
    // - the channel of a late-joiner which just sent us a HELLO message, or
    //
    // - the broadcast ObManControl channel, in the case where this is a
    // Welcome we're sending at start of day.
    //
    TRACE_OUT(( "Sending welcome on channel %hu ", channel));
    pWelcomePkt = (POMNET_JOINER_PKT)UT_MallocRefCount(sizeof(OMNET_JOINER_PKT), TRUE);
    if (!pWelcomePkt)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    pWelcomePkt->header.sender = pDomain->userID; // own user ID
    pWelcomePkt->header.messageType = OMNET_WELCOME;
    //
    // All fields in the joiner packet after <capsLen> are capabilities. To
    // calculate the size of these capabilities, we use the offset and size
    // of the <capsLen> field itself to determine the amount of data after
    // it.
    //
    pWelcomePkt->capsLen = sizeof(OMNET_JOINER_PKT) -
        (offsetof(OMNET_JOINER_PKT, capsLen) + sizeof(pWelcomePkt->capsLen));
    //
    // The value we use for the compressionCaps is our current view of the
    // domain-wide compression capabilities (already merged from all nodes
    // seen so far).
    //
    pWelcomePkt->compressionCaps = pDomain->compressionCaps;
    TRACE_OUT(( "Sending caps 0x%08x in WELCOME on channel 0x%08x",
        pWelcomePkt->compressionCaps, channel));
    // QueueMessage takes ownership of the packet on success.
    rc = QueueMessage(pomPrimary->putTask,
        pDomain,
        channel,
        NET_TOP_PRIORITY,
        NULL, // no wsgroup
        NULL, // no workset
        NULL, // no object
        (POMNET_PKT_HEADER) pWelcomePkt,
        NULL, // no object data
        FALSE);
    if (rc != 0)
    {
        DC_QUIT;
    }
    //
    // When this WELCOME message is received at the other end, the
    // ProcessWelcome function is invoked.
    //
    DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(( "Error %d sending welcome on channel 0x%08x in Domain %u",
            rc, channel, pDomain->callID));
    }
    DebugExitDWORD(SayWelcome, rc);
    return(rc);
} // SayWelcome
  1708. //
  1709. //
  1710. //
  1711. // ProcessWelcome(...)
  1712. //
  1713. //
  1714. //
//
// ProcessWelcome - handle an OMNET_WELCOME reply to our earlier HELLO.
//
// Every node in the domain replies to a HELLO, but only the first
// WELCOME is acted on: we merge the sender's caps, request a copy of the
// ObManControl workset group from it, and move to GETTING_OMC so later
// WELCOMEs are ignored.  Returns 0 or an OM error code.
//
UINT ProcessWelcome(POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    POMNET_JOINER_PKT pWelcomePkt,
    UINT lengthOfPkt)
{
    POM_WSGROUP pOMCWSGroup;
    UINT rc = 0;
    DebugEntry(ProcessWelcome);
    //
    // This function is called when a remote instance of ObMan has replied
    // to an OMNET_HELLO message which we sent.
    //
    // We sent the HELLO message as part of the procedure to get a copy of
    // the ObManControl workset group; now we know someone who has it, we
    // send them an OMNET_WSGROUP_SEND_REQ on their single-user channel,
    // enclosing our own single-user channel ID for the response.
    //
    // However, every node in the Domain will respond to our initial HELLO,
    // but we only need to ask the first respondent for the workset group.
    // So, we check the Domain state and then change it so we will ignore
    // future WELCOMES for this Domain:
    //
    // (No mutex required for this test-and-set since only ever executed in
    // ObMan task).
    //
    if (pDomain->state == PENDING_WELCOME)
    {
        //
        // OK, this is the first WELCOME we've got since we broadcast the
        // HELLO. So, we reply to it with a SEND_REQUEST for ObManControl.
        //
        TRACE_OUT((
            "Got first WELCOME message in Domain %u, from node 0x%08x",
            pDomain->callID, pWelcomePkt->header.sender));
        //
        // Merge in the capabilities which our helper node has told us
        // about:
        //
        MergeCaps(pDomain, pWelcomePkt, lengthOfPkt);
        // NOTE: if the OMC workset group is missing we quit with rc == 0,
        // i.e. this is treated as a silent no-op, not an error.
        pOMCWSGroup = GetOMCWsgroup(pDomain);
        if( pOMCWSGroup == NULL)
        {
            TRACE_OUT(("pOMCWSGroup not found"));
            DC_QUIT;
        }
        //
        // ...and call the IssueSendReq function specifying the sender of
        // the WELCOME message as the node to get the workset group from:
        //
        rc = IssueSendReq(pomPrimary,
            pDomain,
            pOMCWSGroup,
            pWelcomePkt->header.sender);
        if (rc != 0)
        {
            ERROR_OUT(( "Error %d requesting OMC from 0x%08x in Domain %u",
                rc, pWelcomePkt->header.sender, pDomain->callID));
            DC_QUIT;
        }
        // Future WELCOMEs will now be ignored (state != PENDING_WELCOME).
        pDomain->state = GETTING_OMC;
        //
        // Next, the remote node which welcomed us will send us the
        // contents of the ObManControl workset group. When it has
        // finished, it will send an OMNET_WSGROUP_SEND_COMPLETE message,
        // which is where we take up the next step of the multi-stage
        // Domain attach process.
        //
    }
    else
    {
        //
        // OK, we're in some other state i.e. not waiting for a WELCOME
        // message - so just ignore it.
        //
        TRACE_OUT(( "Ignoring WELCOME from 0x%08x - in state %hu",
            pWelcomePkt->header.sender, pDomain->state));
    }
    TRACE_OUT(( "Processed WELCOME message from node 0x%08x in Domain %u",
        pWelcomePkt->header.sender, pDomain->callID));
    DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(( "Error %d processing WELCOME message from "
            "node 0x%08x in Domain %u",
            rc, pWelcomePkt->header.sender, pDomain->callID));
    }
    DebugExitDWORD(ProcessWelcome, rc);
    return(rc);
}
  1804. //
  1805. // ProcessNetDetachUser()
  1806. //
  1807. void ProcessNetDetachUser
  1808. (
  1809. POM_PRIMARY pomPrimary,
  1810. POM_DOMAIN pDomain,
  1811. NET_UID detachedUserID
  1812. )
  1813. {
  1814. DebugEntry(ProcessNetDetachUser);
  1815. //
  1816. // There are two cases here:
  1817. //
  1818. // 1. this is a detach indication for ourselves i.e. we have been
  1819. // booted off the network by MCS for some reason
  1820. //
  1821. // 2. this is a detach indication for someone else i.e. another user
  1822. // has left (or been booted off) the MCS Domain.
  1823. //
  1824. // We differentiate the two cases by checking the ID of the detached
  1825. // user against our own.
  1826. //
  1827. if (detachedUserID == pDomain->userID)
  1828. {
  1829. //
  1830. // It's for us, so call the ProcessOwnDetach function:
  1831. //
  1832. ProcessOwnDetach(pomPrimary, pDomain);
  1833. }
  1834. else
  1835. {
  1836. //
  1837. // It's someone else, so we call the ProcessOtherDetach function:
  1838. //
  1839. ProcessOtherDetach(pomPrimary, pDomain, detachedUserID);
  1840. }
  1841. DebugExitVOID(ProcessNetDetachUser);
  1842. }
  1843. //
  1844. // ProcessOtherDetach(...)
  1845. //
//
// ProcessOtherDetach - clean up after a remote node leaves the Domain.
//
// For every in-use registration workset in ObManControl, removes the
// departed node's person object, releases any workset/object locks it
// held, and purges its objects from non-persistent worksets.  May also
// be called with detachedUserID == NET_ALL_REMOTES (from
// ProcessOwnDetach) to sweep away everyone at once.
// Always returns 0 - rc is never set on any path.
//
UINT ProcessOtherDetach
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    NET_UID detachedUserID
)
{
    POM_WSGROUP pOMCWSGroup;
    POM_WORKSET pOMCWorkset;
    OM_WORKSET_ID worksetID;
    UINT rc = 0;
    DebugEntry(ProcessOtherDetach);
    TRACE_OUT(( "DETACH_IND for user 0x%08x in domain %u",
        detachedUserID, pDomain->callID));
    //
    // Someone else has left the Domain. What this means is that we must
    //
    // - release any locks they may have acquired for worksets/objects in
    // this Domain
    //
    // - remove any registration objects they might have added to worksets
    // in ObManControl
    //
    // - remove any objects they have added to non-persistent worksets
    //
    // - if we are catching up from them then select another node to catch
    // up from or stop catch up if no one else is left.
    //
    // OK, to work: first we derive a pointer to the ObManControl workset
    // group (if it's gone already there is nothing to clean up):
    //
    pOMCWSGroup = GetOMCWsgroup(pDomain);
    if( pOMCWSGroup == NULL)
    {
        TRACE_OUT(("pOMCWSGroup not found"));
        DC_QUIT;
    }
    //
    // Each ObManControl workset ID corresponds to a workset group in the
    // domain; visit every slot and clean up on behalf of the detached
    // node in each workset group that exists:
    //
    for (worksetID = 0;
        worksetID < OM_MAX_WORKSETS_PER_WSGROUP;
        worksetID++)
    {
        //
        // Get a pointer to the workset:
        //
        pOMCWorkset = pOMCWSGroup->apWorksets[worksetID];
        if (pOMCWorkset == NULL)
        {
            //
            // There is no workset with this ID so we skip to the next one:
            //
            continue;
        }
        ValidateWorkset(pOMCWorkset);
        //
        // OK, worksetID corresponds to the ID of an actual workset group
        // in the domain. These functions will do any clearup on behalf of
        // the detached node.
        //
        RemovePersonObject(pomPrimary,
            pDomain,
            (OM_WSGROUP_ID) worksetID,
            detachedUserID);
        ReleaseAllNetLocks(pomPrimary,
            pDomain,
            (OM_WSGROUP_ID) worksetID,
            detachedUserID);
        PurgeNonPersistent(pomPrimary,
            pDomain,
            (OM_WSGROUP_ID) worksetID,
            detachedUserID);
        //
        // Finished this workset so go on to the next.
        //
    }
    //
    // Well, that's it:
    //
    TRACE_OUT(( "Cleaned up after node 0x%08x detached from Domain %u",
        detachedUserID, pDomain->callID));
    DC_EXIT_POINT:
    DebugExitDWORD(ProcessOtherDetach, rc);
    return(rc);
}
  1949. //
  1950. // ProcessOwnDetach(..)
  1951. //
//
// ProcessOwnDetach - handle our own expulsion from / departure of an MCS
// Domain.
//
// Sweeps away all remote-node state, then migrates everything still
// pending in this Domain (lock requests, workset groups) into the
// "local" Domain (OM_NO_CALL), failing any outstanding workset group
// registrations with OM_RC_NETWORK_ERROR.  Returns 0 on success or the
// error from DomainRecordFindOrCreate.
//
UINT ProcessOwnDetach
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain
)
{
    POM_DOMAIN pLocalDomainRec;
    POM_WSGROUP pWSGroup;
    POM_LOCK_REQ pLockReq;
    POM_LOCK_REQ pTempLockReq;
    POM_WSGROUP pTempWSGroup;
    POM_WSGROUP_REG_CB pRegistrationCB;
    POM_WSGROUP_REG_CB pTempRegCB;
    UINT callID;
    UINT rc = 0;
    DebugEntry(ProcessOwnDetach);
    //
    // First of all, remove all traces of everybody else (because the call
    // may have ended already, we may not get explicit DETACH_INDICATIONs
    // for them):
    //
    ProcessOtherDetach(pomPrimary, pDomain, NET_ALL_REMOTES);
    //
    // We proceed as follows:
    //
    // - get a pointer to the record for the "local" Domain (or create it
    // if it doesn't exist)
    //
    // - move all the pending lock requests, registrations and workset
    // groups in this Domain into the local Domain.
    //
    // Guard against being called FOR the local Domain itself - moving its
    // contents "into the local Domain" would recurse; just free it:
    //
    callID = pDomain->callID;
    if (callID == OM_NO_CALL)
    {
        WARNING_OUT(( "Detach for local domain - avoiding recursive cleanup"));
        FreeDomainRecord(&pDomain);
        DC_QUIT;
    }
    TRACE_OUT(( "Processing own detach/end call etc. for Domain %u",
        callID));
    rc = DomainRecordFindOrCreate(pomPrimary, OM_NO_CALL, &pLocalDomainRec);
    if (rc != 0)
    {
        DC_QUIT;
    }
    //
    // Move the pending lock requests.  Each node's chain pointer must be
    // captured (pTempLockReq) BEFORE the node is unlinked, since removal
    // invalidates chaining from the old position:
    //
    pLockReq = (POM_LOCK_REQ)COM_BasedListFirst(&(pDomain->pendingLocks), FIELD_OFFSET(OM_LOCK_REQ, chain));
    while (pLockReq != NULL)
    {
        TRACE_OUT((" Moving lock for workset %hu in WSG ID %hu",
            pLockReq->worksetID, pLockReq->wsGroupID));
        pTempLockReq = (POM_LOCK_REQ)COM_BasedListNext(&(pDomain->pendingLocks), pLockReq,
            FIELD_OFFSET(OM_LOCK_REQ, chain));
        COM_BasedListRemove(&(pLockReq->chain));
        COM_BasedListInsertBefore(&(pLocalDomainRec->pendingLocks),
            &(pLockReq->chain));
        pLockReq = pTempLockReq;
    }
    //
    // Now cancel any outstanding registrations, failing each with a
    // network error.  Same capture-next-before-processing pattern:
    // WSGRegisterResult presumably unlinks/frees the CB - TODO confirm.
    //
    pRegistrationCB = (POM_WSGROUP_REG_CB)COM_BasedListFirst(&(pDomain->pendingRegs),
        FIELD_OFFSET(OM_WSGROUP_REG_CB, chain));
    while (pRegistrationCB != NULL)
    {
        TRACE_OUT(("Aborting registration for WSG %d", pRegistrationCB->wsg));
        pTempRegCB = (POM_WSGROUP_REG_CB)COM_BasedListNext(&(pDomain->pendingRegs),
            pRegistrationCB, FIELD_OFFSET(OM_WSGROUP_REG_CB, chain));
        WSGRegisterResult(pomPrimary, pRegistrationCB, OM_RC_NETWORK_ERROR);
        pRegistrationCB = pTempRegCB;
    }
    //
    // Move the workset groups.
    //
    // Note that we will move the ObManControl workset group for the Domain
    // we've detached from into the local Domain as well; it does not
    // replace the OMC workset group for the local Domain, but we can't just
    // throw it away since the Application Loader Primary and Secondaries
    // still have valid workset group handles for it. They will eventually
    // deregister from it and it will be thrown away.
    //
    // Since WSGMove relies on the fact that there is an OMC workset group
    // in the Domain out of which workset groups are being moved, we must
    // move the OMC workset group last.
    //
    // So, start at the end and work backwards:
    //
    pWSGroup = (POM_WSGROUP)COM_BasedListLast(&(pDomain->wsGroups), FIELD_OFFSET(OM_WSGROUP, chain));
    while (pWSGroup != NULL)
    {
        //
        // Move each one into the local Domain. We need pTempWSGroup
        // since we have to do the chaining before calling WSGroupMove.
        // That function removes the workset group from the list.
        //
        pTempWSGroup = (POM_WSGROUP)COM_BasedListPrev(&(pDomain->wsGroups), pWSGroup,
            FIELD_OFFSET(OM_WSGROUP, chain));
        WSGMove(pomPrimary, pLocalDomainRec, pWSGroup);
        pWSGroup = pTempWSGroup;
    }
    DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(( "Error %d processing NET_DETACH for self in Domain %u",
            rc, callID));
    }
    DebugExitDWORD(ProcessOwnDetach, rc);
    return(rc);
}
  2064. //
  2065. //
  2066. //
  2067. // ProcessNetLeaveChannel(...)
  2068. //
  2069. //
  2070. //
  2071. UINT ProcessNetLeaveChannel
  2072. (
  2073. POM_PRIMARY pomPrimary,
  2074. POM_DOMAIN pDomain,
  2075. NET_CHANNEL_ID channel
  2076. )
  2077. {
  2078. POM_DOMAIN pLocalDomainRec;
  2079. POM_WSGROUP pWSGroup;
  2080. UINT callID;
  2081. UINT rc = 0;
  2082. DebugEntry(ProcessNetLeaveChannel);
  2083. callID = pDomain->callID;
  2084. //
  2085. // We've been forced out of the channel by MCS. We don't try to rejoin
  2086. // as this usually indicates a serious error. Instead, we treat this
  2087. // as a move of the associated workset group into the local Domain
  2088. // (unless it's our own user ID channel or the ObManControl channel, in
  2089. // which case we can't really do anything useful in this Domain, so we
  2090. // detach completely).
  2091. //
  2092. if ((channel == pDomain->userID) ||
  2093. (channel == pDomain->omcChannel))
  2094. {
  2095. //
  2096. // This is our own user ID channel, so we behave as if we were
  2097. // booted out by MCS:
  2098. //
  2099. rc = ProcessOwnDetach(pomPrimary, pDomain);
  2100. if (rc != 0)
  2101. {
  2102. DC_QUIT;
  2103. }
  2104. }
  2105. else
  2106. {
  2107. //
  2108. // Not our own single-user channel or the ObManControl channel, so
  2109. // we don't need to take such drastic action. Instead, we process
  2110. // it as if it's a regular move of a workset group into the "local"
  2111. // Domain (i.e. NET_INVALID_DOMAIN_ID).
  2112. //
  2113. // SFR ? { Purge our list of outstanding receives for channel
  2114. PurgeReceiveCBs(pDomain, channel);
  2115. //
  2116. // So, find the workset group which is involved...
  2117. //
  2118. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
  2119. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  2120. FIELD_OFFSET(OM_WSGROUP, channelID), (DWORD)channel,
  2121. FIELD_SIZE(OM_WSGROUP, channelID));
  2122. if (pWSGroup == NULL)
  2123. {
  2124. ERROR_OUT((
  2125. "Got NET_LEAVE for channel %hu but no workset group!",
  2126. channel));
  2127. DC_QUIT;
  2128. }
  2129. //
  2130. // ...and move it into the local Domain:
  2131. //
  2132. rc = DomainRecordFindOrCreate(pomPrimary,
  2133. OM_NO_CALL,
  2134. &pLocalDomainRec);
  2135. if (rc != 0)
  2136. {
  2137. DC_QUIT;
  2138. }
  2139. WSGMove(pomPrimary, pLocalDomainRec, pWSGroup);
  2140. }
  2141. TRACE_OUT(( "Processed NET_LEAVE for channel %hu in Domain %u",
  2142. channel, callID));
  2143. DC_EXIT_POINT:
  2144. if (rc != 0)
  2145. {
  2146. ERROR_OUT(( "Error %d processing NET_LEAVE for %hu in Domain %u",
  2147. rc, channel, callID));
  2148. }
  2149. DebugExitDWORD(ProcessNetLeaveChannel, rc);
  2150. return(rc);
  2151. }
  2152. //
  2153. //
  2154. // LOCKING - OVERVIEW
  2155. //
  2156. // Workset locking operates on a request/reply protocol, which means that
  2157. // when we want a lock, we ask everyone else on the channel if we can have
  2158. // it. If they all say yes, we get it; otherwise we don't.
  2159. //
  2160. // This is non-trivial. Some nodes might disappear before they send us
// their reply, while some might disappear after they've sent their reply.
  2162. // Others might just be far away and take a long time to reply. In
  2163. // addition, new nodes can join the channel at any time.
  2164. //
  2165. // To cope with all this, to lock a workset we build up a list of the
  2166. // remote nodes in the call which are using the workset group (the
  2167. // "expected respondents" list) and if the list is non-empty, we broadcast
  2168. // an OMNET_LOCK_REQ message on the channel for the workset group which
  2169. // contains the workset
  2170. //
  2171. // As each reply comes in, we check it off against the list of expected
  2172. // respondents. If we weren't expecting a reply from that node we ignore
  2173. // it. Otherwise, if the reply is a GRANT, we remove that node from the
  2174. // list and continue waiting for the others. If the reply is a DENY, we
  2175. // give up, discard all the memory allocated for the lock request and its
  2176. // associated CBs and post a failure event to the client.
  2177. //
  2178. // If the list of expected respondents becomes empty because everyone has
  2179. // replied with a GRANT, we again free up any memory used and post an event
  2180. // to the client.
  2181. //
  2182. // While all this is going on, we have a timer running in the background.
  2183. // It ticks every second for ten seconds (both configurable via .INI file)
  2184. // and when it does, we re-examine our list of expected respondents to see
  2185. // if any of them have deregistered from the workset group (or detached
  2186. // from the domain, which implies the former). If they have, we fake up a
  2187. // GRANT message from them, thus potentially triggering the success event
  2188. // to our local client.
  2189. //
  2190. // If anyone ever requests a lock while we have the lock, we DENY them the
  2191. // lock. If anyone ever requests a lock while we are also requesting the
  2192. // lock, we compare their MCS user IDs. If the other node has a higher
  2193. // numerical value, we abort our attempt in favour of them and send back a
  2194. // GRANT; otherwise we DENY the lock.
  2195. //
  2196. // If ever a node detaches when it has a lock, we trap this in
  2197. // ReleaseAllNetLocks, which compares the ID of the lock owner against the
  2198. // ID of the detached node and unlocks the workset if they match. For this
  2199. // reason, it is vital that we always know exactly who has the lock. We
  2200. // achieve this by, whenever we grant the lock to someone, we record their
  2201. // user ID.
  2202. //
  2203. // So, if we ever abort the locking of a workset in favour of someone else,
  2204. // we must broadcast this info to everyone else (since they must be told
  2205. // who really has the lock, and they will think that we have the lock if we
  2206. // don't tell them otherwise). We use a LOCK_NOTIFY message for this.
  2207. //
  2208. //
  2209. //
  2210. // ProcessLockRequest(...)
  2211. //
//
// Handles an incoming OMNET_LOCK_REQ from a remote node.  Decides, based
// on this workset's current lock state, whether to GRANT or DENY the lock
// to the sender, updates the local record of who owns the lock where
// required, and ALWAYS queues a reply back to the sender (the common
// QueueLockReply call at DC_EXIT_POINT below).  See the LOCKING overview
// comment above for the protocol this implements.
//
void ProcessLockRequest
(
    POM_PRIMARY         pomPrimary,
    POM_DOMAIN          pDomain,
    POMNET_LOCK_PKT     pLockReqPkt
)
{
    POM_WSGROUP         pWSGroup;
    POM_WORKSET         pWorkset;
    NET_UID             sender;
    OM_WORKSET_ID       worksetID;
    OMNET_MESSAGE_TYPE  reply = OMNET_LOCK_DENY;   // default is to refuse
    UINT                rc = 0;

    DebugEntry(ProcessLockRequest);

    sender = pLockReqPkt->header.sender;
    worksetID = pLockReqPkt->worksetID;

    //
    // Find the workset group and workset this lock request relates to:
    //
    rc = PreProcessMessage(pDomain,
                           pLockReqPkt->wsGroupID,
                           worksetID,
                           NULL,
                           pLockReqPkt->header.messageType,
                           &pWSGroup,
                           &pWorkset,
                           NULL);
    switch (rc)
    {
        case 0:
        {
            //
            // Fine, this is what we want.
            //
        }
        break;

        case OM_RC_WSGROUP_NOT_FOUND:
        {
            //
            // We shouldn't be getting network events for this workset
            // group if we don't have a workset group record for it!
            //
            WARNING_OUT(( "Got LOCK_REQUEST for unknown workset group %hu",
                pLockReqPkt->wsGroupID));

            //
            // Grant the lock anyway (we have no stake in a workset group
            // we know nothing about, so we must not block the sender):
            //
            reply = OMNET_LOCK_GRANT;
            DC_QUIT;
        }
        break;

        case OM_RC_WORKSET_NOT_FOUND:
        {
            //
            // If we don't have this workset, that means that the lock
            // request has got here before the WORKSET_NEW event for the
            // workset.  This means that we're in the early stages of
            // registering with the workset group, and somebody else is
            // trying to lock the workset.  So, we create the workset now
            // and continue as normal.
            //
            // In the absence of any other information, we create the
            // workset with TOP_PRIORITY and PERSISTENT - it will be set to
            // the correct priority when the WORKSET_CATCHUP/NEW arrives.
            //
            WARNING_OUT(( "Lock req for unknown WSG %d workset %d - creating",
                pWSGroup->wsg, worksetID));
            rc = WorksetCreate(pomPrimary->putTask,
                               pWSGroup,
                               worksetID,
                               FALSE,
                               NET_TOP_PRIORITY);
            if (rc != 0)
            {
                // Creation failed, so we cannot track ownership - refuse.
                reply = OMNET_LOCK_DENY;
                DC_QUIT;
            }

            pWorkset = pWSGroup->apWorksets[worksetID];
        }
        break;

        default:
        {
            ERROR_OUT(( "Error %d from PreProcessMessage", rc));
            reply = OMNET_LOCK_DENY;
            DC_QUIT;
        }
    }

    //
    // Whether we grant this lock to the remote node depends on whether
    // we're trying to lock it for ourselves, so switch according to the
    // workset's lock state:
    //
    ValidateWorkset(pWorkset);
    switch (pWorkset->lockState)
    {
        case LOCKING:
        {
            //
            // We're trying to lock it ourselves, so compare MCS user IDs
            // to resolve the conflict (higher numerical ID wins - the
            // same tie-break every node applies, so exactly one side
            // backs down):
            //
            if (pDomain->userID > sender)
            {
                //
                // We win, so deny the lock:
                //
                reply = OMNET_LOCK_DENY;
            }
            else
            {
                //
                // The other node wins, so grant the lock to the node which
                // requested it (marking it as granted to that node) and
                // cancel our own attempt to get it:
                //
                WARNING_OUT(( "Aborting attempt to lock workset %u in WSG %d "
                    "in favour of node 0x%08x",
                    pWorkset->worksetID, pWSGroup->wsg, sender));

                reply = OMNET_LOCK_GRANT;

                //
                // To cancel our own attempt, we must find the lock request
                // CBs which we set up when we sent out our own
                // OMNET_LOCK_REQ.
                //
                // To do this, call HandleMultLockReq which will find and
                // deal with all the pending requests for this workset:
                //
                pWorkset->lockState = LOCK_GRANTED;
                pWorkset->lockCount = 0;
                pWorkset->lockedBy  = sender;

                HandleMultLockReq(pomPrimary,
                                  pDomain,
                                  pWSGroup,
                                  pWorkset,
                                  OM_RC_WORKSET_LOCK_GRANTED);

                //
                // Since we are aborting in favour of another node, need to
                // broadcast a LOCK_NOTIFY so that everyone else stays in
                // sync with who's got the lock.
                //
                // Note: we do not do this in R1.1 calls since this message
                // is not part of the ObMan R1.1 protocol.
                //
                QueueLockNotify(pomPrimary,
                                pDomain,
                                pWSGroup,
                                pWorkset,
                                sender);
            }
        }
        break;

        case LOCKED:
        {
            //
            // We already have the workset locked so we deny the lock:
            //
            reply = OMNET_LOCK_DENY;
        }
        break;

        case LOCK_GRANTED:
        {
            //
            // If the state is LOCK_GRANTED, we allow this node to have the
            // lock - the other node to which it was previously granted may
            // refuse, but that's not our problem.  We don't change the
            // <lockedBy> field - if the node we think has the lock grants
            // it to the other one, we will receive a LOCK_NOTIFY in due
            // course.
            //
            reply = OMNET_LOCK_GRANT;
        }
        break;

        case UNLOCKED:
        {
            //
            // If the state is UNLOCKED, the other node can have the lock;
            // we don't care, but make sure to record the ID of the node
            // we're granting the lock to:
            //
            reply = OMNET_LOCK_GRANT;

            //
            // SFR5900: Only change the internal state if this is not a
            // check point workset.
            //
            if (pWorkset->worksetID != OM_CHECKPOINT_WORKSET)
            {
                pWorkset->lockState = LOCK_GRANTED;
                pWorkset->lockCount = 0;
                pWorkset->lockedBy  = sender;
            }
        }
        break;

        default:
        {
            //
            // We should have covered all the options so if we get here
            // there's something wrong.
            //
            ERROR_OUT(("Reached default case in workset lock switch (state: %hu)",
                pWorkset->lockState));
        }
    }

DC_EXIT_POINT:
    //
    // All paths converge here: send the GRANT/DENY decision (echoing the
    // requester's correlator, which lives in the request packet) back to
    // the sender.
    //
    QueueLockReply(pomPrimary, pDomain, reply, sender, pLockReqPkt);

    DebugExitVOID(ProcessLockRequest);
}
  2418. //
  2419. // QueueLockReply(...)
  2420. //
  2421. void QueueLockReply
  2422. (
  2423. POM_PRIMARY pomPrimary,
  2424. POM_DOMAIN pDomain,
  2425. OMNET_MESSAGE_TYPE message,
  2426. NET_CHANNEL_ID channel,
  2427. POMNET_LOCK_PKT pLockReqPkt
  2428. )
  2429. {
  2430. POMNET_LOCK_PKT pLockReplyPkt;
  2431. NET_PRIORITY priority;
  2432. DebugEntry(QueueLockReply);
  2433. //
  2434. // The reply is identical to the request with the exception of the
  2435. // <messageType> and <sender> fields. However, we can't just queue the
  2436. // same chunk of memory to be sent, because pLockReqPkt points to a NET
  2437. // buffer which will be freed soon. So, we allocate some new memory,
  2438. // copy the data across and set the fields:
  2439. //
  2440. pLockReplyPkt = (POMNET_LOCK_PKT)UT_MallocRefCount(sizeof(OMNET_LOCK_PKT), TRUE);
  2441. if (!pLockReplyPkt)
  2442. {
  2443. ERROR_OUT(("Out of memory for QueueLockReply"));
  2444. DC_QUIT;
  2445. }
  2446. pLockReplyPkt->header.sender = pDomain->userID;
  2447. pLockReplyPkt->header.messageType = message;
  2448. pLockReplyPkt->wsGroupID = pLockReqPkt->wsGroupID;
  2449. pLockReplyPkt->worksetID = pLockReqPkt->worksetID;
  2450. //
  2451. // The <data1> field of the lock packet is the correlator the requester
  2452. // put in the original LOCK_REQUEST packet.
  2453. //
  2454. pLockReplyPkt->data1 = pLockReqPkt->data1;
  2455. //
  2456. // Lock replies normally go LOW_PRIORITY (with NET_SEND_ALL_PRIORITIES)
  2457. // so that they do not overtake any data queued at this node.
  2458. //
  2459. // However, if they're for ObManControl we send them TOP_PRIORITY
  2460. // (WITHOUT NET_SEND_ALL_PRIORITIES). This is safe because _all_
  2461. // ObManControl data is sent TOP_PRIORITY so there's no fear of a lock
  2462. // reply overtaking a data packet.
  2463. //
  2464. // Correspondingly, when we request a lock, we expect one reply at each
  2465. // priority unless it is for ObManControl.
  2466. //
  2467. if (pLockReqPkt->wsGroupID == WSGROUPID_OMC)
  2468. {
  2469. priority = NET_TOP_PRIORITY;
  2470. }
  2471. else
  2472. {
  2473. priority = NET_LOW_PRIORITY | NET_SEND_ALL_PRIORITIES;
  2474. }
  2475. if (QueueMessage(pomPrimary->putTask,
  2476. pDomain,
  2477. channel,
  2478. priority,
  2479. NULL,
  2480. NULL,
  2481. NULL,
  2482. (POMNET_PKT_HEADER) pLockReplyPkt,
  2483. NULL,
  2484. TRUE) != 0)
  2485. {
  2486. ERROR_OUT(("Error queueing lock reply for workset %hu, WSG %hu",
  2487. pLockReqPkt->worksetID, pLockReqPkt->wsGroupID));
  2488. UT_FreeRefCount((void**)&pLockReplyPkt, FALSE);
  2489. }
  2490. DC_EXIT_POINT:
  2491. DebugExitVOID(QueueLockReply);
  2492. }
  2493. //
  2494. // QueueLockNotify(...)
  2495. //
  2496. void QueueLockNotify
  2497. (
  2498. POM_PRIMARY pomPrimary,
  2499. POM_DOMAIN pDomain,
  2500. POM_WSGROUP pWSGroup,
  2501. POM_WORKSET pWorkset,
  2502. NET_UID sender
  2503. )
  2504. {
  2505. POMNET_LOCK_PKT pLockNotifyPkt;
  2506. NET_PRIORITY priority;
  2507. DebugEntry(QueueLockNotify);
  2508. ValidateWorkset(pWorkset);
  2509. pLockNotifyPkt = (POMNET_LOCK_PKT)UT_MallocRefCount(sizeof(OMNET_LOCK_PKT), TRUE);
  2510. if (!pLockNotifyPkt)
  2511. {
  2512. ERROR_OUT(("Out of memory for QueueLockNotify"));
  2513. DC_QUIT;
  2514. }
  2515. //
  2516. // For a LOCK_NOTIFY, the <data1> field is the user ID of the node
  2517. // we've granted the lock to.
  2518. //
  2519. pLockNotifyPkt->header.sender = pDomain->userID;
  2520. pLockNotifyPkt->header.messageType = OMNET_LOCK_NOTIFY;
  2521. pLockNotifyPkt->wsGroupID = pWSGroup->wsGroupID;
  2522. pLockNotifyPkt->worksetID = pWorkset->worksetID;
  2523. pLockNotifyPkt->data1 = sender;
  2524. //
  2525. // LOCK_NOTIFY messages go at the priority of the workset involved. If
  2526. // this is OBMAN_CHOOSES_PRIORITY, then all bets are off and we send
  2527. // them TOP_PRIORITY.
  2528. //
  2529. priority = pWorkset->priority;
  2530. if (priority == OM_OBMAN_CHOOSES_PRIORITY)
  2531. {
  2532. priority = NET_TOP_PRIORITY;
  2533. }
  2534. if (QueueMessage(pomPrimary->putTask,
  2535. pDomain,
  2536. pWSGroup->channelID,
  2537. priority,
  2538. NULL,
  2539. NULL,
  2540. NULL,
  2541. (POMNET_PKT_HEADER) pLockNotifyPkt,
  2542. NULL,
  2543. TRUE) != 0)
  2544. {
  2545. ERROR_OUT(("Error queueing lock notify for workset %hu in WSG %hu",
  2546. pWorkset->worksetID, pWSGroup->wsGroupID));
  2547. UT_FreeRefCount((void**)&pLockNotifyPkt, FALSE);
  2548. }
  2549. DC_EXIT_POINT:
  2550. DebugExitVOID(QueueLockNotify);
  2551. }
  2552. //
  2553. // ProcessLockNotify(...)
  2554. //
  2555. void ProcessLockNotify
  2556. (
  2557. POM_PRIMARY pomPrimary,
  2558. POM_DOMAIN pDomain,
  2559. POM_WSGROUP pWSGroup,
  2560. POM_WORKSET pWorkset,
  2561. NET_UID owner
  2562. )
  2563. {
  2564. POM_WORKSET pOMCWorkset;
  2565. POM_OBJECT pObjPerson;
  2566. DebugEntry(ProcessLockNotify);
  2567. ValidateWSGroup(pWSGroup);
  2568. ValidateWorkset(pWorkset);
  2569. //
  2570. // This message is sent when one remote node has granted the lock to
  2571. // another. We use it to update our view of who has got the lock.
  2572. //
  2573. TRACE_OUT(("Got LOCK_NOTIFY for workset %u in WSG %d - node 0x%08x has the lock",
  2574. pWorkset->worksetID, pWSGroup->wsg, owner));
  2575. //
  2576. // Check the lock state for the workset:
  2577. //
  2578. switch (pWorkset->lockState)
  2579. {
  2580. case LOCKED:
  2581. {
  2582. //
  2583. // A remote node has just told us that another remote node has
  2584. // got this workset lock - but we think we've got it!
  2585. //
  2586. ERROR_OUT(( "Bad LOCK_NOTIFY for WSG %d workset %d, owner 0x%08x",
  2587. pWSGroup->wsg, pWorkset->worksetID, owner));
  2588. DC_QUIT;
  2589. }
  2590. break;
  2591. case LOCKING:
  2592. {
  2593. //
  2594. // We should get a LOCK_DENY or a LOCK_GRANT later - do nothing
  2595. // now.
  2596. //
  2597. DC_QUIT;
  2598. }
  2599. break;
  2600. case LOCK_GRANTED:
  2601. case UNLOCKED:
  2602. {
  2603. //
  2604. // One remote node has granted the lock to another. Check the
  2605. // latter is still attached, by looking in the control workset:
  2606. //
  2607. pOMCWorkset = GetOMCWorkset(pDomain, pWSGroup->wsGroupID);
  2608. FindPersonObject(pOMCWorkset,
  2609. owner,
  2610. FIND_THIS,
  2611. &pObjPerson);
  2612. if (pObjPerson != NULL)
  2613. {
  2614. ValidateObject(pObjPerson);
  2615. //
  2616. // If our internal state is LOCK_GRANTED and we have just
  2617. // received a LOCK_NOTIFY from another node then we can
  2618. // just ignore it - it is for a lock request that we have
  2619. // just abandoned.
  2620. //
  2621. if ( (pWorkset->lockState == LOCK_GRANTED) &&
  2622. (owner == pDomain->userID) )
  2623. {
  2624. TRACE_OUT(( "Ignoring LOCK_NOTIFY for ourselves"));
  2625. DC_QUIT;
  2626. }
  2627. //
  2628. // Only store the new ID it is greater than the last ID we
  2629. // were notified of - it is possible for LOCK_NOTIFIES to
  2630. // get crossed on the wire. Consider the following
  2631. // scenario:
  2632. //
  2633. // Machines 1, 2, 3 and 4 are all in a call and all try and
  2634. // lock at the same time.
  2635. //
  2636. // - 2 grants to 3 and sends a LOCK_NOTIFY saying that 3
  2637. // has the lock.
  2638. //
  2639. // - 3 grants to 4 and sends a LOCK_NOTIFY saying that 4
  2640. // has the lock
  2641. //
  2642. // 4 actually has the lock at this point.
  2643. //
  2644. // Machine 1 gets the lock notification from 3 and sets its
  2645. // 'lockedBy' field to 4.
  2646. // Machine 1 then gets the lock notification from 2 and
  2647. // resets the 'lockedBy' field to 3.
  2648. //
  2649. // 4 then unlocks and sends the unlock notification. When
  2650. // 1 gets the unlock, it does not recognise the ID of the
  2651. // unlocking machine (it thinks 3 has the lock) so doesnt
  2652. // bother to reset the local locked state. Any subsequent
  2653. // attempts to lock the workset on 1 fail because it still
  2654. // still thinks 3 has the lock.
  2655. //
  2656. if (owner > pWorkset->lockedBy)
  2657. {
  2658. pWorkset->lockedBy = owner;
  2659. TRACE_OUT(( "Node ID 0x%08x has the lock (?)",
  2660. pWorkset->lockedBy));
  2661. }
  2662. }
  2663. else
  2664. {
  2665. //
  2666. // If not, we assume that this node was granted the lock
  2667. // but then went away. If we did think the workset was
  2668. // locked, mark it as unlocked and post an unlock event.
  2669. //
  2670. if (pWorkset->lockState == LOCK_GRANTED)
  2671. {
  2672. TRACE_OUT(("node 0x%08x had lock on workset %d in WSG %d but has left",
  2673. owner, pWorkset->worksetID, pWSGroup->wsg));
  2674. WorksetUnlockLocal(pomPrimary->putTask, pWorkset);
  2675. }
  2676. }
  2677. }
  2678. break;
  2679. default:
  2680. {
  2681. //
  2682. // We should have covered all the options so if we get here
  2683. // there's something wrong.
  2684. //
  2685. ERROR_OUT(("Reached deafult case in workset lock switch (state: %hu)",
  2686. pWorkset->lockState));
  2687. }
  2688. }
  2689. DC_EXIT_POINT:
  2690. DebugExitVOID(ProcessLockNotify);
  2691. }
//
// ProcessLockReply(...)
//
// Handles an incoming OMNET_LOCK_GRANT or OMNET_LOCK_DENY for a lock
// request we previously broadcast.  Replies are matched against the
// Domain's pending-lock list by <correlator>, and the replying node is
// checked off the request's expected-respondents list.  A DENY aborts the
// whole request; once every expected node has sent a GRANT, the lock
// succeeds.  See the LOCKING overview comment earlier in this file.
//
void ProcessLockReply
(
    POM_PRIMARY         pomPrimary,
    POM_DOMAIN          pDomain,
    NET_UID             sender,
    OM_CORRELATOR       correlator,
    OMNET_MESSAGE_TYPE  replyType)
{
    POM_WSGROUP         pWSGroup = NULL;
    POM_WORKSET         pWorkset;
    POM_LOCK_REQ        pLockReq;
    POM_NODE_LIST       pNodeEntry;

    DebugEntry(ProcessLockReply);

    //
    // Search the domain's list of pending locks for one which matches the
    // correlator (we do it this way rather than using the workset group ID
    // and workset ID to ensure that we don't get confused between
    // successive lock requests for the same workset).
    //
    TRACE_OUT(( "Searching domain %u's list for lock corr %hu",
        pDomain->callID, correlator));

    COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->pendingLocks),
        (void**)&pLockReq, FIELD_OFFSET(OM_LOCK_REQ, chain),
        FIELD_OFFSET(OM_LOCK_REQ, correlator), (DWORD)correlator,
        FIELD_SIZE(OM_LOCK_REQ, correlator));

    if (pLockReq == NULL)
    {
        //
        // Could be any of the following:
        //
        // - This reply is from a node we were never expecting a lock
        //   request from in the first place, and we've got all the other
        //   replies so we've thrown away the lock request.
        //
        // - Someone else has denied us the lock so we've given up.
        //
        // - The node was too slow to reply and we've given up on the lock
        //   request.
        //
        // - We've left the domain and so moved all the pending lock
        //   requests into the local domain.
        //
        // - A logic error.
        //
        // The only thing we can do here is quit.
        //
        WARNING_OUT(( "Unexpected lock correlator 0x%08x (domain %u)",
            correlator, pDomain->callID));
        DC_QUIT;
    }

    //
    // Otherwise, we search the list of expected respondents looking for
    // the node which has just replied:
    //
    COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pLockReq->nodes),
        (void**)&pNodeEntry, FIELD_OFFSET(OM_NODE_LIST, chain),
        FIELD_OFFSET(OM_NODE_LIST, userID), (DWORD)sender,
        FIELD_SIZE(OM_NODE_LIST, userID));

    if (pNodeEntry == NULL)
    {
        //
        // Could be any of the following:
        //
        // - We removed the node from the list because it had deregistered
        //   when the timeout expired (will only happen when delete of
        //   person object overtakes lock reply and timeout expires locally
        //   between the two).
        //
        // - The node joined since we compiled the list.
        //
        // - A logic error.
        //
        TRACE_OUT(("Recd unexpected lock reply from node 0x%08x in Domain %u",
            sender, pDomain->callID));
        DC_QUIT;
    }

    //
    // Otherwise, this is a normal lock reply so we just remove the node
    // from the list and free up its chunk of memory.
    //
    COM_BasedListRemove(&(pNodeEntry->chain));
    UT_FreeRefCount((void**)&pNodeEntry, FALSE);

    pWSGroup = pLockReq->pWSGroup;

    //
    // If the client has just deregistered from the workset group, we'll
    // be throwing it away soon, so don't do any more processing:
    //
    if (!pWSGroup->valid)
    {
        WARNING_OUT(("Ignoring lock reply for discarded WSG %d", pWSGroup->wsg));
        DC_QUIT;
    }

    // Worksets are never discarded individually, only with the whole
    // group, so the workset must still exist here:
    pWorkset = pWSGroup->apWorksets[pLockReq->worksetID];
    ASSERT((pWorkset != NULL));

    //
    // Now check the workset's lock state: if we're not/no longer trying to
    // lock it, quit.
    //
    // Note, however, that checkpointing worksets are never marked as
    // LOCKING, even when we're locking them, so exclude them from the
    // test:
    //
    if ((pWorkset->lockState != LOCKING) &&
        (pWorkset->worksetID != OM_CHECKPOINT_WORKSET))
    {
        WARNING_OUT(( "Recd unwanted lock reply from %hu for workset %d WSG %d",
            sender, pWorkset->worksetID, pWSGroup->wsg));
        DC_QUIT;
    }

    //
    // If this is a negative reply, then we have failed to get the lock so
    // inform our local client and then quit:
    //
    if (replyType == OMNET_LOCK_DENY)
    {
        //
        // We do not expect this for a CHECKPOINT_WORKSET:
        //
        ASSERT((pWorkset->worksetID != OM_CHECKPOINT_WORKSET));
        WARNING_OUT(( "node 0x%08x has denied the lock for workset %u in WSG %d",
            sender, pWorkset->worksetID, pWSGroup->wsg));
        pWorkset->lockState = UNLOCKED;
        pWorkset->lockCount = 0;
        // Fail ALL pending lock requests for this workset in one go:
        HandleMultLockReq(pomPrimary,
                          pDomain,
                          pWSGroup,
                          pWorkset,
                          OM_RC_WORKSET_LOCK_GRANTED);

        //
        // Since we have given up our lock request in favour of another
        // node, need to broadcast a LOCK_NOTIFY so that everyone else
        // stays in sync with who's got the lock.
        //
        QueueLockNotify(pomPrimary, pDomain, pWSGroup, pWorkset, sender);
        DC_QUIT;
    }

    TRACE_OUT(( "Affirmative lock reply received from node 0x%08x", sender));

    //
    // Check if the list of expected respondents is now empty:
    //
    if (COM_BasedListIsEmpty(&(pLockReq->nodes)))
    {
        //
        // List is now empty, so all nodes have replied to the request,
        // therefore lock has succeeded:
        //
        TRACE_OUT(( "Got all LOCK_GRANT replies for workset %u in WSG %d",
            pWorkset->worksetID, pWSGroup->wsg));

        if (pWorkset->worksetID == OM_CHECKPOINT_WORKSET)
        {
            //
            // This is a checkpointing workset.  We do not set the state to
            // LOCKED (we never do for these worksets) and we only process
            // the particular pending lock request which this packet came
            // in reply to - otherwise we couldn't guarantee an end-to-end
            // ping on each checkpoint:
            //
            WorksetLockResult(pomPrimary->putTask, &pLockReq, 0);
        }
        else
        {
            //
            // This is not a checkpointing workset, so set the state to
            // LOCKED and process ALL pending locks for this workset:
            //
            pWorkset->lockState = LOCKED;
            HandleMultLockReq(pomPrimary, pDomain, pWSGroup, pWorkset, 0);
        }
    }
    else
    {
        //
        // Otherwise, still awaiting some replies, so we do nothing more
        // for the moment except trace.
        //
        TRACE_OUT(( "Still need lock replies for workset %u in WSG %d",
            pLockReq->worksetID, pWSGroup->wsg));
    }

DC_EXIT_POINT:
    DebugExitVOID(ProcessLockReply);
}
  2876. //
  2877. // PurgeLockRequests(...)
  2878. //
  2879. void PurgeLockRequests
  2880. (
  2881. POM_DOMAIN pDomain,
  2882. POM_WSGROUP pWSGroup
  2883. )
  2884. {
  2885. POM_LOCK_REQ pLockReq;
  2886. POM_LOCK_REQ pNextLockReq;
  2887. POM_NODE_LIST pNodeEntry;
  2888. DebugEntry(PurgeLockRequests);
  2889. //
  2890. // Search this domain's list of lock requests looking for a match on
  2891. // workset group ID:
  2892. //
  2893. pLockReq = (POM_LOCK_REQ)COM_BasedListFirst(&(pDomain->pendingLocks), FIELD_OFFSET(OM_LOCK_REQ, chain));
  2894. while (pLockReq != NULL)
  2895. {
  2896. //
  2897. // This loop might remove pLockReq from the list, so chain first:
  2898. //
  2899. pNextLockReq = (POM_LOCK_REQ)COM_BasedListNext(&(pDomain->pendingLocks), pLockReq,
  2900. FIELD_OFFSET(OM_LOCK_REQ, chain));
  2901. //
  2902. // For each match...
  2903. //
  2904. if (pLockReq->wsGroupID == pWSGroup->wsGroupID)
  2905. {
  2906. TRACE_OUT(( "'%s' still has lock req oustanding - discarding"));
  2907. //
  2908. // Discard any node list entries remaining...
  2909. //
  2910. pNodeEntry = (POM_NODE_LIST)COM_BasedListFirst(&(pLockReq->nodes), FIELD_OFFSET(OM_NODE_LIST, chain));
  2911. while (pNodeEntry != NULL)
  2912. {
  2913. COM_BasedListRemove(&(pNodeEntry->chain));
  2914. UT_FreeRefCount((void**)&pNodeEntry, FALSE);
  2915. pNodeEntry = (POM_NODE_LIST)COM_BasedListFirst(&(pLockReq->nodes), FIELD_OFFSET(OM_NODE_LIST, chain));
  2916. }
  2917. //
  2918. // ...and discard the lock request itself:
  2919. //
  2920. COM_BasedListRemove(&(pLockReq->chain));
  2921. UT_FreeRefCount((void**)&pLockReq, FALSE);
  2922. }
  2923. pLockReq = pNextLockReq;
  2924. }
  2925. DebugExitVOID(PurgeLockRequests);
  2926. }
  2927. //
  2928. // ProcessLockTimeout(...)
  2929. //
  2930. void ProcessLockTimeout
  2931. (
  2932. POM_PRIMARY pomPrimary,
  2933. UINT retriesToGo,
  2934. UINT callID
  2935. )
  2936. {
  2937. POM_DOMAIN pDomain;
  2938. POM_WSGROUP pWSGroup;
  2939. POM_WORKSET pWorkset;
  2940. POM_LOCK_REQ pLockReq = NULL;
  2941. POM_WORKSET pOMCWorkset;
  2942. POM_OBJECT pObj;
  2943. POM_NODE_LIST pNodeEntry;
  2944. POM_NODE_LIST pNextNodeEntry;
  2945. DebugEntry(ProcessLockTimeout);
  2946. //
  2947. // When we broadcast a lock request, we start a timer going so that we
  2948. // don't hang around for ever waiting for replies from nodes which have
  2949. // gone away. This timer has now popped, so we validate our list of
  2950. // expected respondents by checking that each entry relates to a node
  2951. // still in the domain.
  2952. //
  2953. //
  2954. // First, find the lock request CB by looking in each domain and then
  2955. // at the correlators of each pending lock request:
  2956. //
  2957. pDomain = (POM_DOMAIN)COM_BasedListFirst(&(pomPrimary->domains), FIELD_OFFSET(OM_DOMAIN, chain));
  2958. while (pDomain != NULL)
  2959. {
  2960. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->pendingLocks),
  2961. (void**)&pLockReq, FIELD_OFFSET(OM_LOCK_REQ, chain),
  2962. FIELD_OFFSET(OM_LOCK_REQ, retriesToGo), (DWORD)retriesToGo,
  2963. FIELD_SIZE(OM_LOCK_REQ, retriesToGo));
  2964. if (pLockReq != NULL)
  2965. {
  2966. TRACE_OUT(( "Found correlated lock request"));
  2967. break;
  2968. }
  2969. //
  2970. // Didn't find anything in this domain - go on to the next:
  2971. //
  2972. pDomain = (POM_DOMAIN)COM_BasedListNext(&(pomPrimary->domains), pDomain,
  2973. FIELD_OFFSET(OM_DOMAIN, chain));
  2974. }
  2975. if (pLockReq == NULL)
  2976. {
  2977. TRACE_OUT(( "Lock timeout expired after lock granted/refused"));
  2978. DC_QUIT;
  2979. }
  2980. pWSGroup = pLockReq->pWSGroup;
  2981. //
  2982. // If the client has just deregistered from the workset group, we'll
  2983. // be throwing it away soon, so don't do any more processing:
  2984. //
  2985. if (!pWSGroup->valid)
  2986. {
  2987. WARNING_OUT(( "Ignoring lock timeout for discarded WSG %d",
  2988. pWSGroup->wsg));
  2989. DC_QUIT;
  2990. }
  2991. //
  2992. // We know the workset must still exist because worksets don't get
  2993. // discarded unless the whole workset group is being discarded.
  2994. //
  2995. pWorkset = pWSGroup->apWorksets[pLockReq->worksetID];
  2996. ASSERT((pWorkset != NULL));
  2997. //
  2998. // The workset must be in the LOCKING state because if it is LOCKED or
  2999. // UNLOCKED, then we shouldn't have found a lock request CB for it
  3000. // (unless of course it's a checkpointing workset):
  3001. //
  3002. if (pWorkset->lockState != LOCKING)
  3003. {
  3004. if (pWorkset->worksetID != OM_CHECKPOINT_WORKSET)
  3005. {
  3006. WARNING_OUT((
  3007. "Got lock timeout for workset %u in WSG %d but state is %u",
  3008. pWorkset->worksetID, pWSGroup->wsg,
  3009. pWorkset->lockState));
  3010. DC_QUIT;
  3011. }
  3012. }
  3013. //
  3014. // Go through the relevant control workset to see if any of the
  3015. // expected respondents have disappeared.
  3016. //
  3017. pOMCWorkset = GetOMCWorkset(pDomain, pLockReq->wsGroupID);
  3018. ASSERT((pOMCWorkset != NULL));
  3019. //
  3020. // Chain through each of the objects in our expected respondents list
  3021. // as follows:
  3022. //
  3023. // FOR each object in the expected respondents list
  3024. //
  3025. // FOR each person object in the relevant ObManControl workset
  3026. //
  3027. // IF they match on user ID, this node is still around so
  3028. // don't delete it
  3029. //
  3030. // IF no match found then node has gone away so remove it from
  3031. // expected respondents list.
  3032. //
  3033. //
  3034. pNodeEntry = (POM_NODE_LIST)COM_BasedListFirst(&(pLockReq->nodes), FIELD_OFFSET(OM_NODE_LIST, chain));
  3035. while (pNodeEntry != NULL)
  3036. {
  3037. //
  3038. // We might free up pNodeEntry on a pass through the loop (in
  3039. // ProcessLockReply), but we will need to be able to chain from it
  3040. // all the same. So, we chain at the START of the loop, putting a
  3041. // pointer to the next item in pTempNodeEntry; at the end of the
  3042. // loop, we assign this value to pNodeEntry:
  3043. //
  3044. pNextNodeEntry = (POM_NODE_LIST)COM_BasedListNext(&(pLockReq->nodes), pNodeEntry,
  3045. FIELD_OFFSET(OM_NODE_LIST, chain));
  3046. //
  3047. // Now, search for this user's person object:
  3048. //
  3049. FindPersonObject(pOMCWorkset,
  3050. pNodeEntry->userID,
  3051. FIND_THIS,
  3052. &pObj);
  3053. if (pObj == NULL)
  3054. {
  3055. //
  3056. // We didn't find this node in the workset, so it must have
  3057. // disappeared. Therefore, we fake a LOCK_GRANT message from
  3058. // it. ProcessLockReply will duplicate some of the processing
  3059. // we've done but it saves duplicating code.
  3060. //
  3061. WARNING_OUT((
  3062. "node 0x%08x has disappeared - faking LOCK_GRANT message",
  3063. pNodeEntry->userID));
  3064. ProcessLockReply(pomPrimary,
  3065. pDomain,
  3066. pNodeEntry->userID,
  3067. pLockReq->correlator,
  3068. OMNET_LOCK_GRANT);
  3069. }
  3070. //
  3071. // Now, go on to the next item in the expected respondents list:
  3072. //
  3073. pNodeEntry = pNextNodeEntry;
  3074. }
  3075. //
  3076. // ProcessLockReply may have determined, with the faked messages we
  3077. // gave it, that the lock attempt has succeeded completely. If so, the
  3078. // workset's lock state will now be LOCKED. If it isn't, we'll need to
  3079. // post another timeout event.
  3080. //
  3081. if (pWorkset->lockState == LOCKING)
  3082. {
  3083. TRACE_OUT(( "Replies to lock request still expected"));
  3084. if (pLockReq->retriesToGo == 0)
  3085. {
  3086. //
  3087. // We've run out of retries so give up now:
  3088. //
  3089. WARNING_OUT(( "Timed out trying to lock workset %u in WSG %d",
  3090. pLockReq->worksetID, pWSGroup->wsg));
  3091. pWorkset->lockState = UNLOCKED;
  3092. pWorkset->lockedBy = 0;
  3093. pWorkset->lockCount = 0;
  3094. HandleMultLockReq(pomPrimary,
  3095. pDomain,
  3096. pWSGroup,
  3097. pWorkset,
  3098. OM_RC_OUT_OF_RESOURCES);
  3099. //
  3100. // Now send an unlock message to all nodes, so that they don't
  3101. // think we still have it locked.
  3102. //
  3103. if (QueueUnlock(pomPrimary->putTask,
  3104. pDomain,
  3105. pWSGroup->wsGroupID,
  3106. pWorkset->worksetID,
  3107. pWSGroup->channelID,
  3108. pWorkset->priority) != 0)
  3109. {
  3110. DC_QUIT;
  3111. }
  3112. }
  3113. else // retriesToGo == 0
  3114. {
  3115. pLockReq->retriesToGo--;
  3116. UT_PostEvent(pomPrimary->putTask,
  3117. pomPrimary->putTask,
  3118. OM_LOCK_RETRY_DELAY_DFLT,
  3119. OMINT_EVENT_LOCK_TIMEOUT,
  3120. retriesToGo,
  3121. callID);
  3122. }
  3123. }
  3124. DC_EXIT_POINT:
  3125. DebugExitVOID(ProcessLockTimeout);
  3126. }
  3127. //
  3128. // HandleMultLockReq
  3129. //
  3130. void HandleMultLockReq
  3131. (
  3132. POM_PRIMARY pomPrimary,
  3133. POM_DOMAIN pDomain,
  3134. POM_WSGROUP pWSGroup,
  3135. POM_WORKSET pWorkset,
  3136. UINT result
  3137. )
  3138. {
  3139. POM_LOCK_REQ pLockReq;
  3140. DebugEntry(HandleMultLockReq);
  3141. //
  3142. // We need to search this Domain's list of lock requests for every one
  3143. // which matches the workset group and workset specified in the
  3144. // parameter list. Find the primary record first as a sanity check:
  3145. //
  3146. FindLockReq(pDomain, pWSGroup, pWorkset, &pLockReq, LOCK_PRIMARY);
  3147. if (pLockReq == NULL)
  3148. {
  3149. ERROR_OUT(( "No primary lock request CB found for workset %u!",
  3150. pWorkset->worksetID));
  3151. DC_QUIT;
  3152. }
  3153. while (pLockReq != NULL)
  3154. {
  3155. WorksetLockResult(pomPrimary->putTask, &pLockReq, result);
  3156. FindLockReq(pDomain, pWSGroup, pWorkset,
  3157. &pLockReq, LOCK_SECONDARY);
  3158. }
  3159. DC_EXIT_POINT:
  3160. DebugExitVOID(HandleMultLockReq);
  3161. }
  3162. //
  3163. //
  3164. //
  3165. // FindLockReq
  3166. //
  3167. //
  3168. //
  3169. void FindLockReq(POM_DOMAIN pDomain,
  3170. POM_WSGROUP pWSGroup,
  3171. POM_WORKSET pWorkset,
  3172. POM_LOCK_REQ * ppLockReq,
  3173. BYTE lockType)
  3174. {
  3175. POM_LOCK_REQ pLockReq;
  3176. DebugEntry(FindLockReq);
  3177. //
  3178. // We need to search this Domain's list of lock requests for every one
  3179. // which matches the workset group, workset and lock type specified in
  3180. // the parameter list.
  3181. //
  3182. // So, we search the list to find a match on workset group ID, then
  3183. // compare the workset ID. If that doesn't match, we continue down the
  3184. // list:
  3185. //
  3186. pLockReq = (POM_LOCK_REQ)COM_BasedListFirst(&(pDomain->pendingLocks), FIELD_OFFSET(OM_LOCK_REQ, chain));
  3187. while (pLockReq != NULL)
  3188. {
  3189. if ((pLockReq->wsGroupID == pWSGroup->wsGroupID) &&
  3190. (pLockReq->worksetID == pWorkset->worksetID) &&
  3191. (pLockReq->type == lockType))
  3192. {
  3193. break;
  3194. }
  3195. pLockReq = (POM_LOCK_REQ)COM_BasedListNext(&(pDomain->pendingLocks), pLockReq,
  3196. FIELD_OFFSET(OM_LOCK_REQ, chain));
  3197. }
  3198. *ppLockReq = pLockReq;
  3199. DebugExitVOID(FindLockReq);
  3200. }
  3201. //
  3202. // ProcessUnlock(...)
  3203. //
  3204. void ProcessUnlock
  3205. (
  3206. POM_PRIMARY pomPrimary,
  3207. POM_WORKSET pWorkset,
  3208. NET_UID sender
  3209. )
  3210. {
  3211. DebugEntry(ProcessUnlock);
  3212. //
  3213. // Check the workset was locked by the node that's now unlocking it:
  3214. //
  3215. if (pWorkset->lockedBy != sender)
  3216. {
  3217. WARNING_OUT(( "Unexpected UNLOCK from node 0x%08x for %hu!",
  3218. sender, pWorkset->worksetID));
  3219. }
  3220. else
  3221. {
  3222. TRACE_OUT(( "Unlocking:%hu for node 0x%08x",
  3223. pWorkset->worksetID, sender));
  3224. WorksetUnlockLocal(pomPrimary->putTask, pWorkset);
  3225. }
  3226. DebugExitVOID(ProcessUnlock);
  3227. }
  3228. //
  3229. // ReleaseAllNetLocks(...)
  3230. //
  3231. void ReleaseAllNetLocks
  3232. (
  3233. POM_PRIMARY pomPrimary,
  3234. POM_DOMAIN pDomain,
  3235. OM_WSGROUP_ID wsGroupID,
  3236. NET_UID userID
  3237. )
  3238. {
  3239. POM_WSGROUP pWSGroup;
  3240. POM_WORKSET pWorkset;
  3241. OM_WORKSET_ID worksetID;
  3242. DebugEntry(ReleaseAllNetLocks);
  3243. //
  3244. // Find the workset group:
  3245. //
  3246. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
  3247. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  3248. FIELD_OFFSET(OM_WSGROUP, wsGroupID), (DWORD)wsGroupID,
  3249. FIELD_SIZE(OM_WSGROUP, wsGroupID));
  3250. if (pWSGroup == NULL)
  3251. {
  3252. //
  3253. // This will happen for a workset group which the other node is
  3254. // registered with but we're not, so just trace and quit:
  3255. //
  3256. TRACE_OUT(("No record found for WSG ID %hu", wsGroupID));
  3257. DC_QUIT;
  3258. }
  3259. TRACE_OUT(( "Releasing all locks held by node 0x%08x in WSG %d",
  3260. userID, pWSGroup->wsg));
  3261. //
  3262. // For each workset in it, if the lock has been granted to the detached
  3263. // node, unlock it:
  3264. //
  3265. for (worksetID = 0;
  3266. worksetID < OM_MAX_WORKSETS_PER_WSGROUP;
  3267. worksetID++)
  3268. {
  3269. pWorkset = pWSGroup->apWorksets[worksetID];
  3270. if (pWorkset == NULL)
  3271. {
  3272. continue;
  3273. }
  3274. //
  3275. // If this workset is locked by someone other than us...
  3276. //
  3277. if (pWorkset->lockState == LOCK_GRANTED)
  3278. {
  3279. //
  3280. // ...and if it is locked by the departed node (or if everyone
  3281. // has been detached)...
  3282. //
  3283. if ((userID == pWorkset->lockedBy) ||
  3284. (userID == NET_ALL_REMOTES))
  3285. {
  3286. //
  3287. // ...unlock it.
  3288. //
  3289. TRACE_OUT((
  3290. "Unlocking workset %u in WSG %d for detached node 0x%08x",
  3291. worksetID, pWSGroup->wsg, userID));
  3292. WorksetUnlockLocal(pomPrimary->putTask, pWorkset);
  3293. }
  3294. }
  3295. }
  3296. DC_EXIT_POINT:
  3297. DebugExitVOID(ReleaseAllNetLocks);
  3298. }
  3299. //
  3300. // ProcessWSGRegister(...)
  3301. //
  3302. void ProcessWSGRegister
  3303. (
  3304. POM_PRIMARY pomPrimary,
  3305. POM_WSGROUP_REG_CB pRegistrationCB
  3306. )
  3307. {
  3308. POM_DOMAIN pDomain;
  3309. POM_WSGROUP pWSGroup;
  3310. POM_USAGE_REC pUsageRec = NULL;
  3311. POM_CLIENT_LIST pClientListEntry;
  3312. UINT mode;
  3313. UINT type;
  3314. UINT rc = 0;
  3315. DebugEntry(ProcessWSGRegister);
  3316. //
  3317. // Check if this registration has been aborted already:
  3318. //
  3319. if (!pRegistrationCB->valid)
  3320. {
  3321. WARNING_OUT(( "Reg CB for WSG %d no longer valid - aborting registration",
  3322. pRegistrationCB->wsg));
  3323. UT_FreeRefCount((void**)&pRegistrationCB, FALSE);
  3324. DC_QUIT;
  3325. }
  3326. //
  3327. // Determine whether we're doing a REGISTER or a MOVE (we use the
  3328. // string values for tracing):
  3329. //
  3330. mode = pRegistrationCB->mode;
  3331. type = pRegistrationCB->type;
  3332. TRACE_OUT(( "Processing %d request (pre-Stage1) for WSG %d",
  3333. pRegistrationCB->wsg));
  3334. //
  3335. // Find the Domain record (in the case of a MOVE, this will be the
  3336. // record for the Domain INTO WHICH the Client wants to move the WSG).
  3337. //
  3338. // Note that this process will cause us to attach to the Domain if
  3339. // we're not already attached.
  3340. //
  3341. rc = DomainRecordFindOrCreate(pomPrimary,
  3342. pRegistrationCB->callID,
  3343. &pDomain);
  3344. if (rc != 0)
  3345. {
  3346. DC_QUIT;
  3347. }
  3348. //
  3349. // Save the pointer to the Domain record because we'll need it later:
  3350. //
  3351. pRegistrationCB->pDomain = pDomain;
  3352. //
  3353. // Put the registration CB in the list hung off the Domain record:
  3354. //
  3355. COM_BasedListInsertAfter(&(pDomain->pendingRegs),
  3356. &(pRegistrationCB->chain));
  3357. //
  3358. // OK, now we need to look for the workset group.
  3359. //
  3360. // If this is a MOVE, we can find the workset group record immediately
  3361. // using the offset stored in the request CB.
  3362. //
  3363. // If this is a REGISTER, we need to look for the record in the list
  3364. // hung off the Domain record, and, if none is found, create one:
  3365. //
  3366. if (type == WSGROUP_REGISTER)
  3367. {
  3368. WSGRecordFind(pDomain, pRegistrationCB->wsg, pRegistrationCB->fpHandler,
  3369. &pWSGroup);
  3370. if (pWSGroup == NULL)
  3371. {
  3372. //
  3373. // The workset group was not found in the list hung off the
  3374. // Domain record, which means that there is no workset group
  3375. // with this name/FP combination present ON THIS MACHINE for
  3376. // this Domain.
  3377. //
  3378. rc = WSGRecordCreate(pomPrimary,
  3379. pDomain,
  3380. pRegistrationCB->wsg,
  3381. pRegistrationCB->fpHandler,
  3382. &pWSGroup);
  3383. if (rc != 0)
  3384. {
  3385. DC_QUIT;
  3386. }
  3387. }
  3388. //
  3389. // Now that we've got a pointer to the workset group, we put a
  3390. // Client pointer to it into the usage record.
  3391. //
  3392. // We use the <clientPRootData> field of the registration CB as the
  3393. // base and to it we add the offset of the workset group we've just
  3394. // found/created.
  3395. //
  3396. // First, however, to get access to the usage record we need to
  3397. // generate an ObMan pointer to it:
  3398. //
  3399. pUsageRec = pRegistrationCB->pUsageRec;
  3400. //
  3401. // ...and add it to the Client pointer to the root of OMGLOBAL,
  3402. // putting the result in the relevant field in the usage record:
  3403. //
  3404. pUsageRec->pWSGroup = pWSGroup;
  3405. pUsageRec->flags &= ~PWSGROUP_IS_PREGCB;
  3406. //
  3407. // Now add this Client to the workset group's client list (as a
  3408. // PRIMARY):
  3409. //
  3410. rc = AddClientToWSGList(pRegistrationCB->putTask,
  3411. pWSGroup,
  3412. pRegistrationCB->hWSGroup,
  3413. PRIMARY);
  3414. if (rc != 0)
  3415. {
  3416. DC_QUIT;
  3417. }
  3418. pUsageRec->flags |= ADDED_TO_WSGROUP_LIST;
  3419. }
  3420. else // type == WSGROUP_MOVE
  3421. {
  3422. //
  3423. // Get pointer to WSGroup from the offset stored in the
  3424. // Registration CB:
  3425. //
  3426. pWSGroup = pRegistrationCB->pWSGroup;
  3427. //
  3428. // If it has become invalid, then all local Clients must have
  3429. // deregistered from it in the time it took for this event to to be
  3430. // processed. This is unusual, but not wrong, so we alert:
  3431. //
  3432. if (!pWSGroup->valid)
  3433. {
  3434. WARNING_OUT(( "Aborting Move req for WSG %d - record is invalid",
  3435. pWSGroup->wsg));
  3436. DC_QUIT;
  3437. }
  3438. }
  3439. //
  3440. // So, whatever just happened above, we should now have a valid pointer
  3441. // to a valid workset group record which is the one the Client wanted
  3442. // to move/register with in the first place.
  3443. //
  3444. //
  3445. // This workset group might be marked TO_BE_DISCARDED, if the last
  3446. // local Client deregistered from it a while ago but it hasn't actually
  3447. // been discarded. We don't want it discardable any more:
  3448. //
  3449. if (pWSGroup->toBeDiscarded)
  3450. {
  3451. WARNING_OUT(("WSG %d marked TO_BE_DISCARDED - clearing flag for new registration",
  3452. pWSGroup->wsg));
  3453. pWSGroup->toBeDiscarded = FALSE;
  3454. }
  3455. //
  3456. // We'll need the ObMan-context pointer to the workset group later, so
  3457. // store it in the CB:
  3458. //
  3459. pRegistrationCB->pWSGroup = pWSGroup;
  3460. //
  3461. // OK, now we've set up the various records and put the necessary
  3462. // pointers in the registration CB, so start the workset group
  3463. // registration/move process in earnest. To do this, we post another
  3464. // event to the ObMan task which will result in WSGRegisterStage1 being
  3465. // called.
  3466. //
  3467. // The reason we don't call the function directly is that this event
  3468. // may have to be bounced, and if so, we want to restart the
  3469. // registration process at the beginning of WSGRegisterStage1 (rather
  3470. // than the beginning of this function).
  3471. //
  3472. // Before we post the event, bump up the use counts of the Domain
  3473. // record and workset group, since the CB holds references to them and
  3474. // they may be freed by something else before we process the event.
  3475. //
  3476. // In addition, bump up the use count of the registration CB because if
  3477. // the call goes down before the event is processed, the reg CB will
  3478. // have been freed.
  3479. //
  3480. UT_BumpUpRefCount(pDomain);
  3481. UT_BumpUpRefCount(pWSGroup);
  3482. UT_BumpUpRefCount(pRegistrationCB);
  3483. pRegistrationCB->flags |= BUMPED_CBS;
  3484. UT_PostEvent(pomPrimary->putTask,
  3485. pomPrimary->putTask,
  3486. 0, // no delay
  3487. OMINT_EVENT_WSGROUP_REGISTER_CONT,
  3488. 0, // no param1
  3489. (UINT_PTR) pRegistrationCB);
  3490. TRACE_OUT(( "Processed initial request for WSG %d TASK 0x%08x",
  3491. pRegistrationCB->wsg, pRegistrationCB->putTask));
  3492. DC_EXIT_POINT:
  3493. if (rc != 0)
  3494. {
  3495. //
  3496. // We hit an error, so let the Client know:
  3497. //
  3498. WSGRegisterResult(pomPrimary, pRegistrationCB, rc);
  3499. // lonchanc: bug #942 happened here
  3500. // this was ERROR_OUT
  3501. WARNING_OUT(( "Error %d processing WSG %d",
  3502. rc, pRegistrationCB->wsg));
  3503. //
  3504. // Calling WSGRegisterResult above will have dealt with our bad
  3505. // return code, so we don't need to return it to our caller. So,
  3506. // swallow:
  3507. //
  3508. rc = 0;
  3509. }
  3510. DebugExitVOID(ProcessWSGRegister);
  3511. }
  3512. //
  3513. //
  3514. //
  3515. // WSGRegisterAbort(...)
  3516. //
  3517. //
  3518. //
  3519. void WSGRegisterAbort(POM_PRIMARY pomPrimary,
  3520. POM_DOMAIN pDomain,
  3521. POM_WSGROUP_REG_CB pRegistrationCB)
  3522. {
  3523. DebugEntry(WSGRegisterAbort);
  3524. //
  3525. // This function can be called at any stage of the workset group
  3526. // registration process if for some reason the registration has to be
  3527. // aborted.
  3528. //
  3529. //
  3530. // Now remove this Client from the list of Clients registered with the
  3531. // workset group and if there are none left, discard the workset group:
  3532. //
  3533. RemoveClientFromWSGList(pomPrimary->putTask,
  3534. pRegistrationCB->putTask,
  3535. pRegistrationCB->pWSGroup);
  3536. //
  3537. // Now post failure to the Client and finish up the cleanup:
  3538. //
  3539. WSGRegisterResult(pomPrimary, pRegistrationCB, OM_RC_OUT_OF_RESOURCES);
  3540. DebugExitVOID(WSGRegisterAbort);
  3541. }
  3542. //
  3543. // WSGRecordCreate(...)
  3544. //
  3545. UINT WSGRecordCreate
  3546. (
  3547. POM_PRIMARY pomPrimary,
  3548. POM_DOMAIN pDomain,
  3549. OMWSG wsg,
  3550. OMFP fpHandler,
  3551. POM_WSGROUP * ppWSGroup
  3552. )
  3553. {
  3554. POM_WSGROUP pWSGroup;
  3555. BOOL opened = FALSE;
  3556. UINT rc = 0;
  3557. DebugEntry(WSGRecordCreate);
  3558. pWSGroup = (POM_WSGROUP)UT_MallocRefCount(sizeof(OM_WSGROUP), TRUE);
  3559. if (!pWSGroup)
  3560. {
  3561. rc = UT_RC_NO_MEM;
  3562. DC_QUIT;
  3563. }
  3564. SET_STAMP(pWSGroup, WSGROUP);
  3565. pWSGroup->pDomain = pDomain;
  3566. pWSGroup->valid = TRUE;
  3567. pWSGroup->wsg = wsg;
  3568. pWSGroup->fpHandler = fpHandler;
  3569. COM_BasedListInit(&(pWSGroup->clients));
  3570. pWSGroup->state = INITIAL;
  3571. //
  3572. // Finally insert the new WSG record into the domain's list. We insert
  3573. // at the end of the list so if we get forced out of a channel
  3574. // (a LEAVE_IND event) and the channel happens to be reused by MCS
  3575. // for another WSG before we have a chance to process the LEAVE_IND,
  3576. // the record for the old WSG will be found first.
  3577. //
  3578. COM_BasedListInsertBefore(&(pDomain->wsGroups),
  3579. &(pWSGroup->chain));
  3580. //
  3581. // *** NEW FOR MULTI-PARTY ***
  3582. //
  3583. // The checkpointing process used when helping a late joiner catch up
  3584. // uses a dummy workset (#255) in each workset group. Create this now:
  3585. //
  3586. rc = WorksetCreate(pomPrimary->putTask,
  3587. pWSGroup,
  3588. OM_CHECKPOINT_WORKSET,
  3589. FALSE,
  3590. NET_TOP_PRIORITY);
  3591. if (rc != 0)
  3592. {
  3593. DC_QUIT;
  3594. }
  3595. //
  3596. // Set up caller's pointer:
  3597. //
  3598. *ppWSGroup = pWSGroup;
  3599. TRACE_OUT(( "Created record for WSG %d FP %d in Domain %u",
  3600. wsg, fpHandler, pDomain->callID));
  3601. DC_EXIT_POINT:
  3602. //
  3603. // Cleanup:
  3604. //
  3605. if (rc != 0)
  3606. {
  3607. ERROR_OUT(( "Error %d creating record for WSG %d FP %d in Domain %u",
  3608. rc, wsg, fpHandler, pDomain->callID));
  3609. if (pWSGroup != NULL)
  3610. {
  3611. COM_BasedListRemove(&(pWSGroup->chain));
  3612. UT_FreeRefCount((void**)&pWSGroup, FALSE);
  3613. }
  3614. }
  3615. DebugExitDWORD(WSGRecordCreate, rc);
  3616. return(rc);
  3617. }
  3618. //
  3619. //
  3620. //
  3621. // WSGRegisterStage1(...)
  3622. //
  3623. //
  3624. //
  3625. void WSGRegisterStage1(POM_PRIMARY pomPrimary,
  3626. POM_WSGROUP_REG_CB pRegistrationCB)
  3627. {
  3628. POM_DOMAIN pDomain;
  3629. POM_WSGROUP pWSGroup;
  3630. UINT type;
  3631. DebugEntry(WSGRegisterStage1);
  3632. //
  3633. // If the registration CB has been marked invalid, then just quit
  3634. // (don't have to do any abort processing since that will have been
  3635. // done by whatever marked the CB invalid):
  3636. //
  3637. if (!pRegistrationCB->valid )
  3638. {
  3639. WARNING_OUT(( "Reg CB for WSG %d marked invalid, quitting",
  3640. pRegistrationCB->wsg));
  3641. DC_QUIT;
  3642. }
  3643. //
  3644. // Determine whether we're doing a REGISTER or a MOVE (we use the
  3645. // string values for tracing):
  3646. //
  3647. type = pRegistrationCB->type;
  3648. TRACE_OUT(( "Processing %d request (Stage1) for WSG %d",
  3649. type, pRegistrationCB->wsg));
  3650. //
  3651. // Set up pointers
  3652. //
  3653. pDomain = pRegistrationCB->pDomain;
  3654. pWSGroup = pRegistrationCB->pWSGroup;
  3655. //
  3656. // Check they're still valid:
  3657. //
  3658. if (!pDomain->valid)
  3659. {
  3660. WARNING_OUT(( "Record for Domain %u not valid, aborting registration",
  3661. pDomain->callID));
  3662. WSGRegisterAbort(pomPrimary, pDomain, pRegistrationCB);
  3663. DC_QUIT;
  3664. }
  3665. ValidateWSGroup(pWSGroup);
  3666. if (!pWSGroup->valid)
  3667. {
  3668. WARNING_OUT(( "Record for WSG %d in Domain %u not valid, aborting",
  3669. pWSGroup->wsg, pDomain->callID));
  3670. WSGRegisterAbort(pomPrimary, pDomain, pRegistrationCB);
  3671. DC_QUIT;
  3672. }
  3673. //
  3674. // Now examine the Domain state. If it is
  3675. //
  3676. // - READY, then this is a Domain that we are fully attached to
  3677. //
  3678. // - anything else, then we are some way through the process of
  3679. // attaching to the Domain (in some other part of the code).
  3680. //
  3681. // We react to each situation as follows:
  3682. //
  3683. // - continue with the workset group registration/move
  3684. //
  3685. // - repost the event with a delay to retry the registration/move in a
  3686. // short while.
  3687. //
  3688. if (pDomain->state != DOMAIN_READY)
  3689. {
  3690. //
  3691. // Since we are in the process of attaching to the Domain, we can
  3692. // do nothing else at the moment. Therefore, we bounce this event
  3693. // back to our event queue, with a delay.
  3694. //
  3695. TRACE_OUT(( "State for Domain %u is %hu",
  3696. pDomain->callID, pDomain->state));
  3697. WSGRegisterRetry(pomPrimary, pRegistrationCB);
  3698. DC_QUIT;
  3699. }
  3700. //
  3701. // OK, so the Domain is in the READY state. What we do next depends on
  3702. // two things:
  3703. //
  3704. // - whether this is a WSGMove or a WSGRegister
  3705. //
  3706. // - what state the workset group is in.
  3707. //
  3708. //
  3709. // If this is a REGISTER, then if the workset group state is
  3710. //
  3711. // - READY, then there's another local Client registered with the
  3712. // workset, and everything is all set up so we just call
  3713. // WSGRegisterSuccess straight away.
  3714. //
  3715. // - INITIAL, then this is the first time we've been here for this
  3716. // workset group, so we start the process of locking
  3717. // ObManControl etc. (see below)
  3718. //
  3719. // - anything else, then we're somewhere in between the two:
  3720. // another reqeust to register with the workset group is in
  3721. // progress so we repost the event with a delay; by the time it
  3722. // comes back to us the workset group should be in the READY
  3723. // state.
  3724. //
  3725. //
  3726. // If this is a MOVE, then if the workset group state is
  3727. //
  3728. // - READY, then the workset group is fully set up in whatever
  3729. // Domain it's in at the moment so we allow the move to proceed
  3730. //
  3731. // - anything else, then we're somewhere in the middle of the
  3732. // registration process for the workset group. We do not want
  3733. // to interfere with the registration by trying to do a move
  3734. // simultaneously (for the simple reason that it introduces far
  3735. // more complexity into the state machine) so we bounce the
  3736. // event (i.e. we only process a MOVE when the workset group
  3737. // is fully set up).
  3738. //
  3739. TRACE_OUT(( "State for WSG %d is %u", pWSGroup->wsg, pWSGroup->state));
  3740. switch (pWSGroup->state)
  3741. {
  3742. case INITIAL:
  3743. {
  3744. //
  3745. // Workset group record has just been created, but nothing else
  3746. // has been done.
  3747. //
  3748. //
  3749. // OK, proceed with processing the Client's move/registration
  3750. // attempt. Whichever is involved, we start by locking the
  3751. // ObManControl workset group; when that completes, we continue
  3752. // in WSGRegisterStage2.
  3753. //
  3754. // Note: this function returns a lock correlator which it
  3755. // will be the same as the correlator returned in
  3756. // the WORKSET_LOCK_CON event. We will use this
  3757. // correlator to look up the registration CB, so
  3758. // stuff the return value from the function in it
  3759. //
  3760. // Note: in the case of a move, we will only ever get
  3761. // here because we had to retry the move from the
  3762. // top after failing to lock ObManControl
  3763. //
  3764. LockObManControl(pomPrimary,
  3765. pDomain,
  3766. &(pRegistrationCB->lockCorrelator));
  3767. pRegistrationCB->flags |= LOCKED_OMC;
  3768. pWSGroup->state = LOCKING_OMC;
  3769. }
  3770. break;
  3771. case LOCKING_OMC:
  3772. case PENDING_JOIN:
  3773. case PENDING_SEND_MIDWAY:
  3774. {
  3775. //
  3776. // We're already in the process of either registering another
  3777. // Client with this workset group, or moving the workset group
  3778. // into a new Domain, so we delay this Client's
  3779. // registration/move attempt for the moment:
  3780. //
  3781. // Don't expect to get here - remove if error not hit
  3782. //
  3783. // CMF 21/11/95
  3784. ERROR_OUT(( "Should not be here"));
  3785. WSGRegisterRetry(pomPrimary, pRegistrationCB);
  3786. DC_QUIT;
  3787. }
  3788. break;
  3789. case PENDING_SEND_COMPLETE:
  3790. {
  3791. //
  3792. // WSG Already exists locally, and is fully set up.
  3793. //
  3794. if (type == WSGROUP_REGISTER)
  3795. {
  3796. //
  3797. // If we're doing a REGISTER, this means that some other
  3798. // Client must be registered with it. If we've passed the
  3799. // Clients-per-wsgroup check in ProcessWSGRegister, we must
  3800. // be OK, so we post a result straight away (0 indicates
  3801. // success):
  3802. //
  3803. WSGRegisterResult(pomPrimary, pRegistrationCB, 0);
  3804. }
  3805. else // type == WSGROUP_MOVE
  3806. {
  3807. //
  3808. // We prohibit moves until we're fully caught up:
  3809. //
  3810. // Don't expect to get here - remove if error not hit
  3811. //
  3812. // CMF 21/11/95
  3813. ERROR_OUT(( "Should not be here"));
  3814. WSGRegisterRetry(pomPrimary, pRegistrationCB);
  3815. DC_QUIT;
  3816. }
  3817. }
  3818. break;
  3819. case WSGROUP_READY:
  3820. {
  3821. if (type == WSGROUP_REGISTER)
  3822. {
  3823. //
  3824. // As above:
  3825. //
  3826. WSGRegisterResult(pomPrimary, pRegistrationCB, 0);
  3827. }
  3828. else // type == WSGROUP_MOVE
  3829. {
  3830. //
  3831. // If we're doing a MOVE, then we start by locking
  3832. // ObManControl, just as above:
  3833. //
  3834. LockObManControl(pomPrimary,
  3835. pDomain,
  3836. &(pRegistrationCB->lockCorrelator));
  3837. pRegistrationCB->flags |= LOCKED_OMC;
  3838. pWSGroup->state = LOCKING_OMC;
  3839. }
  3840. }
  3841. break;
  3842. default:
  3843. {
  3844. ERROR_OUT(("Invalid state %u for WSG %d",
  3845. pWSGroup->state, pWSGroup->wsg));
  3846. }
  3847. }
  3848. TRACE_OUT(( "Completed Stage 1 of %d for WSG %d",
  3849. type, pRegistrationCB->wsg));
  3850. DC_EXIT_POINT:
  3851. //
  3852. // We bumped up the use count of the registration CB when we posted the
  3853. // REGISTER_CONT event which got us here, so now free the CB to
  3854. // decrement the use count. Unless it's already been freed (e.g.
  3855. // because the call went down and the registration was cancelled) it
  3856. // will still be around so future stages of the registration process
  3857. // will be able to use it.
  3858. //
  3859. // NB: Although future stages of the registration process are
  3860. // asynchronous, they will abort if they cannot find the reg CB in
  3861. // the Domain list, so we don't have to worry about bumping it for
  3862. // them (since if it is finally freed, then it must have been
  3863. // removed from the Domain list).
  3864. //
  3865. UT_FreeRefCount((void**)&pRegistrationCB, FALSE);
  3866. DebugExitVOID(WSGRegisterStage1);
  3867. }
  3868. //
  3869. // LockObManControl(...)
  3870. //
  3871. void LockObManControl(POM_PRIMARY pomPrimary,
  3872. POM_DOMAIN pDomain,
  3873. OM_CORRELATOR * pLockCorrelator)
  3874. {
  3875. POM_WSGROUP pOMCWSGroup;
  3876. POM_WORKSET pOMCWorkset;
  3877. UINT rc = 0;
  3878. DebugEntry(LockObManControl);
  3879. //
  3880. // Get pointers to the ObManControl workset group and workset #0 in it:
  3881. //
  3882. pOMCWSGroup = GetOMCWsgroup(pDomain);
  3883. if( pOMCWSGroup == NULL)
  3884. {
  3885. TRACE_OUT(("pOMCWSGroup not found"));
  3886. DC_QUIT;
  3887. }
  3888. pOMCWorkset = pOMCWSGroup->apWorksets[0];
  3889. //
  3890. // Start the lock procedure to lock the workset:
  3891. //
  3892. WorksetLockReq(pomPrimary->putTask,
  3893. pomPrimary,
  3894. pOMCWSGroup,
  3895. pOMCWorkset,
  3896. 0,
  3897. pLockCorrelator);
  3898. TRACE_OUT(( "Requested lock for ObManControl in Domain %u",
  3899. pDomain->callID));
  3900. DC_EXIT_POINT:
  3901. DebugExitVOID(LockObManControl);
  3902. }
  3903. //
  3904. //
  3905. //
  3906. // MaybeUnlockObManControl(...)
  3907. //
  3908. //
  3909. //
  3910. void MaybeUnlockObManControl(POM_PRIMARY pomPrimary,
  3911. POM_WSGROUP_REG_CB pRegistrationCB)
  3912. {
  3913. POM_WSGROUP pOMCWSGroup;
  3914. POM_WORKSET pOMCWorkset;
  3915. DebugEntry(MaybeUnlockObManControl);
  3916. //
  3917. // If we've got ObManControl locked for THIS registration, unlock it
  3918. //
  3919. if (pRegistrationCB->flags & LOCKED_OMC)
  3920. {
  3921. pOMCWSGroup = GetOMCWsgroup(pRegistrationCB->pDomain);
  3922. if( pOMCWSGroup == NULL)
  3923. {
  3924. TRACE_OUT(("pOMCWSGroup not found"));
  3925. DC_QUIT;
  3926. }
  3927. pOMCWorkset = pOMCWSGroup->apWorksets[0];
  3928. TRACE_OUT(( "Unlocking OMC for %d in WSG %d",
  3929. pRegistrationCB->type,
  3930. pRegistrationCB->wsg));
  3931. WorksetUnlock(pomPrimary->putTask, pOMCWSGroup, pOMCWorkset);
  3932. pRegistrationCB->flags &= ~LOCKED_OMC;
  3933. }
  3934. DC_EXIT_POINT:
  3935. DebugExitVOID(MaybeUnlockObManControl);
  3936. }
  3937. //
  3938. // ProcessOMCLockConfirm(...)
  3939. //
  3940. void ProcessOMCLockConfirm
  3941. (
  3942. POM_PRIMARY pomPrimary,
  3943. OM_CORRELATOR correlator,
  3944. UINT result
  3945. )
  3946. {
  3947. POM_WSGROUP_REG_CB pRegistrationCB = NULL;
  3948. POM_DOMAIN pDomain;
  3949. DebugEntry(ProcessOMCLockConfirm);
  3950. TRACE_OUT(( "Got LOCK_CON with result = 0x%08x and correlator = %hu",
  3951. result, correlator));
  3952. //
  3953. // Next step is to find the registration attempt this lock relates to.
  3954. // It could be in any domain, so search through all of them:
  3955. //
  3956. pDomain = (POM_DOMAIN)COM_BasedListFirst(&(pomPrimary->domains), FIELD_OFFSET(OM_DOMAIN, chain));
  3957. while (pDomain != NULL)
  3958. {
  3959. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->pendingRegs),
  3960. (void**)&pRegistrationCB, FIELD_OFFSET(OM_WSGROUP_REG_CB, chain),
  3961. FIELD_OFFSET(OM_WSGROUP_REG_CB, lockCorrelator),
  3962. (DWORD)correlator, FIELD_SIZE(OM_WSGROUP_REG_CB, lockCorrelator));
  3963. if (pRegistrationCB != NULL)
  3964. {
  3965. TRACE_OUT(( "Found correlated reg CB in domain %u, for WSG %d",
  3966. pDomain->callID, pRegistrationCB->wsg));
  3967. break;
  3968. }
  3969. //
  3970. // Didn't find anything in this domain - go on to the next:
  3971. //
  3972. pDomain = (POM_DOMAIN)COM_BasedListNext(&(pomPrimary->domains), pDomain,
  3973. FIELD_OFFSET(OM_DOMAIN, chain));
  3974. }
  3975. //
  3976. // If we didn't find it in any of the Domains, it's probably because
  3977. // we've detached from the Domain and thrown away its pending
  3978. // registrations CBs. So trace and quit:
  3979. //
  3980. if (pRegistrationCB == NULL)
  3981. {
  3982. TRACE_OUT(( "Got LOCK_CON event (correlator: 0x%08x) but no reg CB found",
  3983. correlator));
  3984. DC_QUIT;
  3985. }
  3986. //
  3987. // Now check whether the lock succeeded:
  3988. //
  3989. if (result != 0)
  3990. {
  3991. //
  3992. // Failed to get the lock on ObManControl for some reason. This
  3993. // could be because of contention, or else a more general problem.
  3994. // In any event, we call WSGRegisterRetry which will retry (or call
  3995. // WSGRegisterResult if we've run out of retries).
  3996. //
  3997. // Note: since WSGRegisterRetry handles move requests as well, we
  3998. // don't need to check here which type of request it is:
  3999. //
  4000. pRegistrationCB->flags &= ~LOCKED_OMC;
  4001. WSGRegisterRetry(pomPrimary, pRegistrationCB);
  4002. }
  4003. else
  4004. {
  4005. //
  4006. // We've got the lock on ObManControl workset #0, so now we proceed
  4007. // to the next step of the registration process.
  4008. //
  4009. // As above, this function handles both MOVE and REGISTER attempts.
  4010. //
  4011. WSGRegisterStage2(pomPrimary, pRegistrationCB);
  4012. }
  4013. DC_EXIT_POINT:
  4014. DebugExitVOID(ProcessOMCLockConfirm);
  4015. }
//
// ProcessCheckpoint(...)
//

//
// Handle the confirmation of a checkpoint "lock" taken while helping a
// late joiner catch up with a workset group.
//
// <correlator> identifies the helper CB (created earlier by
// NewHelperCB) for the catch-up operation; the CB may belong to any
// domain, so every domain's helper CB list is searched.  If the
// checkpoint succeeded and the workset group record is still valid, the
// workset group contents are sent to the late joiner; otherwise a
// SEND_DENY is issued so the late joiner can try elsewhere.  Whenever a
// helper CB is found it is freed on exit, releasing its reference on
// the workset group.
//
void ProcessCheckpoint
(
    POM_PRIMARY pomPrimary,
    OM_CORRELATOR correlator,
    UINT result
)
{
    POM_DOMAIN pDomain;
    POM_WSGROUP pWSGroup;
    POM_HELPER_CB pHelperCB = NULL;

    DebugEntry(ProcessCheckpoint);

    //
    // Next step is to find the helper CB this lock relates to.  It could
    // be in any domain, so search through all of them:
    //
    // NOTE(review): this walks the domain list back-to-front
    // (Last/Prev), unlike ProcessOMCLockConfirm which walks
    // front-to-back - presumably either order works because the lock
    // correlator is unique; confirm before changing.
    //
    pDomain = (POM_DOMAIN)COM_BasedListLast(&(pomPrimary->domains), FIELD_OFFSET(OM_DOMAIN, chain));
    while (pDomain != NULL)
    {
        COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->helperCBs),
            (void**)&pHelperCB, FIELD_OFFSET(OM_HELPER_CB, chain),
            FIELD_OFFSET(OM_HELPER_CB, lockCorrelator),
            (DWORD)correlator, FIELD_SIZE(OM_HELPER_CB, lockCorrelator));
        if (pHelperCB != NULL)
        {
            TRACE_OUT(( "Found correlated helper CB, for WSG %d",
                pHelperCB->pWSGroup->wsg));
            // pDomain is left pointing at the owning domain; it is used
            // below in the IssueSendDeny/SendWSGToLateJoiner calls.
            break;
        }

        //
        // Didn't find anything in this domain - go on to the next:
        //
        pDomain = (POM_DOMAIN)COM_BasedListPrev(&(pomPrimary->domains), pDomain,
            FIELD_OFFSET(OM_DOMAIN, chain));
    }

    //
    // If we didn't find it in any of the Domains, it's probably because
    // we've detached from the Domain and thrown away its pending helper
    // CBs.  So trace and quit:
    //
    if (pHelperCB == NULL)
    {
        WARNING_OUT(( "No helper CB found with lock correlator 0x%08x!", correlator));
        DC_QUIT;
    }

    //
    // Set up local pointers:
    //
    pWSGroup = pHelperCB->pWSGroup;
    ValidateWSGroup(pWSGroup);

    //
    // If the "lock" failed, we send a SEND_DENY message to the late
    // joiner.
    //
    if (result != 0)
    {
        WARNING_OUT(( "Failed to checkpoint WSG %d for %u - giving up",
            pWSGroup->wsg,
            pHelperCB->lateJoiner));
        IssueSendDeny(pomPrimary,
                      pDomain,
                      pWSGroup->wsGroupID,
                      pHelperCB->lateJoiner,
                      pHelperCB->remoteCorrelator);
        DC_QUIT;
    }

    //
    // The lock succeeded, so check to see if the workset group pointer we
    // stored is still valid (it may have been discarded while the
    // checkpoint was in flight):
    //
    if (!pWSGroup->valid)
    {
        WARNING_OUT(("Discarded WSG %d while checkpointing it for %hu",
            pWSGroup->wsg,
            pHelperCB->lateJoiner));
        IssueSendDeny(pomPrimary,
                      pDomain,
                      pWSGroup->wsGroupID,
                      pHelperCB->lateJoiner,
                      pHelperCB->remoteCorrelator);
        DC_QUIT;
    }

    //
    // All is well - go ahead and send the workset group to the late
    // joiner:
    //
    TRACE_OUT(("Checkpoint succeeded for WSG %d - sending to late joiner %hu",
        pWSGroup->wsg, pHelperCB->lateJoiner));
    SendWSGToLateJoiner(pomPrimary,
                        pDomain,
                        pWSGroup,
                        pHelperCB->lateJoiner,
                        pHelperCB->remoteCorrelator);

DC_EXIT_POINT:
    //
    // If we found a helper CB, then we just discard it now:
    //
    if (pHelperCB != NULL)
    {
        FreeHelperCB(&pHelperCB);
    }

    DebugExitVOID(ProcessCheckpoint);
}
  4121. //
  4122. // NewHelperCB(...)
  4123. //
  4124. BOOL NewHelperCB
  4125. (
  4126. POM_DOMAIN pDomain,
  4127. POM_WSGROUP pWSGroup,
  4128. NET_UID lateJoiner,
  4129. OM_CORRELATOR remoteCorrelator,
  4130. POM_HELPER_CB * ppHelperCB
  4131. )
  4132. {
  4133. POM_HELPER_CB pHelperCB;
  4134. BOOL rc = FALSE;
  4135. DebugEntry(NewHelperCB);
  4136. //
  4137. // This function
  4138. //
  4139. // - allocates a new helper CB
  4140. //
  4141. // - fills in the fields
  4142. //
  4143. // - stores it in the domain's list of helper CBs
  4144. //
  4145. // - bumps the use count of the workset group referenced.
  4146. //
  4147. pHelperCB = (POM_HELPER_CB)UT_MallocRefCount(sizeof(OM_HELPER_CB), TRUE);
  4148. if (!pHelperCB)
  4149. {
  4150. ERROR_OUT(("Out of memory in NewHelperCB"));
  4151. DC_QUIT;
  4152. }
  4153. UT_BumpUpRefCount(pWSGroup);
  4154. SET_STAMP(pHelperCB, HELPERCB);
  4155. pHelperCB->pWSGroup = pWSGroup;
  4156. pHelperCB->lateJoiner = lateJoiner;
  4157. pHelperCB->remoteCorrelator = remoteCorrelator;
  4158. //
  4159. // The lock correlator field is filled in later.
  4160. //
  4161. COM_BasedListInsertBefore(&(pDomain->helperCBs), &(pHelperCB->chain));
  4162. rc = TRUE;
  4163. DC_EXIT_POINT:
  4164. *ppHelperCB = pHelperCB;
  4165. DebugExitBOOL(NewHelperCB, rc);
  4166. return(rc);
  4167. }
  4168. //
  4169. // FreeHelperCB(...)
  4170. //
  4171. void FreeHelperCB
  4172. (
  4173. POM_HELPER_CB * ppHelperCB
  4174. )
  4175. {
  4176. DebugEntry(FreeHelperCB);
  4177. //
  4178. // This function
  4179. //
  4180. // - frees the workset group referenced in the helper CB
  4181. //
  4182. // - removes the helper CB from the domain's list
  4183. //
  4184. // - frees the helper CB.
  4185. //
  4186. UT_FreeRefCount((void**)&((*ppHelperCB)->pWSGroup), FALSE);
  4187. COM_BasedListRemove(&((*ppHelperCB)->chain));
  4188. UT_FreeRefCount((void**)ppHelperCB, FALSE);
  4189. DebugExitVOID(FreeHelperCB);
  4190. }
  4191. //
  4192. // WSGRegisterStage2(...)
  4193. //
  4194. void WSGRegisterStage2
  4195. (
  4196. POM_PRIMARY pomPrimary,
  4197. POM_WSGROUP_REG_CB pRegistrationCB
  4198. )
  4199. {
  4200. POM_DOMAIN pDomain;
  4201. POM_WSGROUP pWSGroup;
  4202. POM_OBJECT pObjInfo;
  4203. POM_WSGROUP_INFO pInfoObject;
  4204. NET_CHANNEL_ID channelID;
  4205. UINT type;
  4206. UINT rc = 0;
  4207. DebugEntry(WSGRegisterStage2);
  4208. //
  4209. // Determine whether we're doing a REGISTER or a MOVE (we use the string
  4210. // value for tracing):
  4211. //
  4212. type = pRegistrationCB->type;
  4213. TRACE_OUT(( "Processing %d request (Stage2) for WSG %d",
  4214. type, pRegistrationCB->wsg));
  4215. //
  4216. // We'll need these below:
  4217. //
  4218. pDomain = pRegistrationCB->pDomain;
  4219. pWSGroup = pRegistrationCB->pWSGroup;
  4220. //
  4221. // Check they're still valid:
  4222. //
  4223. if (!pDomain->valid)
  4224. {
  4225. WARNING_OUT(( "Record for Domain %u not valid, aborting registration",
  4226. pDomain->callID));
  4227. WSGRegisterAbort(pomPrimary, pDomain, pRegistrationCB);
  4228. DC_QUIT;
  4229. }
  4230. if (!pWSGroup->valid)
  4231. {
  4232. WARNING_OUT(( "Record for WSG %d in Domain %u not valid, "
  4233. "aborting registration",
  4234. pWSGroup->wsg, pDomain->callID));
  4235. WSGRegisterAbort(pomPrimary, pDomain, pRegistrationCB);
  4236. DC_QUIT;
  4237. }
  4238. //
  4239. // Sanity check:
  4240. //
  4241. ASSERT(pWSGroup->state == LOCKING_OMC);
  4242. //
  4243. // Now find the information object in workset #0 of ObManControl which
  4244. // matches the WSG name/FP that the Client requested to register with:
  4245. //
  4246. FindInfoObject(pDomain,
  4247. 0, // don't know the ID yet
  4248. pWSGroup->wsg,
  4249. pWSGroup->fpHandler,
  4250. &pObjInfo);
  4251. if (pObjInfo == NULL)
  4252. {
  4253. //
  4254. // The workset group doesn't already exist in the Domain.
  4255. //
  4256. // If this is a REGISTER, this means we must create it. If this is a
  4257. // MOVE, then we can move it into the Domain, which is essentially
  4258. // creating it in the Domain with pre-existing contents.
  4259. //
  4260. // So, for both types of operation, our behaviour is the same at this
  4261. // point; we've already created the workset group record so what we
  4262. // do now is
  4263. //
  4264. // 1. get the Network layer to allocate a new channel ID,
  4265. //
  4266. // 2. allocate a new workset group ID and
  4267. //
  4268. // 3. announce the new workset group to the rest of the Domain.
  4269. //
  4270. // However, the network layer will not assign us a new channel ID
  4271. // synchronously, so steps 2 and 3 must be delayed until we receive
  4272. // the Join event.
  4273. //
  4274. // So, now we set the channel to be joined to 0 (this tells the
  4275. // Network layer to join us to a currently unused channel).
  4276. //
  4277. channelID = 0;
  4278. }
  4279. else
  4280. {
  4281. //
  4282. // Otherwise, the workset group already exists.
  4283. //
  4284. ValidateObject(pObjInfo);
  4285. if (type == WSGROUP_REGISTER)
  4286. {
  4287. //
  4288. // We're registering the Client with an existing workset group, so
  4289. // set the workset group ID to the existing value, and ditto for
  4290. // the channel ID:
  4291. //
  4292. pInfoObject = (POM_WSGROUP_INFO) pObjInfo->pData;
  4293. if (!pInfoObject)
  4294. {
  4295. ERROR_OUT(("WSGRegisterStage2 object 0x%08x has no data", pObjInfo));
  4296. rc = OM_RC_OBJECT_DELETED;
  4297. DC_QUIT;
  4298. }
  4299. ValidateObjectDataWSGINFO(pInfoObject);
  4300. channelID = pInfoObject->channelID;
  4301. }
  4302. else // type == WSGROUP_MOVE
  4303. {
  4304. //
  4305. // We can't move a workset group into a Domain where there already
  4306. // exists a workest group with the same name/FP, so we abort our
  4307. // move attempt at this point (we set the workset group sate back
  4308. // to READY, since that is its state in the Domain it was
  4309. // originally in):
  4310. //
  4311. WARNING_OUT(( "Cannot move WSG %d into Domain %u - WSG/FP clash",
  4312. pWSGroup->wsg, pDomain->callID));
  4313. pWSGroup->state = WSGROUP_READY;
  4314. rc = OM_RC_CANNOT_MOVE_WSGROUP;
  4315. DC_QUIT;
  4316. }
  4317. }
  4318. //
  4319. // Now join the relevant channel (possibly a new one, if <channel> was
  4320. // set to 0 above) and stuff the correlator in the <channelCorrelator>
  4321. // field of the registration CB (when the Join event arrives,
  4322. // ProcessNetJoinChannel will search for the registration CB by channel
  4323. // correlator)
  4324. //
  4325. // Note: if this is our "local" Domain, we skip this step.
  4326. //
  4327. if (pDomain->callID != NET_INVALID_DOMAIN_ID)
  4328. {
  4329. TRACE_OUT(( "Joining channel %hu, Domain %u",
  4330. channelID, pDomain->callID));
  4331. rc = MG_ChannelJoin(pomPrimary->pmgClient,
  4332. &(pRegistrationCB->channelCorrelator),
  4333. channelID);
  4334. if (rc != 0)
  4335. {
  4336. DC_QUIT;
  4337. }
  4338. pWSGroup->state = PENDING_JOIN;
  4339. //
  4340. // OK, that's it for the moment. The saga of workset group
  4341. // move/registration will be picked up by the ProcessNetJoinChannel
  4342. // function, which will invoke the WSGRegisterStage3 function.
  4343. //
  4344. }
  4345. else
  4346. {
  4347. //
  4348. // Since we didn't do a join just now, we won't be getting a JOIN
  4349. // event from the Network layer, so we better call WSGRegisterStage3
  4350. // directly:
  4351. //
  4352. pWSGroup->state = PENDING_JOIN;
  4353. // channel ID not relevant here so use zero
  4354. WSGRegisterStage3(pomPrimary, pDomain, pRegistrationCB, 0);
  4355. }
  4356. TRACE_OUT(( "Completed Register/Move Stage 2 for WSG %d", pWSGroup->wsg));
  4357. DC_EXIT_POINT:
  4358. if (rc != 0)
  4359. {
  4360. //
  4361. // Cleanup:
  4362. //
  4363. ERROR_OUT(( "Error %d at Stage 2 of %d for WSG %d",
  4364. rc, pWSGroup->wsg));
  4365. WSGRegisterResult(pomPrimary, pRegistrationCB, rc);
  4366. }
  4367. DebugExitVOID(WSGRegisterStage2);
  4368. }
  4369. //
  4370. // WSGRegisterStage3(...)
  4371. //
  4372. void WSGRegisterStage3
  4373. (
  4374. POM_PRIMARY pomPrimary,
  4375. POM_DOMAIN pDomain,
  4376. POM_WSGROUP_REG_CB pRegistrationCB,
  4377. NET_CHANNEL_ID channelID
  4378. )
  4379. {
  4380. POM_WSGROUP pWSGroup;
  4381. POM_WSGROUP pOMCWSGroup;
  4382. POM_WORKSET pOMCWorkset;
  4383. POM_OBJECT pObjInfo;
  4384. POM_OBJECT pObjReg;
  4385. POM_WSGROUP_INFO pInfoObject = NULL;
  4386. UINT type;
  4387. BOOL catchUpReqd = FALSE;
  4388. BOOL success = FALSE; // SFR 2744
  4389. UINT rc = 0;
  4390. DebugEntry(WSGRegisterStage3);
  4391. //
  4392. // We get here when a Join event has been received containing a channel
  4393. // correlator for a channel which is a regular workset group channel.
  4394. //
  4395. //
  4396. // Determine whether we're doing a REGISTER or a MOVE (we use the
  4397. // string values for tracing):
  4398. //
  4399. type = pRegistrationCB->type;
  4400. TRACE_OUT(( "Processing %d request (Stage3) for WSG %d",
  4401. type, pRegistrationCB->wsg));
  4402. //
  4403. // Get a pointer to the workset group:
  4404. //
  4405. pWSGroup = pRegistrationCB->pWSGroup;
  4406. //
  4407. // Check it's still valid:
  4408. //
  4409. if (!pWSGroup->valid)
  4410. {
  4411. WARNING_OUT(("WSG %d' discarded from domain %u - aborting registration",
  4412. pWSGroup->wsg, pDomain->callID));
  4413. WSGRegisterAbort(pomPrimary, pDomain, pRegistrationCB);
  4414. DC_QUIT;
  4415. }
  4416. //
  4417. // Check that this workset group is pending join:
  4418. //
  4419. if (pWSGroup->state != PENDING_JOIN)
  4420. {
  4421. WARNING_OUT(( "Received unexpected Join indication for WSG (state: %hu)",
  4422. pWSGroup->state));
  4423. rc = OM_RC_NETWORK_ERROR;
  4424. DC_QUIT;
  4425. }
  4426. //
  4427. // Now set the channel ID value in the workset group record:
  4428. //
  4429. pWSGroup->channelID = channelID;
  4430. TRACE_OUT(( "Channel ID for WSG %d in Domain %u is %hu",
  4431. pWSGroup->wsg, pDomain->callID, channelID));
  4432. //
  4433. // We'll need this below:
  4434. //
  4435. pOMCWSGroup = GetOMCWsgroup(pDomain);
  4436. //
  4437. // What we do next depends on whether we just created the workset
  4438. // group:
  4439. //
  4440. // - if it already existed, we need to catch up by asking another node
  4441. // for a copy
  4442. //
  4443. // - if we've just created it, we need to allocate a new workset group
  4444. // ID and add an INFO object to workset #0 in ObManControl.
  4445. //
  4446. // So, we search workset #0 for an INFO object to see if the workset
  4447. // group exists.
  4448. //
  4449. // Note: we did a similar search in Stage2 to find out the channel to
  4450. // join for the workset group. The reason we search again here
  4451. // is that the workset group could have been discarded by the
  4452. // other node in the time taken for the join to complete.
  4453. //
  4454. FindInfoObject(pDomain,
  4455. 0, // don't know the ID yet
  4456. pWSGroup->wsg,
  4457. pWSGroup->fpHandler,
  4458. &pObjInfo);
  4459. if (!pObjInfo || !pObjInfo->pData)
  4460. {
  4461. //
  4462. // Doesn't already exist, so no catch-up required:
  4463. //
  4464. catchUpReqd = FALSE;
  4465. }
  4466. else
  4467. {
  4468. //
  4469. // OK, so we found an INFO object, but there might not be any
  4470. // registration record objects in the relevant registration
  4471. // workset, so check:
  4472. //
  4473. ValidateObject(pObjInfo);
  4474. pInfoObject = (POM_WSGROUP_INFO) pObjInfo->pData;
  4475. ValidateObjectDataWSGINFO(pInfoObject);
  4476. pOMCWorkset = pOMCWSGroup->apWorksets[pInfoObject->wsGroupID];
  4477. if (pOMCWorkset == NULL)
  4478. {
  4479. catchUpReqd = TRUE;
  4480. }
  4481. else
  4482. {
  4483. FindPersonObject(pOMCWorkset,
  4484. pDomain->userID,
  4485. FIND_OTHERS,
  4486. &pObjReg);
  4487. if (pObjReg == NULL)
  4488. {
  4489. //
  4490. // This will happen when the remote node has deleted its
  4491. // registration record object but hasn't yet deleted the
  4492. // info object. Because the reg rec object is gone, we
  4493. // can't catch up from that node (or any node):
  4494. //
  4495. TRACE_OUT(( "INFO object found but no reg object - creating"));
  4496. catchUpReqd = FALSE;
  4497. }
  4498. else
  4499. {
  4500. ValidateObject(pObjReg);
  4501. catchUpReqd = TRUE;
  4502. }
  4503. }
  4504. }
  4505. //
  4506. // We should never try to catch up in the local Domain:
  4507. //
  4508. if (catchUpReqd && (pDomain->callID == OM_NO_CALL))
  4509. {
  4510. ERROR_OUT(( "Nearly tried to catch up in local Domain!"));
  4511. catchUpReqd = FALSE;
  4512. }
  4513. if (catchUpReqd)
  4514. {
  4515. //
  4516. // The workset group already exists, so we need to
  4517. //
  4518. // - set the workset group ID to the value in the INFO object, and
  4519. //
  4520. // - start the catch up process.
  4521. //
  4522. // Note: this will only happen in the case of a REGISTER, so we
  4523. // assert
  4524. //
  4525. ASSERT((pRegistrationCB->type == WSGROUP_REGISTER));
  4526. ASSERT((pInfoObject != NULL));
  4527. pWSGroup->wsGroupID = pInfoObject->wsGroupID;
  4528. rc = WSGCatchUp(pomPrimary, pDomain, pWSGroup);
  4529. if (rc == OM_RC_NO_NODES_READY)
  4530. {
  4531. //
  4532. // We get this return code when there are nodes out there with
  4533. // a copy but none of them are ready to send us the workset
  4534. // group.
  4535. //
  4536. // The correct thing to do is to give up for the moment and try
  4537. // again:
  4538. //
  4539. WSGRegisterRetry(pomPrimary, pRegistrationCB);
  4540. rc = 0;
  4541. DC_QUIT;
  4542. }
  4543. //
  4544. // Any other error is more serious:
  4545. //
  4546. if (rc != 0)
  4547. {
  4548. DC_QUIT;
  4549. }
  4550. //
  4551. // We won't be ready to send the workset group to a late-joiner
  4552. // node until we've caught up ourselves; when we have, the
  4553. // ProcessSendComplete function will call RegAnnounceComplete to
  4554. // update the reg object added for us by our helper node.
  4555. //
  4556. }
  4557. else
  4558. {
  4559. if (type == WSGROUP_MOVE)
  4560. {
  4561. //
  4562. // If this is a MOVE, pWSGroup refers to a workset group record
  4563. // which currently belongs to its "old" Domain. Since we're
  4564. // just about to announce the workset group's presence in its
  4565. // new Domain, this is the time to do the move:
  4566. //
  4567. WSGRecordMove(pomPrimary, pRegistrationCB->pDomain, pWSGroup);
  4568. //
  4569. // This will have reset the channel ID in the workset group
  4570. // record so we set it again here (yeah, it's naff):
  4571. //
  4572. pWSGroup->channelID = channelID;
  4573. }
  4574. //
  4575. // We've either just created a new workset group, or moved one into
  4576. // a new Domain, so we need to create a new ID for it in this
  4577. // Domain:
  4578. //
  4579. rc = WSGGetNewID(pomPrimary, pDomain, &(pWSGroup->wsGroupID));
  4580. if (rc != 0)
  4581. {
  4582. DC_QUIT;
  4583. }
  4584. TRACE_OUT(( "Workset group ID for WSG %d in Domain %u is %hu",
  4585. pWSGroup->wsg, pDomain->callID, pWSGroup->wsGroupID));
  4586. //
  4587. // Now call CreateAnnounce to add a WSG_INFO object to workset #0
  4588. // in ObManControl.
  4589. //
  4590. rc = CreateAnnounce(pomPrimary, pDomain, pWSGroup);
  4591. if (rc != 0)
  4592. {
  4593. DC_QUIT;
  4594. }
  4595. //
  4596. // Since we have completed our registration with the workset group,
  4597. // we announce to the world that we have a copy and will send it to
  4598. // others on request:
  4599. //
  4600. rc = RegAnnounceBegin(pomPrimary,
  4601. pDomain,
  4602. pWSGroup,
  4603. pDomain->userID,
  4604. &(pWSGroup->pObjReg));
  4605. if (rc != 0)
  4606. {
  4607. DC_QUIT;
  4608. }
  4609. rc = SetPersonData(pomPrimary, pDomain, pWSGroup);
  4610. if (rc != 0)
  4611. {
  4612. DC_QUIT;
  4613. }
  4614. rc = RegAnnounceComplete(pomPrimary, pDomain, pWSGroup);
  4615. if (rc != 0)
  4616. {
  4617. DC_QUIT;
  4618. }
  4619. //
  4620. // If we're not catching up, we call Result immediately (if we are
  4621. // catching up, Result will be called when we get the SEND_MIDWAY
  4622. // message):
  4623. //
  4624. // SFR 2744 : Can't call result here because we refer to the reg
  4625. // CB below. So, just set a flag and act on it below.
  4626. //
  4627. success = TRUE;
  4628. }
  4629. TRACE_OUT(( "Completed Register/Move Stage 3 for WSG %d",
  4630. pWSGroup->wsg));
  4631. DC_EXIT_POINT:
  4632. //
  4633. // OK, the critical test-and-set on the ObManControl workset group is
  4634. // finished, so we unlock workset #0 in ObManControl:
  4635. //
  4636. MaybeUnlockObManControl(pomPrimary, pRegistrationCB);
  4637. // SFR 2744 { : Call WSGRegResult AFTER checks on the flags in reg CB
  4638. if (success == TRUE)
  4639. {
  4640. WSGRegisterResult(pomPrimary, pRegistrationCB, 0);
  4641. }
  4642. // SFR 2744 }
  4643. if (rc != 0)
  4644. {
  4645. WARNING_OUT(( "Error %d at Stage 3 of %d with WSG %d",
  4646. rc, type, pWSGroup->wsg));
  4647. WSGRegisterResult(pomPrimary, pRegistrationCB, rc);
  4648. rc = 0;
  4649. }
  4650. DebugExitVOID(WSGRegisterStage2);
  4651. }
//
// WSGGetNewID(...)
//

//
// Allocate an unused workset group ID for a new workset group in
// <pDomain>.
//
// Scans the WSGROUP_INFO objects in ObManControl workset #0 to build a
// bitmap of IDs already in use, then picks the lowest free one and
// returns it via <pWSGroupID>.  Also ensures the registration workset
// for that ID exists in ObManControl (creating it if this is the first
// time the ID has been used).
//
// Returns 0 on success or OM_RC_TOO_MANY_WSGROUPS if every ID is taken.
//
UINT WSGGetNewID
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    POM_WSGROUP_ID pWSGroupID
)
{
    POM_WSGROUP pOMCWSGroup;
    POM_WORKSET pOMCWorkset;
    POM_OBJECT pObj;
    POM_WSGROUP_INFO pInfoObject;
    OM_WSGROUP_ID wsGroupID;
    BOOL found;
    // One in-use flag per possible ID; zeroed below before the scan.
    BYTE wsGroupIDsInUse[OM_MAX_WSGROUPS_PER_DOMAIN];
    UINT rc = 0;

    DebugEntry(WSGGetNewID);

    TRACE_OUT(( "Searching for new WSG ID in Domain %u", pDomain->callID));
    ZeroMemory(wsGroupIDsInUse, sizeof(wsGroupIDsInUse));

    //
    // Need to pick a workset group ID so far unused in this Domain to
    // identify this new workset group.  So, we build up a list of the IDs
    // currently in use (by examining the INFO objects in workset #0) and
    // then choose one that's not in use.
    //
    pOMCWSGroup = GetOMCWsgroup(pDomain);
    if( pOMCWSGroup == NULL)
    {
        //
        // NOTE(review): this path exits with rc == 0 but without setting
        // *pWSGroupID, so the caller sees "success" with an
        // uninitialized ID - looks like a latent bug; confirm whether
        // this path is reachable after the caller's validity checks.
        //
        TRACE_OUT(("pOMCWSGroup not found"));
        DC_QUIT;
    }
    pOMCWorkset = pOMCWSGroup->apWorksets[0];

    //
    // Walk every object in workset #0, marking the ID of each live
    // WSGROUP_INFO object as in use:
    //
    pObj = (POM_OBJECT)COM_BasedListFirst(&(pOMCWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
    while (pObj != NULL)
    {
        ValidateObject(pObj);

        if (pObj->flags & DELETED)
        {
            //
            // Do nothing
            //
        }
        else if (!pObj->pData)
        {
            //
            // Do nothing
            //
            ERROR_OUT(("WSGGetNewID: object 0x%08x has no data", pObj));
        }
        else
        {
            ValidateObjectData(pObj->pData);
            pInfoObject = (POM_WSGROUP_INFO)pObj->pData;

            if (pInfoObject->idStamp != OM_WSGINFO_ID_STAMP)
            {
                //
                // Do nothing - workset #0 also holds objects other than
                // WSGROUP_INFO; the ID stamp distinguishes them.
                //
            }
            else
            {
                //
                // OK, we've found a WSGROUP_INFO object, so cross off the
                // workset group ID which its workset group is using:
                //
                wsGroupID = pInfoObject->wsGroupID;
                wsGroupIDsInUse[wsGroupID] = TRUE;
            }
        }

        pObj = (POM_OBJECT)COM_BasedListNext(&(pOMCWorkset->objects), pObj,
            FIELD_OFFSET(OM_OBJECT, chain));
    }

    //
    // Now go through the array to find an ID that wasn't marked as being
    // in use:
    //
    found = FALSE;
    for (wsGroupID = 0; wsGroupID < OM_MAX_WSGROUPS_PER_DOMAIN; wsGroupID++)
    {
        if (!wsGroupIDsInUse[wsGroupID])
        {
            TRACE_OUT(( "Workset group ID %hu is not in use, using", wsGroupID));
            found = TRUE;
            break;
        }
    }

    //
    // We checked earlier that the number of workset groups in the Domain
    // hadn't exceeded the maximum (in WSGRecordCreate).
    //
    // However, if the Domain has run out of workset groups in the period
    // since then, we won't have found any:
    //
    if (found == FALSE)
    {
        WARNING_OUT(( "No more workset group IDs for Domain %u!",
            pDomain->callID));
        rc = OM_RC_TOO_MANY_WSGROUPS;
        DC_QUIT;
    }

    //
    // If this is the first time that this ID has been used, then the
    // associated registration workset won't exist.  In this case, we
    // create it now.
    //
    // If the ID has been used before, it will exist but it should be
    // empty.  In this case, we check that it really is empty.
    //
    pOMCWorkset = pOMCWSGroup->apWorksets[wsGroupID];
    if (pOMCWorkset == NULL)
    {
        TRACE_OUT(( "Registration workset %u not used yet, creating", wsGroupID));
        rc = WorksetCreate(pomPrimary->putTask,
                           pOMCWSGroup,
                           wsGroupID,
                           FALSE,
                           NET_TOP_PRIORITY);
        if (rc != 0)
        {
            DC_QUIT;
        }
    }
    else
    {
        ASSERT((pOMCWorkset->numObjects == 0));
        TRACE_OUT(( "Registration workset %u previously used, re-using",
            wsGroupID));
    }

    //
    // Set the caller's pointer:
    //
    *pWSGroupID = wsGroupID;

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // Cleanup:
        //
        ERROR_OUT(( "Error %d allocating ID for new workset group", rc));
    }

    DebugExitDWORD(WSGGetNewID, rc);
    return(rc);
}
//
// CreateAnnounce(...)
//

//
// Announce a newly created (or newly moved-in) workset group to the
// rest of <pDomain> by adding a WSGROUP_INFO object to workset #0 of
// ObManControl.  The object records the workset group's channel,
// creator, ID, name and function profile, so other nodes can find and
// join it.
//
// Returns 0 on success, or an error code if allocation or ObjectAdd
// fails.
//
UINT CreateAnnounce
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    POM_WSGROUP pWSGroup
)
{
    POM_WSGROUP pOMCWSGroup;
    POM_WORKSET pOMCWorkset;
    POM_WSGROUP_INFO pInfoObject;
    POM_OBJECT pObj;
    OM_OBJECT_ID infoObjectID;
    UINT rc = 0;

    DebugEntry(CreateAnnounce);

    TRACE_OUT(("Announcing creation of WSG %d in Domain %u",
        pWSGroup->wsg, pDomain->callID));

    //
    // Announcing a new workset group involves adding an object which
    // defines the workset group to workset #0 in ObManControl.
    //
    // So, we derive a pointer to the workset...
    //
    // NOTE(review): if the ObManControl workset group is not found we
    // quit with rc still 0, i.e. the caller sees success without
    // anything having been announced - confirm this is intended.
    //
    pOMCWSGroup = GetOMCWsgroup(pDomain);
    if( pOMCWSGroup == NULL)
    {
        TRACE_OUT(("pOMCWSGroup not found"));
        DC_QUIT;
    }
    pOMCWorkset = pOMCWSGroup->apWorksets[0];
    ASSERT((pOMCWorkset != NULL));

    //
    // ...create a definition object...
    //
    pInfoObject = (POM_WSGROUP_INFO)UT_MallocRefCount(sizeof(OM_WSGROUP_INFO), TRUE);
    if (!pInfoObject)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }

    //
    // ...fill in the fields...
    //
    // (length = sizeof - 4 since value of length field doesn't include
    // the size of the length field itself).
    //
    // NOTE(review): the subtrahend is written sizeof(OM_MAX_OBJECT_SIZE),
    // i.e. the size of that constant's *type* - presumably 4, matching
    // the comment, but confirm it really equals the length field's size.
    //
    pInfoObject->length = sizeof(OM_WSGROUP_INFO) -
        sizeof(OM_MAX_OBJECT_SIZE);
    pInfoObject->idStamp = OM_WSGINFO_ID_STAMP;
    pInfoObject->channelID = pWSGroup->channelID;
    pInfoObject->creator = pDomain->userID;
    pInfoObject->wsGroupID = pWSGroup->wsGroupID;

    lstrcpy(pInfoObject->wsGroupName, OMMapWSGToName(pWSGroup->wsg));
    lstrcpy(pInfoObject->functionProfile, OMMapFPToName(pWSGroup->fpHandler));

    //
    // ...and add the object to the workset...
    //
    rc = ObjectAdd(pomPrimary->putTask,
                   pomPrimary,
                   pOMCWSGroup,
                   pOMCWorkset,
                   (POM_OBJECTDATA) pInfoObject,
                   0,                       // update size == 0
                   LAST,
                   &infoObjectID,
                   &pObj);
    if (rc != 0)
    {
        DC_QUIT;
    }

    TRACE_OUT(( "Announced new WSG %d in Domain %u",
        pWSGroup->wsg, pDomain->callID));

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // Cleanup:
        //
        ERROR_OUT(("Error %d announcing new WSG %d in Domain %u",
            rc, pWSGroup->wsg, pDomain->callID));
    }

    DebugExitDWORD(CreateAnnounce, rc);
    return(rc);
}
  4883. //
  4884. // WSGCatchUp(...)
  4885. //
  4886. UINT WSGCatchUp
  4887. (
  4888. POM_PRIMARY pomPrimary,
  4889. POM_DOMAIN pDomain,
  4890. POM_WSGROUP pWSGroup)
  4891. {
  4892. POM_WORKSET pOMCWorkset;
  4893. POM_OBJECT pObj;
  4894. POM_WSGROUP_REG_REC pRegObject;
  4895. NET_UID remoteUserID;
  4896. UINT rc = 0;
  4897. DebugEntry(WSGCatchUp);
  4898. TRACE_OUT(( "Starting catch-up for WSG %d in Domain %u",
  4899. pWSGroup->wsg, pDomain->callID));
  4900. //
  4901. // This should never be for the "local" Domain:
  4902. //
  4903. ASSERT((pDomain->callID != NET_INVALID_DOMAIN_ID));
  4904. //
  4905. // The catch-up procedure is as follows:
  4906. //
  4907. // - look in ObManControl workset group for the ID of an instance of
  4908. // ObMan which has a copy of this workset group
  4909. //
  4910. // - send it an OMNET_WSGROUP_SEND_REQ message
  4911. //
  4912. // So, start by getting a pointer to the relevant workset:
  4913. //
  4914. pOMCWorkset = GetOMCWorkset(pDomain, pWSGroup->wsGroupID);
  4915. ValidateWorkset(pOMCWorkset);
  4916. //
  4917. // Now we chain through the workset looking for a reg object which has
  4918. // status READY_TO_SEND:
  4919. //
  4920. pObj = (POM_OBJECT)COM_BasedListFirst(&(pOMCWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  4921. remoteUserID = 0;
  4922. while (pObj != NULL)
  4923. {
  4924. ValidateObject(pObj);
  4925. if (pObj->flags & DELETED)
  4926. {
  4927. //
  4928. // Skip this one
  4929. //
  4930. }
  4931. else if (!pObj->pData)
  4932. {
  4933. //
  4934. // Skip this one
  4935. //
  4936. ERROR_OUT(("WSGCatchUp: object 0x%08x has no data", pObj));
  4937. }
  4938. else
  4939. {
  4940. pRegObject = (POM_WSGROUP_REG_REC)pObj->pData;
  4941. ValidateObjectDataWSGREGREC(pRegObject);
  4942. if ((pRegObject->status == READY_TO_SEND) &&
  4943. (pRegObject->userID != pDomain->userID))
  4944. {
  4945. //
  4946. // OK, this node has a full copy, so we'll try to get it from
  4947. // there:
  4948. //
  4949. remoteUserID = pRegObject->userID;
  4950. break;
  4951. }
  4952. }
  4953. pObj = (POM_OBJECT)COM_BasedListNext(&(pOMCWorkset->objects), pObj,
  4954. FIELD_OFFSET(OM_OBJECT, chain));
  4955. }
  4956. //
  4957. // ...check that we did actually find a node to get the data from:
  4958. //
  4959. if (remoteUserID == 0)
  4960. {
  4961. WARNING_OUT(( "No node in Domain %u is ready to send WSG %d - retrying",
  4962. pDomain->callID, pWSGroup->wsg));
  4963. rc = OM_RC_NO_NODES_READY;
  4964. DC_QUIT;
  4965. }
  4966. //
  4967. // ...then send that node a request to send us the workset group:
  4968. //
  4969. rc = IssueSendReq(pomPrimary,
  4970. pDomain,
  4971. pWSGroup,
  4972. remoteUserID);
  4973. DC_EXIT_POINT:
  4974. if ((rc != 0) && (rc != OM_RC_NO_NODES_READY))
  4975. {
  4976. ERROR_OUT(( "Error %d starting catch-up for WSG %d in Domain %u",
  4977. rc, pWSGroup->wsg, pDomain->callID));
  4978. }
  4979. DebugExitDWORD(WSGCatchUp, rc);
  4980. return(rc);
  4981. }
  4982. //
  4983. // IssueSendDeny(...)
  4984. //
  4985. void IssueSendDeny
  4986. (
  4987. POM_PRIMARY pomPrimary,
  4988. POM_DOMAIN pDomain,
  4989. OM_WSGROUP_ID wsGroupID,
  4990. NET_UID sender,
  4991. OM_CORRELATOR remoteCorrelator
  4992. )
  4993. {
  4994. POMNET_WSGROUP_SEND_PKT pWSGSendPkt;
  4995. DebugEntry(IssueSendDeny);
  4996. //
  4997. // Now issue the SEND_DENY.
  4998. //
  4999. TRACE_OUT(( "Sending SEND_DENY message to late joiner 0x%08x", sender));
  5000. //
  5001. // We start by allocating some memory:
  5002. //
  5003. pWSGSendPkt = (POMNET_WSGROUP_SEND_PKT)UT_MallocRefCount(sizeof(OMNET_WSGROUP_SEND_PKT), TRUE);
  5004. if (!pWSGSendPkt)
  5005. {
  5006. ERROR_OUT(("Out of memory in IssueSendDeny"));
  5007. DC_QUIT;
  5008. }
  5009. //
  5010. // Now fill in the fields:
  5011. //
  5012. pWSGSendPkt->header.sender = pDomain->userID;
  5013. pWSGSendPkt->header.messageType = OMNET_WSGROUP_SEND_DENY;
  5014. pWSGSendPkt->wsGroupID = wsGroupID;
  5015. //
  5016. // SFR 7124. Return the correlator for this catchup.
  5017. //
  5018. pWSGSendPkt->correlator = remoteCorrelator;
  5019. //
  5020. // Queue the message to be sent.
  5021. //
  5022. QueueMessage(pomPrimary->putTask,
  5023. pDomain,
  5024. sender,
  5025. NET_TOP_PRIORITY,
  5026. NULL, // no WSG
  5027. NULL, // no workset
  5028. NULL, // no object
  5029. (POMNET_PKT_HEADER) pWSGSendPkt,
  5030. NULL, // no object data
  5031. TRUE);
  5032. DC_EXIT_POINT:
  5033. DebugExitVOID(IssueSendDeny);
  5034. }
  5035. //
  5036. //
  5037. //
  5038. // IssueSendReq(...)
  5039. //
  5040. //
  5041. //
  5042. UINT IssueSendReq(POM_PRIMARY pomPrimary,
  5043. POM_DOMAIN pDomain,
  5044. POM_WSGROUP pWSGroup,
  5045. NET_UID helperNode)
  5046. {
  5047. POMNET_WSGROUP_SEND_PKT pWSGSendPkt;
  5048. UINT rc = 0;
  5049. DebugEntry(IssueSendReq);
  5050. //
  5051. // We start by allocating some memory for the OMNET_SEND_REQ message:
  5052. //
  5053. pWSGSendPkt = (POMNET_WSGROUP_SEND_PKT)UT_MallocRefCount(sizeof(OMNET_WSGROUP_SEND_PKT), TRUE);
  5054. if (!pWSGSendPkt)
  5055. {
  5056. rc = UT_RC_NO_MEM;
  5057. DC_QUIT;
  5058. }
  5059. //
  5060. // Now fill in the fields:
  5061. //
  5062. // SFR 7124. Generate a correlator so we can match
  5063. // SEND_MIDWAY,SEND_COMPLETE and SEND_DENY messages to this catchup.
  5064. //
  5065. pWSGSendPkt->header.sender = pDomain->userID;
  5066. pWSGSendPkt->header.messageType = OMNET_WSGROUP_SEND_REQ;
  5067. pWSGSendPkt->wsGroupID = pWSGroup->wsGroupID;
  5068. pWSGroup->catchupCorrelator = NextCorrelator(pomPrimary);
  5069. pWSGSendPkt->correlator = pWSGroup->catchupCorrelator;
  5070. //
  5071. // The <helperNode> parameter is the node which the calling function
  5072. // has identified as a remote node which is capable of sending us the
  5073. // workset group we want. So, we send that instance of ObMan an
  5074. // OMNET_WSGROUP_SEND_REQ on its single-user channel, enclosing our own
  5075. // single-user channel ID for the response:
  5076. //
  5077. // Note: the SEND_REQ must not overtake any data on its way from us to
  5078. // the remote node (e.g. if we've just added an object,
  5079. // deregistered and then reregistered). Therefore, set the
  5080. // NET_SEND_ALL_PRIORITIES flag.
  5081. //
  5082. // SFR 6117: Don't believe this is a problem for R2.0, so just send at
  5083. // low priority.
  5084. //
  5085. rc = QueueMessage(pomPrimary->putTask,
  5086. pDomain,
  5087. helperNode,
  5088. NET_LOW_PRIORITY,
  5089. pWSGroup,
  5090. NULL, // no workset
  5091. NULL, // no object
  5092. (POMNET_PKT_HEADER) pWSGSendPkt,
  5093. NULL, // no object data
  5094. TRUE);
  5095. if (rc != 0)
  5096. {
  5097. DC_QUIT;
  5098. }
  5099. //
  5100. // Set the workset group state, and record the number of SEND_MIDWAY
  5101. // and SEND_COMPLETE messages we're expecting (one for R11, one per
  5102. // priority for R20).
  5103. //
  5104. // Note: we set the counts up here because we may get some of the
  5105. // SEND_COMPLETEs before we get all the SEND_MIDWAYs, so to set the
  5106. // count in ProcessSendMidway would be too late.
  5107. //
  5108. pWSGroup->state = PENDING_SEND_MIDWAY;
  5109. pWSGroup->sendMidwCount = NET_NUM_PRIORITIES;
  5110. pWSGroup->sendCompCount = NET_NUM_PRIORITIES;
  5111. //
  5112. // Store the helper node ID in the WSG structure.
  5113. //
  5114. pWSGroup->helperNode = helperNode;
  5115. DC_EXIT_POINT:
  5116. if (rc != 0)
  5117. {
  5118. //
  5119. // Cleanup:
  5120. //
  5121. ERROR_OUT(( "Error %d requesting send from node 0x%08x "
  5122. "for WSG %d in Domain %u",
  5123. rc, pWSGroup->wsg, helperNode, pDomain->callID));
  5124. }
  5125. else
  5126. {
  5127. //
  5128. // Success:
  5129. //
  5130. TRACE_OUT(("Requested copy of WSG %d' from node 0x%08x (in Domain %u), correlator %hu",
  5131. pWSGroup->wsg, helperNode, pDomain->callID,
  5132. pWSGroup->catchupCorrelator));
  5133. }
  5134. DebugExitDWORD(IssueSendReq, rc);
  5135. return(rc);
  5136. }
  5137. //
  5138. // ProcessSendReq(...)
  5139. //
  5140. void ProcessSendReq
  5141. (
  5142. POM_PRIMARY pomPrimary,
  5143. POM_DOMAIN pDomain,
  5144. POMNET_WSGROUP_SEND_PKT pSendReqPkt
  5145. )
  5146. {
  5147. POM_WSGROUP pWSGroup;
  5148. POM_WORKSET pWorkset;
  5149. POM_HELPER_CB pHelperCB;
  5150. NET_UID sender;
  5151. BOOL sendDeny = FALSE;
  5152. DebugEntry(ProcessSendReq);
  5153. //
  5154. // This is the user ID of the late joiner:
  5155. //
  5156. sender = pSendReqPkt->header.sender;
  5157. //
  5158. // We start by finding our copy of the workset group:
  5159. //
  5160. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
  5161. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  5162. FIELD_OFFSET(OM_WSGROUP, wsGroupID),
  5163. (DWORD)pSendReqPkt->wsGroupID, FIELD_SIZE(OM_WSGROUP, wsGroupID));
  5164. //
  5165. // Quit and deny the send if workset group not found:
  5166. //
  5167. if (pWSGroup == NULL)
  5168. {
  5169. WARNING_OUT(( "Don't have workset group %hu to send to node 0x%08x",
  5170. pSendReqPkt->wsGroupID, sender));
  5171. sendDeny = TRUE;
  5172. DC_QUIT;
  5173. }
  5174. //
  5175. // Quit and deny the send if we don't have ALL the workset group:
  5176. //
  5177. if (pWSGroup->state != WSGROUP_READY)
  5178. {
  5179. WARNING_OUT(("WSG %d is in state %hu - can't send to node 0x%08x",
  5180. pWSGroup->wsg, pWSGroup->state, sender));
  5181. sendDeny = TRUE;
  5182. DC_QUIT;
  5183. }
  5184. TRACE_OUT(( "Processing SEND_REQUEST from node 0x%08x for WSG %d, correlator %hu",
  5185. sender, pWSGroup->wsg, pSendReqPkt->correlator));
  5186. //
  5187. // Right, we're fully registered with the workset group, so we will be
  5188. // its helper node. First, allocate a helper CB to keep track of the
  5189. // process:
  5190. //
  5191. if (!NewHelperCB(pDomain,
  5192. pWSGroup,
  5193. sender,
  5194. pSendReqPkt->correlator,
  5195. &pHelperCB))
  5196. {
  5197. //
  5198. // Deny the workset send request
  5199. //
  5200. sendDeny = TRUE;
  5201. WARNING_OUT(( "Failed to allocate helper CB - issuing SEND_DENY"));
  5202. DC_QUIT;
  5203. }
  5204. //
  5205. // Before we can send the contents of the workset group to the late
  5206. // joiner, we must ensure that our view of the contents is up to date.
  5207. // We do this by checkpointing the workset group, which means locking
  5208. // the dummy workset which exists in all workset groups. Do this now:
  5209. //
  5210. pWorkset = pWSGroup->apWorksets[OM_CHECKPOINT_WORKSET];
  5211. WorksetLockReq(pomPrimary->putTask, pomPrimary,
  5212. pWSGroup,
  5213. pWorkset,
  5214. 0,
  5215. &(pHelperCB->lockCorrelator));
  5216. //
  5217. // We will shortly get a WORKSET_LOCK_CON event containing the
  5218. // correlator just stored in the helper CB. We will look this up and
  5219. // continue the catch-up process then.
  5220. //
  5221. DC_EXIT_POINT:
  5222. //
  5223. // If we set the sendDeny flag above then now send the SEND_DENY
  5224. // message to the late joiner.
  5225. //
  5226. if (sendDeny)
  5227. {
  5228. IssueSendDeny(pomPrimary,
  5229. pDomain,
  5230. pSendReqPkt->wsGroupID,
  5231. sender,
  5232. pSendReqPkt->correlator);
  5233. }
  5234. DebugExitVOID(ProcessSendReq);
  5235. }
  5236. //
  5237. // SendWSGToLateJoiner(...)
  5238. //
  5239. void SendWSGToLateJoiner
  5240. (
  5241. POM_PRIMARY pomPrimary,
  5242. POM_DOMAIN pDomain,
  5243. POM_WSGROUP pWSGroup,
  5244. NET_UID lateJoiner,
  5245. OM_CORRELATOR remoteCorrelator
  5246. )
  5247. {
  5248. POM_WORKSET pWorkset;
  5249. POMNET_OPERATION_PKT pPacket;
  5250. POM_OBJECT pObj;
  5251. POMNET_WSGROUP_SEND_PKT pSendMidwayPkt;
  5252. POMNET_WSGROUP_SEND_PKT pSendCompletePkt;
  5253. POM_OBJECTDATA pData;
  5254. OM_WORKSET_ID worksetID;
  5255. UINT maxSeqUsed = 0;
  5256. NET_PRIORITY catchupPriority = 0;
  5257. UINT rc = 0;
  5258. DebugEntry(SendWSGToLateJoiner);
  5259. //
  5260. // The first thing to do is to announce that the remote node is
  5261. // registering with the workset group:
  5262. //
  5263. rc = RegAnnounceBegin(pomPrimary,
  5264. pDomain,
  5265. pWSGroup,
  5266. lateJoiner,
  5267. &pObj);
  5268. if (rc != 0)
  5269. {
  5270. DC_QUIT;
  5271. }
  5272. //
  5273. // We then start flow control on the user channel of the node that we
  5274. // are sending the data to. We only start flow control on the low
  5275. // priority channel and don't bother to restrict the maximum stream
  5276. // size. If flow control is already started on this stream then this
  5277. // call will have no effect. Note that flow control will automatically
  5278. // be stopped when the call ends.
  5279. //
  5280. MG_FlowControlStart(pomPrimary->pmgClient,
  5281. lateJoiner,
  5282. NET_LOW_PRIORITY,
  5283. 0,
  5284. 8192);
  5285. //
  5286. // Now, cycle through each of the worksets and generate and send
  5287. //
  5288. // - WORKSET_NEW messages for each workset,
  5289. //
  5290. // - a WSG_SEND_MIDWAY message to indicate we've sent all the worksets
  5291. //
  5292. // - OBJECT_ADD messages for each of the objects in each of the
  5293. // worksets.
  5294. //
  5295. // - a WSG_SEND_COMPLETE message to indicate we've sent all the
  5296. // objects.
  5297. //
  5298. // NOTE: We do not send CHECKPOINT worksets, so the for loop should
  5299. // stop before it gets 255.
  5300. //
  5301. for (worksetID = 0; worksetID < OM_MAX_WORKSETS_PER_WSGROUP; worksetID++)
  5302. {
  5303. pWorkset = pWSGroup->apWorksets[worksetID];
  5304. if (!pWorkset)
  5305. {
  5306. continue;
  5307. }
  5308. TRACE_OUT(( "Sending WORKSET_CATCHUP for workset %u", worksetID));
  5309. rc = GenerateOpMessage(pWSGroup,
  5310. worksetID,
  5311. NULL, // no object ID
  5312. NULL, // no object data
  5313. OMNET_WORKSET_CATCHUP,
  5314. &pPacket);
  5315. if (rc != 0)
  5316. {
  5317. DC_QUIT;
  5318. }
  5319. rc = QueueMessage(pomPrimary->putTask,
  5320. pWSGroup->pDomain,
  5321. lateJoiner,
  5322. NET_TOP_PRIORITY,
  5323. pWSGroup,
  5324. pWorkset,
  5325. NULL, // no object
  5326. (POMNET_PKT_HEADER) pPacket,
  5327. NULL, // no object data
  5328. TRUE);
  5329. if (rc != 0)
  5330. {
  5331. DC_QUIT;
  5332. }
  5333. }
  5334. //
  5335. // Now send the SEND_MIDWAY message to indicate that all the
  5336. // WORKSET_NEW messages have been sent:
  5337. //
  5338. pSendMidwayPkt = (POMNET_WSGROUP_SEND_PKT)UT_MallocRefCount(sizeof(OMNET_WSGROUP_SEND_PKT), TRUE);
  5339. if (!pSendMidwayPkt)
  5340. {
  5341. rc = UT_RC_NO_MEM;
  5342. DC_QUIT;
  5343. }
  5344. pSendMidwayPkt->header.sender = pDomain->userID;
  5345. pSendMidwayPkt->header.messageType = OMNET_WSGROUP_SEND_MIDWAY;
  5346. pSendMidwayPkt->wsGroupID = pWSGroup->wsGroupID;
  5347. pSendMidwayPkt->correlator = remoteCorrelator;
  5348. //
  5349. // The next field is the ID of the reg object which we added above.
  5350. // So, convert the handle of the reg object returned by RegAnnouncBegin
  5351. // to a pointer to the object record and then copy the object ID into
  5352. // the message packet:
  5353. //
  5354. memcpy(&(pSendMidwayPkt->objectID), &(pObj->objectID), sizeof(OM_OBJECT_ID));
  5355. //
  5356. // The last field, which is the highest object ID sequence number
  5357. // previously used by the late joiner in this workset group, is not yet
  5358. // know; it will be filled in below. However (see note below), we
  5359. // queue the message now to ensure it doesn't get stuck behind lots of
  5360. // objects:
  5361. //
  5362. TRACE_OUT(("Queueing WSG_SEND_MIDWAY message to node 0x%08x for WSG %d, correlator %hu",
  5363. lateJoiner, pWSGroup->wsg, remoteCorrelator));
  5364. rc = QueueMessage(pomPrimary->putTask,
  5365. pWSGroup->pDomain,
  5366. lateJoiner,
  5367. NET_TOP_PRIORITY | NET_SEND_ALL_PRIORITIES,
  5368. pWSGroup,
  5369. NULL, // no workset
  5370. NULL, // no object
  5371. (POMNET_PKT_HEADER) pSendMidwayPkt,
  5372. NULL, // no object data
  5373. TRUE);
  5374. if (rc != 0)
  5375. {
  5376. DC_QUIT;
  5377. }
  5378. //
  5379. // If the workset group is ObMan control then we should send it at top
  5380. // priority to ensure that it can overtake any slower pending sends to
  5381. // other nodes. Otherwise we send the send the data at the lowest
  5382. // priority.
  5383. //
  5384. if (pWSGroup->wsGroupID == WSGROUPID_OMC)
  5385. {
  5386. catchupPriority = NET_TOP_PRIORITY;
  5387. }
  5388. else
  5389. {
  5390. catchupPriority = NET_LOW_PRIORITY;
  5391. }
  5392. TRACE_OUT(( "Sending catchup data at priority %hu for 0x%08x",
  5393. catchupPriority,
  5394. lateJoiner));
  5395. //
  5396. // Now start the loop which does the OBJECT_ADDs:
  5397. //
  5398. for (worksetID = 0; worksetID < OM_MAX_WORKSETS_PER_WSGROUP; worksetID++)
  5399. {
  5400. pWorkset = pWSGroup->apWorksets[worksetID];
  5401. if (pWorkset == NULL)
  5402. {
  5403. continue;
  5404. }
  5405. TRACE_OUT(( "Sending OBJECT_CATCHUPs for workset %u", worksetID));
  5406. //
  5407. // Note that we must send deleted objects too, since late-joiners
  5408. // have just as much need as we do to detect out of date
  5409. // operations:
  5410. //
  5411. pObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  5412. while (pObj != NULL)
  5413. {
  5414. ValidateObject(pObj);
  5415. //
  5416. // The workset group that the late joiner is catching up with
  5417. // may contain objects which it has added in a previous call
  5418. // (with the same network user ID). Since that call is over,
  5419. // it may reuse IDs present in this workset group - to prevent
  5420. // this, we must tell it the highest sequence count it used for
  5421. // object IDs for this workset group, so while we're going
  5422. // through the objects, keep a count:
  5423. //
  5424. if (pObj->objectID.creator == lateJoiner)
  5425. {
  5426. maxSeqUsed = max(maxSeqUsed, pObj->objectID.sequence);
  5427. }
  5428. if (pObj->flags & PENDING_DELETE)
  5429. {
  5430. //
  5431. // If the object is pending delete at this node, we do not
  5432. // send the object data. The way to avoid this is to set
  5433. // pData to NULL (must be done before call to
  5434. // GenerateOpMessage):
  5435. //
  5436. pData = NULL;
  5437. }
  5438. else
  5439. {
  5440. pData = pObj->pData;
  5441. if (pData)
  5442. {
  5443. ValidateObjectData(pData);
  5444. }
  5445. }
  5446. //
  5447. // Now generate the message packet:
  5448. //
  5449. rc = GenerateOpMessage(pWSGroup,
  5450. worksetID,
  5451. &(pObj->objectID),
  5452. pData,
  5453. OMNET_OBJECT_CATCHUP,
  5454. &pPacket);
  5455. if (rc != 0)
  5456. {
  5457. DC_QUIT;
  5458. }
  5459. //
  5460. // Now fill in the catchup-specific fields (note that the
  5461. // <seqStamp> will already have been filled in, but with the
  5462. // current sequence stamp for the workset; for a CatchUp
  5463. // message, this should be the add stamp for the object):
  5464. //
  5465. pPacket->position = pObj->position;
  5466. pPacket->flags = pObj->flags;
  5467. pPacket->updateSize = pObj->updateSize;
  5468. if (pObj->flags & PENDING_DELETE)
  5469. {
  5470. //
  5471. // If the object is pending delete at this node, we send it
  5472. // as if it has been delete-confirmed (since local
  5473. // delete-confirms or their DC_ABSence should have no effect
  5474. // outside this box). To do this, we just set the DELETED
  5475. // flag in the packet:
  5476. //
  5477. pPacket->flags &= ~PENDING_DELETE;
  5478. pPacket->flags |= DELETED;
  5479. }
  5480. COPY_SEQ_STAMP(pPacket->seqStamp, pObj->addStamp);
  5481. COPY_SEQ_STAMP(pPacket->positionStamp, pObj->positionStamp);
  5482. COPY_SEQ_STAMP(pPacket->updateStamp, pObj->updateStamp);
  5483. COPY_SEQ_STAMP(pPacket->replaceStamp, pObj->replaceStamp);
  5484. //
  5485. // ...and queue the message:
  5486. //
  5487. rc = QueueMessage(pomPrimary->putTask,
  5488. pWSGroup->pDomain,
  5489. lateJoiner,
  5490. catchupPriority,
  5491. pWSGroup,
  5492. pWorkset,
  5493. NULL, // no object
  5494. (POMNET_PKT_HEADER) pPacket,
  5495. pData,
  5496. TRUE);
  5497. if (rc != 0)
  5498. {
  5499. DC_QUIT;
  5500. }
  5501. //
  5502. // Now go around the loop again:
  5503. //
  5504. pObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObj,
  5505. FIELD_OFFSET(OM_OBJECT, chain));
  5506. }
  5507. }
  5508. //
  5509. // Now that we know the max sequence number used by this user ID in
  5510. // this workset group, we can set the field in the SEND_MIDWAY packet:
  5511. //
  5512. // NOTE: because the ObMan task is single threaded (in the DC_ABSence of
  5513. // assertion failure which cause a sort of multithreading while
  5514. // the assert box is up) it is safe to alter this value AFTER the
  5515. // message has been queued because we know that the queue will
  5516. // not have been serviced yet.
  5517. //
  5518. pSendMidwayPkt->maxObjIDSeqUsed = maxSeqUsed;
  5519. //
  5520. // Now we send the OMNET_SEND_COMPLETE message. First, allocate some
  5521. // memory...
  5522. //
  5523. pSendCompletePkt = (POMNET_WSGROUP_SEND_PKT)UT_MallocRefCount(sizeof(OMNET_WSGROUP_SEND_PKT), TRUE);
  5524. if (!pSendCompletePkt)
  5525. {
  5526. rc = UT_RC_NO_MEM;
  5527. DC_QUIT;
  5528. }
  5529. //
  5530. // ...fill in the fields...
  5531. //
  5532. pSendCompletePkt->header.sender = pDomain->userID;
  5533. pSendCompletePkt->header.messageType = OMNET_WSGROUP_SEND_COMPLETE;
  5534. pSendCompletePkt->wsGroupID = pWSGroup->wsGroupID;
  5535. pSendCompletePkt->correlator = remoteCorrelator;
  5536. //
  5537. // ...and queue the message for sending (it musn't overtake any of the
  5538. // data so send it at all priorities):
  5539. //
  5540. TRACE_OUT(( "Sending WSG_SEND_COMPLETE message, correlator %hu",
  5541. remoteCorrelator));
  5542. rc = QueueMessage(pomPrimary->putTask,
  5543. pWSGroup->pDomain,
  5544. lateJoiner,
  5545. NET_LOW_PRIORITY | NET_SEND_ALL_PRIORITIES,
  5546. pWSGroup,
  5547. NULL, // no workset
  5548. NULL, // no object
  5549. (POMNET_PKT_HEADER) pSendCompletePkt,
  5550. NULL, // no object data
  5551. TRUE);
  5552. if (rc != 0)
  5553. {
  5554. DC_QUIT;
  5555. }
  5556. TRACE_OUT(( "Processed send request from node 0x%08x for WSG %d",
  5557. lateJoiner, pWSGroup->wsg));
  5558. DC_EXIT_POINT:
  5559. if (rc != 0)
  5560. {
  5561. //
  5562. // An error occurred. We must issue a SEND_DENY message to the
  5563. // remote node.
  5564. //
  5565. ERROR_OUT(( "Error %d sending WSG %d to node 0x%08x",
  5566. rc, pWSGroup->wsg, lateJoiner));
  5567. IssueSendDeny(pomPrimary,
  5568. pDomain,
  5569. pWSGroup->wsGroupID,
  5570. lateJoiner,
  5571. remoteCorrelator);
  5572. }
  5573. DebugExitVOID(SendWSGToLateJoiner);
  5574. }
  5575. //
  5576. // ProcessSendMidway(...)
  5577. //
  5578. void ProcessSendMidway
  5579. (
  5580. POM_PRIMARY pomPrimary,
  5581. POM_DOMAIN pDomain,
  5582. POMNET_WSGROUP_SEND_PKT pSendMidwayPkt
  5583. )
  5584. {
  5585. POM_WORKSET pOMCWorkset;
  5586. POM_WSGROUP_REG_CB pRegistrationCB = NULL;
  5587. POM_WSGROUP pWSGroup;
  5588. BOOL fSetPersonData;
  5589. NET_UID sender;
  5590. POM_OBJECT pObjReg;
  5591. UINT rc = 0;
  5592. DebugEntry(ProcessSendMidway);
  5593. sender = pSendMidwayPkt->header.sender;
  5594. //
  5595. // OK, this is an message indicating that the helper node has sent us
  5596. // all the WORKSET_CATCHUPs in the workset group we're catching up with
  5597. // (but note that the objects haven't yet been sent).
  5598. //
  5599. // So, search the list of pending registrations using the correlator
  5600. // value in the packet (we can't use the workset group ID since if it
  5601. // is zero i.e. ObManControl, we'll match on workset groups which
  5602. // haven't yet had their IDs determined (since they are initially
  5603. // zero).
  5604. //
  5605. if (pSendMidwayPkt->wsGroupID == WSGROUPID_OMC)
  5606. {
  5607. //
  5608. // This is a SEND_MIDWAY message for ObManControl.
  5609. //
  5610. pWSGroup = GetOMCWsgroup(pDomain);
  5611. fSetPersonData = FALSE;
  5612. }
  5613. else
  5614. {
  5615. //
  5616. // Not for ObManControl so we search the list of pending
  5617. // registrations.
  5618. //
  5619. pRegistrationCB = (POM_WSGROUP_REG_CB)COM_BasedListFirst(&(pDomain->pendingRegs),
  5620. FIELD_OFFSET(OM_WSGROUP_REG_CB, chain));
  5621. while ((pRegistrationCB != NULL) && (pRegistrationCB->pWSGroup->wsGroupID != pSendMidwayPkt->wsGroupID))
  5622. {
  5623. pRegistrationCB = (POM_WSGROUP_REG_CB)COM_BasedListNext(&(pDomain->pendingRegs),
  5624. pRegistrationCB, FIELD_OFFSET(OM_WSGROUP_REG_CB, chain));
  5625. }
  5626. if (pRegistrationCB == NULL)
  5627. {
  5628. WARNING_OUT(( "Unexpected SEND_MIDWAY for WSG %hu from 0x%08x",
  5629. pSendMidwayPkt->wsGroupID, sender));
  5630. DC_QUIT;
  5631. }
  5632. pWSGroup = pRegistrationCB->pWSGroup;
  5633. fSetPersonData = TRUE;
  5634. }
  5635. if (NULL == pWSGroup)
  5636. {
  5637. TRACE_OUT(( "NULL pWSGroup" ));
  5638. DC_QUIT;
  5639. }
  5640. if (!pWSGroup->valid)
  5641. {
  5642. WARNING_OUT(( "Recd SEND_MIDWAY too late for WSG %d (marked invalid)",
  5643. pWSGroup->wsg));
  5644. DC_QUIT;
  5645. }
  5646. //
  5647. // We should be in the PENDING_SEND_MIDWAY state:
  5648. //
  5649. if (pWSGroup->state != PENDING_SEND_MIDWAY)
  5650. {
  5651. WARNING_OUT(( "Recd SEND_MIDWAY with WSG %d in state %hu",
  5652. pWSGroup->wsg, pWSGroup->state));
  5653. DC_QUIT;
  5654. }
  5655. //
  5656. // SFR 7124. Check the correlator of this SEND_MIDWAY against the
  5657. // correlator we generated locally when we sent the last SEND_REQUEST.
  5658. // If they dont match, this is part of an out of date catchup which we
  5659. // can ignore.
  5660. //
  5661. if (pSendMidwayPkt->correlator != pWSGroup->catchupCorrelator)
  5662. {
  5663. WARNING_OUT(("Ignoring SEND_MIDWAY with old correlator %hu (expecting %hu)",
  5664. pSendMidwayPkt->correlator, pWSGroup->catchupCorrelator));
  5665. DC_QUIT;
  5666. }
  5667. //
  5668. // We should get four of these messages, one at each priority (except
  5669. // in a backlevel call when we only get one). Check how many are
  5670. // outstanding:
  5671. //
  5672. pWSGroup->sendMidwCount--;
  5673. if (pWSGroup->sendMidwCount != 0)
  5674. {
  5675. TRACE_OUT(( "Still need %hu SEND_MIDWAY(s) for WSG %d",
  5676. pWSGroup->sendMidwCount, pWSGroup->wsg));
  5677. DC_QUIT;
  5678. }
  5679. TRACE_OUT(( "Last SEND_MIDWAY for WSG %d, ID %hu, from 0x%08x",
  5680. pWSGroup->wsg, pWSGroup->wsGroupID, sender));
  5681. //
  5682. // Set up pointers to the ObManControl workset which holds the reg
  5683. // objects for the workset group we've just registered with:
  5684. //
  5685. pOMCWorkset = GetOMCWorkset(pDomain, pWSGroup->wsGroupID);
  5686. //
  5687. // If we don't have an associated OMC workset, something's wrong...
  5688. //
  5689. if (pOMCWorkset == NULL)
  5690. {
  5691. //
  5692. // ...unless it's ObManControl itself that we're catching up with -
  5693. // since we can get its SEND_MIDWAY before we've got any of the
  5694. // WORKSET_CATCHUPs:
  5695. //
  5696. if (pWSGroup->wsGroupID != WSGROUPID_OMC)
  5697. {
  5698. ERROR_OUT(( "Got SEND_MIDWAY for unknown workset group %hu!",
  5699. pWSGroup->wsGroupID));
  5700. }
  5701. DC_QUIT;
  5702. }
  5703. //
  5704. // Convert the ID of our reg object (as sent by our helper who added it
  5705. // in the first place) to an object handle:
  5706. //
  5707. rc = ObjectIDToPtr(pOMCWorkset, pSendMidwayPkt->objectID, &pObjReg);
  5708. if (rc != 0)
  5709. {
  5710. DC_QUIT;
  5711. }
  5712. //
  5713. // If we haven't yet stored a reg object handle for this workset
  5714. // group...
  5715. //
  5716. if (pWSGroup->pObjReg == NULL)
  5717. {
  5718. //
  5719. // ...store it now...
  5720. //
  5721. pWSGroup->pObjReg = pObjReg;
  5722. }
  5723. //
  5724. // ...but if we have...
  5725. //
  5726. else // pWSGroup->pObjReg != NULL
  5727. {
  5728. //
  5729. // ...and if it's a different one, something's wrong:
  5730. //
  5731. if (pWSGroup->pObjReg != pObjReg)
  5732. {
  5733. WARNING_OUT(( "Recd SEND_MIDWAY from node 0x%08x claiming our reg object "
  5734. "for WSG %d is 0x%08x but we think it's 0x%08x",
  5735. sender, pWSGroup->wsg, pObjReg,pWSGroup->pObjReg));
  5736. }
  5737. }
  5738. //
  5739. // OK, if we've passed all the above tests then everything is normal,
  5740. // so proceed:
  5741. //
  5742. pWSGroup->state = PENDING_SEND_COMPLETE;
  5743. if (pSendMidwayPkt->maxObjIDSeqUsed > pomPrimary->objectIDsequence)
  5744. {
  5745. TRACE_OUT(( "We've already used ID sequence numbers up to %u for "
  5746. "this workset group - setting global sequence count to this value",
  5747. pSendMidwayPkt->objectID.sequence));
  5748. pomPrimary->objectIDsequence = pSendMidwayPkt->objectID.sequence;
  5749. }
  5750. //
  5751. // Our registration object (added by the remote node) should have
  5752. // arrived by now. We need to add the FE/person data to it (unless
  5753. // this is for ObManControl, in which case there won't be any):
  5754. //
  5755. if (fSetPersonData)
  5756. {
  5757. rc = SetPersonData(pomPrimary, pDomain, pWSGroup);
  5758. if (rc != 0)
  5759. {
  5760. DC_QUIT;
  5761. }
  5762. }
  5763. //
  5764. // Now post the successful REGISTER_CON event back to the Client, if we
  5765. // found a reg CB above:
  5766. //
  5767. if (pRegistrationCB != NULL)
  5768. {
  5769. WSGRegisterResult(pomPrimary, pRegistrationCB, 0);
  5770. }
  5771. DC_EXIT_POINT:
  5772. DebugExitVOID(ProcessSendMidway);
  5773. }
  5774. //
  5775. // ProcessSendComplete(...)
  5776. //
  5777. UINT ProcessSendComplete
  5778. (
  5779. POM_PRIMARY pomPrimary,
  5780. POM_DOMAIN pDomain,
  5781. POMNET_WSGROUP_SEND_PKT pSendCompletePkt
  5782. )
  5783. {
  5784. POM_WSGROUP pWSGroup;
  5785. NET_UID sender;
  5786. UINT rc = 0;
  5787. DebugEntry(ProcessSendComplete);
  5788. //
  5789. // We are now "fully-caught-up" and so are eligible to be helpers
  5790. // ourselves, i.e. if someone wants to ask us for the workset group,
  5791. // we will be able to send them a copy.
  5792. //
  5793. sender = pSendCompletePkt->header.sender;
  5794. //
  5795. // First, we find the workset group the message relates to:
  5796. //
  5797. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
  5798. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  5799. FIELD_OFFSET(OM_WSGROUP, wsGroupID),
  5800. (DWORD)pSendCompletePkt->wsGroupID,
  5801. FIELD_SIZE(OM_WSGROUP, wsGroupID));
  5802. if (pWSGroup == NULL)
  5803. {
  5804. //
  5805. // This will happen just after we have deregistered from a WSGroup
  5806. //
  5807. WARNING_OUT(( "Unexpected SEND_COMPLETE (ID %hu) from node 0x%08x",
  5808. pSendCompletePkt->wsGroupID, sender));
  5809. DC_QUIT;
  5810. }
  5811. if (!pWSGroup->valid)
  5812. {
  5813. //
  5814. // This will happen while we are in the process of deregistering
  5815. // from a workset group.
  5816. //
  5817. WARNING_OUT(( "Recd SEND_COMPLETE too late for WSG %d (marked invalid)",
  5818. pWSGroup->wsg));
  5819. DC_QUIT;
  5820. }
  5821. //
  5822. // Check it has come from the correct node and that we are in an
  5823. // appropriate state to receive it.
  5824. //
  5825. // The correct state is either PENDING_SEND_COMPLETE or
  5826. // PENDING_SEND_MIDWAY (we can receive SEND_COMPLETEs in
  5827. // PENDING_SEND_MIDWAY state because of MCS packet reordering).
  5828. //
  5829. if (pSendCompletePkt->header.sender != pWSGroup->helperNode)
  5830. {
  5831. //
  5832. // This will happen if we get a late SEND_COMPLETE after we have
  5833. // decided to catch up from someone else - don't think this should
  5834. // happen!
  5835. //
  5836. // lonchanc: this actually happened in bug #1554.
  5837. // Changed ERROR_OUT to WARNING_OUT
  5838. WARNING_OUT(( "Got SEND_COMPLETE from 0x%08x for WSG %d but helper is 0x%08x",
  5839. sender, pWSGroup->wsg, pWSGroup->helperNode));
  5840. DC_QUIT;
  5841. }
  5842. if ((pWSGroup->state != PENDING_SEND_MIDWAY)
  5843. &&
  5844. (pWSGroup->state != PENDING_SEND_COMPLETE))
  5845. {
  5846. WARNING_OUT(( "Got SEND_COMPLETE for WSG %d from 0x%08x in bad state %hu",
  5847. pWSGroup->wsg, sender, pWSGroup->state));
  5848. DC_QUIT;
  5849. }
  5850. //
  5851. // SFR 7124. Check the correlator of this SEND_COMPLETE against the
  5852. // correlator we generated locally when we sent the last SEND_REQUEST.
  5853. // If they dont match, this is part of an out of date catchup which we
  5854. // can ignore.
  5855. //
  5856. if (pSendCompletePkt->correlator != pWSGroup->catchupCorrelator)
  5857. {
  5858. WARNING_OUT((
  5859. "Ignoring SEND_COMPLETE with old correlator %hu (expecting %hu)",
  5860. pSendCompletePkt->correlator, pWSGroup->catchupCorrelator));
  5861. DC_QUIT;
  5862. }
  5863. //
  5864. // We should get four of these messages, one at each priority (except
  5865. // in a backlevel call when we only get one). Check how many are
  5866. // outstanding:
  5867. //
  5868. pWSGroup->sendCompCount--;
  5869. if (pWSGroup->sendCompCount != 0)
  5870. {
  5871. TRACE_OUT(( "Still need %hu SEND_COMPLETE(s) for WSG %d obj 0x%08x",
  5872. pWSGroup->sendCompCount, pWSGroup->wsg,
  5873. pWSGroup->pObjReg));
  5874. DC_QUIT;
  5875. }
  5876. //
  5877. // If so, we announce that we are registered:
  5878. //
  5879. TRACE_OUT(( "Last SEND_COMPLETE for WSG %d, ID %hu, from 0x%08x obj 0x%08x",
  5880. pWSGroup->wsg, pWSGroup->wsGroupID, sender,
  5881. pWSGroup->pObjReg));
  5882. rc = RegAnnounceComplete(pomPrimary, pDomain, pWSGroup);
  5883. if (rc != 0)
  5884. {
  5885. DC_QUIT;
  5886. }
  5887. //
  5888. // In addition to the above, if this send-completion message is for the
  5889. // ObManControl workset group we must also set the Domain state:
  5890. //
  5891. if (pSendCompletePkt->wsGroupID == WSGROUPID_OMC)
  5892. {
  5893. //
  5894. // If this message relates to the ObManControl workset group, its
  5895. // arrival signifies that we have completed the Domain attach
  5896. // process, and are now free to continue the processing of the
  5897. // workset group registration attempt which prompted the attach in
  5898. // the first place.
  5899. //
  5900. // The way we "continue" is to set the Domain state to
  5901. // DOMAIN_READY, so that next time the delayed-and-retried
  5902. // OMINT_EVENT_WSGROUP_REGISTER event arrives, it will actually be
  5903. // processed rather than bounced again.
  5904. //
  5905. TRACE_OUT(( "ObManControl fully arrived for Domain %u - inhibiting token",
  5906. pDomain->callID));
  5907. rc = MG_TokenInhibit(pomPrimary->pmgClient,
  5908. pDomain->tokenID);
  5909. if (rc != 0)
  5910. {
  5911. DC_QUIT;
  5912. }
  5913. pDomain->state = PENDING_TOKEN_INHIBIT;
  5914. }
  5915. DC_EXIT_POINT:
  5916. if (rc != 0)
  5917. {
  5918. ERROR_OUT(( "Error %d processing SEND_COMPLETE for WSG %u:%hu",
  5919. rc, pDomain->callID, pSendCompletePkt->wsGroupID));
  5920. }
  5921. DebugExitDWORD(ProcessSendComplete, rc);
  5922. return(rc);
  5923. }
  5924. //
  5925. // RegAnnounceBegin(...)
  5926. //
  5927. UINT RegAnnounceBegin
  5928. (
  5929. POM_PRIMARY pomPrimary,
  5930. POM_DOMAIN pDomain,
  5931. POM_WSGROUP pWSGroup,
  5932. NET_UID nodeID,
  5933. POM_OBJECT * ppObjReg
  5934. )
  5935. {
  5936. POM_WSGROUP pOMCWSGroup;
  5937. POM_WORKSET pOMCWorkset;
  5938. POM_WSGROUP_REG_REC pRegObject = NULL;
  5939. OM_OBJECT_ID regObjectID;
  5940. UINT updateSize;
  5941. UINT rc = 0;
  5942. DebugEntry(RegAnnounceBegin);
  5943. //
  5944. // Trace out who this reg object is for:
  5945. //
  5946. if (nodeID == pDomain->userID)
  5947. {
  5948. TRACE_OUT(("Announcing start of our reg with WSG %d in Domain %u",
  5949. pWSGroup->wsg, pDomain->callID));
  5950. }
  5951. else
  5952. {
  5953. TRACE_OUT(( "Announcing start of reg with WSG %d in Domain %u for node 0x%08x",
  5954. pWSGroup->wsg, pDomain->callID, nodeID));
  5955. }
  5956. //
  5957. // To announce the fact that a node has registered with a workset group,
  5958. // we add a registration object to the relevant workset in ObManControl.
  5959. //
  5960. //
  5961. // The "relevant" ObManControl workset is that whose ID is the same as
  5962. // the ID of the workset group. To add an object to this workset, we
  5963. // will need pointers to the workset itself and to the ObManControl
  5964. // workset group:
  5965. //
  5966. pOMCWSGroup = GetOMCWsgroup(pDomain);
  5967. if( pOMCWSGroup == NULL)
  5968. {
  5969. TRACE_OUT(("pOMCWSGroup not found"));
  5970. DC_QUIT;
  5971. }
  5972. pOMCWorkset = pOMCWSGroup->apWorksets[pWSGroup->wsGroupID];
  5973. //
  5974. // If the ObManControl workset group is not transferred correctly, this
  5975. // assertion may fail:
  5976. //
  5977. ASSERT((pOMCWorkset != NULL));
  5978. //
  5979. // Now, alloc some memory for the registration record object...
  5980. //
  5981. pRegObject = (POM_WSGROUP_REG_REC)UT_MallocRefCount(sizeof(OM_WSGROUP_REG_REC), TRUE);
  5982. if (!pRegObject)
  5983. {
  5984. rc = UT_RC_NO_MEM;
  5985. DC_QUIT;
  5986. }
  5987. //
  5988. // ...set its fields...
  5989. //
  5990. pRegObject->length = sizeof(OM_WSGROUP_REG_REC) -
  5991. sizeof(OM_MAX_OBJECT_SIZE); // == 4
  5992. pRegObject->idStamp = OM_WSGREGREC_ID_STAMP;
  5993. pRegObject->userID = nodeID;
  5994. pRegObject->status = CATCHING_UP;
  5995. //
  5996. // ...determine the update size, which is meant to be all fields in the
  5997. // REG_REC object except the CPI stuff. We also subtract the size of
  5998. // the <length> field because of the way object update sizes are
  5999. // defined.
  6000. //
  6001. updateSize = (sizeof(OM_WSGROUP_REG_REC) - sizeof(TSHR_PERSON_DATA)) -
  6002. sizeof(OM_MAX_OBJECT_SIZE);
  6003. //
  6004. // ...and add it to the workset:
  6005. //
  6006. rc = ObjectAdd(pomPrimary->putTask,
  6007. pomPrimary,
  6008. pOMCWSGroup,
  6009. pOMCWorkset,
  6010. (POM_OBJECTDATA) pRegObject,
  6011. updateSize,
  6012. FIRST,
  6013. &regObjectID,
  6014. ppObjReg);
  6015. if (rc != 0)
  6016. {
  6017. DC_QUIT;
  6018. }
  6019. //
  6020. // Done!
  6021. //
  6022. TRACE_OUT(( "Added reg object for WSG %d to workset %u in OMC "
  6023. "(handle: 0x%08x, ID: 0x%08x:0x%08x)",
  6024. pWSGroup->wsg, pOMCWorkset->worksetID,
  6025. *ppObjReg, regObjectID.creator, regObjectID.sequence));
  6026. DC_EXIT_POINT:
  6027. if (rc != 0)
  6028. {
  6029. ERROR_OUT(( "Error %d adding registration object for WSG %d to "
  6030. "workset %u in ObManControl",
  6031. rc, pWSGroup->wsg, pOMCWorkset->worksetID));
  6032. }
  6033. DebugExitDWORD(RegAnnounceBegin, rc);
  6034. return(rc);
  6035. }
  6036. //
  6037. // RegAnnounceComplete(...)
  6038. //
  6039. UINT RegAnnounceComplete
  6040. (
  6041. POM_PRIMARY pomPrimary,
  6042. POM_DOMAIN pDomain,
  6043. POM_WSGROUP pWSGroup
  6044. )
  6045. {
  6046. POM_WSGROUP pOMCWSGroup;
  6047. POM_WORKSET pOMCWorkset;
  6048. POM_OBJECT pObjReg;
  6049. POM_WSGROUP_REG_REC pRegObject;
  6050. POM_WSGROUP_REG_REC pNewRegObject;
  6051. UINT updateSize;
  6052. UINT rc = 0;
  6053. DebugEntry(RegAnnounceComplete);
  6054. TRACE_OUT(("Announcing completion of reg for WSG %d", pWSGroup->wsg));
  6055. //
  6056. // Set up pointers to the ObManControl workset group and the workset
  6057. // within it which holds the reg objects for the workset group we've
  6058. // just registered with:
  6059. //
  6060. pOMCWSGroup = GetOMCWsgroup(pDomain);
  6061. if( pOMCWSGroup == NULL)
  6062. {
  6063. TRACE_OUT(("pOMCWSGroup not found"));
  6064. DC_QUIT;
  6065. }
  6066. pOMCWorkset = pOMCWSGroup->apWorksets[pWSGroup->wsGroupID];
  6067. //
  6068. // Set up pointers to the object record and the object data itself:
  6069. //
  6070. pObjReg = pWSGroup->pObjReg;
  6071. ValidateObject(pObjReg);
  6072. if ((pObjReg->flags & DELETED) || !pObjReg->pData)
  6073. {
  6074. ERROR_OUT(("RegAnnounceComplete: object 0x%08x is deleted or has no data", pObjReg));
  6075. rc = OM_RC_OBJECT_DELETED;
  6076. DC_QUIT;
  6077. }
  6078. pRegObject = (POM_WSGROUP_REG_REC)pObjReg->pData;
  6079. ValidateObjectDataWSGREGREC(pRegObject);
  6080. ASSERT(pRegObject->status == CATCHING_UP);
  6081. //
  6082. // Allocate some memory for the new object with which we are about to
  6083. // replace the old one:
  6084. //
  6085. updateSize = sizeof(OM_WSGROUP_REG_REC) - sizeof(TSHR_PERSON_DATA);
  6086. pNewRegObject = (POM_WSGROUP_REG_REC)UT_MallocRefCount(updateSize, FALSE);
  6087. if (!pNewRegObject)
  6088. {
  6089. rc = UT_RC_NO_MEM;
  6090. DC_QUIT;
  6091. }
  6092. //
  6093. // Copy the start of the old object into the new one:
  6094. //
  6095. memcpy(pNewRegObject, pRegObject, updateSize);
  6096. //
  6097. // Update the status field and also set the length field to be the
  6098. // length of the object we just allocated (since this is the number of
  6099. // bytes we are updating):
  6100. //
  6101. pNewRegObject->length = updateSize - sizeof(OM_MAX_OBJECT_SIZE);
  6102. pNewRegObject->status = READY_TO_SEND;
  6103. //
  6104. // Issue the update:
  6105. //
  6106. rc = ObjectDRU(pomPrimary->putTask,
  6107. pOMCWSGroup,
  6108. pOMCWorkset,
  6109. pObjReg,
  6110. (POM_OBJECTDATA) pNewRegObject,
  6111. OMNET_OBJECT_UPDATE);
  6112. if (rc != 0)
  6113. {
  6114. DC_QUIT;
  6115. }
  6116. TRACE_OUT(( "Updated status in own reg object for WSG %d to READY_TO_SEND",
  6117. pWSGroup->wsg));
  6118. //
  6119. // Set the workset group state, to ensure that the reg/info objects get
  6120. // deleted when we deregister.
  6121. //
  6122. pWSGroup->state = WSGROUP_READY;
  6123. DC_EXIT_POINT:
  6124. if (rc != 0)
  6125. {
  6126. ERROR_OUT(( "Error %d updating own reg object for WSG %d",
  6127. rc, pWSGroup->wsg));
  6128. }
  6129. DebugExitDWORD(RegAnnounceComplete, rc);
  6130. return(rc);
  6131. }
  6132. //
  6133. // MaybeRetryCatchUp(...)
  6134. //
  6135. void MaybeRetryCatchUp
  6136. (
  6137. POM_PRIMARY pomPrimary,
  6138. POM_DOMAIN pDomain,
  6139. OM_WSGROUP_ID wsGroupID,
  6140. NET_UID userID
  6141. )
  6142. {
  6143. POM_WSGROUP pWSGroup;
  6144. POM_WSGROUP_REG_CB pRegistrationCB;
  6145. DebugEntry(MaybeRetryCatchUp);
  6146. //
  6147. // This function is called on receipt of a DETACH indication from MCS
  6148. // or a SEND_DENY message from another node. We check the workset
  6149. // group identified and see if we were trying to catch up from the
  6150. // departed node.
  6151. //
  6152. // If we do find a match (on the helperNode), then what we do depends
  6153. // on the state of the workset group:
  6154. //
  6155. // - PENDING_SEND_MIDWAY : Retry the registration from the top.
  6156. //
  6157. // - PENDING_SEND_COMPLETE : Just repeat the catchup.
  6158. //
  6159. //
  6160. // Find the workset group:
  6161. //
  6162. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
  6163. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  6164. FIELD_OFFSET(OM_WSGROUP, wsGroupID), (DWORD)wsGroupID,
  6165. FIELD_SIZE(OM_WSGROUP, wsGroupID));
  6166. if (pWSGroup == NULL)
  6167. {
  6168. TRACE_OUT(( "No record found for WSG ID %hu", wsGroupID));
  6169. DC_QUIT;
  6170. }
  6171. //
  6172. // Compare the helperNode stored in the workset group and the userID of
  6173. // the node who has either detached or sent us a SEND_DENY message. If
  6174. // they do not match then we have nothing further to do.
  6175. //
  6176. if (pWSGroup->helperNode != userID)
  6177. {
  6178. DC_QUIT;
  6179. }
  6180. TRACE_OUT(( "Node 0x%08x was our helper node for WSG %d, in state %hu",
  6181. userID, pWSGroup->wsg, pWSGroup->state));
  6182. //
  6183. // We need to retry the registration - check the current state to find
  6184. // out how much we need to do.
  6185. //
  6186. switch (pWSGroup->state)
  6187. {
  6188. case PENDING_SEND_MIDWAY:
  6189. {
  6190. //
  6191. // First check if this is for ObManControl:
  6192. //
  6193. if (pWSGroup->wsGroupID == WSGROUPID_OMC)
  6194. {
  6195. //
  6196. // It is, so we need to retry the domain attach process.
  6197. // We do this by grabbing the ObMan token and resetting the
  6198. // domain state; when the GRAB_CONFIRM event arrives, we
  6199. // will rejoin the domain attach process at the correct
  6200. // point.
  6201. //
  6202. if (MG_TokenGrab(pomPrimary->pmgClient,
  6203. pDomain->tokenID) != 0)
  6204. {
  6205. ERROR_OUT(( "Failed to grab token"));
  6206. DC_QUIT;
  6207. }
  6208. pDomain->state = PENDING_TOKEN_GRAB;
  6209. }
  6210. else
  6211. {
  6212. //
  6213. // Not ObManControl, so there will be a registration CB -
  6214. // find it...
  6215. //
  6216. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->pendingRegs),
  6217. (void**)&pRegistrationCB, FIELD_OFFSET(OM_WSGROUP_REG_CB, chain),
  6218. FIELD_OFFSET(OM_WSGROUP_REG_CB, pWSGroup),
  6219. (DWORD_PTR)pWSGroup, FIELD_SIZE(OM_WSGROUP_REG_CB, pWSGroup));
  6220. if (pRegistrationCB == NULL)
  6221. {
  6222. ERROR_OUT(( "No reg CB found for WSG %d in state %hu!",
  6223. pWSGroup->wsg, PENDING_SEND_MIDWAY));
  6224. DC_QUIT;
  6225. }
  6226. //
  6227. // ...and retry the registation:
  6228. //
  6229. WSGRegisterRetry(pomPrimary, pRegistrationCB);
  6230. }
  6231. }
  6232. break;
  6233. case PENDING_SEND_COMPLETE:
  6234. {
  6235. //
  6236. // Retry the object catchup. There is no point in trying to
  6237. // find the registration CB as it will have been disposed of as
  6238. // soon as we entered the PENDING_SEND_COMPLETE state.
  6239. //
  6240. if (WSGCatchUp(pomPrimary, pDomain, pWSGroup) != 0)
  6241. //
  6242. // If there are no nodes ready to provide us with the catchup
  6243. // information then we are in a state where everyone either
  6244. // does not have the workset group or is catching up the
  6245. // workset group.
  6246. //
  6247. // MD 21/11/95
  6248. //
  6249. // For now pretend that all is well (it's not!) and go into the
  6250. // READY_TO_SEND state - potentially causing ObMan to become
  6251. // inconsistent.
  6252. {
  6253. RegAnnounceComplete(pomPrimary, pDomain, pWSGroup);
  6254. }
  6255. }
  6256. break;
  6257. }
  6258. DC_EXIT_POINT:
  6259. DebugExitVOID(MaybeRetryCatchUp);
  6260. }
  6261. //
  6262. //
  6263. //
  6264. // WSGRegisterRetry(...)
  6265. //
  6266. //
  6267. //
  6268. void WSGRegisterRetry(POM_PRIMARY pomPrimary,
  6269. POM_WSGROUP_REG_CB pRegistrationCB)
  6270. {
  6271. POM_DOMAIN pDomain;
  6272. POM_WSGROUP pWSGroup;
  6273. UINT rc = 0;
  6274. DebugEntry(WSGRegisterRetry);
  6275. //
  6276. // Set up pointers
  6277. //
  6278. pWSGroup = pRegistrationCB->pWSGroup;
  6279. pDomain = pRegistrationCB->pDomain;
  6280. //
  6281. // If we've got ObManControl locked for THIS registration, unlock it:
  6282. //
  6283. MaybeUnlockObManControl(pomPrimary, pRegistrationCB);
  6284. //
  6285. // If we have joined a channel (so the channelID is non-zero) then
  6286. // leave it.
  6287. //
  6288. if (pWSGroup->channelID != 0)
  6289. {
  6290. TRACE_OUT(( "Leaving channel %hu", pWSGroup->channelID));
  6291. MG_ChannelLeave(pomPrimary->pmgClient,
  6292. pWSGroup->channelID);
  6293. PurgeReceiveCBs(pRegistrationCB->pDomain,
  6294. pWSGroup->channelID);
  6295. //
  6296. // Set the channelID to zero now that we have left it.
  6297. //
  6298. pWSGroup->channelID = 0;
  6299. }
  6300. //
  6301. // Set the workset group state to INITIAL.
  6302. //
  6303. pWSGroup->state = INITIAL;
  6304. //
  6305. // We examine the retry count. If it's zero, we call WSGRegisterResult
  6306. // to indicate failure. Otherwise, we repost the event with a delay
  6307. // and a decremented retry value.
  6308. //
  6309. if (pRegistrationCB->retryCount == 0)
  6310. {
  6311. WARNING_OUT(( "Aborting registration for WSG %d",
  6312. pRegistrationCB->wsg));
  6313. WSGRegisterResult(pomPrimary, pRegistrationCB, OM_RC_TIMED_OUT);
  6314. }
  6315. else
  6316. {
  6317. //
  6318. // Since we're about to post a message referencing the Reg CB, bump
  6319. // the use count:
  6320. //
  6321. UT_BumpUpRefCount(pRegistrationCB);
  6322. TRACE_OUT(( "Retrying %d for WSG %d; retries left: %u",
  6323. pRegistrationCB->type,
  6324. pRegistrationCB->wsg,
  6325. pRegistrationCB->retryCount));
  6326. pRegistrationCB->retryCount--;
  6327. UT_PostEvent(pomPrimary->putTask,
  6328. pomPrimary->putTask,
  6329. OM_REGISTER_RETRY_DELAY_DFLT,
  6330. OMINT_EVENT_WSGROUP_REGISTER_CONT,
  6331. 0,
  6332. (UINT_PTR) pRegistrationCB);
  6333. }
  6334. DebugExitVOID(WSGRegisterRetry);
  6335. }
  6336. //
  6337. //
  6338. //
  6339. // WSGRegisterResult(...)
  6340. //
  6341. //
  6342. //
  6343. void WSGRegisterResult(POM_PRIMARY pomPrimary,
  6344. POM_WSGROUP_REG_CB pRegistrationCB,
  6345. UINT result)
  6346. {
  6347. POM_WSGROUP pWSGroup;
  6348. POM_DOMAIN pDomain;
  6349. POM_WORKSET pOMCWorkset;
  6350. OM_EVENT_DATA16 eventData16;
  6351. OM_EVENT_DATA32 eventData32;
  6352. UINT type;
  6353. UINT event = 0;
  6354. DebugEntry(WSGRegisterResult);
  6355. //
  6356. // Assert that this is a valid registration CB (which it DC_ABSolutely
  6357. // MUST be, since this function gets called synchronously by some other
  6358. // function which should have validated the CB):
  6359. //
  6360. ASSERT(pRegistrationCB->valid);
  6361. //
  6362. // If we've still got ObManControl locked for THIS registration, unlock
  6363. // it:
  6364. //
  6365. MaybeUnlockObManControl(pomPrimary, pRegistrationCB);
  6366. //
  6367. // Determine whether we're doing a REGISTER or a MOVE (we use the
  6368. // string values for tracing):
  6369. //
  6370. type = pRegistrationCB->type;
  6371. switch (type)
  6372. {
  6373. case WSGROUP_REGISTER:
  6374. event = OM_WSGROUP_REGISTER_CON;
  6375. break;
  6376. case WSGROUP_MOVE:
  6377. event = OM_WSGROUP_MOVE_CON;
  6378. break;
  6379. default:
  6380. ERROR_OUT(("Reached default case in switch statement (value: %hu)", event));
  6381. }
  6382. //
  6383. // Here, we set up pointer to workset group.
  6384. //
  6385. // NOTE: This field in the structure might be NULL, if we have had to
  6386. // abort the registration very early. Therefore, do not use
  6387. // pWSGroup without checking it first!!!
  6388. //
  6389. pWSGroup = pRegistrationCB->pWSGroup;
  6390. if (pWSGroup)
  6391. {
  6392. ValidateWSGroup(pWSGroup);
  6393. }
  6394. //
  6395. // Trace if this registration has failed:
  6396. //
  6397. if (result != 0)
  6398. {
  6399. //
  6400. // pWSGroup might be NULL if we aborted the registration before we
  6401. // got around to creating it in ProcessWSGRegister (pre-Stage1).
  6402. // So, do a quick check and use a -1 value for the state if it's
  6403. // NULL. In either case pick up the name from the reg CB:
  6404. //
  6405. WARNING_OUT(( "%d failed for WSG %d (reason: 0x%08x, WSG state: %u)",
  6406. type, pRegistrationCB->wsg, result,
  6407. pWSGroup == NULL ? -1 : (UINT)pWSGroup->state));
  6408. //
  6409. // If a MOVE fails, then the workset group continues to exist in
  6410. // the old domain - so set the state back to WSGROUP_READY:
  6411. //
  6412. if ((type == WSGROUP_MOVE) && (pWSGroup != NULL))
  6413. {
  6414. pWSGroup->state = WSGROUP_READY;
  6415. }
  6416. }
  6417. else
  6418. {
  6419. //
  6420. // If the registration succeeded, pWSGroup must be OK:
  6421. //
  6422. ASSERT((pWSGroup != NULL));
  6423. ASSERT(((pWSGroup->state == WSGROUP_READY) ||
  6424. (pWSGroup->state == PENDING_SEND_COMPLETE)));
  6425. TRACE_OUT(( "%d succeeded for WSG %d (now in state %hu)",
  6426. type, pRegistrationCB->wsg, pWSGroup->state));
  6427. }
  6428. //
  6429. // Fill in the event parameters and post the result to the Client:
  6430. //
  6431. eventData16.hWSGroup = pRegistrationCB->hWSGroup;
  6432. eventData16.worksetID = 0;
  6433. eventData32.correlator = pRegistrationCB->correlator;
  6434. eventData32.result = (WORD)result;
  6435. UT_PostEvent(pomPrimary->putTask,
  6436. pRegistrationCB->putTask,
  6437. 0,
  6438. event,
  6439. *(PUINT) &eventData16,
  6440. *(LPUINT) &eventData32);
  6441. //
  6442. // If the operation was successful, we also post some more events:
  6443. //
  6444. if (result == 0)
  6445. {
  6446. if (type == WSGROUP_REGISTER)
  6447. {
  6448. //
  6449. // If this is a REGISTER, we post WORKSET_NEW events to the
  6450. // Client for all existing worksets:
  6451. //
  6452. PostWorksetNewEvents(pomPrimary->putTask,
  6453. pRegistrationCB->putTask,
  6454. pWSGroup,
  6455. pRegistrationCB->hWSGroup);
  6456. //
  6457. // We also need to generate PERSON_JOINED events - these are
  6458. // generated automatically by the ObMan task on receipt of the
  6459. // respective OBJECT_ADD events, but only once the registration
  6460. // has completed. So, fake ADD events for any objects that may
  6461. // exist already:
  6462. //
  6463. pDomain = pWSGroup->pDomain;
  6464. pOMCWorkset = GetOMCWorkset(pDomain, pWSGroup->wsGroupID);
  6465. PostAddEvents(pomPrimary->putTask,
  6466. pOMCWorkset,
  6467. pDomain->omchWSGroup,
  6468. pomPrimary->putTask);
  6469. }
  6470. }
  6471. //
  6472. // If we mananged to bump up the use counts of the Domain record and
  6473. // workset group, free them now:
  6474. //
  6475. if (pRegistrationCB->flags & BUMPED_CBS)
  6476. {
  6477. ASSERT((pWSGroup != NULL));
  6478. UT_FreeRefCount((void**)&(pRegistrationCB->pWSGroup), FALSE);
  6479. UT_FreeRefCount((void**)&(pRegistrationCB->pDomain), FALSE);
  6480. }
  6481. //
  6482. // Dispose of the registration CB - it has served us well!
  6483. //
  6484. pRegistrationCB->valid = FALSE;
  6485. TRACE_OUT(( "Finished %d attempt for WSG %d: result = 0x%08x",
  6486. type, pRegistrationCB->wsg, result));
  6487. COM_BasedListRemove(&(pRegistrationCB->chain));
  6488. UT_FreeRefCount((void**)&pRegistrationCB, FALSE);
  6489. DebugExitVOID(WSGRegisterResult);
  6490. }
  6491. //
  6492. // WSGMove(...)
  6493. //
  6494. UINT WSGMove
  6495. (
  6496. POM_PRIMARY pomPrimary,
  6497. POM_DOMAIN pDestDomainRec,
  6498. POM_WSGROUP pWSGroup
  6499. )
  6500. {
  6501. UINT rc = 0;
  6502. DebugEntry(WSGMove);
  6503. //
  6504. // Now move the record into the new Domain record (this also removes
  6505. // the workset group and its reg object from the old Domain)
  6506. //
  6507. WSGRecordMove(pomPrimary, pDestDomainRec, pWSGroup);
  6508. //
  6509. // There is a problem with the way we deal with moving workset groups
  6510. // into the local Domain at call-end: if there is already a workset
  6511. // group of the same name/FP in the local Domain, we get a name clash,
  6512. // which the rest of the ObMan code does not expect. This can cause
  6513. // ObMan to get very confused when the workset group is eventually
  6514. // discarded from the local Domain, since it tries to throw away the
  6515. // wrong WSG_INFO object from workset #0 in ObManControl in the local
  6516. // Domain.
  6517. //
  6518. // In R1.1, this name clash will only ever happen with the ObManControl
  6519. // workset group itself, because of the way the apps use workset groups
  6520. // (i.e. they never register with one in a call AND one in the local
  6521. // Domain simultaneously). Therefore, we make our lives easier by NOT
  6522. // fully moving the ObManControl workset group into the local Domain at
  6523. // call end.
  6524. //
  6525. // Note however that it is OK (required, in fact) to move the workset
  6526. // group record into the list for the local Domain - the problem arises
  6527. // when we try to set it up in the local ObManControl (which we need to
  6528. // do for application workset groups so that they can continue to use
  6529. // person data objects etc.)
  6530. //
  6531. // So, if the workset group name matches ObManControl, skip the rest of
  6532. // this function:
  6533. //
  6534. if (pWSGroup->wsg == OMWSG_OM)
  6535. {
  6536. TRACE_OUT(("Not registering ObManControl in Domain %u (to avoid clash)",
  6537. pDestDomainRec->callID));
  6538. DC_QUIT;
  6539. }
  6540. //
  6541. // Reset the channel ID to zero:
  6542. //
  6543. pWSGroup->channelID = 0;
  6544. //
  6545. // Assign a new ID for this workset group:
  6546. //
  6547. rc = WSGGetNewID(pomPrimary, pDestDomainRec, &(pWSGroup->wsGroupID));
  6548. if (rc != 0)
  6549. {
  6550. DC_QUIT;
  6551. }
  6552. TRACE_OUT(( "Workset group ID for WSG %d in Domain %u is %hu",
  6553. pWSGroup->wsg, pDestDomainRec->callID, pWSGroup->wsGroupID));
  6554. //
  6555. // Now call CreateAnnounce to add a WSG_INFO object to workset #0 in
  6556. // ObManControl. There may be a name clash, but we don't mind in this
  6557. // case because we've been forced to do the move because of a call end:
  6558. //
  6559. rc = CreateAnnounce(pomPrimary, pDestDomainRec, pWSGroup);
  6560. if (rc != 0)
  6561. {
  6562. DC_QUIT;
  6563. }
  6564. //
  6565. // Now add the reg object:
  6566. //
  6567. rc = RegAnnounceBegin(pomPrimary,
  6568. pDestDomainRec,
  6569. pWSGroup,
  6570. pDestDomainRec->userID,
  6571. &(pWSGroup->pObjReg));
  6572. if (rc != 0)
  6573. {
  6574. DC_QUIT;
  6575. }
  6576. //
  6577. // Add the FE data back in:
  6578. //
  6579. rc = SetPersonData(pomPrimary, pDestDomainRec, pWSGroup);
  6580. if (rc != 0)
  6581. {
  6582. DC_QUIT;
  6583. }
  6584. //
  6585. // And update the object, just as if we were registering with it:
  6586. //
  6587. rc = RegAnnounceComplete(pomPrimary, pDestDomainRec, pWSGroup);
  6588. if (rc != 0)
  6589. {
  6590. DC_QUIT;
  6591. }
  6592. DC_EXIT_POINT:
  6593. if (rc != 0)
  6594. {
  6595. ERROR_OUT(( "Error %d moving WSG %d into Domain %u",
  6596. rc, pWSGroup->wsg, pDestDomainRec->callID));
  6597. }
  6598. DebugExitDWORD(WSGMove, rc);
  6599. return(rc);
  6600. }
  6601. //
  6602. // WSGRecordMove(...)
  6603. //
  6604. void WSGRecordMove
  6605. (
  6606. POM_PRIMARY pomPrimary,
  6607. POM_DOMAIN pDestDomainRec,
  6608. POM_WSGROUP pWSGroup
  6609. )
  6610. {
  6611. POM_DOMAIN pOldDomainRec;
  6612. DebugEntry(WSGRecordMove);
  6613. //
  6614. // Find the record for the Domain the workset group is currently in:
  6615. //
  6616. pOldDomainRec = pWSGroup->pDomain;
  6617. ASSERT(pOldDomainRec->valid);
  6618. DeregisterLocalClient(pomPrimary, &pOldDomainRec, pWSGroup, FALSE);
  6619. //
  6620. // Insert it into the destination Domain:
  6621. //
  6622. TRACE_OUT(("Inserting WSG %d' into list for Domain %u",
  6623. pWSGroup->wsg, pDestDomainRec->callID));
  6624. COM_BasedListInsertBefore(&(pDestDomainRec->wsGroups),
  6625. &(pWSGroup->chain));
  6626. //
  6627. // SFR : reset the pending data ack byte counts:
  6628. //
  6629. WSGResetBytesUnacked(pWSGroup);
  6630. //
  6631. // The workset group now belongs to this new Domain, so set it so.
  6632. //
  6633. pWSGroup->pDomain = pDestDomainRec;
  6634. //
  6635. // Finally, post the MOVE_IND event to all Clients registered with the
  6636. // workset group:
  6637. //
  6638. WSGroupEventPost(pomPrimary->putTask,
  6639. pWSGroup,
  6640. PRIMARY | SECONDARY,
  6641. OM_WSGROUP_MOVE_IND,
  6642. 0, // no workset
  6643. pDestDomainRec->callID);
  6644. DebugExitVOID(WSGRecordMove);
  6645. }
  6646. //
  6647. // WSGResetBytesUnacked(...)
  6648. //
  6649. void WSGResetBytesUnacked
  6650. (
  6651. POM_WSGROUP pWSGroup
  6652. )
  6653. {
  6654. OM_WORKSET_ID worksetID;
  6655. POM_WORKSET pWorkset;
  6656. DebugEntry(WSGResetBytesUnacked);
  6657. //
  6658. // Reset workset group's unacked byte count:
  6659. //
  6660. pWSGroup->bytesUnacked = 0;
  6661. //
  6662. // Now do it for each workset in the workset group:
  6663. //
  6664. for (worksetID = 0;
  6665. worksetID < OM_MAX_WORKSETS_PER_WSGROUP;
  6666. worksetID++)
  6667. {
  6668. pWorkset = pWSGroup->apWorksets[worksetID];
  6669. if (pWorkset != NULL)
  6670. {
  6671. pWorkset->bytesUnacked = 0;
  6672. }
  6673. }
  6674. DebugExitVOID(WSGResetBytesUnacked);
  6675. }
  6676. //
  6677. //
  6678. //
  6679. // ProcessWSGDiscard(...)
  6680. //
  6681. //
  6682. //
  6683. void ProcessWSGDiscard
  6684. (
  6685. POM_PRIMARY pomPrimary,
  6686. POM_WSGROUP pWSGroup
  6687. )
  6688. {
  6689. POM_DOMAIN pDomain;
  6690. DebugEntry(ProcessWSGDiscard);
  6691. ASSERT(!pWSGroup->valid);
  6692. //
  6693. // Now get pointer to Domain record:
  6694. //
  6695. pDomain = pWSGroup->pDomain;
  6696. //
  6697. // If the TO_BE_DISCARDED flag has been cleared since the DISCARD event
  6698. // was posted, we abort the discard process (this will happen when
  6699. // someone local has registered with the workset since it was marked
  6700. // TO_BE_DISCARDED).
  6701. //
  6702. if (!pWSGroup->toBeDiscarded)
  6703. {
  6704. WARNING_OUT(( "Throwing away DISCARD event since WSG %d no longer TO_BE_DISCARDED",
  6705. pWSGroup->wsg));
  6706. DC_QUIT;
  6707. }
  6708. //
  6709. // Otherwise, we can go ahead and discard it:
  6710. //
  6711. WSGDiscard(pomPrimary, pDomain, pWSGroup, FALSE);
  6712. DC_EXIT_POINT:
  6713. DebugExitVOID(ProcessWSGDiscard);
  6714. }
  6715. //
  6716. // WSGDiscard(...)
  6717. //
  6718. void WSGDiscard
  6719. (
  6720. POM_PRIMARY pomPrimary,
  6721. POM_DOMAIN pDomain,
  6722. POM_WSGROUP pWSGroup,
  6723. BOOL fExit
  6724. )
  6725. {
  6726. POM_WORKSET pWorkset;
  6727. OM_WORKSET_ID worksetID;
  6728. DebugEntry(WSGDiscard);
  6729. TRACE_OUT(( "Discarding WSG %d from Domain %u",
  6730. pWSGroup->wsg, pDomain->callID));
  6731. //
  6732. // We only ever discard a workset group when nobody's registered with
  6733. // it, so check:
  6734. //
  6735. ASSERT(COM_BasedListFirst(&(pWSGroup->clients), FIELD_OFFSET(OM_CLIENT_LIST, chain)) == NULL);
  6736. //
  6737. // "Discarding" a workset group involves
  6738. //
  6739. // - calling DeregisterLocalClient to remove our person object, leave
  6740. // the channel, remove the workset group from our domain list etc.
  6741. //
  6742. // - discarding each of the worksets in the workset group
  6743. //
  6744. // - freeing the workset group record (which will have been removed
  6745. // from the list hung off the Domain record by
  6746. // DeregisterLocalClient).
  6747. //
  6748. DeregisterLocalClient(pomPrimary, &pDomain, pWSGroup, fExit);
  6749. //
  6750. // Now discard each workset in use:
  6751. //
  6752. for (worksetID = 0;
  6753. worksetID < OM_MAX_WORKSETS_PER_WSGROUP;
  6754. worksetID++)
  6755. {
  6756. pWorkset = pWSGroup->apWorksets[worksetID];
  6757. if (pWorkset != NULL)
  6758. {
  6759. WorksetDiscard(pWSGroup, &pWorkset, fExit);
  6760. }
  6761. }
  6762. //
  6763. // Discard the checkpointing dummy workset:
  6764. //
  6765. pWorkset = pWSGroup->apWorksets[OM_CHECKPOINT_WORKSET];
  6766. ASSERT((pWorkset != NULL));
  6767. WorksetDiscard(pWSGroup, &pWorkset, fExit);
  6768. //
  6769. // Free the workset group record (it will have been removed from the
  6770. // domain's list by DeregisterLocalClient, above):
  6771. //
  6772. UT_FreeRefCount((void**)&pWSGroup, FALSE);
  6773. DebugExitVOID(WSGDiscard);
  6774. }
  6775. //
  6776. // DeregisterLocalClient(...)
  6777. //
//
// DeregisterLocalClient(...)
//
// Unwinds the local registration of <pWSGroup> from the domain <*ppDomain>:
// removes our person object, leaves the workset group's channel, purges
// pending lock requests and receives, and unhooks the workset group from
// the domain's list.  If this leaves the domain with no workset groups at
// all, the function detaches from the domain entirely, which NULLs the
// caller's domain pointer.
//
// pomPrimary - ObMan primary task data
// ppDomain   - IN/OUT pointer to the domain record; set to NULL by
//              DomainDetach if the domain is discarded
// pWSGroup   - workset group being deregistered (NOT freed here; the
//              caller - e.g. WSGDiscard - owns the record)
// fExit      - TRUE when called during task exit; network calls such as
//              MG_ChannelLeave are skipped in that case
//
void DeregisterLocalClient
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN* ppDomain,
    POM_WSGROUP pWSGroup,
    BOOL fExit
)
{
    POM_DOMAIN pDomain;
    UINT callID;

    DebugEntry(DeregisterLocalClient);

    // Take local copies up front: ppDomain may be NULLed before we trace.
    pDomain = *ppDomain;
    callID = pDomain->callID;

    TRACE_OUT(("Removing WSG %d from Domain %u - state is currently %hu",
        pWSGroup->wsg, callID, pWSGroup->state));

    //
    // Removing a workset group from a Domain involves
    //
    // - deleting the registration object from the relevant registration
    //   workset in ObManControl, if we put one there earlier
    //
    // - calling WSGDiscard if there is no one left in the Domain who
    //   is registered with the workset group
    //
    // - leaving the relevant channel
    //
    // - removing the workset group from the list hung off the Domain
    //   record
    //
    // We will skip some of these unwinding stages, depending on how far we
    // got in the registration process.  We use a switch statement with NO
    // BREAKS to determine our "entry point" into the unwinding.
    //
    // When we've done all that, we check to see if we are now no longer
    // registered with any workset groups in this Domain.  If not, we
    // detach from the Domain.
    //
    switch (pWSGroup->state)
    {
        case WSGROUP_READY:
        case PENDING_SEND_COMPLETE:
        case PENDING_SEND_MIDWAY:
        {
            //
            // SFR 5913: Purge any outstanding lock requests for the
            // workset group.
            //
            PurgeLockRequests(pDomain, pWSGroup);

            //
            // Search for and remove our person object, if we have one:
            //
            RemovePersonObject(pomPrimary,
                               pDomain,
                               pWSGroup->wsGroupID,
                               pDomain->userID);

            // The registration object is gone, so drop our cached pointer.
            pWSGroup->pObjReg = NULL;

            //
            // If we joined a channel for this workset group, leave it:
            //
            if (pWSGroup->channelID != 0)
            {
                TRACE_OUT(( "Leaving channel %hu", pWSGroup->channelID));

                // No point leaving the channel if the whole task is exiting.
                if (!fExit)
                {
                    MG_ChannelLeave(pomPrimary->pmgClient, pWSGroup->channelID);
                }

                //
                // Purge any outstanding receives on this channel:
                //
                PurgeReceiveCBs(pDomain, pWSGroup->channelID);
            }
        }
        // NO BREAK - fall through to next case

        case PENDING_JOIN:
        case LOCKING_OMC:
        case INITIAL:
        {
            //
            // If we didn't get as far as PENDING_SEND_MIDWAY then there's
            // very little unwinding to do.  This bit removes the workset
            // group from the Domain's list:
            //
            TRACE_OUT(( "Removing workset group record from list"));
            COM_BasedListRemove(&(pWSGroup->chain));

            //
            // We set the channel ID to zero here because even if we never
            // succeeded in joining the channel, the field will contain the
            // channel CORRELATOR returned to us by MG_ChannelJoin
            //
            pWSGroup->channelID = 0;

            //
            // Since the workset group is no longer associated with any
            // Domain, NULL it out.
            //
            pWSGroup->pDomain = NULL;
        }
        break;

        default:
        {
            // Unexpected registration state - log it, but still fall out
            // to the empty-domain check below.
            ERROR_OUT(( "Default case in switch (value: %hu)",
                pWSGroup->state));
        }
    }

    //
    // If this was the last workset group in the domain...
    //
    if (COM_BasedListIsEmpty(&(pDomain->wsGroups)))
    {
        //
        // ...we should detach:
        //
        // Note: this will only happen when the workset group we have just
        // removed is the ObManControl workset group, so assert:
        //
        if (!fExit)
        {
            ASSERT(pWSGroup->wsg == OMWSG_OM);
        }

        //
        // Since ObMan no longer needs this workset group, we remove it
        // from the list of registered Clients:
        //
        RemoveClientFromWSGList(pomPrimary->putTask,
                                pomPrimary->putTask,
                                pWSGroup);

        TRACE_OUT(( "No longer using any wsGroups in domain %u - detaching",
            callID));

        //
        // This will NULL the caller's pointer:
        //
        DomainDetach(pomPrimary, ppDomain, fExit);
    }

    DebugExitVOID(DeregisterLocalClient);
}
  6912. //
  6913. // WorksetDiscard(...)
  6914. //
//
// WorksetDiscard(...)
//
// Frees every object in the workset (data and record), frees every entry
// in the workset's client list, clears the workset's slot in the owning
// workset group's apWorksets array, and finally frees the workset itself,
// NULLing the caller's pointer.
//
// pWSGroup  - workset group the workset belongs to
// ppWorkset - IN/OUT pointer to the workset; NULLed by the final
//             UT_FreeRefCount
// fExit     - passed by callers during shutdown; note it is not examined
//             here, the discard is unconditional
//
void WorksetDiscard
(
    POM_WSGROUP pWSGroup,
    POM_WORKSET * ppWorkset,
    BOOL fExit
)
{
    POM_OBJECT pObj;
    POM_OBJECT pObjTemp;
    POM_WORKSET pWorkset;
    POM_CLIENT_LIST pClient;

    DebugEntry(WorksetDiscard);

    //
    // Set up local pointer:
    //
    pWorkset = *ppWorkset;

    //
    // The code here is similar to that in WorksetDoClear, but in this case
    // we discard ALL objects, irrespective of the sequence stamps.
    //
    // In addition, WorksetDoClear doesn't cause the object records to be
    // freed - it only marks them as deleted - whereas we actually free them
    // up.
    //
    TRACE_OUT(( "Discarding all objects in workset %u in WSG %d",
        pWorkset->worksetID, pWSGroup->wsg));

    CheckObjectCount(pWSGroup, pWorkset);

    pObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
    while (pObj != NULL)
    {
        ValidateObject(pObj);

        // Capture the next record before freeing the current one - we are
        // deleting while iterating.
        pObjTemp = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObj,
            FIELD_OFFSET(OM_OBJECT, chain));

        //
        // If the object (data) hasn't yet been deleted, do it now:
        //
        if (!(pObj->flags & DELETED))
        {
            if (!pObj->pData)
            {
                // A live (non-DELETED) object should always have data.
                ERROR_OUT(("WorksetDiscard: object 0x%08x has no data", pObj));
            }
            else
            {
                ValidateObjectData(pObj->pData);
                UT_FreeRefCount((void**)&pObj->pData, FALSE);
            }

            // Only undeleted objects are included in numObjects, so only
            // decrement for those.
            pWorkset->numObjects--;
        }

        //
        // Now remove the object record itself from the list and free it
        // (UT_FreeRefCount takes &pObj, presumably NULLing our local
        // pointer - which is why the successor was saved above):
        //
        TRACE_OUT(( "Freeing pObj at 0x%08x", pObj));
        COM_BasedListRemove(&(pObj->chain));
        UT_FreeRefCount((void**)&pObj, FALSE);

        pObj = pObjTemp;
    }

    CheckObjectCount(pWSGroup, pWorkset);
    ASSERT(pWorkset->numObjects == 0);

    //
    // Mark the slot in workset offset array (hung off the workset group
    // record) as empty:
    //
    pWSGroup->apWorksets[pWorkset->worksetID] = NULL;

    //
    // Free the clients
    //
    while (pClient = (POM_CLIENT_LIST)COM_BasedListFirst(&(pWorkset->clients),
            FIELD_OFFSET(OM_CLIENT_LIST, chain)))
    {
        TRACE_OUT(("WorksetDiscard: Freeing client 0x%08x workset 0x%08x",
            pClient, pWorkset));
        COM_BasedListRemove(&(pClient->chain));
        UT_FreeRefCount((void**)&pClient, FALSE);
    }

    //
    // Now discard the chunk holding the workset, setting the caller's
    // pointer to NULL:
    //
    TRACE_OUT(( "Discarded workset %u in WSG %d",
        pWorkset->worksetID, pWSGroup->wsg));
    UT_FreeRefCount((void**)ppWorkset, FALSE);

    DebugExitVOID(WorksetDiscard);
}
  7000. //
  7001. // ProcessOMCObjectEvents(...)
  7002. //
//
// ProcessOMCObjectEvents(...)
//
// Handles an object event (add/update/delete/...) on an ObManControl
// registration workset, translating it into person events for local
// clients and keeping our own person/registration object bookkeeping in
// step.
//
// pomPrimary - ObMan primary task data
// event      - the OM_OBJECT_*_IND event being processed
// hWSGroup   - ObManControl workset group handle; used to find the domain
// worksetID  - control workset ID (equal to the ID of the workset group
//              the event relates to)
// pObj       - the object the event refers to; whatever this points to at
//              DC_EXIT_POINT has its ref count dropped, so every path that
//              keeps an object alive must bump it first
//
void ProcessOMCObjectEvents
(
    POM_PRIMARY pomPrimary,
    UINT event,
    OM_WSGROUP_HANDLE hWSGroup,
    OM_WORKSET_ID worksetID,
    POM_OBJECT pObj
)
{
    POM_DOMAIN pDomain;
    POM_WSGROUP pOMCWSGroup;
    POM_WORKSET pOMCWorkset;
    POM_WSGROUP pWSGroup;
    POM_OBJECT pObjOld;
    POM_WSGROUP_REG_REC pPersonObject;

    DebugEntry(ProcessOMCObjectEvents);

    //
    // In this function, we do the following:
    //
    // - find the domain and workset group this event belongs to
    //
    // - if we have a local client to whom we might be interested in
    //   posting a person data event, call GeneratePersonEvents
    //
    // - if this is an object add for a person data object which has our
    //   user ID in it, store the handle in the workset group record unless
    //   we're not expecting the person object, in which case delete it
    //
    // - if this is an object deleted indication for a person data object
    //   then we count the number of remaining person objects for the
    //   workset group.  If it is zero then we remove the info object.
    //

    //
    // To find the domain, we search the list of active domains, looking up
    // the hWSGroup parameter against the omchWSGroup field:
    //
    COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
        (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
        FIELD_OFFSET(OM_DOMAIN, omchWSGroup), (DWORD)hWSGroup,
        FIELD_SIZE(OM_DOMAIN, omchWSGroup));
    if (pDomain == NULL)
    {
        //
        // This should only happen at call end time.
        //
        TRACE_OUT(( "No domain with omchWSGroup %u - has call just ended?", hWSGroup));
        DC_QUIT;
    }

    //
    // To find the workset group, we use the fact that the ID of the
    // control workset (for which we have just received the event) is the
    // same as the ID of the workset group to which it relates.  So, do a
    // lookup on this ID:
    //
    COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
        (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
        FIELD_OFFSET(OM_WSGROUP, wsGroupID), (DWORD)worksetID,
        FIELD_SIZE(OM_WSGROUP, wsGroupID));

    //
    // SFR 5593: Changed comparison to PENDING_SEND_MIDWAY from
    // WSGROUP_READY to ensure that late joiners get the person add events.
    //
    if ((pWSGroup != NULL) && (pWSGroup->state > PENDING_SEND_MIDWAY))
    {
        //
        // This means that a local client has fully registered with the
        // workset group, so we're in a position maybe translate the event
        // to a person event:
        //
        TRACE_OUT(( "Recd event 0x%08x for person object 0x%08x (for WSG %d in state %hu)",
            event, pObj, pWSGroup->wsg, pWSGroup->state));
        GeneratePersonEvents(pomPrimary, event, pWSGroup, pObj);
    }

    //
    // Now, if this event is an ADD event for an object which
    //
    // - has not been deleted
    // - is a person object (i.e. has an OM_WSGREGREC_ID_STAMP stamp)
    // - contains our user ID (i.e. is _our_ person object)
    //
    // then we do one of the following:
    //
    // - if the workset group exists get a handle to the old person object
    //   and delete it.  Then store the handle of the new person object in
    //   the workset group record.
    // - if the workset group does not exist then delete the person object.
    //
    // This fixes SFRs 2745 and 2592 which are caused by person objects
    // getting left hanging around in some start/stop race scenarios.
    //
    ValidateObject(pObj);
    if ((event == OM_OBJECT_ADD_IND) && !(pObj->flags & DELETED))
    {
        pPersonObject = (POM_WSGROUP_REG_REC)pObj->pData;
        if (!pPersonObject)
        {
            ERROR_OUT(("ProcessOMCObjectEvents: object 0x%08x has no data", pObj));
        }

        if (pPersonObject &&
            (pPersonObject->idStamp == OM_WSGREGREC_ID_STAMP) &&
            (pPersonObject->userID == pDomain->userID))
        {
            ValidateObjectData(pObj->pData);

            pOMCWSGroup = GetOMCWsgroup(pDomain);
            if (pOMCWSGroup == NULL)
            {
                // lonchanc: ignore left-over events due to race condition
                DC_QUIT;
            }
            pOMCWorkset = pOMCWSGroup->apWorksets[worksetID];

            if (pWSGroup != NULL)
            {
                if ((pWSGroup->pObjReg != NULL) &&
                    (pWSGroup->pObjReg != pObj))
                {
                    //
                    // This object replaces an earlier one we had, so...
                    //
                    WARNING_OUT(( "Deleting old person object 0x%08x for WSG %d, "
                        "since person object 0x%08x has just arrived",
                        pWSGroup->pObjReg,
                        pWSGroup->wsg,
                        pObj));

                    //
                    // ...set up a pointer to the _old_ object record...
                    //
                    pObjOld = pWSGroup->pObjReg;

                    //
                    // ...and delete it:
                    //
                    ObjectDRU(pomPrimary->putTask,
                              pOMCWSGroup,
                              pOMCWorkset,
                              pObjOld,
                              NULL,
                              OMNET_OBJECT_DELETE);
                }

                // Remember the new person object for this workset group.
                pWSGroup->pObjReg = pObj;
            }
            else
            {
                //
                // We've deregistered from the workset group - delete the
                // object:
                //
                TRACE_OUT(( "Deleting reg object 0x%08x since WSG ID %hu not found",
                    pObj, worksetID));
                ObjectDRU(pomPrimary->putTask,
                          pOMCWSGroup,
                          pOMCWorkset,
                          pObj,
                          NULL,
                          OMNET_OBJECT_DELETE);
            }
        }
        else
        {
            //
            // Not our person object - do nothing.
            //
        }

        //
        // Finished so quit out.
        //
        DC_QUIT;
    }

    //
    // Now, if this event is a DELETED event then we check to see if anyone
    // is still using the workset group.  If not then we remove the info
    // object.
    //
    if (event == OM_OBJECT_DELETED_IND)
    {
        //
        // We need to check the number of person objects left in this
        // ObMan control workset if it is not workset zero.  If there are
        // no person objects left then remove any orphaned INFO objects.
        //
        pOMCWSGroup = GetOMCWsgroup(pDomain);
        if (pOMCWSGroup == NULL)
        {
            // lonchanc: ignore left-over events due to race condition
            DC_QUIT;
        }
        pOMCWorkset = pOMCWSGroup->apWorksets[worksetID];
        if (pOMCWorkset == NULL)
        {
            // lonchanc: ignore left-over events due to race condition
            DC_QUIT;
        }

        if ((pOMCWorkset->numObjects == 0) &&
            (worksetID != 0))
        {
            TRACE_OUT(( "Workset %hu has no person objects - deleting INFO object",
                worksetID));
            RemoveInfoObject(pomPrimary, pDomain, worksetID);
        }

        //
        // A person object has been removed and as we are potentially in
        // the middle of a workset group catchup from this person we may
        // need to retry the catchup.
        //
        // We search through all the workset groups looking for WSGs that
        // are in the PENDING_SEND_MIDWAY or PENDING_SEND_COMPLETE state
        // (i.e. in catchup state).  If they are we then search to ensure
        // that the person object for them still exists.  If it doesn't
        // then we need to retry the catchup.
        //
        // NOTE(review): the lookups below repeat the ones just done above
        // with the same arguments - presumably harmless, kept as-is.
        //
        pOMCWSGroup = GetOMCWsgroup(pDomain);
        if (pOMCWSGroup == NULL)
        {
            // lonchanc: ignore left-over events due to race condition
            DC_QUIT;
        }
        pOMCWorkset = pOMCWSGroup->apWorksets[worksetID];
        if (pOMCWorkset == NULL)
        {
            // lonchanc: ignore left-over events due to race condition
            DC_QUIT;
        }

        pWSGroup = (POM_WSGROUP)COM_BasedListFirst(&(pDomain->wsGroups),
            FIELD_OFFSET(OM_WSGROUP, chain));
        while (pWSGroup != NULL)
        {
            //
            // Check the WSG state to see if we are in the middle of a
            // catchup.
            //
            if ((PENDING_SEND_MIDWAY == pWSGroup->state) ||
                (PENDING_SEND_COMPLETE == pWSGroup->state))
            {
                //
                // We are in the middle of a catchup so we need to check
                // to see that the person object for the person that we
                // are catching up from has not been deleted.
                //
                // NOTE: this overwrites the pObj parameter; the ref drop
                // at DC_EXIT_POINT below then applies to the object found
                // here, which is why the ref count is bumped when one is
                // found.
                //
                FindPersonObject(pOMCWorkset,
                                 pWSGroup->helperNode,
                                 FIND_THIS,
                                 &pObj);

                //
                // Check the person handle.
                //
                if (NULL == pObj)
                {
                    TRACE_OUT(("Person object removed for WSG %d - retrying"
                        " catchup",
                        pWSGroup->wsg));

                    //
                    // Force MaybeRetryCatchUp to retry the catchup by
                    // passing the helper node ID that is stored in the
                    // workset.
                    //
                    MaybeRetryCatchUp(pomPrimary,
                                      pDomain,
                                      pWSGroup->wsGroupID,
                                      pWSGroup->helperNode);
                }
                else
                {
                    UT_BumpUpRefCount(pObj);
                }
            }

            //
            // Get the next WSG.
            //
            pWSGroup = (POM_WSGROUP)COM_BasedListNext(&(pDomain->wsGroups), pWSGroup,
                FIELD_OFFSET(OM_WSGROUP, chain));
        }
    }

DC_EXIT_POINT:
    // Drop the reference on whatever pObj currently refers to (the
    // original parameter, or the last object found in the loop above).
    if (pObj)
    {
        UT_FreeRefCount((void**)&pObj, FALSE);
    }

    DebugExitVOID(ProcessOMCObjectEvents);
}
  7280. //
  7281. // GeneratePersonEvents(...)
  7282. //
  7283. void GeneratePersonEvents
  7284. (
  7285. POM_PRIMARY pomPrimary,
  7286. UINT event,
  7287. POM_WSGROUP pWSGroup,
  7288. POM_OBJECT pObj
  7289. )
  7290. {
  7291. POM_WSGROUP_REG_REC pPersonObject;
  7292. UINT newEvent = 0;
  7293. DebugEntry(GeneratePersonEvents);
  7294. //
  7295. // OK, to get here we must have determined that a local client has
  7296. // registered with the workset group. Now proceed to examine the event
  7297. // and generate an appropriate person event for the client:
  7298. //
  7299. switch (event)
  7300. {
  7301. case OM_OBJECT_ADD_IND:
  7302. case OM_OBJECT_UPDATED_IND:
  7303. {
  7304. ValidateObject(pObj);
  7305. if (pObj->flags & DELETED)
  7306. {
  7307. //
  7308. // The object has been deleted already! We can't check its
  7309. // state so just quit:
  7310. //
  7311. DC_QUIT;
  7312. }
  7313. if (!pObj->pData)
  7314. {
  7315. ERROR_OUT(("GeneratePersonEvents: object 0x%08x has no data", pObj));
  7316. DC_QUIT;
  7317. }
  7318. //
  7319. // We're only interested in person objects, so if it's anything
  7320. // else, quit:
  7321. //
  7322. ValidateObjectData(pObj->pData);
  7323. pPersonObject = (POM_WSGROUP_REG_REC)pObj->pData;
  7324. if (pPersonObject->idStamp != OM_WSGREGREC_ID_STAMP)
  7325. {
  7326. DC_QUIT;
  7327. }
  7328. //
  7329. // Translate to a PERSON_JOINED event, provided the person data
  7330. // has actually arrived. We determine this by reading the
  7331. // object and checking the <status> in it:
  7332. //
  7333. if (pPersonObject->status == READY_TO_SEND)
  7334. {
  7335. newEvent = OM_PERSON_JOINED_IND;
  7336. }
  7337. }
  7338. break;
  7339. case OM_OBJECT_DELETED_IND:
  7340. {
  7341. //
  7342. // This means that someone has left the call
  7343. //
  7344. newEvent = OM_PERSON_LEFT_IND;
  7345. }
  7346. break;
  7347. case OM_OBJECT_REPLACED_IND:
  7348. {
  7349. //
  7350. // This means someone has done a SetPersonData:
  7351. //
  7352. newEvent = OM_PERSON_DATA_CHANGED_IND;
  7353. }
  7354. break;
  7355. }
  7356. //
  7357. // If there is any translating to be done, newEvent will now be
  7358. // non-zero:
  7359. //
  7360. if (newEvent != 0)
  7361. {
  7362. WSGroupEventPost(pomPrimary->putTask,
  7363. pWSGroup,
  7364. PRIMARY,
  7365. newEvent,
  7366. 0,
  7367. (UINT_PTR)pObj);
  7368. }
  7369. DC_EXIT_POINT:
  7370. DebugExitVOID(GeneratePersonEvents);
  7371. }
  7372. //
  7373. // ProcessOMCWorksetNew(...)
  7374. //
  7375. void ProcessOMCWorksetNew
  7376. (
  7377. POM_PRIMARY pomPrimary,
  7378. OM_WSGROUP_HANDLE hWSGroup,
  7379. OM_WORKSET_ID worksetID
  7380. )
  7381. {
  7382. POM_DOMAIN pDomain;
  7383. POM_WORKSET pOMCWorkset;
  7384. POM_CLIENT_LIST pClientListEntry;
  7385. DebugEntry(ProcessOMCWorksetNew);
  7386. //
  7387. // The ObMan task generates person data events for its clients when the
  7388. // contents of the relevant control workset changes. We therefore add
  7389. // ObMan to this new control workset's list of "clients" and post it
  7390. // events for any objects already there:
  7391. //
  7392. // NOTE: We specify that ObMan should be considered a SECONDARY "client"
  7393. // of this workset so that it is not required to confirm delete
  7394. // events etc.
  7395. //
  7396. TRACE_OUT(( "Recd WORKSET_NEW for workset %u, WSG %u",
  7397. worksetID, hWSGroup));
  7398. //
  7399. // Look up the domain record based on the workset group handle:
  7400. //
  7401. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
  7402. (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
  7403. FIELD_OFFSET(OM_DOMAIN, omchWSGroup), (DWORD)hWSGroup,
  7404. FIELD_SIZE(OM_DOMAIN, omchWSGroup));
  7405. if (pDomain == NULL)
  7406. {
  7407. WARNING_OUT(( "No domain record found with omchWSGroup %d",
  7408. hWSGroup));
  7409. DC_QUIT;
  7410. }
  7411. pOMCWorkset = GetOMCWorkset(pDomain, worksetID);
  7412. ASSERT((pOMCWorkset != NULL));
  7413. if (AddClientToWsetList(pomPrimary->putTask,
  7414. pOMCWorkset,
  7415. hWSGroup,
  7416. SECONDARY,
  7417. &pClientListEntry) != 0)
  7418. {
  7419. DC_QUIT;
  7420. }
  7421. TRACE_OUT(( "Added ObMan as secondary client for workset"));
  7422. PostAddEvents(pomPrimary->putTask, pOMCWorkset, hWSGroup, pomPrimary->putTask);
  7423. DC_EXIT_POINT:
  7424. DebugExitVOID(ProcessOMCWorksetNew);
  7425. }
  7426. //
  7427. // ProcessSendQueue()
  7428. //
//
// ProcessSendQueue()
//
// Drains the domain's per-priority send queues, highest priority first,
// handing each queued send instruction to SendMessagePkt.  Stops early if
// a send fails (SendMessagePkt returns non-zero) or the domain has been
// discarded.
//
// pomPrimary      - ObMan primary task data
// pDomain         - domain whose queues are processed
// domainRecBumped - TRUE if the caller bumped the domain record's ref
//                   count before posting this event; if so it is released
//                   here on every exit path
//
void ProcessSendQueue
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    BOOL domainRecBumped
)
{
    POM_SEND_INST pSendInst;
    NET_PRIORITY priority;

    DebugEntry(ProcessSendQueue);

    //
    // Check the Domain record is still valid:
    //
    if (!pDomain->valid)
    {
        TRACE_OUT(( "Got OMINT_EVENT_SEND_QUEUE too late for discarded Domain %u",
            pDomain->callID));
        DC_QUIT;
    }

    //
    // Check that there is supposed to be a send event outstanding:
    //
    if (pDomain->sendEventOutstanding)
    {
        //
        // Although there might still be a send event outstanding (e.g. a
        // FEEDBACK event) we can't be sure (unless we count them as we
        // generate them).  It's vital that we never leave the send queue
        // unprocessed, so to be safe we clear the flag so that QueueMessage
        // will post an event next time it's called:
        //
        pDomain->sendEventOutstanding = FALSE;
    }
    else
    {
        //
        // This will happen
        //
        // - when we get a FEEDBACK event after we've cleared the queue, OR
        //
        // - when we get a SEND_QUEUE event which was posted because there
        //   were none outstanding but a FEEDBACK event arrived in the
        //   meantime to clear the queue.
        //
        // NOTE: this flag means that there MIGHT not be a send EVENT
        //       outstanding (see above).  It does not mean that there's
        //       nothing on the send queue, so we go ahead and check the
        //       queue.  (Intentionally empty branch.)
        //
    }

    //
    // The strategy for processing the send queue is to process the highest
    // priority operation first, whether or not a transfer is in progress
    // at another priority.
    //
    // So, for each priority, we check if there's anything in the queue:
    //
    TRACE_OUT(("Searching send queues for Domain %u",pDomain->callID));
    for (priority = NET_TOP_PRIORITY; priority <= NET_LOW_PRIORITY; priority++)
    {
        TRACE_OUT(("Processing queue at priority %u", priority));

        // Note: assignment in the loop condition is deliberate - keep
        // sending the head instruction until the queue is empty or a send
        // fails (a failed send leaves the instruction queued for retry).
        while (pSendInst = (POM_SEND_INST)COM_BasedListFirst(&(pDomain->sendQueue[priority]), FIELD_OFFSET(OM_SEND_INST, chain)))
        {
            TRACE_OUT(("Found send instruction for priority %u", priority));
            if (SendMessagePkt(pomPrimary, pDomain, pSendInst) != 0)
            {
                DC_QUIT;
            }
        }
    }

DC_EXIT_POINT:
    if (domainRecBumped)
    {
        //
        // If our caller has told us that the use count of the Domain
        // record has been bumped, free it now:
        //
        UT_FreeRefCount((void**)&pDomain, FALSE);
    }

    DebugExitVOID(ProcessSendQueue);
}
  7510. //
  7511. // SendMessagePkt(...)
  7512. //
  7513. UINT SendMessagePkt
  7514. (
  7515. POM_PRIMARY pomPrimary,
  7516. POM_DOMAIN pDomain,
  7517. POM_SEND_INST pSendInst
  7518. )
  7519. {
  7520. void * pNetBuffer = NULL;
  7521. void * pAnotherNetBuffer = NULL;
  7522. UINT transferSize;
  7523. UINT dataTransferSize;
  7524. BOOL compressed;
  7525. BOOL tryToCompress;
  7526. BOOL spoiled = FALSE;
  7527. BOOL allSent = FALSE;
  7528. NET_PRIORITY queuePriority;
  7529. BOOL fSendExtra;
  7530. POMNET_PKT_HEADER pMessage;
  7531. POM_WSGROUP pWSGroup;
  7532. POM_WORKSET pWorkset;
  7533. UINT rc = 0;
  7534. DebugEntry(SendMessagePkt);
  7535. //
  7536. // We check here if we can spoil this message:
  7537. //
  7538. rc = TryToSpoilOp(pSendInst);
  7539. //
  7540. // If so, quit:
  7541. //
  7542. if (rc == OM_RC_SPOILED)
  7543. {
  7544. spoiled = TRUE;
  7545. rc = 0;
  7546. DC_QUIT;
  7547. }
  7548. //
  7549. // Any other error is more serious:
  7550. //
  7551. if (rc != 0)
  7552. {
  7553. DC_QUIT;
  7554. }
  7555. //
  7556. // Now decide how many bytes we're going to ask the network layer for
  7557. // this time and how many data bytes we're going to transfer:
  7558. //
  7559. DecideTransferSize(pSendInst, &transferSize, &dataTransferSize);
  7560. ASSERT(dataTransferSize <= pSendInst->dataLeftToGo);
  7561. //
  7562. // Add 1 byte to the transfer size for the <compressionType> byte:
  7563. //
  7564. TRACE_OUT(("Asking MG_GetBuffer for 0x%08x bytes for operation type 0x%08x",
  7565. transferSize + 1, pSendInst->messageType));
  7566. rc = MG_GetBuffer(pomPrimary->pmgClient,
  7567. transferSize + 1,
  7568. pSendInst->priority,
  7569. pSendInst->channel,
  7570. &pNetBuffer);
  7571. if (rc != 0)
  7572. {
  7573. //
  7574. // Possible errors include
  7575. // - NET_NOT_CONNECTED, when a backlevel call ends
  7576. // - NET_INVALID_USER_HANDLE, when an MCS call ends
  7577. // - NET_TOO_MUCH_IN_USE, when we hit back pressure (flow control)
  7578. //
  7579. // In all cases, just quit.
  7580. //
  7581. TRACE_OUT(("MG_GetBuffer failed; not sending OM message"));
  7582. DC_QUIT;
  7583. }
  7584. //
  7585. // OK so far, so now copy the header of the message into the first part
  7586. // of the compress buffer:
  7587. //
  7588. pMessage = pSendInst->pMessage;
  7589. ASSERT(pMessage);
  7590. memcpy(pomPrimary->compressBuffer, pMessage, pSendInst->messageSize);
  7591. //
  7592. // ...and now copy the data into the rest of the buffer:
  7593. //
  7594. // This must be a HUGE copy because although the compress buffer is not
  7595. // HUGE, the data is and the bit to be copied may span segments.
  7596. //
  7597. if (dataTransferSize != 0)
  7598. {
  7599. memcpy((LPBYTE)pomPrimary->compressBuffer + pSendInst->messageSize,
  7600. pSendInst->pDataNext, dataTransferSize);
  7601. }
  7602. //
  7603. // Determine whether to compress:
  7604. //
  7605. compressed = FALSE;
  7606. tryToCompress = FALSE;
  7607. if ((pDomain->compressionCaps & OM_CAPS_PKW_COMPRESSION) &&
  7608. (pSendInst->compressOrNot) &&
  7609. (transferSize > DCS_MIN_COMPRESSABLE_PACKET) &&
  7610. (pomPrimary->pgdcWorkBuf != NULL))
  7611. {
  7612. tryToCompress = TRUE;
  7613. }
  7614. //
  7615. // If we passed those tests, compress the packet into the network
  7616. // buffer.
  7617. //
  7618. // This will not use the whole network buffer we have allocated, but it
  7619. // saves us having to have two buffers and doing a second data copy.
  7620. // The network layer can handle a partially used buffer
  7621. //
  7622. if (tryToCompress)
  7623. {
  7624. TRACE_OUT(("OM Compressing %04d bytes", transferSize));
  7625. compressed = GDC_Compress(NULL, GDCCO_MAXSPEED, pomPrimary->pgdcWorkBuf,
  7626. pomPrimary->compressBuffer, transferSize, (LPBYTE)pNetBuffer + 1,
  7627. &transferSize);
  7628. }
  7629. if (compressed)
  7630. {
  7631. TRACE_OUT(("OM Compressed to %04d bytes", transferSize));
  7632. *((LPBYTE)pNetBuffer) = OM_PROT_PKW_COMPRESSED;
  7633. }
  7634. else
  7635. {
  7636. TRACE_OUT(("OM Uncompressed %04d bytes", transferSize));
  7637. memcpy((LPBYTE)pNetBuffer + 1, pomPrimary->compressBuffer,
  7638. transferSize);
  7639. *((LPBYTE)pNetBuffer) = OM_PROT_NOT_COMPRESSED;
  7640. }
  7641. //
  7642. // If we're in a T.120 call and sending on all priorities, we need to
  7643. // do some work to ensure compatibility with NetMeeting 1.0.
  7644. //
  7645. fSendExtra = ((pSendInst->priority & NET_SEND_ALL_PRIORITIES) != 0);
  7646. if ( fSendExtra )
  7647. {
  7648. //
  7649. // T.120 reserves MCS Top Priority for use by GCC. Sending on all
  7650. // priorities used to include Top, but no longer does, to ensure
  7651. // compliance. However, ObMan expects to receive 4 responses when
  7652. // sending on all priorities whereas the MCS glue now uses only
  7653. // 3 priorities. To ensure backward compatibility, whenever ObMan
  7654. // sends on all priorities, it has to add an extra send by making
  7655. // an extra call to the network here.
  7656. // First allocate another net buffer and copy the data to it (we
  7657. // have to do before calling MG_SendData as the other buffer is
  7658. // invalid after this).
  7659. //
  7660. TRACE_OUT(( "SEND_ALL: get extra NET buffer"));
  7661. rc = MG_GetBuffer(pomPrimary->pmgClient,
  7662. transferSize + 1,
  7663. (NET_PRIORITY)(pSendInst->priority & ~NET_SEND_ALL_PRIORITIES),
  7664. pSendInst->channel,
  7665. &pAnotherNetBuffer);
  7666. if (rc != 0)
  7667. {
  7668. WARNING_OUT(("MG_GetBuffer failed; not sending OM packet"));
  7669. }
  7670. else
  7671. {
  7672. memcpy(pAnotherNetBuffer, pNetBuffer, transferSize + 1);
  7673. }
  7674. }
  7675. //
  7676. // Now send the packet, adding 1 byte to the length for the
  7677. // <compressionType> byte:
  7678. //
  7679. TRACE_OUT(( "Sending 0x%08x bytes on channel 0x%08x at priority %hu",
  7680. transferSize + 1, pSendInst->channel, pSendInst->priority));
  7681. if (rc == 0)
  7682. {
  7683. TRACE_OUT(("SendMessagePkt: sending packet size %d",
  7684. transferSize+1));
  7685. rc = MG_SendData(pomPrimary->pmgClient,
  7686. pSendInst->priority,
  7687. pSendInst->channel,
  7688. (transferSize + 1),
  7689. &pNetBuffer);
  7690. }
  7691. if ( fSendExtra && (rc == 0) )
  7692. {
  7693. TRACE_OUT(("SendMessagePkt: sending extra packet size %d",
  7694. transferSize+1));
  7695. rc = MG_SendData(pomPrimary->pmgClient,
  7696. (NET_PRIORITY)(pSendInst->priority & ~NET_SEND_ALL_PRIORITIES),
  7697. pSendInst->channel,
  7698. (transferSize + 1),
  7699. &pAnotherNetBuffer);
  7700. }
  7701. if (rc != 0)
  7702. {
  7703. //
  7704. // Network API says free the buffer on error:
  7705. //
  7706. MG_FreeBuffer(pomPrimary->pmgClient, &pNetBuffer);
  7707. if ( pAnotherNetBuffer != NULL )
  7708. {
  7709. MG_FreeBuffer(pomPrimary->pmgClient, &pAnotherNetBuffer);
  7710. }
  7711. switch (rc)
  7712. {
  7713. case NET_RC_MGC_NOT_CONNECTED:
  7714. case NET_RC_MGC_INVALID_USER_HANDLE:
  7715. //
  7716. // These are the errors the Network layer returns when we're in
  7717. // a singleton Domain or when an MCS domain has just
  7718. // terminated. We ignore them.
  7719. //
  7720. TRACE_OUT(("No data sent since call %u doesn't exist",
  7721. pDomain->callID));
  7722. rc = 0;
  7723. break;
  7724. default:
  7725. //
  7726. // Any other error is more serious, so quit and pass it back:
  7727. //
  7728. DC_QUIT;
  7729. }
  7730. }
  7731. else
  7732. {
  7733. //
  7734. // We've sent a message and will therefore get a FEEDBACK event
  7735. // sometime later. This qualifies as a SEND_EVENT since it will
  7736. // prompt us to examine our send queue, so we set the
  7737. // SEND_EVENT_OUTSTANDING flag:
  7738. //
  7739. TRACE_OUT(("Sent msg in Domain %u (type: 0x%08x) with %hu data bytes",
  7740. pDomain->callID, pSendInst->messageType, dataTransferSize));
  7741. pDomain->sendEventOutstanding = TRUE;
  7742. }
  7743. //
  7744. // Here, we decrement the <bytesUnacked> fields for the workset and
  7745. // workset group:
  7746. //
  7747. if (dataTransferSize != 0)
  7748. {
  7749. pWorkset = pSendInst->pWorkset;
  7750. pWorkset->bytesUnacked -= dataTransferSize;
  7751. pWSGroup = pSendInst->pWSGroup;
  7752. pWSGroup->bytesUnacked -= dataTransferSize;
  7753. }
  7754. //
  7755. // Now update the send instruction and decide whether we've sent all
  7756. // the data for this operation:
  7757. //
  7758. pSendInst->dataLeftToGo -= dataTransferSize;
  7759. pSendInst->pDataNext = (POM_OBJECTDATA)((LPBYTE)pSendInst->pDataNext + dataTransferSize);
  7760. if (pSendInst->dataLeftToGo == 0)
  7761. {
  7762. //
  7763. // If so, we
  7764. //
  7765. // - clear the transfer-in-progress flag for this queue -
  7766. // remember that the NET_SEND_ALL_PRIORITIES flag may be set so
  7767. // we need to clear it
  7768. //
  7769. // - free our copy of the message packet and the data, if any (we
  7770. // bumped up the use count of the data chunk when the message was
  7771. // put on the queue so we won't really be getting rid of it
  7772. // unless it's been freed elsewhere already, which is fine)
  7773. //
  7774. // - pop the instruction off the send queue and free it.
  7775. //
  7776. TRACE_OUT(( "Sent last packet for operation (type: 0x%08x)",
  7777. pSendInst->messageType));
  7778. queuePriority = pSendInst->priority;
  7779. queuePriority &= ~NET_SEND_ALL_PRIORITIES;
  7780. pDomain->sendInProgress[queuePriority] = FALSE;
  7781. allSent = TRUE;
  7782. }
  7783. else
  7784. {
  7785. //
  7786. // If not, we
  7787. //
  7788. // - set the transfer-in-progress flag for this queue -
  7789. // remember that the NET_SEND_ALL_PRIORITIES flag may be set so
  7790. // we need to clear it
  7791. //
  7792. // - set the <messageSize> field of the send instruction to the
  7793. // size of a MORE_DATA header, so that only that many bytes are
  7794. // picked out of the message next time
  7795. //
  7796. // - set the <messageType> field of the message to MORE_DATA
  7797. //
  7798. // - leave the operation on the queue.
  7799. //
  7800. TRACE_OUT(("Data left to transfer: %u bytes (starting at 0x%08x)",
  7801. pSendInst->dataLeftToGo, pSendInst->pDataNext));
  7802. queuePriority = pSendInst->priority;
  7803. queuePriority &= ~NET_SEND_ALL_PRIORITIES;
  7804. pDomain->sendInProgress[queuePriority] = TRUE;
  7805. pSendInst->messageSize = OMNET_MORE_DATA_SIZE;
  7806. pMessage->messageType = OMNET_MORE_DATA;
  7807. }
  7808. DC_EXIT_POINT:
  7809. //
  7810. // If we're finished with the message (either because we've sent it all
  7811. // or because it was spoiled) we free it (plus any associated data):
  7812. //
  7813. if (spoiled || allSent)
  7814. {
  7815. FreeSendInst(pSendInst);
  7816. }
  7817. DebugExitDWORD(SendMessagePkt, rc);
  7818. return(rc);
  7819. }
  7820. //
  7821. // TryToSpoilOp
  7822. //
  7823. UINT TryToSpoilOp
  7824. (
  7825. POM_SEND_INST pSendInst
  7826. )
  7827. {
  7828. POMNET_OPERATION_PKT pMessage;
  7829. POM_OBJECT pObj;
  7830. POM_WORKSET pWorkset;
  7831. POM_WSGROUP pWSGroup;
  7832. BOOL spoilable = FALSE;
  7833. UINT rc = 0;
  7834. DebugEntry(TryToSpoilOp);
  7835. pMessage = (POMNET_OPERATION_PKT)pSendInst->pMessage;
  7836. pObj = pSendInst->pObj;
  7837. pWorkset = pSendInst->pWorkset;
  7838. pWSGroup = pSendInst->pWSGroup;
  7839. //
  7840. // The rules for spoiling state that
  7841. //
  7842. // - any operation is spoiled by a later operation of the same type
  7843. //
  7844. // - in addition, an Update is spoiled by a later Replace.
  7845. //
  7846. // Since we never have two Adds or two Deletes for the same object,
  7847. // these rules reduce to the following:
  7848. //
  7849. // - a Clear is spoiled by a later Clear
  7850. //
  7851. // - a Move is spoiled by a later Move
  7852. //
  7853. // - a Replace is spoiled by a later Replace
  7854. //
  7855. // - an Update is spoiled by a later Update or a later Replace.
  7856. //
  7857. // So, switch according to the operation type:
  7858. //
  7859. switch (pSendInst->messageType)
  7860. {
  7861. case OMNET_WORKSET_CLEAR:
  7862. if (STAMP_IS_LOWER(pMessage->seqStamp, pWorkset->clearStamp))
  7863. {
  7864. spoilable = TRUE;
  7865. }
  7866. break;
  7867. case OMNET_OBJECT_UPDATE:
  7868. if ((STAMP_IS_LOWER(pMessage->seqStamp, pObj->replaceStamp))
  7869. || (STAMP_IS_LOWER(pMessage->seqStamp, pObj->updateStamp)))
  7870. {
  7871. spoilable = TRUE;
  7872. }
  7873. break;
  7874. case OMNET_OBJECT_REPLACE:
  7875. if (STAMP_IS_LOWER(pMessage->seqStamp, pObj->replaceStamp))
  7876. {
  7877. spoilable = TRUE;
  7878. }
  7879. break;
  7880. case OMNET_OBJECT_MOVE:
  7881. if (STAMP_IS_LOWER(pMessage->seqStamp, pObj->positionStamp))
  7882. {
  7883. spoilable = TRUE;
  7884. }
  7885. break;
  7886. case OMNET_HELLO:
  7887. case OMNET_WELCOME:
  7888. case OMNET_LOCK_REQ:
  7889. case OMNET_LOCK_GRANT:
  7890. case OMNET_LOCK_DENY:
  7891. case OMNET_LOCK_NOTIFY:
  7892. case OMNET_UNLOCK:
  7893. case OMNET_WSGROUP_SEND_REQ:
  7894. case OMNET_WSGROUP_SEND_MIDWAY:
  7895. case OMNET_WSGROUP_SEND_COMPLETE:
  7896. case OMNET_WSGROUP_SEND_DENY:
  7897. case OMNET_WORKSET_NEW:
  7898. case OMNET_WORKSET_CATCHUP:
  7899. case OMNET_OBJECT_ADD:
  7900. case OMNET_OBJECT_DELETE:
  7901. case OMNET_OBJECT_CATCHUP:
  7902. //
  7903. // Do nothing
  7904. //
  7905. break;
  7906. default:
  7907. ERROR_OUT(("Reached default case in switch statement (value: %hu)",
  7908. pSendInst->messageType));
  7909. break;
  7910. }
  7911. if (spoilable)
  7912. {
  7913. //
  7914. // To spoil the message, we remove it from the send queue and free
  7915. // the memory (also NULL the caller's pointer):
  7916. //
  7917. //
  7918. // However, if we spoil the message, the data (if any) will never be
  7919. // acknowledged, so we must decrement the relevant <bytesUnacked>
  7920. // fields now:
  7921. //
  7922. TRACE_OUT(( "Spoiling from send queue for workset %u",
  7923. pWorkset->worksetID));
  7924. if (pSendInst->dataLeftToGo != 0)
  7925. {
  7926. pWorkset->bytesUnacked -= pSendInst->dataLeftToGo;
  7927. pWSGroup->bytesUnacked -= pSendInst->dataLeftToGo;
  7928. }
  7929. rc = OM_RC_SPOILED;
  7930. }
  7931. DebugExitDWORD(TryToSpoilOp, rc);
  7932. return(rc);
  7933. }
  7934. //
  7935. // DecideTransferSize(...)
  7936. //
  7937. void DecideTransferSize
  7938. (
  7939. POM_SEND_INST pSendInst,
  7940. UINT * pTransferSize,
  7941. UINT * pDataTransferSize
  7942. )
  7943. {
  7944. UINT transferSize;
  7945. DebugEntry(DecideTransferSize);
  7946. //
  7947. // Ideally, we'd like to transfer everything in one go, where
  7948. // "everything" is the message header plus all the data to go with it
  7949. // (if any):
  7950. //
  7951. transferSize = pSendInst->messageSize + pSendInst->dataLeftToGo;
  7952. TRACE_OUT(("Desired transfer size for this portion: %u", transferSize));
  7953. //
  7954. // However, we never ask for more than half the send pool size, so take
  7955. // the minimum of the two:
  7956. //
  7957. // (we subtract 1 byte to allow for the <compressionType> byte at the
  7958. // start of the packet)
  7959. //
  7960. transferSize = min(transferSize, ((OM_NET_SEND_POOL_SIZE / 2) - 1));
  7961. TRACE_OUT(("Feasible transfer size for this portion: %u",
  7962. transferSize));
  7963. //
  7964. // The logic of the send queue processing requires that the message
  7965. // header is sent completely in the first packet, so assert:
  7966. //
  7967. ASSERT((transferSize >= pSendInst->messageSize));
  7968. //
  7969. // As a sanity check, we ensure we're not trying to transfer more than
  7970. // the biggest buffer allowed:
  7971. //
  7972. ASSERT(transferSize <= OM_NET_MAX_TRANSFER_SIZE);
  7973. //
  7974. // The amount of data to be sent is the transfer size less the size of
  7975. // the header we're sending:
  7976. //
  7977. *pDataTransferSize = ((UINT) transferSize) - pSendInst->messageSize;
  7978. *pTransferSize = (UINT) transferSize;
  7979. TRACE_OUT(("Total transfer size for this packet: %u - data transfer size: %u",
  7980. (UINT) *pTransferSize, (UINT) *pDataTransferSize));
  7981. DebugExitVOID(DecideTransferSize);
  7982. }
  7983. //
  7984. // ProcessNetData(...)
  7985. //
  7986. void ProcessNetData
  7987. (
  7988. POM_PRIMARY pomPrimary,
  7989. POM_DOMAIN pDomain,
  7990. PNET_SEND_IND_EVENT pNetSendInd
  7991. )
  7992. {
  7993. POMNET_PKT_HEADER pHeader;
  7994. UINT dataSize;
  7995. OMNET_MESSAGE_TYPE messageType = 0;
  7996. UINT rc = 0;
  7997. DebugEntry(ProcessNetData);
  7998. //
  7999. // Decompress the packet and set pHeader to point to the start of
  8000. // wherever the data ends up:
  8001. //
  8002. ASSERT((pNetSendInd->lengthOfData < 0xFFFF));
  8003. if (NULL != pNetSendInd->data_ptr) {
  8004. switch (*(pNetSendInd->data_ptr))
  8005. {
  8006. case OM_PROT_NOT_COMPRESSED:
  8007. {
  8008. TRACE_OUT(("Buffer not compressed - taking it as it stands"));
  8009. memcpy(pomPrimary->compressBuffer, pNetSendInd->data_ptr + 1,
  8010. pNetSendInd->lengthOfData--);
  8011. }
  8012. break;
  8013. case OM_PROT_PKW_COMPRESSED:
  8014. {
  8015. TRACE_OUT(("Buffer was PKW compressed - size 0x%08x bytes",
  8016. pNetSendInd->lengthOfData));
  8017. dataSize = sizeof(pomPrimary->compressBuffer);
  8018. ASSERT(pomPrimary->pgdcWorkBuf != NULL);
  8019. if (!GDC_Decompress(NULL, pomPrimary->pgdcWorkBuf,
  8020. pNetSendInd->data_ptr + 1,
  8021. (WORD) pNetSendInd->lengthOfData - 1,
  8022. pomPrimary->compressBuffer, &dataSize))
  8023. {
  8024. ERROR_OUT(("Failed to decompress OM data!"));
  8025. }
  8026. pNetSendInd->lengthOfData = dataSize;
  8027. TRACE_OUT(("Decompressed to 0x%08x bytes",
  8028. pNetSendInd->lengthOfData));
  8029. }
  8030. break;
  8031. default:
  8032. {
  8033. ERROR_OUT(( "Ignoring packet with unknown compression (0x%08x)",
  8034. *(pNetSendInd->data_ptr)));
  8035. DC_QUIT;
  8036. }
  8037. }
  8038. pHeader = (POMNET_PKT_HEADER) pomPrimary->compressBuffer;
  8039. //
  8040. // Now switch accorindg to the message type:
  8041. //
  8042. messageType = pHeader->messageType;
  8043. TRACE_OUT((" Packet contains OMNET message type 0x%08x", messageType));
  8044. switch (messageType)
  8045. {
  8046. case OMNET_HELLO:
  8047. {
  8048. rc = ProcessHello(pomPrimary,
  8049. pDomain,
  8050. (POMNET_JOINER_PKT) pHeader,
  8051. pNetSendInd->lengthOfData);
  8052. }
  8053. break;
  8054. case OMNET_WELCOME:
  8055. {
  8056. rc = ProcessWelcome(pomPrimary,
  8057. pDomain,
  8058. (POMNET_JOINER_PKT) pHeader,
  8059. pNetSendInd->lengthOfData);
  8060. }
  8061. break;
  8062. case OMNET_LOCK_DENY:
  8063. case OMNET_LOCK_GRANT:
  8064. {
  8065. ProcessLockReply(pomPrimary,
  8066. pDomain,
  8067. pHeader->sender,
  8068. ((POMNET_LOCK_PKT) pHeader)->data1,
  8069. pHeader->messageType);
  8070. }
  8071. break;
  8072. case OMNET_LOCK_REQ:
  8073. {
  8074. ProcessLockRequest(pomPrimary, pDomain,
  8075. (POMNET_LOCK_PKT) pHeader);
  8076. }
  8077. break;
  8078. case OMNET_WSGROUP_SEND_REQ:
  8079. {
  8080. ProcessSendReq(pomPrimary,
  8081. pDomain,
  8082. (POMNET_WSGROUP_SEND_PKT) pHeader);
  8083. }
  8084. break;
  8085. case OMNET_WSGROUP_SEND_MIDWAY:
  8086. {
  8087. ProcessSendMidway(pomPrimary,
  8088. pDomain,
  8089. (POMNET_WSGROUP_SEND_PKT) pHeader);
  8090. }
  8091. break;
  8092. case OMNET_WSGROUP_SEND_COMPLETE:
  8093. {
  8094. rc = ProcessSendComplete(pomPrimary,
  8095. pDomain,
  8096. (POMNET_WSGROUP_SEND_PKT) pHeader);
  8097. }
  8098. break;
  8099. case OMNET_WSGROUP_SEND_DENY:
  8100. {
  8101. MaybeRetryCatchUp(pomPrimary,
  8102. pDomain,
  8103. ((POMNET_WSGROUP_SEND_PKT) pHeader)->wsGroupID,
  8104. pHeader->sender);
  8105. }
  8106. break;
  8107. //
  8108. // We use the special ReceiveData function for any messages which
  8109. //
  8110. // - might need to be bounced, or
  8111. //
  8112. // - might fill more than one packet.
  8113. //
  8114. case OMNET_LOCK_NOTIFY:
  8115. case OMNET_UNLOCK:
  8116. case OMNET_WORKSET_NEW:
  8117. case OMNET_WORKSET_CLEAR:
  8118. case OMNET_WORKSET_CATCHUP:
  8119. case OMNET_OBJECT_ADD:
  8120. case OMNET_OBJECT_MOVE:
  8121. case OMNET_OBJECT_UPDATE:
  8122. case OMNET_OBJECT_REPLACE:
  8123. case OMNET_OBJECT_DELETE:
  8124. case OMNET_OBJECT_CATCHUP:
  8125. case OMNET_MORE_DATA:
  8126. {
  8127. rc = ReceiveData(pomPrimary,
  8128. pDomain,
  8129. pNetSendInd,
  8130. (POMNET_OPERATION_PKT) pHeader);
  8131. }
  8132. break;
  8133. default:
  8134. {
  8135. ERROR_OUT(( "Unexpected messageType 0x%08x", messageType));
  8136. }
  8137. }
  8138. DC_EXIT_POINT:
  8139. if (rc != 0)
  8140. {
  8141. ERROR_OUT(( "Error %d processing OMNET message 0x%08x",
  8142. rc, messageType));
  8143. }
  8144. }
  8145. DebugExitVOID(ProcessNetData);
  8146. }
//
// ReceiveData(...)
//
// Handles one incoming packet of a (possibly multi-packet) OMNET
// operation.  For an OMNET_MORE_DATA continuation packet, the existing
// receive CB for the transfer is looked up; for any other type a fresh
// receive CB is created.  The packet's data bytes (if any) are appended
// to the receive buffer, and once no further bytes are expected the
// assembled message is handed to ProcessMessage.
//
// Returns 0 on success, or an error code from FindReceiveCB /
// CreateReceiveCB / ProcessMessage.
//
UINT ReceiveData
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    PNET_SEND_IND_EVENT pNetSendInd,
    POMNET_OPERATION_PKT pNetMessage
)
{
    POM_RECEIVE_CB pReceiveCB = NULL;
    UINT thisHeaderSize;
    UINT thisDataSize;
    OMNET_MESSAGE_TYPE messageType;
    long bytesStillExpected = 0;
    UINT rc = 0;
    DebugEntry(ReceiveData);
    //
    // Set up some local variables:
    //
    messageType = pNetMessage->header.messageType;
    //
    // The amount of data included in this message is the size of the
    // network buffer less the size of our message header at the front of
    // it:
    //
    // Note: <thisHeaderSize> is the size of the header IN THIS PACKET,
    // rather than the size of the header in the first packet of a
    // multi-packet send.
    //
    thisHeaderSize = GetMessageSize(pNetMessage->header.messageType);
    thisDataSize = pNetSendInd->lengthOfData - thisHeaderSize;
    //
    // If this is a MORE_DATA packet, then there should already be a
    // receive CB set up for the transfer. If not, we need to create one:
    //
    if (messageType == OMNET_MORE_DATA)
    {
        rc = FindReceiveCB(pDomain, pNetSendInd, pNetMessage, &pReceiveCB);
        //
        // If no receive CB, we swallow the return code and quit. This will
        // happen when we join a channel midway through a large data
        // transfer.
        //
        if (rc == OM_RC_RECEIVE_CB_NOT_FOUND)
        {
            WARNING_OUT(("Discarding unexpected packet from 0x%08x",
                pNetMessage->header.sender));
            rc = 0;
            DC_QUIT;
        }
    }
    else
    {
        // lonchanc: added the following block of code
        if (messageType == OMNET_OBJECT_REPLACE)
        {
            POM_RECEIVE_CB p;
            // lonchanc: This packet does not contain all the data.
            // More data will come in another packets; however,
            // in this case, bytesStillExpected will be greater than zero
            // after substracting from thisDataSize, as a result,
            // this receiveCB will be appended to the ReceiveList.
            // However, FindReceiveCB will find the first one matched.
            // As a result, the one we just appended to the ReceiveList will
            // not be found.
            // Even worse, if there is receiveCB (of same sender, priority, and
            // channel), the first-matched receiveCB will be totally confused
            // when more data come in. This is bug #578.
            //
            // NOTE(review): this loop therefore discards EVERY pending
            // receive CB matching this sender/priority/channel before a
            // new REPLACE transfer starts.
            TRACE_OUT(("Removing receiveCB {"));
            while (FindReceiveCB(pDomain, pNetSendInd, pNetMessage, &p) == 0)
            {
                //
                // Remove the message from the list it's in (either the pending
                // receives list if this message was never bounced or the bounce
                // list if it has been bounced):
                //
                COM_BasedListRemove(&(p->chain));
                //
                // Now free the message and the receive control block (NOT THE
                // DATA! If there was any, it's just been used for an object
                // add/update etc.)
                //
                UT_FreeRefCount((void**)&(p->pHeader), FALSE);
                UT_FreeRefCount((void**)&p, FALSE);
            }
        }
        rc = CreateReceiveCB(pDomain, pNetSendInd, pNetMessage, &pReceiveCB);
    }
    if (rc != 0)
    {
        ERROR_OUT(("%s failed, rc=0x0x%08x",
            (messageType == OMNET_MORE_DATA) ? "FindReceiveCB" : "CreateReceiveCB",
            rc));
        DC_QUIT;
    }
    TRACE_OUT(("%s ok, pRecvCB=0x0x%p",
        (messageType == OMNET_MORE_DATA) ? "FindReceiveCB" : "CreateReceiveCB",
        pReceiveCB));
    //
    // Now we copy the data, if any, from the network buffer into the chunk
    // we allocated when we called CreateReceiveCB.
    //
    if (thisDataSize != 0)
    {
        //
        // We copy the data across using memcpy.
        //
        bytesStillExpected = ((long) (pReceiveCB->pHeader->totalSize) -
            (long) (pReceiveCB->bytesRecd));
        TRACE_OUT(("thisDataSize=0x0x%08x, bytesStillExpected=0x0x%08x, totalSize=0x0x%08x, bytesRecd=0x0x%08x",
            (long) thisDataSize,
            (long) bytesStillExpected,
            (long) pReceiveCB->pHeader->totalSize,
            (long) pReceiveCB->bytesRecd));
        ASSERT((long) thisDataSize <= bytesStillExpected);
        memcpy(pReceiveCB->pCurrentPosition,
            ((LPBYTE) pNetMessage) + thisHeaderSize,
            thisDataSize);
        pReceiveCB->bytesRecd += thisDataSize;
        pReceiveCB->pCurrentPosition += thisDataSize;
        bytesStillExpected -= thisDataSize;
        TRACE_OUT((" Still expecting %u bytes", bytesStillExpected));
    }
    //
    // If we are expecting no more data for this transfer, process it:
    //
    // (When thisDataSize == 0, bytesStillExpected keeps its initial value
    // of 0, so a data-less message is processed immediately.)
    //
    if (bytesStillExpected <= 0)
    {
        rc = ProcessMessage(pomPrimary, pReceiveCB, OK_TO_RETRY_BOUNCE_LIST);
        if (rc == OM_RC_BOUNCED)
        {
            //
            // If ProcessMessage can't deal with the message immediately
            // (because e.g. it's an update for an object we don't yet
            // have), it will have added it to the bounce list so it will
            // be tried again later.
            //
            // We special case this return code as it's not a problem for
            // us here (it exists because other parts of the code need it):
            //
            WARNING_OUT(("Bounced message type 0x%08x", messageType));
            rc = 0;
        }
        if (rc != 0)
        {
            //
            // Any other non-zero return code is more serious:
            //
            DC_QUIT;
        }
    }
    DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(("Error %d from message type 0x%08x", rc, messageType));
        if (rc == OM_RC_OUT_OF_RESOURCES)
        {
            //
            // If we couldn't allocate memory for the data to be recd, we
            // act as if we've been kicked out of the channel:
            //
            ERROR_OUT(( "Leaving chann 0x%08x, simulating expulsion", pNetSendInd->channel));
            MG_ChannelLeave(pomPrimary->pmgClient, pNetSendInd->channel);
            ProcessNetLeaveChannel(pomPrimary, pDomain, pNetSendInd->channel);
        }
    }
    DebugExitDWORD(ReceiveData, rc);
    return(rc);
}
//
// CreateReceiveCB(...)
//
// Called when the first packet of a message arrives: allocates and
// initializes a receive control block for the transfer, copies the
// message header out of the (transient) network buffer, allocates a
// buffer for any data still to come, and chains the CB onto the
// Domain's receive list so later MORE_DATA packets can be correlated.
//
// On success returns 0 and sets *ppReceiveCB; on failure returns
// UT_RC_NO_MEM or OM_RC_OUT_OF_RESOURCES and frees anything allocated.
//
UINT CreateReceiveCB
(
    POM_DOMAIN pDomain,
    PNET_SEND_IND_EVENT pNetSendInd,
    POMNET_OPERATION_PKT pNetMessage,
    POM_RECEIVE_CB * ppReceiveCB
)
{
    POM_RECEIVE_CB pReceiveCB = NULL;
    POMNET_OPERATION_PKT pHeader = NULL;
    UINT headerSize;
    UINT totalDataSize;
    UINT rc = 0;
    DebugEntry(CreateReceiveCB);
    //
    // We get here when the first packet of a message arrives . What we
    // need to do is to set up a "receive" structure and add it to the list
    // of receives-in-progress for the Domain. Then, when the ensuing data
    // packets (if any) arrive, they will be correlated and concatenated.
    // When all the data has arrived, the receive CB will be passed to
    // ProcessMessage.
    //
    //
    // Allocate some memory for the receive CB:
    //
    pReceiveCB = (POM_RECEIVE_CB)UT_MallocRefCount(sizeof(OM_RECEIVE_CB), TRUE);
    if (!pReceiveCB)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    SET_STAMP(pReceiveCB, RCVCB);
    // Record the transfer's identity: later packets are matched on
    // sender (in the header), priority and channel.
    pReceiveCB->pDomain = pDomain;
    pReceiveCB->priority = pNetSendInd->priority;
    pReceiveCB->channel = pNetSendInd->channel;
    //
    // Allocate some memory for the message header and copy the packet into
    // it from the network buffer (note: we must copy the header since at
    // the moment it is in a network buffer which we can't hang on to for
    // the entire duration of the transfer):
    //
    // Note: the allocation is sizeof(OMNET_OPERATION_PKT) - the largest
    // header type - but only <headerSize> bytes are copied in.
    //
    headerSize = GetMessageSize(pNetMessage->header.messageType);
    pHeader = (POMNET_OPERATION_PKT)UT_MallocRefCount(sizeof(OMNET_OPERATION_PKT), TRUE);
    if (!pHeader)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    memcpy(pHeader, pNetMessage, headerSize);
    pReceiveCB->pHeader = pHeader;
    //
    // Not all messages sent over the network have a totalSize field, but
    // our subsequent processing requires one. So, if the message we've
    // just received didn't have one, we set the value (our local copy of
    // the header has room since we alloacated enough memory for the
    // largest type of header):
    //
    if (headerSize >= (offsetof(OMNET_OPERATION_PKT, totalSize) +
        (sizeof(pNetMessage->totalSize))))
    {
        TRACE_OUT(("Header contains <totalSize> field (value: %u)",
            pNetMessage->totalSize));
    }
    else
    {
        TRACE_OUT(("Header doesn't contain <totalSize> field"));
        pReceiveCB->pHeader->totalSize = headerSize;
    }
    //
    // Now determine the total number of data bytes involved in this
    // operation:
    //
    totalDataSize = pReceiveCB->pHeader->totalSize - ((UINT) headerSize);
    //
    // If there is any data, allocate some memory to receive it and set the
    // <pData> pointer to point to it (otherwise NULL it):
    //
    if (totalDataSize != 0)
    {
        TRACE_OUT(( "Allocating %u bytes for data for this transfer",
            totalDataSize));
        pReceiveCB->pData = UT_MallocRefCount(totalDataSize, FALSE);
        if (!pReceiveCB->pData)
        {
            ERROR_OUT(( "Failed to allocate %u bytes for object to be recd "
                "from node 0x%08x - will remove WSG from Domain",
                totalDataSize, pNetMessage->header.sender));
            rc = OM_RC_OUT_OF_RESOURCES;
            DC_QUIT;
        }
    }
    else
    {
        pReceiveCB->pData = NULL;
    }
    pReceiveCB->pCurrentPosition = (LPBYTE)pReceiveCB->pData;
    //
    // Set <bytesRecd> to the size of the header. We may have recd some
    // data bytes as well, but they'll be added to the header size in
    // ReceiveData.
    //
    pReceiveCB->bytesRecd = headerSize;
    //
    // Now insert in the list hung off the Domain record:
    //
    COM_BasedListInsertBefore(&(pDomain->receiveList),
        &(pReceiveCB->chain));
    //
    // Set caller's pointer:
    //
    *ppReceiveCB = pReceiveCB;
    DC_EXIT_POINT:
    if (rc != 0)
    {
        // Error cleanup: each allocation is released exactly once -
        // pReceiveCB->pHeader is freed via the separate <pHeader>
        // pointer, not through the CB.
        if(pHeader != NULL)
        {
            ERROR_OUT(( "Error %d receiving first packet of message type %u from node 0x%08x",
                rc, pHeader->header.messageType, pHeader->header.sender));
        }
        if (pReceiveCB != NULL)
        {
            if (pReceiveCB->pData != NULL)
            {
                UT_FreeRefCount((void**)&(pReceiveCB->pData), FALSE);
            }
            UT_FreeRefCount((void**)&pReceiveCB, FALSE);
        }
        if (pHeader != NULL)
        {
            UT_FreeRefCount((void**)&pHeader, FALSE);
        }
    }
    DebugExitDWORD(CreateReceiveCB, rc);
    return(rc);
}
  8456. //
  8457. //
  8458. //
  8459. // FindReceiveCB(...)
  8460. //
  8461. //
  8462. //
  8463. UINT FindReceiveCB(POM_DOMAIN pDomain,
  8464. PNET_SEND_IND_EVENT pNetSendInd,
  8465. POMNET_OPERATION_PKT pPacket,
  8466. POM_RECEIVE_CB * ppReceiveCB)
  8467. {
  8468. POM_RECEIVE_CB pReceiveCB;
  8469. NET_PRIORITY priority;
  8470. NET_CHANNEL_ID channel;
  8471. NET_UID sender;
  8472. POMNET_OPERATION_PKT pHeader;
  8473. UINT rc = 0;
  8474. DebugEntry(FindReceiveCB);
  8475. //
  8476. // First thing to do is to find the receive control block for the
  8477. // transfer. It should be in the list hung off the Domain record:
  8478. //
  8479. sender = pPacket->header.sender;
  8480. priority = pNetSendInd->priority;
  8481. channel = pNetSendInd->channel;
  8482. pReceiveCB = (POM_RECEIVE_CB)COM_BasedListFirst(&(pDomain->receiveList), FIELD_OFFSET(OM_RECEIVE_CB, chain));
  8483. while (pReceiveCB != NULL)
  8484. {
  8485. //
  8486. // We check for a match on sender's user ID, channel and priority.
  8487. //
  8488. // We assume that, for a given channel, MCS does not reorder packets
  8489. // sent by the same user at the same priority.
  8490. //
  8491. pHeader = pReceiveCB->pHeader;
  8492. if ((pHeader->header.sender == sender) &&
  8493. (pReceiveCB->priority == priority) &&
  8494. (pReceiveCB->channel == channel))
  8495. {
  8496. //
  8497. // Found!
  8498. //
  8499. TRACE_OUT(("Found receive CB for user %hu, chann 0x%08x, pri %hu, at pRecvCB=0x0x%p",
  8500. sender, channel, priority, pReceiveCB));
  8501. break;
  8502. }
  8503. pReceiveCB = (POM_RECEIVE_CB)COM_BasedListNext(&(pDomain->receiveList), pReceiveCB,
  8504. FIELD_OFFSET(OM_RECEIVE_CB, chain));
  8505. }
  8506. if (pReceiveCB == NULL)
  8507. {
  8508. rc = OM_RC_RECEIVE_CB_NOT_FOUND;
  8509. DC_QUIT;
  8510. }
  8511. else
  8512. {
  8513. *ppReceiveCB = pReceiveCB;
  8514. }
  8515. DC_EXIT_POINT:
  8516. DebugExitDWORD(FindReceiveCB, rc);
  8517. return(rc);
  8518. }
  8519. //
  8520. // PurgeReceiveCBs(...)
  8521. //
  8522. void PurgeReceiveCBs
  8523. (
  8524. POM_DOMAIN pDomain,
  8525. NET_CHANNEL_ID channel
  8526. )
  8527. {
  8528. POM_RECEIVE_CB pReceiveCB;
  8529. POM_RECEIVE_CB pNextReceiveCB;
  8530. DebugEntry(PurgeReceiveCBs);
  8531. pReceiveCB = (POM_RECEIVE_CB)COM_BasedListFirst(&(pDomain->receiveList), FIELD_OFFSET(OM_RECEIVE_CB, chain));
  8532. while (pReceiveCB != NULL)
  8533. {
  8534. //
  8535. // Need to chain here since we may remove pReceiveCB from the list:
  8536. //
  8537. pNextReceiveCB = (POM_RECEIVE_CB)COM_BasedListNext(&(pDomain->receiveList), pReceiveCB,
  8538. FIELD_OFFSET(OM_RECEIVE_CB, chain));
  8539. if (pReceiveCB->channel == channel)
  8540. {
  8541. //
  8542. // This receive CB is for the channel being purged - remove it
  8543. // from the list and free the memory.
  8544. //
  8545. WARNING_OUT(( "Purging receive CB from user %hu",
  8546. pReceiveCB->pHeader->header.sender));
  8547. COM_BasedListRemove(&(pReceiveCB->chain));
  8548. //
  8549. // Free the data memory.
  8550. //
  8551. if (pReceiveCB->pData != NULL)
  8552. {
  8553. UT_FreeRefCount(&pReceiveCB->pData, FALSE);
  8554. }
  8555. //
  8556. // Free the header memory.
  8557. //
  8558. if (pReceiveCB->pHeader != NULL)
  8559. {
  8560. UT_FreeRefCount((void**)&pReceiveCB->pHeader, FALSE);
  8561. }
  8562. //
  8563. // Finally free the control block.
  8564. //
  8565. UT_FreeRefCount((void**)&pReceiveCB, FALSE);
  8566. }
  8567. pReceiveCB = pNextReceiveCB;
  8568. }
  8569. DebugExitVOID(PurgeReceiveCBs);
  8570. }
  8571. //
  8572. // ProcessMessage(...)
  8573. //
  8574. UINT ProcessMessage
  8575. (
  8576. POM_PRIMARY pomPrimary,
  8577. POM_RECEIVE_CB pReceiveCB,
  8578. UINT whatNext
  8579. )
  8580. {
  8581. POM_DOMAIN pDomain;
  8582. POMNET_OPERATION_PKT pHeader;
  8583. void * pData;
  8584. NET_PRIORITY priority;
  8585. OMNET_MESSAGE_TYPE messageType;
  8586. POM_WSGROUP pWSGroup;
  8587. POM_WORKSET pWorkset;
  8588. POM_OBJECT pObj;
  8589. BOOL bounced = FALSE;
  8590. BOOL retryBounceList = FALSE;
  8591. BOOL freeMemory = FALSE;
  8592. UINT rc = 0;
  8593. DebugEntry(ProcessMessage);
  8594. //
  8595. // Set up local variables:
  8596. //
  8597. pDomain = pReceiveCB->pDomain;
  8598. pHeader = pReceiveCB->pHeader;
  8599. priority = pReceiveCB->priority;
  8600. pData = pReceiveCB->pData;
  8601. messageType = pHeader->header.messageType;
  8602. //
  8603. // Extract pointers to workset group, workset and object record from
  8604. // the packet:
  8605. //
  8606. rc = PreProcessMessage(pDomain,
  8607. pHeader->wsGroupID,
  8608. pHeader->worksetID,
  8609. &pHeader->objectID,
  8610. pHeader->header.messageType,
  8611. &pWSGroup,
  8612. &pWorkset,
  8613. &pObj);
  8614. //
  8615. // PreProcess will have told us if it didn't find the relevant workset
  8616. // group, workset or object. Whether or not this is an error depends
  8617. // on the operation in question. We use a series of IF statements to
  8618. // detect and handle the following conditions:
  8619. //
  8620. //
  8621. // 1. Unknown workset group Discard the operation
  8622. //
  8623. // 2. Existing workset, WORKSET_NEW/CATCHUP Discard the operation
  8624. // 3. Unknown workset, any other operation Bounce the operation
  8625. //
  8626. // 4. Deleted object, any operation Discard the operation
  8627. // 5. Existing object, OBJECT_ADD/CATCHUP Discard the operation
  8628. // 6. Unknown object, any other operation Bounce the operation
  8629. //
  8630. //
  8631. //
  8632. // Test 1.:
  8633. //
  8634. if (rc == OM_RC_WSGROUP_NOT_FOUND)
  8635. {
  8636. //
  8637. // If we didn't even find the workset group, we just quit:
  8638. //
  8639. WARNING_OUT(( "Message is for unknown WSG (ID: %hu) in Domain %u",
  8640. pHeader->wsGroupID, pDomain->callID));
  8641. rc = 0;
  8642. //
  8643. // Mark the data memory allocated for this object to be freed.
  8644. //
  8645. freeMemory = TRUE;
  8646. DC_QUIT;
  8647. }
  8648. //
  8649. // Test 2.:
  8650. //
  8651. if (rc != OM_RC_WORKSET_NOT_FOUND) // i.e. existing workset
  8652. {
  8653. if ((messageType == OMNET_WORKSET_NEW) ||
  8654. (messageType == OMNET_WORKSET_CATCHUP))
  8655. {
  8656. //
  8657. // We've got a WORKSET_NEW or WORKSET_CATCHUP message, but the
  8658. // workset already exists. This is not a problem - we throw the
  8659. // message away - but check the priority and persistence fields
  8660. // are set to the right values.
  8661. //
  8662. // (They might be wrong if we created the workset on receipt of
  8663. // a lock request for a workset we didn't already have).
  8664. //
  8665. TRACE_OUT((
  8666. "Recd WORKSET_NEW/CATCHUP for extant workset %u in WSG %d",
  8667. pWorkset->worksetID, pWSGroup->wsg));
  8668. pWorkset->priority = *((NET_PRIORITY *) &(pHeader->position));
  8669. pWorkset->fTemp = *((BOOL *) &(pHeader->objectID));
  8670. rc = 0;
  8671. DC_QUIT;
  8672. }
  8673. }
  8674. //
  8675. // Test 3.:
  8676. //
  8677. else // rc == OM_RC_WORKSET_NOT_FOUND
  8678. {
  8679. if ((messageType != OMNET_WORKSET_NEW) &&
  8680. (messageType != OMNET_WORKSET_CATCHUP))
  8681. {
  8682. //
  8683. // Packet is for unknown workset and it's not a
  8684. // WORKSET_NEW/CATCHUP, so bounce it:
  8685. //
  8686. TRACE_OUT(( "Bouncing message for unknown workset %d WSG %d",
  8687. pHeader->worksetID, pWSGroup->wsg));
  8688. BounceMessage(pDomain, pReceiveCB);
  8689. bounced = TRUE;
  8690. rc = 0;
  8691. DC_QUIT;
  8692. }
  8693. }
  8694. //
  8695. // Test 4:.
  8696. //
  8697. if ((rc == OM_RC_OBJECT_DELETED) || (rc == OM_RC_OBJECT_PENDING_DELETE))
  8698. {
  8699. //
  8700. // Packet is for object which has been deleted, so we just throw it
  8701. // away (done for us by our caller):
  8702. //
  8703. TRACE_OUT(("Message 0x%08x for deleted obj 0x%08x:0x%08x in WSG %d:%hu",
  8704. messageType,
  8705. pHeader->objectID.creator, pHeader->objectID.sequence,
  8706. pWSGroup->wsg, pWorkset->worksetID));
  8707. rc = 0;
  8708. //
  8709. // Mark the data memory allocated for this object to be freed.
  8710. //
  8711. freeMemory = TRUE;
  8712. DC_QUIT;
  8713. }
  8714. //
  8715. // Test 5.:
  8716. //
  8717. if (rc != OM_RC_BAD_OBJECT_ID) // i.e. existing object
  8718. {
  8719. if ((messageType == OMNET_OBJECT_ADD) ||
  8720. (messageType == OMNET_OBJECT_CATCHUP))
  8721. {
  8722. //
  8723. // In this case, we DO have an OBEJCT_ADD/CATCHUP, but the
  8724. // object was found anyway! This must be a duplicate Add, so
  8725. // we just throw it away:
  8726. //
  8727. TRACE_OUT(( "Add for existing object 0x%08x:0x%08x in WSG %d:%hu",
  8728. pHeader->objectID.creator, pHeader->objectID.sequence,
  8729. pWSGroup->wsg, pWorkset->worksetID));
  8730. rc = 0;
  8731. //
  8732. // Mark the data memory allocated for this object to be freed.
  8733. //
  8734. freeMemory = TRUE;
  8735. DC_QUIT;
  8736. }
  8737. }
  8738. //
  8739. // Test 6.:
  8740. //
  8741. else // rc == OM_RC_BAD_OBJECT_ID
  8742. {
  8743. if ((messageType != OMNET_OBJECT_ADD) &&
  8744. (messageType != OMNET_OBJECT_CATCHUP))
  8745. {
  8746. //
  8747. // Packet is for unknown object, but it's not an
  8748. // OBJECT_ADD/CATCHUP, so bounce it:
  8749. //
  8750. TRACE_OUT(( "Message 0x%08x for unknown obj 0x%08x:0x%08x in WSG %d:%hu",
  8751. messageType,
  8752. pHeader->objectID.creator, pHeader->objectID.sequence,
  8753. pWSGroup->wsg, pWorkset->worksetID));
  8754. BounceMessage(pDomain, pReceiveCB);
  8755. bounced = TRUE;
  8756. rc = 0;
  8757. DC_QUIT;
  8758. }
  8759. }
  8760. //
  8761. // OK, we've passed all the tests above, so we must be in a position to
  8762. // process the operation. Switch on the message type and invoke the
  8763. // appropriate function:
  8764. //
  8765. switch (messageType)
  8766. {
  8767. case OMNET_LOCK_NOTIFY:
  8768. {
  8769. ProcessLockNotify(pomPrimary,
  8770. pDomain,
  8771. pWSGroup,
  8772. pWorkset,
  8773. ((POMNET_LOCK_PKT)pHeader)->data1);
  8774. }
  8775. break;
  8776. case OMNET_UNLOCK:
  8777. {
  8778. ProcessUnlock(pomPrimary,
  8779. pWorkset,
  8780. pHeader->header.sender);
  8781. }
  8782. break;
  8783. case OMNET_WORKSET_CATCHUP:
  8784. case OMNET_WORKSET_NEW:
  8785. {
  8786. rc = ProcessWorksetNew(pomPrimary->putTask, pHeader, pWSGroup);
  8787. //
  8788. // We will want to see if any bouncing messages can be
  8789. // processed because of this new workset, so set the reprocess
  8790. // flag:
  8791. //
  8792. retryBounceList = TRUE;
  8793. }
  8794. break;
  8795. case OMNET_WORKSET_CLEAR:
  8796. {
  8797. rc = ProcessWorksetClear(pomPrimary->putTask,
  8798. pomPrimary,
  8799. pHeader,
  8800. pWSGroup,
  8801. pWorkset);
  8802. }
  8803. break;
  8804. case OMNET_OBJECT_CATCHUP:
  8805. case OMNET_OBJECT_ADD:
  8806. {
  8807. rc = ProcessObjectAdd(pomPrimary->putTask,
  8808. pHeader,
  8809. pWSGroup,
  8810. pWorkset,
  8811. (POM_OBJECTDATA) pData,
  8812. &pObj);
  8813. retryBounceList = TRUE;
  8814. }
  8815. break;
  8816. case OMNET_OBJECT_MOVE:
  8817. {
  8818. ProcessObjectMove(pomPrimary->putTask,
  8819. pHeader,
  8820. pWorkset,
  8821. pObj);
  8822. }
  8823. break;
  8824. case OMNET_OBJECT_DELETE:
  8825. case OMNET_OBJECT_REPLACE:
  8826. case OMNET_OBJECT_UPDATE:
  8827. {
  8828. rc = ProcessObjectDRU(pomPrimary->putTask,
  8829. pHeader,
  8830. pWSGroup,
  8831. pWorkset,
  8832. pObj,
  8833. (POM_OBJECTDATA) pData);
  8834. }
  8835. break;
  8836. default:
  8837. {
  8838. ERROR_OUT(( "Default case in switch (message type: 0x%08x)",
  8839. messageType));
  8840. }
  8841. }
  8842. if (rc != 0)
  8843. {
  8844. ERROR_OUT(( "Error %d processing operation (type: 0x%08x)",
  8845. rc, messageType));
  8846. DC_QUIT;
  8847. }
  8848. TRACE_OUT(("Processed message type 0x%08x", messageType));
  8849. DC_EXIT_POINT:
  8850. //
  8851. // Unless we bounced the message, do some cleanup:
  8852. //
  8853. // Note: This must be after DC_EXIT_POINT because we want to do it
  8854. // even if we didn't process the message (unless we bounced it).
  8855. //
  8856. // If we haven't bounced the message then we may be able to free
  8857. // the data depending on the results of the above tests.
  8858. //
  8859. if (bounced == FALSE)
  8860. {
  8861. //
  8862. // Remove the message from the list it's in (either the pending
  8863. // receives list if this message was never bounced or the bounce
  8864. // list if it has been bounced):
  8865. //
  8866. COM_BasedListRemove(&(pReceiveCB->chain));
  8867. //
  8868. // Now free the message and the receive control block (NOT THE
  8869. // DATA! If there was any, it's just been used for an object
  8870. // add/update etc.)
  8871. //
  8872. UT_FreeRefCount((void**)&pHeader, FALSE);
  8873. UT_FreeRefCount((void**)&pReceiveCB, FALSE);
  8874. //
  8875. // ...unless of course we indicated that we should free the data:
  8876. //
  8877. if (freeMemory)
  8878. {
  8879. if (pData != NULL)
  8880. {
  8881. TRACE_OUT(("Freeing object data at 0x%08x", pData));
  8882. UT_FreeRefCount(&pData, FALSE);
  8883. }
  8884. }
  8885. }
  8886. else
  8887. {
  8888. rc = OM_RC_BOUNCED;
  8889. }
  8890. //
  8891. // If we're not already processing bounced messages, and this message
  8892. // is an "enabling" message (i.e. a WORKSET_NEW or OBJECT_ADD), then
  8893. // retry the bounce list:
  8894. //
  8895. if ((whatNext == OK_TO_RETRY_BOUNCE_LIST) &&
  8896. (retryBounceList))
  8897. {
  8898. ProcessBouncedMessages(pomPrimary, pDomain);
  8899. }
  8900. DebugExitDWORD(ProcessMessage, rc);
  8901. return(rc);
  8902. }
  8903. //
  8904. // BounceMessage()
  8905. //
  8906. void BounceMessage
  8907. (
  8908. POM_DOMAIN pDomain,
  8909. POM_RECEIVE_CB pReceiveCB
  8910. )
  8911. {
  8912. UINT count;
  8913. DebugEntry(BounceMessage);
  8914. TRACE_OUT(( "Bouncing message type 0x%08x (CB at 0x%08x)",
  8915. pReceiveCB->pHeader->header.messageType, pReceiveCB));
  8916. //
  8917. // Remove this receive CB from whichever list its currently in (either
  8918. // the list of pending receives if this is the first time it's been
  8919. // bounced or the bounce list if not) and insert it at the START of the
  8920. // bounce list for the Domain:
  8921. //
  8922. // Note: the reason why we insert at the start is because
  8923. // ProcessBouncedMessages may be chaining through the list and
  8924. // we don't want to put this one back in the list at a later
  8925. // point or else we might go infinite.
  8926. //
  8927. COM_BasedListRemove(&(pReceiveCB->chain));
  8928. COM_BasedListInsertAfter(&(pDomain->bounceList), &(pReceiveCB->chain));
  8929. DebugExitVOID(BounceMessage);
  8930. }
  8931. //
  8932. //
  8933. //
  8934. // ProcessBouncedMessages(...)
  8935. //
  8936. //
  8937. //
  8938. void ProcessBouncedMessages(POM_PRIMARY pomPrimary,
  8939. POM_DOMAIN pDomain)
  8940. {
  8941. UINT count;
  8942. POM_RECEIVE_CB pReceiveCB;
  8943. POM_RECEIVE_CB pTempReceiveCB;
  8944. BOOL listGettingShorter;
  8945. UINT numPasses;
  8946. UINT rc;
  8947. DebugEntry(ProcessBouncedMessages);
  8948. TRACE_OUT(( "Processing bounced messages"));
  8949. //
  8950. // It is important that we process bounced messages as soon as we are
  8951. // able. Since processing one may enable others to be processed, we
  8952. // must go through the list several times, until we can't do any more
  8953. // work on it. So, we keep track of whether the list is getting shorter
  8954. // - if it is, we must have processed something so it's worth going
  8955. // through again.
  8956. //
  8957. // Note: an alternative would be do do exactly three passes through the
  8958. // list: one to do all the WORKSET_NEWs, then one to do all the
  8959. // OBJECT_ADDs and then one to do any remaining operations. This
  8960. // is slightly less generic code and is tied in to the current
  8961. // dependencies between operations so is not ideal but it may
  8962. // prove to be a good performance improvement if the average
  8963. // number of passes we do now exceeds three.
  8964. //
  8965. listGettingShorter = TRUE;
  8966. numPasses = 0;
  8967. pReceiveCB = (POM_RECEIVE_CB)COM_BasedListFirst(&(pDomain->bounceList), FIELD_OFFSET(OM_RECEIVE_CB, chain));
  8968. while (listGettingShorter)
  8969. {
  8970. numPasses++;
  8971. listGettingShorter = FALSE;
  8972. while (pReceiveCB != NULL)
  8973. {
  8974. //
  8975. // We want to chain through the list of bounced messages and try
  8976. // to process each one. However, trying to process a message
  8977. // could cause it to be removed from the list (if processed) or
  8978. // added back in at the start (if bounced again).
  8979. //
  8980. // So, we chain NOW to the next one in the list:
  8981. //
  8982. pTempReceiveCB = (POM_RECEIVE_CB)COM_BasedListNext(&(pDomain->bounceList), pReceiveCB,
  8983. FIELD_OFFSET(OM_RECEIVE_CB, chain));
  8984. TRACE_OUT(( "Retrying message type 0x%08x (CB at 0x%08x)",
  8985. pReceiveCB->pHeader->header.messageType, pReceiveCB));
  8986. rc = ProcessMessage(pomPrimary, pReceiveCB, DONT_RETRY_BOUNCE_LIST);
  8987. if (rc != OM_RC_BOUNCED)
  8988. {
  8989. //
  8990. // We processed a message, so set the flag for another run
  8991. // through the list:
  8992. //
  8993. TRACE_OUT(( "Successfully processed bounced message"));
  8994. listGettingShorter = TRUE;
  8995. }
  8996. //
  8997. // Now "chain" on to the next one, using the link we've already
  8998. // set up:
  8999. //
  9000. pReceiveCB = pTempReceiveCB;
  9001. }
  9002. }
  9003. TRACE_OUT(( "Processed as much of bounce list as possible in %hu passes",
  9004. numPasses));
  9005. DebugExitVOID(ProcessBouncedMessages);
  9006. }
  9007. //
  9008. // FreeSendInst(...)
  9009. //
  9010. void FreeSendInst
  9011. (
  9012. POM_SEND_INST pSendInst
  9013. )
  9014. {
  9015. DebugEntry(FreeSendInst);
  9016. if (pSendInst->pMessage != NULL)
  9017. {
  9018. UT_FreeRefCount((void**)&(pSendInst->pMessage), FALSE);
  9019. }
  9020. if (pSendInst->pWSGroup != NULL)
  9021. {
  9022. UT_FreeRefCount((void**)&(pSendInst->pWSGroup), FALSE);
  9023. }
  9024. if (pSendInst->pWorkset != NULL)
  9025. {
  9026. UT_FreeRefCount((void**)&(pSendInst->pWorkset), FALSE);
  9027. }
  9028. if (pSendInst->pObj != NULL)
  9029. {
  9030. UT_FreeRefCount((void**)&(pSendInst->pObj), FALSE);
  9031. }
  9032. if (pSendInst->pDataStart != NULL)
  9033. {
  9034. UT_FreeRefCount((void**)&(pSendInst->pDataStart), FALSE);
  9035. }
  9036. //
  9037. // Now free the send instruction itself:
  9038. //
  9039. COM_BasedListRemove(&(pSendInst->chain));
  9040. UT_FreeRefCount((void**)&pSendInst, FALSE);
  9041. DebugExitVOID(FreeSendInst);
  9042. }
  9043. //
  9044. // PreProcessMessage(...)
  9045. //
  9046. UINT PreProcessMessage
  9047. (
  9048. POM_DOMAIN pDomain,
  9049. OM_WSGROUP_ID wsGroupID,
  9050. OM_WORKSET_ID worksetID,
  9051. POM_OBJECT_ID pObjectID,
  9052. OMNET_MESSAGE_TYPE messageType,
  9053. POM_WSGROUP * ppWSGroup,
  9054. POM_WORKSET * ppWorkset,
  9055. POM_OBJECT * ppObj
  9056. )
  9057. {
  9058. POM_WSGROUP pWSGroup = NULL;
  9059. POM_WORKSET pWorkset = NULL;
  9060. POM_OBJECT pObj;
  9061. UINT rc = 0;
  9062. DebugEntry(PreProcessMessage);
  9063. //
  9064. // OK, we've got some sort of operation message: let's find the workset
  9065. // group it relates to:
  9066. //
  9067. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
  9068. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  9069. FIELD_OFFSET(OM_WSGROUP, wsGroupID), (DWORD)wsGroupID,
  9070. FIELD_SIZE(OM_WSGROUP, wsGroupID));
  9071. if (pWSGroup == NULL)
  9072. {
  9073. //
  9074. // This is a message for a workset group which we are not/no longer
  9075. // registered with, so quit (our caller will throw it away):
  9076. //
  9077. rc = OM_RC_WSGROUP_NOT_FOUND;
  9078. DC_QUIT;
  9079. }
  9080. ValidateWSGroup(pWSGroup);
  9081. pWorkset = pWSGroup->apWorksets[worksetID];
  9082. //
  9083. // Check that this set up a valid workset pointer:
  9084. //
  9085. if (pWorkset == NULL)
  9086. {
  9087. rc = OM_RC_WORKSET_NOT_FOUND;
  9088. DC_QUIT;
  9089. }
  9090. ValidateWorkset(pWorkset);
  9091. //
  9092. // Search for the object ID, locking workset group mutex while we do
  9093. // so.
  9094. //
  9095. // Note: if the <pObjectID> parameter is NULL, it means that the caller
  9096. // doesn't want us to search for the object ID, so we skip this
  9097. // step
  9098. //
  9099. switch (messageType)
  9100. {
  9101. case OMNET_OBJECT_ADD:
  9102. case OMNET_OBJECT_CATCHUP:
  9103. case OMNET_OBJECT_REPLACE:
  9104. case OMNET_OBJECT_UPDATE:
  9105. case OMNET_OBJECT_DELETE:
  9106. case OMNET_OBJECT_MOVE:
  9107. {
  9108. rc = ObjectIDToPtr(pWorkset, *pObjectID, &pObj);
  9109. if (rc != 0)
  9110. {
  9111. //
  9112. // No object found with this ID (rc is BAD_ID, DELETED or
  9113. // PENDING_DELETE):
  9114. //
  9115. *ppObj = NULL;
  9116. }
  9117. else
  9118. {
  9119. ValidateObject(pObj);
  9120. *ppObj = pObj;
  9121. }
  9122. }
  9123. break;
  9124. default:
  9125. {
  9126. //
  9127. // Do nothing for other messages.
  9128. //
  9129. }
  9130. }
  9131. DC_EXIT_POINT:
  9132. *ppWorkset = pWorkset;
  9133. *ppWSGroup = pWSGroup;
  9134. TRACE_OUT(("Pre-processed message for Domain %u", pDomain->callID));
  9135. DebugExitDWORD(PreProcessMessage, rc);
  9136. return(rc);
  9137. }
//
// PurgeNonPersistent(...)
//
// Walks every non-persistent ("temp") workset in the given workset group
// and deletes each object added by the departed node <userID>.  If
// <userID> is NET_ALL_REMOTES, deletes every object not added locally.
// Called when a node (or all remote nodes) leaves the Domain.
//
void PurgeNonPersistent
(
    POM_PRIMARY     pomPrimary,
    POM_DOMAIN      pDomain,
    OM_WSGROUP_ID   wsGroupID,
    NET_UID         userID
)
{
    POM_WSGROUP     pWSGroup;
    POM_WORKSET     pWorkset;
    OM_WORKSET_ID   worksetID;
    POM_OBJECT      pObj;

    DebugEntry(PurgeNonPersistent);

    //
    // Find the workset group which has the specified ID:
    //
    COM_BasedListFind(LIST_FIND_FROM_FIRST, &pDomain->wsGroups,
        (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
        FIELD_OFFSET(OM_WSGROUP, wsGroupID), (DWORD)wsGroupID,
        FIELD_SIZE(OM_WSGROUP, wsGroupID));
    if (pWSGroup == NULL)
    {
        //
        // SFR5794: Not an error if wsgroup not found - this just means
        // someone has detached who was using a workset group which we were
        // not using.
        //
        TRACE_OUT(("WSGroup %hu not found in domain %u",
            wsGroupID, pDomain->callID));
        DC_QUIT;
    }

    //
    // Chain through each workset in the group - for those that are
    // non-persistent, then chain through each object looking for a match
    // on the user ID of the departed node:
    //
    for (worksetID = 0; worksetID < OM_MAX_WORKSETS_PER_WSGROUP; worksetID++)
    {
        pWorkset = pWSGroup->apWorksets[worksetID];
        if (pWorkset == NULL)
        {
            //
            // Workset with this ID doesn't exist - continue
            //
            continue;
        }
        if (!pWorkset->fTemp)
        {
            //
            // A persistent workset - we don't need to purge it of objects
            //
            continue;
        }

        pObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
        while (pObj != NULL)
        {
            ValidateObject(pObj);

            //
            // SFR6353: Don't try to delete the object if it's already
            // pending delete.
            //
            if (!(pObj->flags & DELETED) &&
                !(pObj->flags & PENDING_DELETE))
            {
                //
                // If this object was added by the departed node, OR if
                // ALL_REMOTES have gone and it was not added by us...
                //
                if ((pObj->objectID.creator == userID) ||
                    ((userID == NET_ALL_REMOTES) &&
                    (pObj->objectID.creator != pDomain->userID)))
                {
                    //
                    // ...delete it:
                    //
                    // NOTE(review): this looks like it relies on the
                    // delete only marking the object PENDING_DELETE, so
                    // that COM_BasedListNext below is still safe on pObj
                    // - confirm against ObjectDRU before restructuring.
                    //
                    ObjectDRU(pomPrimary->putTask,
                        pWSGroup,
                        pWorkset,
                        pObj,
                        NULL,
                        OMNET_OBJECT_DELETE);
                }
            }
            pObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObj,
                FIELD_OFFSET(OM_OBJECT, chain));
        }
    }

DC_EXIT_POINT:
    DebugExitVOID(PurgeNonPersistent);
}
  9231. //
  9232. // SetPersonData(...)
  9233. //
  9234. UINT SetPersonData
  9235. (
  9236. POM_PRIMARY pomPrimary,
  9237. POM_DOMAIN pDomain,
  9238. POM_WSGROUP pWSGroup
  9239. )
  9240. {
  9241. POM_WSGROUP pOMCWSGroup;
  9242. POM_WORKSET pOMCWorkset;
  9243. POM_OBJECT pObjReg;
  9244. POM_WSGROUP_REG_REC pRegObject;
  9245. POM_WSGROUP_REG_REC pNewRegObject;
  9246. UINT rc = 0;
  9247. DebugEntry(SetPersonData);
  9248. //
  9249. // Set up pointers to the ObManControl workset group and the workset
  9250. // which contains the object to be replaced:
  9251. //
  9252. pOMCWSGroup = GetOMCWsgroup(pDomain);
  9253. if( pOMCWSGroup == NULL)
  9254. {
  9255. TRACE_OUT(("pOMCWSGroup not found"));
  9256. DC_QUIT;
  9257. }
  9258. pOMCWorkset = pOMCWSGroup->apWorksets[pWSGroup->wsGroupID];
  9259. //
  9260. // Set up pointers to the object record and the object data itself:
  9261. //
  9262. pObjReg = pWSGroup->pObjReg;
  9263. ValidateObject(pObjReg);
  9264. pRegObject = (POM_WSGROUP_REG_REC)pObjReg->pData;
  9265. if (!pRegObject)
  9266. {
  9267. ERROR_OUT(("SetPersonData: object 0x%08x has no data", pObjReg));
  9268. rc = UT_RC_NO_MEM;
  9269. DC_QUIT;
  9270. }
  9271. ValidateObjectDataWSGREGREC(pRegObject);
  9272. //
  9273. // Allocate some memory for the new object with which we are about to
  9274. // replace the old one:
  9275. //
  9276. pNewRegObject = (POM_WSGROUP_REG_REC)UT_MallocRefCount(sizeof(OM_WSGROUP_REG_REC), TRUE);
  9277. if (!pNewRegObject)
  9278. {
  9279. rc = UT_RC_NO_MEM;
  9280. DC_QUIT;
  9281. }
  9282. //
  9283. // Set the fields in the new object to have the same data as the old:
  9284. //
  9285. pNewRegObject->length = pRegObject->length;
  9286. pNewRegObject->idStamp = pRegObject->idStamp;
  9287. pNewRegObject->userID = pRegObject->userID;
  9288. pNewRegObject->status = pRegObject->status;
  9289. //
  9290. // Fill in the person data fields and issue the replace:
  9291. //
  9292. COM_GetSiteName(pNewRegObject->personData.personName,
  9293. sizeof(pNewRegObject->personData.personName));
  9294. rc = ObjectDRU(pomPrimary->putTask,
  9295. pOMCWSGroup,
  9296. pOMCWorkset,
  9297. pObjReg,
  9298. (POM_OBJECTDATA) pNewRegObject,
  9299. OMNET_OBJECT_REPLACE);
  9300. if (rc != 0)
  9301. {
  9302. DC_QUIT;
  9303. }
  9304. TRACE_OUT((" Set person data for WSG %d", pWSGroup->wsg));
  9305. DC_EXIT_POINT:
  9306. if (rc != 0)
  9307. {
  9308. ERROR_OUT(("Error %d updating own reg object for WSG %d",
  9309. rc, pWSGroup->wsg));
  9310. }
  9311. DebugExitDWORD(SetPersonData, rc);
  9312. return(rc);
  9313. }
  9314. //
  9315. // RemoveInfoObject(...)
  9316. //
  9317. void RemoveInfoObject
  9318. (
  9319. POM_PRIMARY pomPrimary,
  9320. POM_DOMAIN pDomain,
  9321. OM_WSGROUP_ID wsGroupID
  9322. )
  9323. {
  9324. POM_WSGROUP pOMCWSGroup;
  9325. POM_WORKSET pOMCWorkset;
  9326. POM_OBJECT pObj;
  9327. DebugEntry(RemoveInfoObject);
  9328. //
  9329. // OK, we've got to delete the identification object in workset #0 in
  9330. // ObManControl which identified the workset group.
  9331. //
  9332. pOMCWSGroup = GetOMCWsgroup(pDomain);
  9333. pOMCWorkset = GetOMCWorkset(pDomain, 0);
  9334. //
  9335. // ...search for the WSGROUP_INFO object (by wsGroupID - we don't know
  9336. // the name or function profile so leave them blank):
  9337. //
  9338. FindInfoObject(pDomain, wsGroupID, OMWSG_MAX, OMFP_MAX, &pObj);
  9339. if (pObj == NULL)
  9340. {
  9341. //
  9342. // This should happen only for the local Domain:
  9343. //
  9344. // SFR 2208 : No: This will also happen in a regular call when
  9345. // the call ends almost as soon as it has begun. The
  9346. // sequence of events is as follows:
  9347. //
  9348. // - on callee, ObMan sends WSG_SEND_REQ to caller
  9349. // - caller sends REG_REC object, then WORKSET_CATCHUP
  9350. // then the INFO object we can't find
  9351. // - callee receives REG_REC then WORKSET_CATHCUP
  9352. // - call ends and callee enters WSGRemoveFromDomain
  9353. // which finds the REG_REC then calls us here
  9354. //
  9355. // Therefore the DC_ABSence of the INFO object is valid
  9356. // and we just trace an alert:
  9357. //
  9358. // NOTE: It will also happen when we receive a DELETE from
  9359. // someone else who is doing the same purge process
  9360. // as us.
  9361. //
  9362. WARNING_OUT(("No INFO object found for wsGroup %hu", wsGroupID));
  9363. DC_QUIT;
  9364. }
  9365. else
  9366. {
  9367. ValidateObject(pObj);
  9368. }
  9369. //
  9370. // We found an object, so delete it from the workset:
  9371. //
  9372. TRACE_OUT(("Deleting INFO object for wsGroup %hu from domain %u",
  9373. wsGroupID, pDomain->callID));
  9374. ObjectDRU(pomPrimary->putTask,
  9375. pOMCWSGroup,
  9376. pOMCWorkset,
  9377. pObj,
  9378. NULL,
  9379. OMNET_OBJECT_DELETE);
  9380. DC_EXIT_POINT:
  9381. DebugExitVOID(RemoveInfoObject);
  9382. }
//
// RemovePersonObject(...)
//
// Deletes the registration ("person") object(s) for a departed node from
// the ObManControl workset for workset group <wsGroupID>.  If
// <detachedUserID> is NET_ALL_REMOTES, deletes every person object not
// owned by the local node; otherwise deletes just that node's object.
//
void RemovePersonObject
(
    POM_PRIMARY     pomPrimary,
    POM_DOMAIN      pDomain,
    OM_WSGROUP_ID   wsGroupID,
    NET_UID         detachedUserID
)
{
    POM_WSGROUP         pOMCWSGroup;
    POM_WORKSET         pOMCWorkset;
    POM_OBJECT          pObjReg;
    NET_UID             userIDRemoved;
    POM_WSGROUP_REG_REC pRegObject;

    DebugEntry(RemovePersonObject);

    //
    // Set up pointers to the ObManControl workset group and the relevant
    // workset within it:
    //
    pOMCWSGroup = GetOMCWsgroup(pDomain);
    if( pOMCWSGroup == NULL)
    {
        TRACE_OUT(("OMC Workset Group not found - no person objects to remove"));
        DC_QUIT;
    }
    pOMCWorkset = pOMCWSGroup->apWorksets[wsGroupID];

    //
    // If there is no such workset, it could be because the workset group
    // has been moved into the local Domain on call end etc. In this case,
    // just quit out.
    //
    if (pOMCWorkset == NULL)
    {
        TRACE_OUT(("OMC Workset not found - no person objects to remove"));
        DC_QUIT;
    }

    //
    // If detachedUserID is NET_ALL_REMOTES, we've a lot of work to do and
    // we'll do this loop many times - otherwise we'll just do it for a
    // single person object.
    //
    // The loop terminates when FindPersonObject finds no further matching
    // object, or when a delete fails.
    //
    for (;;)
    {
        if (detachedUserID == NET_ALL_REMOTES)
        {
            //
            // This will find ANY person object that's NOT OURS:
            //
            FindPersonObject(pOMCWorkset,
                pDomain->userID,
                FIND_OTHERS,
                &pObjReg);
        }
        else
        {
            //
            // This will find a specific node's person object:
            //
            FindPersonObject(pOMCWorkset,
                detachedUserID,
                FIND_THIS,
                &pObjReg);
        }

        //
        // If we don't find one, get out of the loop:
        //
        if (pObjReg == NULL)
        {
            break;
        }
        ValidateObject(pObjReg);

        //
        // If detachedUserID was NET_ALL_REMOTES, the user ID in the object
        // we're deleting will obviously be different. So, find out the
        // real user ID from the object we're deleting:
        //
        pRegObject = (POM_WSGROUP_REG_REC)pObjReg->pData;
        if (!pRegObject)
        {
            // Shouldn't normally happen: FindPersonObject skips objects
            // with no data, so just report and retry the search.
            ERROR_OUT(("RemovePersonObject: object 0x%08x has no data", pObjReg));
        }
        else
        {
            ValidateObjectDataWSGREGREC(pRegObject);
            userIDRemoved = pRegObject->userID;

            //
            // Now delete the object. If the return code is bad, don't quit -
            // we may still want to delete the info object.
            //
            TRACE_OUT(("Deleting person object for node 0x%08x, wsGroup %hu",
                userIDRemoved, wsGroupID));
            if (ObjectDRU(pomPrimary->putTask,
                pOMCWSGroup,
                pOMCWorkset,
                pObjReg,
                NULL,
                OMNET_OBJECT_DELETE) != 0)
            {
                ERROR_OUT(("Error from ObjectDRU - leaving loop"));
                break;
            }
        }
    }

DC_EXIT_POINT:
    DebugExitVOID(RemovePersonObject);
}
  9491. //
  9492. // WSGRecordFind(...)
  9493. //
  9494. void WSGRecordFind
  9495. (
  9496. POM_DOMAIN pDomain,
  9497. OMWSG wsg,
  9498. OMFP fpHandler,
  9499. POM_WSGROUP * ppWSGroup
  9500. )
  9501. {
  9502. POM_WSGROUP pWSGroup = NULL;
  9503. DebugEntry(WSGRecordFind);
  9504. //
  9505. // Search for workset group record:
  9506. //
  9507. TRACE_OUT(("Searching WSG list for Domain %u for match on WSG %d FP %d",
  9508. pDomain->callID, wsg, fpHandler));
  9509. pWSGroup = (POM_WSGROUP)COM_BasedListFirst(&(pDomain->wsGroups), FIELD_OFFSET(OM_WSGROUP, chain));
  9510. while (pWSGroup != NULL)
  9511. {
  9512. if ((pWSGroup->wsg == wsg) && (pWSGroup->fpHandler == fpHandler))
  9513. {
  9514. break;
  9515. }
  9516. pWSGroup = (POM_WSGROUP)COM_BasedListNext(&(pDomain->wsGroups), pWSGroup,
  9517. FIELD_OFFSET(OM_WSGROUP, chain));
  9518. }
  9519. //
  9520. // Set up caller's pointer:
  9521. //
  9522. *ppWSGroup = pWSGroup;
  9523. DebugExitVOID(WSGRecordFind);
  9524. }
  9525. //
  9526. // AddClientToWSGList(...)
  9527. //
  9528. UINT AddClientToWSGList
  9529. (
  9530. PUT_CLIENT putTask,
  9531. POM_WSGROUP pWSGroup,
  9532. OM_WSGROUP_HANDLE hWSGroup,
  9533. UINT mode
  9534. )
  9535. {
  9536. POM_CLIENT_LIST pClientListEntry;
  9537. UINT count;
  9538. UINT rc = 0;
  9539. DebugEntry(AddClientToWSGList);
  9540. //
  9541. // Count the number of local primaries registered with the workset
  9542. // group:
  9543. //
  9544. count = 0;
  9545. pClientListEntry = (POM_CLIENT_LIST)COM_BasedListFirst(&(pWSGroup->clients), FIELD_OFFSET(OM_CLIENT_LIST, chain));
  9546. while (pClientListEntry != NULL)
  9547. {
  9548. if (pClientListEntry->mode == PRIMARY)
  9549. {
  9550. count++;
  9551. }
  9552. pClientListEntry = (POM_CLIENT_LIST)COM_BasedListNext(&(pWSGroup->clients), pClientListEntry,
  9553. FIELD_OFFSET(OM_CLIENT_LIST, chain));
  9554. }
  9555. //
  9556. // What we do now depends on whether this is a primary or a secondary
  9557. // registration:
  9558. //
  9559. if (mode == PRIMARY)
  9560. {
  9561. //
  9562. // If a primary, check that no other primaries are present:
  9563. //
  9564. if (count > 0)
  9565. {
  9566. ERROR_OUT(("Can't register TASK 0x%08x with WSG %d as primary: "
  9567. "another primary is already registered",
  9568. putTask, pWSGroup->wsg));
  9569. rc = OM_RC_TOO_MANY_CLIENTS;
  9570. DC_QUIT;
  9571. }
  9572. else
  9573. {
  9574. TRACE_OUT(("%hu primary Clients already registered with WSG %d",
  9575. count, pWSGroup->wsg));
  9576. }
  9577. }
  9578. else // mode == SECONDARY
  9579. {
  9580. if (count == 0)
  9581. {
  9582. WARNING_OUT(("Can't register TASK 0x%08x with WSG %d as secondary: "
  9583. "no primary registered",
  9584. putTask, pWSGroup->wsg));
  9585. rc = OM_RC_NO_PRIMARY;
  9586. DC_QUIT;
  9587. }
  9588. }
  9589. //
  9590. // OK, allocate some memory for the Client's entry in the list:
  9591. //
  9592. pClientListEntry = (POM_CLIENT_LIST)UT_MallocRefCount(sizeof(OM_CLIENT_LIST), TRUE);
  9593. if (!pClientListEntry)
  9594. {
  9595. rc = UT_RC_NO_MEM;
  9596. DC_QUIT;
  9597. }
  9598. SET_STAMP(pClientListEntry, CLIENTLIST);
  9599. pClientListEntry->putTask = putTask;
  9600. pClientListEntry->hWSGroup = hWSGroup;
  9601. pClientListEntry->mode = (WORD)mode;
  9602. COM_BasedListInsertBefore(&(pWSGroup->clients), &(pClientListEntry->chain));
  9603. TRACE_OUT(("Added TASK 0x%08x to Client list for WSG %d as %s",
  9604. putTask, pWSGroup->wsg,
  9605. mode == PRIMARY ? "primary" : "secondary"));
  9606. DC_EXIT_POINT:
  9607. DebugExitDWORD(AddClientToWSGList, rc);
  9608. return(rc);
  9609. }
  9610. //
  9611. // FindPersonObject(...)
  9612. //
  9613. void FindPersonObject
  9614. (
  9615. POM_WORKSET pOMCWorkset,
  9616. NET_UID userID,
  9617. UINT searchType,
  9618. POM_OBJECT * ppObjReg
  9619. )
  9620. {
  9621. BOOL found = FALSE;
  9622. POM_OBJECT pObj;
  9623. POM_WSGROUP_REG_REC pRegObject;
  9624. UINT rc = 0;
  9625. DebugEntry(FindPersonObject);
  9626. TRACE_OUT(("Searching OMC workset %u for reg obj %sowned by node 0x%08x",
  9627. pOMCWorkset->worksetID, searchType == FIND_THIS ? "" : "not ", userID));
  9628. pObj = (POM_OBJECT)COM_BasedListFirst(&(pOMCWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  9629. while (pObj != NULL)
  9630. {
  9631. ValidateObject(pObj);
  9632. if (pObj->flags & DELETED)
  9633. {
  9634. // Do nothing
  9635. }
  9636. else if (!pObj->pData)
  9637. {
  9638. ERROR_OUT(("FindPersonObject: object 0x%08x has no data", pObj));
  9639. }
  9640. else
  9641. {
  9642. ValidateObjectData(pObj->pData);
  9643. pRegObject = (POM_WSGROUP_REG_REC)pObj->pData;
  9644. if (pRegObject->idStamp == OM_WSGREGREC_ID_STAMP)
  9645. {
  9646. if (((searchType == FIND_THIS) &&
  9647. (pRegObject->userID == userID)) ||
  9648. ((searchType == FIND_OTHERS) &&
  9649. (pRegObject->userID != userID)))
  9650. {
  9651. //
  9652. // Got it:
  9653. //
  9654. found = TRUE;
  9655. break;
  9656. }
  9657. }
  9658. }
  9659. pObj = (POM_OBJECT)COM_BasedListNext(&(pOMCWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  9660. }
  9661. if (found == TRUE)
  9662. {
  9663. *ppObjReg = pObj;
  9664. }
  9665. else
  9666. {
  9667. if (searchType == FIND_THIS)
  9668. {
  9669. TRACE_OUT(("No reg object found for node 0x%08x in workset %u",
  9670. userID, pOMCWorkset->worksetID));
  9671. }
  9672. *ppObjReg = NULL;
  9673. }
  9674. DebugExitVOID(FindPersonObject);
  9675. }
  9676. //
  9677. // PostWorksetNewEvents(...)
  9678. //
  9679. UINT PostWorksetNewEvents
  9680. (
  9681. PUT_CLIENT putFrom,
  9682. PUT_CLIENT putTo,
  9683. POM_WSGROUP pWSGroup,
  9684. OM_WSGROUP_HANDLE hWSGroup
  9685. )
  9686. {
  9687. OM_WORKSET_ID worksetID;
  9688. OM_EVENT_DATA16 eventData16;
  9689. POM_WORKSET pWorkset;
  9690. UINT count;
  9691. UINT rc = 0;
  9692. DebugEntry(PostWorksetNewEvents);
  9693. TRACE_OUT(("Posting WORKSET_NEW events to Client TASK 0x%08x for WSG %d",
  9694. putTo, pWSGroup->wsg));
  9695. count = 0;
  9696. for (worksetID = 0; worksetID < OM_MAX_WORKSETS_PER_WSGROUP; worksetID++)
  9697. {
  9698. pWorkset = pWSGroup->apWorksets[worksetID];
  9699. if (pWorkset != NULL)
  9700. {
  9701. eventData16.hWSGroup = hWSGroup;
  9702. eventData16.worksetID = worksetID;
  9703. UT_PostEvent(putFrom, putTo, 0,
  9704. OM_WORKSET_NEW_IND,
  9705. *(PUINT) &eventData16,
  9706. 0);
  9707. count++;
  9708. }
  9709. }
  9710. TRACE_OUT(("Posted %hu WORKSET_NEW events (hWSGroup: %hu)", count,
  9711. hWSGroup));
  9712. DebugExitDWORD(PostWorksetNewEvents, rc);
  9713. return(rc);
  9714. }
  9715. //
  9716. // OM_Register(...)
  9717. //
  9718. UINT OM_Register
  9719. (
  9720. PUT_CLIENT putTask,
  9721. OMCLI omType,
  9722. POM_CLIENT * ppomClient
  9723. )
  9724. {
  9725. POM_CLIENT pomClient = NULL;
  9726. UINT rc = 0;
  9727. DebugEntry(OM_Register);
  9728. UT_Lock(UTLOCK_OM);
  9729. if (!g_pomPrimary)
  9730. {
  9731. ERROR_OUT(("OM_Register failed; primary doesn't exist"));
  9732. DC_QUIT;
  9733. }
  9734. ValidateOMP(g_pomPrimary);
  9735. ASSERT(omType >= OMCLI_FIRST);
  9736. ASSERT(omType < OMCLI_MAX);
  9737. //
  9738. // Make sure this task isn't registered as an OM client
  9739. //
  9740. pomClient = &(g_pomPrimary->clients[omType]);
  9741. if (pomClient->putTask)
  9742. {
  9743. ERROR_OUT(("OM secondary %d already exists", omType));
  9744. pomClient = NULL;
  9745. rc = OM_RC_ALREADY_REGISTERED;
  9746. DC_QUIT;
  9747. }
  9748. // Bump up ref count on OM primary
  9749. UT_BumpUpRefCount(g_pomPrimary);
  9750. //
  9751. // Fill in the client info
  9752. //
  9753. ZeroMemory(pomClient, sizeof(*pomClient));
  9754. SET_STAMP(pomClient, OCLIENT);
  9755. pomClient->putTask = putTask;
  9756. COM_BasedListInit(&(pomClient->locks));
  9757. //
  9758. // Register an exit procedure for cleanup
  9759. //
  9760. UT_RegisterExit(putTask, OMSExitProc, pomClient);
  9761. pomClient->exitProcReg = TRUE;
  9762. //
  9763. // Register our hidden event handler for the Client (the parameter to be
  9764. // passed to the event handler is the pointer to the Client record):
  9765. //
  9766. UT_RegisterEvent(putTask, OMSEventHandler, pomClient, UT_PRIORITY_OBMAN);
  9767. pomClient->hiddenHandlerReg = TRUE;
  9768. DC_EXIT_POINT:
  9769. *ppomClient = pomClient;
  9770. UT_Unlock(UTLOCK_OM);
  9771. DebugExitDWORD(OM_Register, rc);
  9772. return(rc);
  9773. }
  9774. //
  9775. // OM_Deregister()
  9776. //
  9777. void OM_Deregister(POM_CLIENT * ppomClient)
  9778. {
  9779. DebugEntry(OM_Deregister);
  9780. ASSERT(ppomClient);
  9781. OMSExitProc(*ppomClient);
  9782. *ppomClient = NULL;
  9783. DebugExitVOID(OM_Deregister);
  9784. }
  9785. //
  9786. // OMSExitProc(...)
  9787. //
  9788. void CALLBACK OMSExitProc(LPVOID uData)
  9789. {
  9790. POM_CLIENT pomClient = (POM_CLIENT)uData;
  9791. OM_WSGROUP_HANDLE hWSGroup;
  9792. OM_WSGROUP_HANDLE hWSGroupTemp;
  9793. DebugEntry(OMSecExitProc);
  9794. UT_Lock(UTLOCK_OM);
  9795. ValidateOMS(pomClient);
  9796. // Deregister the event handler and exit procedure (we do this early and
  9797. // clear the flags since we want to avoid recursive abends):
  9798. //
  9799. if (pomClient->hiddenHandlerReg)
  9800. {
  9801. UT_DeregisterEvent(pomClient->putTask, OMSEventHandler, pomClient);
  9802. pomClient->hiddenHandlerReg = FALSE;
  9803. }
  9804. if (pomClient->exitProcReg)
  9805. {
  9806. UT_DeregisterExit(pomClient->putTask, OMSExitProc, pomClient);
  9807. pomClient->exitProcReg = FALSE;
  9808. }
  9809. //
  9810. // Deregister the Client from any workset groups with which it is still
  9811. // registered.
  9812. //
  9813. // The code works as follows:
  9814. //
  9815. // FOR each record in the apUsageRecs array
  9816. // IF there is a valid offset there it refers to a registered
  9817. // workset group so deregister it.
  9818. //
  9819. TRACE_OUT(("Checking Client record for active workset group handles"));
  9820. for (hWSGroup = 0; hWSGroup < OMWSG_MAXPERCLIENT; hWSGroup++)
  9821. {
  9822. if ((pomClient->apUsageRecs[hWSGroup] != NULL) &&
  9823. (pomClient->apUsageRecs[hWSGroup] != (POM_USAGE_REC)-1))
  9824. {
  9825. //
  9826. // Need to copy hWSGroup into a temporary variable, since
  9827. // OM_WSGroupDeregister will set it to zero and that would
  9828. // mess up our for-loop otherwise:
  9829. //
  9830. hWSGroupTemp = hWSGroup;
  9831. OM_WSGroupDeregister(pomClient, &hWSGroupTemp);
  9832. }
  9833. }
  9834. //
  9835. // NULL out the task; that's how the OM primary knows the task is
  9836. // present or not.
  9837. //
  9838. pomClient->putTask = NULL;
  9839. UT_FreeRefCount((void**)&g_pomPrimary, TRUE);
  9840. UT_Unlock(UTLOCK_OM);
  9841. DebugExitVOID(OMSExitProc);
  9842. }
  9843. //
  9844. // OMSEventHandler(...)
  9845. //
//
// Hidden event handler registered for every OM client task (see
// OM_Register).  Its purpose is to filter out-of-date ObMan events before
// they reach the client: events for workset group handles the client has
// deregistered, worksets it has closed, objects that have been deleted,
// or operations the client has since cancelled/confirmed.  Returns TRUE
// ("processed") to swallow an event; FALSE lets it through to the client
// (all non-ObMan events pass straight through).
//
BOOL CALLBACK OMSEventHandler
(
    LPVOID uData,          // client record supplied at registration
    UINT event,
    UINT_PTR eventParam1,  // packed OM_EVENT_DATA16 (hWSGroup + worksetID)
    UINT_PTR eventParam2   // packed OM_EVENT_DATA32 OR an object pointer,
                           // depending on the event
)
{
    POM_CLIENT pomClient = (POM_CLIENT)uData;
    OM_WSGROUP_HANDLE hWSGroup;
    OM_WORKSET_ID worksetID;
    POM_OBJECT pObj;
    UINT correlator;
    POM_PENDING_OP pPendingOp = NULL;
    POM_LOCK pLock;
    POM_WORKSET pWorkset;
    UINT result;
    POM_USAGE_REC pUsageRec;
    OM_OPERATION_TYPE type = NULL_OP;
    BOOL ObjectEvent = FALSE;   // TRUE => eventParam2 is a ref-counted POM_OBJECT
    BOOL processed = FALSE;     // TRUE => swallow the event
    DebugEntry(OMSEventHandler);
    UT_Lock(UTLOCK_OM);
    ValidateOMS(pomClient);
    //
    // First check if this is an ObMan event:
    //
    if ((event < OM_BASE_EVENT) || (event > OM_LAST_EVENT))
    {
        DC_QUIT;
    }
    TRACE_OUT(("Processing ObMan event %d (param1: 0x%08x, param2: 0x%08x)",
        event, eventParam1, eventParam2));
    //
    // Extract the fields from the event parameters (some or all of these
    // will be unused, depending on which event this is):
    //
    hWSGroup = (*(POM_EVENT_DATA16)&eventParam1).hWSGroup;
    worksetID = (*(POM_EVENT_DATA16)&eventParam1).worksetID;
    correlator = (*(POM_EVENT_DATA32)&eventParam2).correlator;
    result = (*(POM_EVENT_DATA32)&eventParam2).result;
    pObj = (POM_OBJECT) eventParam2;
    //
    // ObMan guarantees not to deliver out of date events to client e.g.
    // workset open events for a workset it has since closed, or object add
    // events for a workset group from which it has deregistered.
    //
    // Filtering these events is the main purpose of this hidden handler
    // function; we check each event and if the workset group handle or
    // object handle are invalid or if the workset is closed, we swallow the
    // event.
    //
    switch (event)
    {
        case OM_OUT_OF_RESOURCES_IND:
        {
            //
            // Do nothing - always delivered to the client as-is.
            //
        }
        break;
        case OM_WSGROUP_REGISTER_CON:
        {
            //
            // Mark this workset group as valid for our client.
            //
            pomClient->wsgValid[hWSGroup] = TRUE;
            ASSERT(ValidWSGroupHandle(pomClient, hWSGroup));
            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            TRACE_OUT(("REGISTER_CON arrived for wsg %d (result %u, hWSGroup %u)",
                pUsageRec->pWSGroup->wsg, result, hWSGroup));
            if (result != 0)
            {
                //
                // The registration has failed, so call WSGroupDeregister to
                // free up all the resources, then quit (the CON event is
                // still delivered to the client, carrying the failure):
                //
                WARNING_OUT(("Registration failed for wsg %d, deregistering",
                    pUsageRec->pWSGroup->wsg));
                OM_WSGroupDeregister(pomClient, &hWSGroup);
                DC_QUIT;
            }
        }
        break;
        case OMINT_EVENT_WSGROUP_DEREGISTER:
        {
            //
            // This event is designed to flush the Client's message queue of
            // all events relating to a particular workset group handle.
            //
            // Because this event has arrived, we know there are no more
            // events containing this workset group handle in the queue, so
            // we can safely mark the handle for re-use:
            //
            // So, do a quick sanity check then reset the slot in the array
            // of usage record offsets:
            //
            ASSERT(!pomClient->wsgValid[hWSGroup]);
            TRACE_OUT(("Got WSGROUP_DEREGISTER back marker event for "
                "hWSGroup %u, marking handle as ready for re-use", hWSGroup));
            pomClient->apUsageRecs[hWSGroup] = NULL;
            //
            // ...and swallow the event (it is internal, never for clients):
            //
            processed = TRUE;
        }
        break;
        case OM_WSGROUP_MOVE_CON:
        case OM_WSGROUP_MOVE_IND:
        case OM_WORKSET_NEW_IND:
        {
            //
            // Only sanity we need here is a live workset group handle:
            //
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;
        case OM_WORKSET_OPEN_CON:
        {
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }
            //
            // Else mark the workset as open:
            //
            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            TRACE_OUT(("Marking workset %u in wsg %d open for Client 0x%08x",
                worksetID, pUsageRec->pWSGroup->wsg, pomClient));
            WORKSET_SET_OPEN(pUsageRec, worksetID);
        }
        break;
        case OM_WORKSET_UNLOCK_IND:
        {
            //
            // Needs both a valid group handle and an open workset:
            //
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }
            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            if (!WORKSET_IS_OPEN(pUsageRec, worksetID))
            {
                TRACE_OUT(("Workset %u in wsg %d no longer open; ignoring event %d",
                    worksetID, pUsageRec->pWSGroup->wsg, event));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;
        case OM_WORKSET_CLEAR_IND:
        {
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }
            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            if (!WORKSET_IS_OPEN(pUsageRec, worksetID))
            {
                TRACE_OUT(("Workset %u in wsg %d no longer open; ignoring event %d",
                    worksetID, pUsageRec->pWSGroup->wsg, event));
                processed = TRUE;
                DC_QUIT;
            }
            //
            // Check if Clear still pending; quit if not (the client has
            // already confirmed it, so the event is stale):
            //
            pWorkset = pUsageRec->pWSGroup->apWorksets[worksetID];
            ASSERT((pWorkset != NULL));
            FindPendingOp(pWorkset, pObj, WORKSET_CLEAR, &pPendingOp);
            if (pPendingOp == NULL)
            {
                TRACE_OUT(("Clear already confirmed for workset %hu", worksetID));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;
        case OM_WORKSET_LOCK_CON:
        {
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }
            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            if (!WORKSET_IS_OPEN(pUsageRec, worksetID))
            {
                TRACE_OUT(("Workset %u in wsg %d no longer open; ignoring event %d",
                    worksetID, pUsageRec->pWSGroup->wsg, event));
                processed = TRUE;
                DC_QUIT;
            }
            //
            // Search for the lock on the lock stack:
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomClient->locks),
                (void**)&pLock, FIELD_OFFSET(OM_LOCK, chain),
                FIELD_OFFSET(OM_LOCK, worksetID), (DWORD)worksetID,
                FIELD_SIZE(OM_LOCK, worksetID));
            //
            // If the lock is not present on the lock stack, then the Client
            // must have called Unlock since it called LockReq. So, we
            // swallow the event:
            //
            if (pLock == NULL)
            {
                TRACE_OUT(("Lock already cancelled for workset %hu", worksetID));
                processed = TRUE;
                DC_QUIT;
            }
            //
            // When object locking supported, the first lock which matches
            // on worksetID might not be the workset lock, so more code will
            // be needed here then. In the meantime, just assert:
            //
            ASSERT((OBJECT_ID_IS_NULL(pLock->objectID)));
            //
            // If lock request failed, remove the lock from the Client's
            // lock stack:
            //
            if (result != 0)
            {
                TRACE_OUT(("Lock failed; removing lock from Client's lock stack"));
                COM_BasedListRemove(&pLock->chain);
                UT_FreeRefCount((void**)&pLock, FALSE);
            }
        }
        break;
        case OM_OBJECT_ADD_IND:
        case OM_OBJECT_MOVE_IND:
        {
            //
            // eventParam2 is the object pointer for these events; its ref
            // count was bumped at post time and is released at exit.
            //
            ObjectEvent = TRUE;
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }
            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            if (!WORKSET_IS_OPEN(pUsageRec, worksetID))
            {
                TRACE_OUT(("Workset %u in wsg %d no longer open; ignoring event %d",
                    worksetID, pUsageRec->pWSGroup->wsg, event));
                processed = TRUE;
                DC_QUIT;
            }
            //
            // Swallow events for objects already deleted:
            //
            if (!ValidObject(pObj) || (pObj->flags & DELETED))
            {
                processed = TRUE;
                DC_QUIT;
            }
            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            pWorkset = pUsageRec->pWSGroup->apWorksets[worksetID];
            ASSERT((pWorkset != NULL));
            //
            // ...and for objects about to be wiped by a pending clear:
            //
            if (WorksetClearPending(pWorkset, pObj))
            {
                TRACE_OUT(("Event %hu for object 0x%08x will be swallowed since "
                    "object about to be cleared from the workset",
                    event, pObj));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;
        case OM_OBJECT_DELETE_IND:
        case OM_OBJECT_REPLACE_IND:
        case OM_OBJECT_UPDATE_IND:
        {
            ObjectEvent = TRUE;
            //
            // Map the event onto the pending-operation type we must look
            // for:
            //
            switch (event)
            {
                case OM_OBJECT_DELETE_IND:
                    type = OBJECT_DELETE;
                    break;
                case OM_OBJECT_REPLACE_IND:
                    type = OBJECT_REPLACE;
                    break;
                case OM_OBJECT_UPDATE_IND:
                    type = OBJECT_UPDATE;
                    break;
                default:
                    ERROR_OUT(("Reached default case in switch"));
            }
            //
            // Check workset group handle is still valid, workset is still
            // open and object handle is still valid; if not, swallow event:
            //
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }
            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            if (!WORKSET_IS_OPEN(pUsageRec, worksetID))
            {
                TRACE_OUT(("Workset %u in wsg %d no longer open; ignoring event %d",
                    worksetID, pUsageRec->pWSGroup->wsg, event));
                processed = TRUE;
                DC_QUIT;
            }
            //
            // We also want to quit if the object is no longer valid or if
            // there is a clear pending (just as for ADD/MOVE) but if we do
            // so, we will also need to remove the pending op from the list.
            // So, find the op now; if we quit and swallow the event, the
            // function exit code will do the remove (this saves having to
            // break up the QUIT_IF... macros for this special case).
            //
            // NOTE(review): the exit code at DC_EXIT_POINT below does not
            // in fact remove pPendingOp from any list - verify whether the
            // removal was lost or happens elsewhere.
            //
            // So, check the pending op list:
            //
            pWorkset = pUsageRec->pWSGroup->apWorksets[worksetID];
            ASSERT((pWorkset != NULL));
            FindPendingOp(pWorkset, pObj, type, &pPendingOp);
            if (pPendingOp == NULL)
            {
                TRACE_OUT(("Operation type %hu already confirmed for object 0x%08x",
                    type, pObj));
                processed = TRUE;
                DC_QUIT;
            }
            if (!ValidObject(pObj) || (pObj->flags & DELETED))
            {
                processed = TRUE;
                DC_QUIT;
            }
            if (WorksetClearPending(pWorkset, pObj))
            {
                TRACE_OUT(("Event %hu for object 0x%08x will be swallowed since "
                    "object about to be cleared from the workset",
                    event, pObj));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;
        case OM_WORKSET_CLEARED_IND:
        case OM_OBJECT_DELETED_IND:
        case OM_OBJECT_UPDATED_IND:
        case OM_OBJECT_REPLACED_IND:
        {
            //
            // All of these except the CLEARED_IND are object events:
            //
            if (event != OM_WORKSET_CLEARED_IND)
            {
                ObjectEvent = TRUE;
            }
            //
            // These are secondary API events. Swallow them if the workset
            // is closed, but DO NOT swallow if object handle invalid (since
            // we don't make guarantees about validity of handles passed in
            // these events):
            //
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }
            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            if (!WORKSET_IS_OPEN(pUsageRec, worksetID))
            {
                TRACE_OUT(("Workset %u in WSG %d no longer open; ignoring event %d",
                    worksetID, pUsageRec->pWSGroup->wsg, event));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;
        case OM_PERSON_JOINED_IND:
        case OM_PERSON_LEFT_IND:
        case OM_PERSON_DATA_CHANGED_IND:
        {
            //
            // Person events only need a live workset group handle:
            //
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;
        default:
        {
            ERROR_OUT(("Unrecognised ObMan event 0x%08x", event));
        }
    }
DC_EXIT_POINT:
    //
    // Whenever an event containing an object handle is posted, the use
    // count of the object record is bumped, so we free it now:
    //
    if (ObjectEvent)
    {
        ValidateObject(pObj);
        UT_FreeRefCount((void**)&pObj, FALSE);
    }
    UT_Unlock(UTLOCK_OM);
    DebugExitBOOL(OMSEventHandler, processed);
    return(processed);
}
  10264. //
  10265. // OM_WSGroupRegisterS(...)
  10266. //
  10267. UINT OM_WSGroupRegisterS
  10268. (
  10269. POM_CLIENT pomClient,
  10270. UINT callID,
  10271. OMFP fpHandler,
  10272. OMWSG wsg,
  10273. OM_WSGROUP_HANDLE * phWSGroup
  10274. )
  10275. {
  10276. POM_DOMAIN pDomain;
  10277. POM_WSGROUP pWSGroup;
  10278. POM_USAGE_REC pUsageRec;
  10279. POM_CLIENT_LIST pClientListEntry;
  10280. BOOL setUpUsageRec = FALSE;
  10281. UINT rc = 0;
  10282. DebugEntry(OM_WSGroupRegisterS);
  10283. UT_Lock(UTLOCK_OM);
  10284. //
  10285. // Validate params:
  10286. //
  10287. ValidateOMS(pomClient);
  10288. //
  10289. // Search for this Domain and workset group:
  10290. //
  10291. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(g_pomPrimary->domains),
  10292. (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
  10293. FIELD_OFFSET(OM_DOMAIN, callID), (DWORD)callID,
  10294. FIELD_SIZE(OM_DOMAIN, callID));
  10295. if (pDomain == NULL)
  10296. {
  10297. //
  10298. // We don't have a record for this Domain so there can be no primary
  10299. // registered with the workset group:
  10300. //
  10301. TRACE_OUT(("Not attached to Domain %u", callID));
  10302. rc = OM_RC_NO_PRIMARY;
  10303. DC_QUIT;
  10304. }
  10305. WSGRecordFind(pDomain, wsg, fpHandler, &pWSGroup);
  10306. if (pWSGroup == NULL)
  10307. {
  10308. rc = OM_RC_NO_PRIMARY;
  10309. DC_QUIT;
  10310. }
  10311. //
  10312. // If we get here, then the workset group exists locally so see if the
  10313. // Client is already registered with it:
  10314. //
  10315. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pWSGroup->clients),
  10316. (void**)&pClientListEntry, FIELD_OFFSET(OM_CLIENT_LIST, chain),
  10317. FIELD_OFFSET(OM_CLIENT_LIST, putTask), (DWORD_PTR)pomClient->putTask,
  10318. FIELD_SIZE(OM_CLIENT_LIST, putTask));
  10319. if (pClientListEntry != NULL)
  10320. {
  10321. rc = OM_RC_ALREADY_REGISTERED;
  10322. ERROR_OUT(("Can't register Client 0x%08x with WSG %d - already registered",
  10323. pomClient, wsg));
  10324. DC_QUIT;
  10325. }
  10326. //
  10327. // OK, Client is not already registered so register it now:
  10328. //
  10329. rc = SetUpUsageRecord(pomClient, SECONDARY, &pUsageRec, phWSGroup);
  10330. if (rc != 0)
  10331. {
  10332. DC_QUIT;
  10333. }
  10334. //
  10335. // SetUpUsageRecord doesn't put the workset group pointer in the CB
  10336. // (since it's not known yet in the case of a PRIMARY registration), so
  10337. // we do this now ourselves:
  10338. //
  10339. pUsageRec->pWSGroup = pWSGroup;
  10340. setUpUsageRec = TRUE;
  10341. //
  10342. // add this Client to the workset group's Client list:
  10343. //
  10344. rc = AddClientToWSGList(pomClient->putTask,
  10345. pWSGroup,
  10346. *phWSGroup,
  10347. SECONDARY);
  10348. if (rc != 0)
  10349. {
  10350. DC_QUIT;
  10351. }
  10352. pUsageRec->flags |= ADDED_TO_WSGROUP_LIST;
  10353. pomClient->wsgValid[*phWSGroup] = TRUE;
  10354. //
  10355. // Post WORKSET_NEW events to the Client for the worksets in the group,
  10356. // if any:
  10357. //
  10358. PostWorksetNewEvents(pomClient->putTask, pomClient->putTask,
  10359. pWSGroup, *phWSGroup);
  10360. TRACE_OUT(("Registered 0x%08x as secondary Client for WSG %d (hWSGroup: %hu)",
  10361. pomClient, wsg, *phWSGroup));
  10362. DC_EXIT_POINT:
  10363. if (rc != 0)
  10364. {
  10365. if (rc == OM_RC_NO_PRIMARY)
  10366. {
  10367. //
  10368. // We do a regular trace here rather than an error because this
  10369. // happens normally:
  10370. //
  10371. TRACE_OUT(("No primary Client for WSG %d in Domain %u "
  10372. "- can't register secondary", wsg, callID));
  10373. }
  10374. else
  10375. {
  10376. ERROR_OUT(("Error %d registering Client 0x%08x as secondary"
  10377. "for WSG %d in Domain %u",
  10378. rc, pomClient, wsg, callID));
  10379. }
  10380. if (setUpUsageRec == TRUE)
  10381. {
  10382. pomClient->apUsageRecs[*phWSGroup] = NULL;
  10383. if (pUsageRec->flags & ADDED_TO_WSGROUP_LIST)
  10384. {
  10385. RemoveClientFromWSGList(pomClient->putTask, pomClient->putTask, pWSGroup);
  10386. }
  10387. UT_FreeRefCount((void**)&pUsageRec, FALSE);
  10388. }
  10389. pomClient->wsgValid[*phWSGroup] = FALSE;
  10390. }
  10391. UT_Unlock(UTLOCK_OM);
  10392. DebugExitDWORD(OM_WSGroupRegisterS, rc);
  10393. return(rc);
  10394. }
  10395. //
  10396. // OM_WorksetOpenS(...)
  10397. //
  10398. UINT OM_WorksetOpenS
  10399. (
  10400. POM_CLIENT pomClient,
  10401. OM_WSGROUP_HANDLE hWSGroup,
  10402. OM_WORKSET_ID worksetID
  10403. )
  10404. {
  10405. POM_WSGROUP pWSGroup;
  10406. POM_WORKSET pWorkset;
  10407. POM_USAGE_REC pUsageRec;
  10408. POM_CLIENT_LIST pClientListEntry = NULL;
  10409. UINT rc = 0;
  10410. DebugEntry(OM_WorksetOpenS);
  10411. UT_Lock(UTLOCK_OM);
  10412. //
  10413. // Validate params:
  10414. //
  10415. ValidateParams2(pomClient, hWSGroup, SECONDARY, &pUsageRec, &pWSGroup);
  10416. TRACE_OUT(("Secondary Client 0x%08x requesting to open workset %u in WSG %d",
  10417. pomClient, worksetID, pWSGroup->wsg));
  10418. //
  10419. // If the Client already has this workset open then return a (non-error)
  10420. // return code:
  10421. //
  10422. if (WORKSET_IS_OPEN(pUsageRec, worksetID) == TRUE)
  10423. {
  10424. TRACE_OUT(("Client 0x%08x already has workset %u in WSG %d open",
  10425. pomClient, worksetID, pWSGroup->wsg));
  10426. rc = OM_RC_WORKSET_ALREADY_OPEN;
  10427. DC_QUIT;
  10428. }
  10429. //
  10430. // Check workset group record to see if workset exists:
  10431. //
  10432. if (pWSGroup->apWorksets[worksetID] == NULL)
  10433. {
  10434. //
  10435. // Workset doesn't exist so return bad rc:
  10436. //
  10437. WARNING_OUT(("Workset %hu doesn't exist in WSG %d",
  10438. worksetID, pWSGroup->wsg));
  10439. rc = OM_RC_WORKSET_DOESNT_EXIST;
  10440. DC_QUIT;
  10441. }
  10442. else
  10443. {
  10444. //
  10445. // Workset already exists, so we don't need to do anything.
  10446. //
  10447. TRACE_OUT((" Workset %hu in WSG %d already exists",
  10448. worksetID, pWSGroup->wsg));
  10449. }
  10450. //
  10451. // If the workset didn't already exist, queueing the send instruction
  10452. // will have caused the workset to be created syncrhonously. So, either
  10453. // way the workset exists at this point.
  10454. //
  10455. //
  10456. // Get a pointer to the workset:
  10457. //
  10458. pWorkset = pWSGroup->apWorksets[worksetID];
  10459. ASSERT((pWorkset != NULL));
  10460. //
  10461. // Mark this workset as open in the Client's usage record:
  10462. //
  10463. WORKSET_SET_OPEN(pUsageRec, worksetID);
  10464. //
  10465. // Add this Client to the list kept in the workset record:
  10466. //
  10467. rc = AddClientToWsetList(pomClient->putTask,
  10468. pWorkset,
  10469. hWSGroup,
  10470. pUsageRec->mode,
  10471. &pClientListEntry);
  10472. if (rc != 0)
  10473. {
  10474. DC_QUIT;
  10475. }
  10476. rc = PostAddEvents(pomClient->putTask, pWorkset, hWSGroup, pomClient->putTask);
  10477. if (rc != 0)
  10478. {
  10479. DC_QUIT;
  10480. }
  10481. TRACE_OUT(("Opened workset %u in WSG %d for secondary Client 0x%08x",
  10482. worksetID, pWSGroup->wsg, pomClient));
  10483. DC_EXIT_POINT:
  10484. if ((rc != 0) && (rc != OM_RC_WORKSET_ALREADY_OPEN))
  10485. {
  10486. //
  10487. // Cleanup:
  10488. //
  10489. ERROR_OUT(("Error %d opening workset %u in WSG %d for Client 0x%08x",
  10490. rc, worksetID, pWSGroup->wsg, pomClient));
  10491. WORKSET_SET_CLOSED(pUsageRec, worksetID);
  10492. if (pClientListEntry != NULL)
  10493. {
  10494. COM_BasedListRemove(&(pClientListEntry->chain));
  10495. UT_FreeRefCount((void**)&pClientListEntry, FALSE);
  10496. }
  10497. }
  10498. UT_Unlock(UTLOCK_OM);
  10499. DebugExitDWORD(OM_WorksetOpenS, rc);
  10500. return(rc);
  10501. }
  10502. //
  10503. // OM_WSGroupRegisterPReq(...)
  10504. //
  10505. UINT OM_WSGroupRegisterPReq
  10506. (
  10507. POM_CLIENT pomClient,
  10508. UINT callID,
  10509. OMFP fpHandler,
  10510. OMWSG wsg,
  10511. OM_CORRELATOR * pCorrelator
  10512. )
  10513. {
  10514. POM_WSGROUP_REG_CB pRegistrationCB = NULL;
  10515. POM_USAGE_REC pUsageRec;
  10516. OM_WSGROUP_HANDLE hWSGroup;
  10517. BOOL setUpUsageRec = FALSE;
  10518. UINT rc = 0;
  10519. DebugEntry(OM_WSGroupRegisterPReq);
  10520. UT_Lock(UTLOCK_OM);
  10521. ValidateOMS(pomClient);
  10522. //
  10523. // Set up a usage record and workset group handle for the Client:
  10524. //
  10525. rc = SetUpUsageRecord(pomClient, PRIMARY, &pUsageRec, &hWSGroup);
  10526. if (rc != 0)
  10527. {
  10528. DC_QUIT;
  10529. }
  10530. setUpUsageRec = TRUE;
  10531. //
  10532. // Create a new correlator for the Client and put it in the Client's
  10533. // variable:
  10534. //
  10535. *pCorrelator = NextCorrelator(g_pomPrimary);
  10536. //
  10537. // Sub alloc a chunk of memory for the registration control block, in
  10538. // which we will pass the registration request parameters to the ObMan
  10539. // task:
  10540. //
  10541. pRegistrationCB = (POM_WSGROUP_REG_CB)UT_MallocRefCount(sizeof(OM_WSGROUP_REG_CB), TRUE);
  10542. if (!pRegistrationCB)
  10543. {
  10544. rc = UT_RC_NO_MEM;
  10545. DC_QUIT;
  10546. }
  10547. SET_STAMP(pRegistrationCB, REGCB);
  10548. //
  10549. // Fill in the fields, but note that we don't yet know the Domain record
  10550. // or workset group, so we leave those ones blank:
  10551. //
  10552. pRegistrationCB->putTask = pomClient->putTask;
  10553. pRegistrationCB->callID = callID;
  10554. pRegistrationCB->correlator = *pCorrelator;
  10555. pRegistrationCB->hWSGroup = hWSGroup;
  10556. pRegistrationCB->wsg = wsg;
  10557. pRegistrationCB->fpHandler = fpHandler;
  10558. pRegistrationCB->retryCount = OM_REGISTER_RETRY_COUNT_DFLT;
  10559. pRegistrationCB->valid = TRUE;
  10560. pRegistrationCB->type = WSGROUP_REGISTER;
  10561. pRegistrationCB->mode = PRIMARY;
  10562. pRegistrationCB->pUsageRec = pUsageRec;
  10563. //
  10564. // Now put a pointer to the registration CB in the usage record, as
  10565. // described above, and set a flag so we know what we've done:
  10566. //
  10567. pUsageRec->pWSGroup = (POM_WSGROUP) pRegistrationCB;
  10568. pUsageRec->flags |= PWSGROUP_IS_PREGCB;
  10569. //
  10570. // Post an event to the ObMan task telling it to process this CB.
  10571. //
  10572. // The first parameter is the retry value for the event.
  10573. //
  10574. // The second parameter is the offset of the control block in the OMMISC
  10575. // memory block.
  10576. //
  10577. UT_PostEvent(pomClient->putTask, // Client's putTask
  10578. g_pomPrimary->putTask, // ObMan's putTask
  10579. 0,
  10580. OMINT_EVENT_WSGROUP_REGISTER,
  10581. 0,
  10582. (UINT_PTR)pRegistrationCB);
  10583. TRACE_OUT(("Requested to register Client 0x%08x with WSG %d",
  10584. pomClient, wsg));
  10585. DC_EXIT_POINT:
  10586. if (rc != 0)
  10587. {
  10588. ERROR_OUT(("Error 0x%08x registering Client 0x%08x with WSG %d",
  10589. rc, pomClient, wsg));
  10590. if (pRegistrationCB != NULL)
  10591. {
  10592. //
  10593. // We can free the reg CB safely since we know that if we hit an
  10594. // error, we never got around to inserting the item in the list or
  10595. // posting its offset to the ObMan task:
  10596. //
  10597. UT_FreeRefCount((void**)&pRegistrationCB, FALSE);
  10598. }
  10599. if (setUpUsageRec)
  10600. {
  10601. UT_FreeRefCount((void**)&pUsageRec, FALSE);
  10602. pomClient->apUsageRecs[hWSGroup] = NULL;
  10603. }
  10604. }
  10605. UT_Unlock(UTLOCK_OM);
  10606. DebugExitDWORD(OM_WSGroupRegisterPReq, rc);
  10607. return(rc);
  10608. }
  10609. //
  10610. // OM_WSGroupMoveReq(...)
  10611. //
  10612. UINT OM_WSGroupMoveReq
  10613. (
  10614. POM_CLIENT pomClient,
  10615. OM_WSGROUP_HANDLE hWSGroup,
  10616. UINT callID,
  10617. OM_CORRELATOR * pCorrelator
  10618. )
  10619. {
  10620. POM_USAGE_REC pUsageRec;
  10621. POM_WSGROUP pWSGroup;
  10622. POM_DOMAIN pDomain;
  10623. POM_WSGROUP_REG_CB pRegistrationCB = NULL;
  10624. UINT rc = 0;
  10625. DebugEntry(OM_WSGroupMoveReq);
  10626. UT_Lock(UTLOCK_OM);
  10627. ValidateParams2(pomClient, hWSGroup, PRIMARY, &pUsageRec, &pWSGroup);
  10628. TRACE_OUT(("Client 0x%08x requesting to move WSG %d into Domain %u",
  10629. pomClient, hWSGroup, callID));
  10630. //
  10631. // Check workset group is not already in a Call: (this may be relaxed)
  10632. //
  10633. pDomain = pWSGroup->pDomain;
  10634. if (pDomain->callID != OM_NO_CALL)
  10635. {
  10636. ERROR_OUT(("Client 0x%08x attempted to move WSG %d out of a call "
  10637. "(Domain %u)",
  10638. pomClient, hWSGroup, pDomain->callID));
  10639. rc = OM_RC_ALREADY_IN_CALL;
  10640. DC_QUIT;
  10641. }
  10642. //
  10643. // Create a correlator, to correlate the MOVE_CON event:
  10644. //
  10645. *pCorrelator = NextCorrelator(g_pomPrimary);
  10646. //
  10647. // Create a control block to pass the relevant info to ObMan:
  10648. //
  10649. pRegistrationCB = (POM_WSGROUP_REG_CB)UT_MallocRefCount(sizeof(OM_WSGROUP_REG_CB), TRUE);
  10650. if (!pRegistrationCB)
  10651. {
  10652. rc = UT_RC_NO_MEM;
  10653. DC_QUIT;
  10654. }
  10655. SET_STAMP(pRegistrationCB, REGCB);
  10656. //
  10657. // Fill in the fields:
  10658. //
  10659. pRegistrationCB->putTask = pomClient->putTask;
  10660. pRegistrationCB->callID = callID; // DESTINATION Domain!
  10661. pRegistrationCB->correlator = *pCorrelator;
  10662. pRegistrationCB->hWSGroup = hWSGroup;
  10663. pRegistrationCB->wsg = pWSGroup->wsg;
  10664. pRegistrationCB->fpHandler = pWSGroup->fpHandler;
  10665. pRegistrationCB->retryCount = OM_REGISTER_RETRY_COUNT_DFLT;
  10666. pRegistrationCB->valid = TRUE;
  10667. pRegistrationCB->type = WSGROUP_MOVE;
  10668. pRegistrationCB->mode = pUsageRec->mode;
  10669. pRegistrationCB->pWSGroup = pWSGroup;
  10670. //
  10671. // Post an event to ObMan requesting it to process the CB:
  10672. //
  10673. UT_PostEvent(pomClient->putTask,
  10674. g_pomPrimary->putTask,
  10675. 0, // no delay
  10676. OMINT_EVENT_WSGROUP_MOVE,
  10677. 0,
  10678. (UINT_PTR)pRegistrationCB);
  10679. TRACE_OUT(("Requested to move WSG %d into Domain %u for Client 0x%08x",
  10680. hWSGroup, callID, pomClient));
  10681. DC_EXIT_POINT:
  10682. if (rc != 0)
  10683. {
  10684. ERROR_OUT(("Error 0x%08x requesting to move WSG %d into Domain %u",
  10685. rc, hWSGroup, callID));
  10686. if (pRegistrationCB != NULL)
  10687. {
  10688. UT_FreeRefCount((void**)&pRegistrationCB, FALSE);
  10689. }
  10690. }
  10691. UT_Unlock(UTLOCK_OM);
  10692. DebugExitDWORD(OM_WSGroupMoveReq, rc);
  10693. return(rc);
  10694. }
  10695. //
  10696. // OM_WSGroupDeregister(...)
  10697. //
  10698. void OM_WSGroupDeregister
  10699. (
  10700. POM_CLIENT pomClient,
  10701. OM_WSGROUP_HANDLE * phWSGroup
  10702. )
  10703. {
  10704. POM_WSGROUP pWSGroup;
  10705. POM_USAGE_REC pUsageRec;
  10706. OM_WORKSET_ID worksetID;
  10707. OM_EVENT_DATA16 eventData16;
  10708. OM_WSGROUP_HANDLE hWSGroup;
  10709. DebugEntry(OM_WSGroupDeregister);
  10710. UT_Lock(UTLOCK_OM);
  10711. ValidateOMS(pomClient);
  10712. hWSGroup = *phWSGroup;
  10713. //
  10714. // If this function has been called because of an abortive
  10715. // WSGroupRegister, or from OM_Deregister, the wsg might not yet be
  10716. // marked as VALID, so we check here and set it to VALID.
  10717. //
  10718. if (!pomClient->wsgValid[hWSGroup])
  10719. {
  10720. TRACE_OUT(("Deregistering Client before registration completed"));
  10721. pomClient->wsgValid[hWSGroup] = TRUE;
  10722. }
  10723. // lonchanc: bug #1986, make sure we have a valid wsg.
  10724. // pWSGroup can be invalid in a race condition that we hang up
  10725. // before Whiteboard initializes.
  10726. pUsageRec = NULL; // make sure this local is reset in case we bail out from here.
  10727. if (!ValidWSGroupHandle(pomClient, hWSGroup) ||
  10728. (pomClient->apUsageRecs[hWSGroup] == (POM_USAGE_REC)-1))
  10729. {
  10730. ERROR_OUT(("OM_WSGroupDeregister: Invalid wsg=0x0x%08x", hWSGroup));
  10731. DC_QUIT;
  10732. }
  10733. //
  10734. // Get a pointer to the associated usage record:
  10735. //
  10736. pUsageRec = pomClient->apUsageRecs[hWSGroup];
  10737. //
  10738. // Extract a Client pointer to the workset group from the usage record:
  10739. //
  10740. pWSGroup = pUsageRec->pWSGroup;
  10741. //
  10742. // Test the flag in the usage record to see whether the <pWSGroup> field
  10743. // is actually pointing to the registration CB (which will be the case
  10744. // if we are deregistering immediately after registering):
  10745. //
  10746. if (pUsageRec->flags & PWSGROUP_IS_PREGCB)
  10747. {
  10748. //
  10749. // Mark the registration CB as invalid in order to abort the
  10750. // registration (ObMan will test for this in ProcessWSGRegister):
  10751. //
  10752. // Note: the pWSGroup field of the usage record is actually a pointer
  10753. // to a registration CB in this case
  10754. //
  10755. TRACE_OUT(("Client deregistering before registration even started - aborting"));
  10756. ((POM_WSGROUP_REG_CB)pUsageRec->pWSGroup)->valid = FALSE;
  10757. DC_QUIT;
  10758. }
  10759. //
  10760. // Check the workset group record is valid:
  10761. //
  10762. ValidateWSGroup(pWSGroup);
  10763. //
  10764. // If it is valid, we continue with the deregistration process:
  10765. //
  10766. TRACE_OUT(("Deregistering Client 0x%08x from WSG %d", pomClient, hWSGroup));
  10767. //
  10768. // Close all the worksets in the group that the Client has open:
  10769. //
  10770. for (worksetID = 0; worksetID < OM_MAX_WORKSETS_PER_WSGROUP; worksetID++)
  10771. {
  10772. if (WORKSET_IS_OPEN(pUsageRec, worksetID))
  10773. {
  10774. OM_WorksetClose(pomClient, hWSGroup, worksetID);
  10775. }
  10776. }
  10777. //
  10778. // If we added this Client to the workset group's Client list, find it
  10779. // again and remove it:
  10780. //
  10781. if (pUsageRec->flags & ADDED_TO_WSGROUP_LIST)
  10782. {
  10783. TRACE_OUT(("Removing Client from workset group list"));
  10784. RemoveClientFromWSGList(pomClient->putTask, pomClient->putTask, pWSGroup);
  10785. pUsageRec->flags &= ~ADDED_TO_WSGROUP_LIST;
  10786. }
  10787. else
  10788. {
  10789. TRACE_OUT(("Client not added to wsGroup list, not removing"));
  10790. }
  10791. TRACE_OUT(("Deregistered Client 0x%08x from WSG %d", pomClient, hWSGroup));
  10792. DC_EXIT_POINT:
  10793. //
  10794. // Free the usage record (we put this after the DC_QUIT since we want to
  10795. // do this even if the workset group pointer was found to be invalid
  10796. // above):
  10797. //
  10798. UT_FreeRefCount((void**)&pUsageRec, FALSE);
  10799. //
  10800. // Mark the workset group handle as invalid, so that any events which
  10801. // the Client gets will be swallowed:
  10802. //
  10803. pomClient->wsgValid[hWSGroup] = FALSE;
  10804. //
  10805. // Note: we don't set the slot in the usage record offset array to zero,
  10806. // since we don't want the workset group handle to be reused yet.
  10807. // When the DEREGISTER events arrives (after flushing the Client's
  10808. // event queue), we will set the offset to zero.
  10809. //
  10810. // However, if we leave the offset as it is, OM_Deregister might
  10811. // call us again because it thinks we haven't yet deregistered
  10812. // from the workset group. So, we set it to -1, which ensures
  10813. // that
  10814. //
  10815. // a) it is seen as in use by FindUnusedWSGHandle, since that
  10816. // function checks for 0
  10817. //
  10818. // b) it is seen as not in use by OM_Deregister, since that
  10819. // function checks for 0 or -1.
  10820. //
  10821. pomClient->apUsageRecs[hWSGroup] = (POM_USAGE_REC)-1;
  10822. //
  10823. // Send an OMINT_EVENT_WSGROUP_DEREGISTER event to the hidden handler (which
  10824. // will swallow it) to flush the Client's message queue:
  10825. //
  10826. TRACE_OUT(("Posting WSGROUP_DEREGISTER event to Client's hidden handler"));
  10827. eventData16.hWSGroup = hWSGroup;
  10828. eventData16.worksetID = 0;
  10829. UT_PostEvent(pomClient->putTask,
  10830. pomClient->putTask,
  10831. 0,
  10832. OMINT_EVENT_WSGROUP_DEREGISTER,
  10833. *(PUINT) &eventData16,
  10834. 0);
  10835. *phWSGroup = 0;
  10836. UT_Unlock(UTLOCK_OM);
  10837. DebugExitVOID(OM_WSGroupDeregister);
  10838. }
  10839. //
  10840. // OM_WorksetOpenPReq(...)
  10841. //
  10842. UINT OM_WorksetOpenPReq
  10843. (
  10844. POM_CLIENT pomClient,
  10845. OM_WSGROUP_HANDLE hWSGroup,
  10846. OM_WORKSET_ID worksetID,
  10847. NET_PRIORITY priority,
  10848. BOOL fTemp,
  10849. OM_CORRELATOR * pCorrelator
  10850. )
  10851. {
  10852. POM_WSGROUP pWSGroup;
  10853. POM_WORKSET pWorkset;
  10854. POM_USAGE_REC pUsageRec;
  10855. OM_EVENT_DATA16 eventData16;
  10856. OM_EVENT_DATA32 eventData32;
  10857. POM_CLIENT_LIST pClientListEntry = NULL;
  10858. UINT rc = 0;
  10859. DebugEntry(OM_WorksetOpenPReq);
  10860. UT_Lock(UTLOCK_OM);
  10861. //
  10862. // Validate params:
  10863. //
  10864. ValidateParams2(pomClient, hWSGroup, PRIMARY, &pUsageRec, &pWSGroup);
  10865. TRACE_OUT(("Client 0x%08x opening workset %u in WSG %d at priority 0x%08x",
  10866. pomClient, worksetID, hWSGroup, priority));
  10867. //
  10868. // If the Client already has this workset open then return a (non-error)
  10869. // return code:
  10870. //
  10871. if (WORKSET_IS_OPEN(pUsageRec, worksetID) == TRUE)
  10872. {
  10873. TRACE_OUT(("Client 0x%08x already has workset %hu in WSG %d open",
  10874. pomClient, worksetID, hWSGroup));
  10875. rc = OM_RC_WORKSET_ALREADY_OPEN;
  10876. DC_QUIT;
  10877. }
  10878. //
  10879. // Check the Client has supplied a valid value for <priority>:
  10880. //
  10881. if ((priority < NET_HIGH_PRIORITY) || (priority > NET_LOW_PRIORITY))
  10882. {
  10883. ASSERT((priority == OM_OBMAN_CHOOSES_PRIORITY));
  10884. }
  10885. //
  10886. // Check workset group record to see if workset exists:
  10887. //
  10888. // Note: this check looks to see if the offset to the workset is zero,
  10889. // since workset records never reside at the start of the OMWORKSETS
  10890. // block.
  10891. //
  10892. if (pWSGroup->apWorksets[worksetID] == NULL)
  10893. {
  10894. rc = WorksetCreate(pomClient->putTask, pWSGroup, worksetID, fTemp, priority);
  10895. if (rc != 0)
  10896. {
  10897. DC_QUIT;
  10898. }
  10899. }
  10900. else
  10901. {
  10902. //
  10903. // Workset already exists, so we don't need to do anything.
  10904. //
  10905. TRACE_OUT((" Workset %hu in WSG %d already exists",
  10906. worksetID, hWSGroup));
  10907. }
  10908. //
  10909. // If the workset didn't already exist, queueing the send instruction
  10910. // will have caused the workset to be created syncrhonously. So, either
  10911. // way the workset exists at this point.
  10912. //
  10913. //
  10914. // Get a pointer to the workset:
  10915. //
  10916. pWorkset = pWSGroup->apWorksets[worksetID];
  10917. ASSERT((pWorkset != NULL));
  10918. //
  10919. // Set the persistence field for the workset - we might not have done
  10920. // this as part of the WorksetCreate above if someone else had created
  10921. // the workset already. However, we set our local copy to have the
  10922. // appropriate persistence value.
  10923. //
  10924. pWorkset->fTemp = fTemp;
  10925. //
  10926. // We need to mark this workset as open in the Client's usage record.
  10927. // However, we don't do this yet - we do it in our hidden handler when
  10928. // the OPEN_CON event is received.
  10929. //
  10930. // The reason for this is that a Client shouldn't start using a workset
  10931. // until it has received the event, so we want the workset to remain
  10932. // closed until then.
  10933. //
  10934. // Note that whether we do it this way or mark the workset as open here
  10935. // and now doesn't make much difference from ObMan's point of view but
  10936. // it will help detect applications which are badly behaved.
  10937. //
  10938. //
  10939. // Add this Client to the list kept in the workset record:
  10940. //
  10941. rc = AddClientToWsetList(pomClient->putTask,
  10942. pWorkset,
  10943. hWSGroup,
  10944. pUsageRec->mode,
  10945. &pClientListEntry);
  10946. if (rc != 0)
  10947. {
  10948. pClientListEntry = NULL;
  10949. DC_QUIT;
  10950. }
  10951. //
  10952. // Create correlator:
  10953. //
  10954. *pCorrelator = NextCorrelator(g_pomPrimary);
  10955. //
  10956. // Post WORKSET_OPEN_CON event to Client:
  10957. //
  10958. eventData16.hWSGroup = hWSGroup;
  10959. eventData16.worksetID = worksetID;
  10960. eventData32.result = 0;
  10961. eventData32.correlator = *pCorrelator;
  10962. TRACE_OUT((" Posting WORKSET_OPEN_CON to Client 0x%08x (task 0x%08x)"));
  10963. UT_PostEvent(pomClient->putTask,
  10964. pomClient->putTask,
  10965. 0, // no delay
  10966. OM_WORKSET_OPEN_CON,
  10967. *(UINT *) &eventData16,
  10968. *(UINT *) &eventData32);
  10969. //
  10970. // Now post OBJECT_ADD_IND events for each of the objects in the
  10971. // workset:
  10972. //
  10973. rc = PostAddEvents(pomClient->putTask, pWorkset, hWSGroup, pomClient->putTask);
  10974. if (rc != 0)
  10975. {
  10976. DC_QUIT;
  10977. }
  10978. TRACE_OUT(("Opened workset %hu in WSG %d for Client 0x%08x",
  10979. worksetID, hWSGroup, pomClient));
  10980. DC_EXIT_POINT:
  10981. if (rc != 0)
  10982. {
  10983. ERROR_OUT(("Error 0x%08x opening workset %u in WSG %d for Client 0x%08x",
  10984. rc, worksetID, hWSGroup, pomClient));
  10985. if (pClientListEntry != NULL)
  10986. {
  10987. COM_BasedListRemove(&(pClientListEntry->chain));
  10988. UT_FreeRefCount((void**)&pClientListEntry, FALSE);
  10989. }
  10990. }
  10991. UT_Unlock(UTLOCK_OM);
  10992. DebugExitDWORD(OM_WorksetOpenPReq, rc);
  10993. return(rc);
  10994. }
  10995. //
  10996. // OM_WorksetClose(...)
  10997. //
  10998. void OM_WorksetClose
  10999. (
  11000. POM_CLIENT pomClient,
  11001. OM_WSGROUP_HANDLE hWSGroup,
  11002. OM_WORKSET_ID worksetID
  11003. )
  11004. {
  11005. POM_WORKSET pWorkset;
  11006. POM_USAGE_REC pUsageRec;
  11007. POM_CLIENT_LIST pClientListEntry;
  11008. DebugEntry(OM_WorksetClose);
  11009. UT_Lock(UTLOCK_OM);
  11010. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY | SECONDARY,
  11011. &pUsageRec, &pWorkset);
  11012. //
  11013. // Mark the workset as closed in the Client's usage record:
  11014. //
  11015. TRACE_OUT(("Closing workset %u in WSG %d for Client 0x%08x",
  11016. worksetID, hWSGroup, pomClient));
  11017. WORKSET_SET_CLOSED(pUsageRec, worksetID);
  11018. //
  11019. // Now we release all the resources the Client is using which concern
  11020. // this workset. We
  11021. //
  11022. // - release all the locks the Client has for this workset
  11023. //
  11024. // - confirm any outstanding operations such as Deletes, etc.
  11025. //
  11026. // - release all the objects it is currently reading
  11027. //
  11028. // - discard any objects allocated but not yet used.
  11029. //
  11030. TRACE_OUT(("Releasing all resources in use by Client..."));
  11031. ReleaseAllLocks(pomClient, pUsageRec, pWorkset);
  11032. ReleaseAllObjects(pUsageRec, pWorkset);
  11033. ConfirmAll(pomClient, pUsageRec, pWorkset);
  11034. DiscardAllObjects(pUsageRec, pWorkset);
  11035. //
  11036. // Remove the Client from the list of Clients stored in the workset
  11037. // record:
  11038. //
  11039. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pWorkset->clients),
  11040. (void**)&pClientListEntry, FIELD_OFFSET(OM_CLIENT_LIST, chain),
  11041. FIELD_OFFSET(OM_CLIENT_LIST, putTask), (DWORD_PTR)pomClient->putTask,
  11042. FIELD_SIZE(OM_CLIENT_LIST, putTask));
  11043. //
  11044. // If we've got this far, the Client has the workset open, so it must be
  11045. // listed in the workset's list of Clients:
  11046. //
  11047. ASSERT((pClientListEntry != NULL));
  11048. COM_BasedListRemove(&(pClientListEntry->chain));
  11049. UT_FreeRefCount((void**)&pClientListEntry, FALSE);
  11050. TRACE_OUT(("Closed workset %u in WSG %d for Client 0x%08x",
  11051. worksetID, hWSGroup, pomClient));
  11052. UT_Unlock(UTLOCK_OM);
  11053. DebugExitVOID(OM_WorksetClose);
  11054. }
  11055. //
  11056. // OM_WorksetLockReq(...)
  11057. //
  11058. UINT OM_WorksetLockReq
  11059. (
  11060. POM_CLIENT pomClient,
  11061. OM_WSGROUP_HANDLE hWSGroup,
  11062. OM_WORKSET_ID worksetID,
  11063. OM_CORRELATOR * pCorrelator
  11064. )
  11065. {
  11066. POM_USAGE_REC pUsageRec;
  11067. POM_WSGROUP pWSGroup;
  11068. POM_WORKSET pWorkset;
  11069. POM_LOCK pLastLock;
  11070. POM_LOCK pThisLock = NULL;
  11071. BOOL inserted = FALSE;
  11072. UINT rc = 0;
  11073. DebugEntry(OM_WorksetLockReq);
  11074. UT_Lock(UTLOCK_OM);
  11075. //
  11076. // Validate params:
  11077. //
  11078. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
  11079. &pUsageRec, &pWorkset);
  11080. //
  11081. // Set up workset group pointer:
  11082. //
  11083. pWSGroup = pUsageRec->pWSGroup;
  11084. TRACE_OUT(("Client 0x%08x requesting to lock workset %u in WSG %d",
  11085. pomClient, worksetID, hWSGroup));
  11086. //
  11087. // Create a lock record which we will (eventually) put in the Client's
  11088. // lock stack:
  11089. //
  11090. pThisLock = (POM_LOCK)UT_MallocRefCount(sizeof(OM_LOCK), TRUE);
  11091. if (!pThisLock)
  11092. {
  11093. rc = UT_RC_NO_MEM;
  11094. DC_QUIT;
  11095. }
  11096. SET_STAMP(pThisLock, LOCK);
  11097. //
  11098. // Fill in the fields:
  11099. //
  11100. pThisLock->pWSGroup = pWSGroup;
  11101. pThisLock->worksetID = worksetID;
  11102. ZeroMemory(&(pThisLock->objectID), sizeof(OM_OBJECT_ID));
  11103. //
  11104. // Check that granting this lock won't result in a lock order violation:
  11105. // (it will if this lock is earlier than or equal to the last lock
  11106. // acquired).
  11107. //
  11108. TRACE_OUT(("Checking for lock order violation..."));
  11109. pLastLock = (POM_LOCK)COM_BasedListFirst(&(pomClient->locks), FIELD_OFFSET(OM_LOCK, chain));
  11110. if (pLastLock != NULL)
  11111. {
  11112. ASSERT(CompareLocks(pLastLock, pThisLock) < 0);
  11113. TRACE_OUT(("Last lock acquired by Client 0x%08x was workset %u in WSG %d",
  11114. pomClient, pLastLock->worksetID, pLastLock->pWSGroup->wsg));
  11115. }
  11116. else
  11117. {
  11118. //
  11119. // If there aren't any locks on the lock stack then there can't be
  11120. // any lock violation, so do nothing.
  11121. //
  11122. TRACE_OUT(("No locks on Client's lock stack"));
  11123. }
  11124. //
  11125. // Put a record of this lock in the Client's lock stack (we don't need
  11126. // to surround this with a mutex since a Client's lock stack is only
  11127. // accessed from that Client's task):
  11128. //
  11129. // Note: since this is a stack, we insert the item at the head of the
  11130. // list.
  11131. //
  11132. COM_BasedListInsertAfter(&(pomClient->locks), &(pThisLock->chain));
  11133. //
  11134. // Now start the process of requesting the lock from the ObMan task:
  11135. //
  11136. WorksetLockReq(pomClient->putTask, g_pomPrimary,
  11137. pWSGroup, pWorkset, hWSGroup, pCorrelator);
  11138. TRACE_OUT(("Requested lock for workset %u in WSG %d for Client 0x%08x",
  11139. worksetID, pWSGroup->wsg, pomClient));
  11140. DC_EXIT_POINT:
  11141. UT_Unlock(UTLOCK_OM);
  11142. DebugExitDWORD(OM_WorksetLockReq, rc);
  11143. return(rc);
  11144. }
  11145. //
  11146. // OM_WorksetUnlock(...)
  11147. //
  11148. void OM_WorksetUnlock
  11149. (
  11150. POM_CLIENT pomClient,
  11151. OM_WSGROUP_HANDLE hWSGroup,
  11152. OM_WORKSET_ID worksetID
  11153. )
  11154. {
  11155. POM_USAGE_REC pUsageRec;
  11156. POM_WSGROUP pWSGroup;
  11157. POM_WORKSET pWorkset;
  11158. POM_LOCK pLastLock;
  11159. OM_LOCK thisLock;
  11160. UINT rc = 0;
  11161. DebugEntry(OM_WorksetUnlock);
  11162. UT_Lock(UTLOCK_OM);
  11163. //
  11164. // Validate params:
  11165. //
  11166. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
  11167. &pUsageRec, &pWorkset);
  11168. pWSGroup = pUsageRec->pWSGroup;
  11169. TRACE_OUT(("Client 0x%08x requesting to unlock workset %u in WSG %d",
  11170. pomClient, worksetID, hWSGroup));
  11171. //
  11172. // Find the lock uppermost on the Client's lock stack:
  11173. //
  11174. pLastLock = (POM_LOCK)COM_BasedListFirst(&(pomClient->locks), FIELD_OFFSET(OM_LOCK, chain));
  11175. ASSERT((pLastLock != NULL));
  11176. //
  11177. // Assert that the lock uppermost on the lock stack is the one the
  11178. // Client is trying to release (i.e. that the workset IDs are the same
  11179. // and that the object ID of the lock on the stack is NULL):
  11180. //
  11181. thisLock.pWSGroup = pWSGroup;
  11182. thisLock.worksetID = worksetID;
  11183. ZeroMemory(&(thisLock.objectID), sizeof(OM_OBJECT_ID));
  11184. ASSERT(CompareLocks(pLastLock, &thisLock) == 0);
  11185. //
  11186. // Now call the common function to do the unlock:
  11187. //
  11188. WorksetUnlock(pomClient->putTask, pWSGroup, pWorkset);
  11189. //
  11190. // Remove the lock from the lock stack and free the memory:
  11191. //
  11192. COM_BasedListRemove(&(pLastLock->chain));
  11193. UT_FreeRefCount((void**)&pLastLock, FALSE);
  11194. TRACE_OUT(("Unlocked workset %u in WSG %d for Client 0x%08x",
  11195. worksetID, hWSGroup, pomClient));
  11196. UT_Unlock(UTLOCK_OM);
  11197. DebugExitVOID(OM_WorksetUnlock);
  11198. }
  11199. //
  11200. // OM_WorksetCountObjects(...)
  11201. //
  11202. void OM_WorksetCountObjects
  11203. (
  11204. POM_CLIENT pomClient,
  11205. OM_WSGROUP_HANDLE hWSGroup,
  11206. OM_WORKSET_ID worksetID,
  11207. UINT * pCount
  11208. )
  11209. {
  11210. POM_USAGE_REC pUsageRec;
  11211. POM_WORKSET pWorkset;
  11212. DebugEntry(OM_WorksetCountObjects);
  11213. UT_Lock(UTLOCK_OM);
  11214. //
  11215. // Validate params:
  11216. //
  11217. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY | SECONDARY,
  11218. &pUsageRec, &pWorkset);
  11219. //
  11220. // Extract <numObjects> field and put in *pCount:
  11221. //
  11222. *pCount = pWorkset->numObjects;
  11223. //
  11224. // Debug-only check:
  11225. //
  11226. CheckObjectCount(pUsageRec->pWSGroup, pWorkset);
  11227. TRACE_OUT(("Number of objects in workset %u in WSG %d = %u",
  11228. worksetID, hWSGroup, *pCount));
  11229. UT_Unlock(UTLOCK_OM);
  11230. DebugExitVOID(OM_WorksetCountObjects);
  11231. }
  11232. //
  11233. // OM_WorksetClear(...)
  11234. //
  11235. UINT OM_WorksetClear
  11236. (
  11237. POM_CLIENT pomClient,
  11238. OM_WSGROUP_HANDLE hWSGroup,
  11239. OM_WORKSET_ID worksetID
  11240. )
  11241. {
  11242. POM_USAGE_REC pUsageRec;
  11243. POM_WSGROUP pWSGroup;
  11244. POM_WORKSET pWorkset;
  11245. POMNET_OPERATION_PKT pPacket;
  11246. UINT rc = 0;
  11247. DebugEntry(OM_WorksetClear);
  11248. UT_Lock(UTLOCK_OM);
  11249. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
  11250. &pUsageRec, &pWorkset);
  11251. pWSGroup = pUsageRec->pWSGroup;
  11252. TRACE_OUT(("Client 0x%08x requesting to clear workset %u in WSG %d",
  11253. pomClient, worksetID, hWSGroup));
  11254. //
  11255. // Check workset isn't locked by somebody else (OK if locked by us):
  11256. //
  11257. CHECK_WORKSET_NOT_LOCKED(pWorkset);
  11258. //
  11259. // Check workset is not exhausted:
  11260. //
  11261. CHECK_WORKSET_NOT_EXHAUSTED(pWorkset);
  11262. //
  11263. // Generate, process and queue the WORKSET_NEW message:
  11264. //
  11265. rc = GenerateOpMessage(pWSGroup,
  11266. worksetID,
  11267. NULL, // no object ID
  11268. NULL, // no object data
  11269. OMNET_WORKSET_CLEAR,
  11270. &pPacket);
  11271. if (rc != 0)
  11272. {
  11273. DC_QUIT;
  11274. }
  11275. rc = ProcessWorksetClear(pomClient->putTask, g_pomPrimary,
  11276. pPacket, pWSGroup, pWorkset);
  11277. if (rc != 0)
  11278. {
  11279. DC_QUIT;
  11280. }
  11281. rc = QueueMessage(pomClient->putTask,
  11282. pWSGroup->pDomain,
  11283. pWSGroup->channelID,
  11284. NET_HIGH_PRIORITY,
  11285. pWSGroup,
  11286. pWorkset,
  11287. NULL, // no object record
  11288. (POMNET_PKT_HEADER) pPacket,
  11289. NULL, // no object data
  11290. TRUE);
  11291. if (rc != 0)
  11292. {
  11293. DC_QUIT;
  11294. }
  11295. TRACE_OUT(("Issued WorksetClear for workset %u in WSG %d for Client 0x%08x",
  11296. worksetID, hWSGroup, pomClient));
  11297. DC_EXIT_POINT:
  11298. if (rc != 0)
  11299. {
  11300. ERROR_OUT(("Error 0x%08x clearing workset %u in WSG %d for Client 0x%08x",
  11301. rc, worksetID, hWSGroup, pomClient));
  11302. }
  11303. UT_Unlock(UTLOCK_OM);
  11304. DebugExitDWORD(OM_WorksetClear, rc);
  11305. return(rc);
  11306. }
  11307. //
  11308. // OM_WorksetClearConfirm(...)
  11309. //
  11310. void OM_WorksetClearConfirm
  11311. (
  11312. POM_CLIENT pomClient,
  11313. OM_WSGROUP_HANDLE hWSGroup,
  11314. OM_WORKSET_ID worksetID
  11315. )
  11316. {
  11317. POM_USAGE_REC pUsageRec;
  11318. POM_PENDING_OP pPendingOp;
  11319. POM_WORKSET pWorkset;
  11320. UINT rc = 0;
  11321. DebugEntry(OM_WorksetClearConfirm);
  11322. UT_Lock(UTLOCK_OM);
  11323. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
  11324. &pUsageRec, &pWorkset);
  11325. TRACE_OUT(("Client 0x%08x confirming WorksetClear for workest %u in WSG %d",
  11326. pomClient, worksetID, hWSGroup));
  11327. //
  11328. // Find the pending clear that we've been asked to confirm (assume it is
  11329. // first clear we find in the pending operation queue):
  11330. //
  11331. FindPendingOp(pWorkset, 0, WORKSET_CLEAR, &pPendingOp);
  11332. //
  11333. // We assert that a relevant pending op was found:
  11334. //
  11335. ASSERT(pPendingOp != NULL);
  11336. //
  11337. // In versions which support object locking, we will need to unlock any
  11338. // objects that are both
  11339. //
  11340. // - locked, and
  11341. //
  11342. // - deleted by this Clear (remember that a Clear doesn't delete ALL
  11343. // objects but only those that were added before the Clear was
  11344. // issued).
  11345. //
  11346. //
  11347. // We also need to release any objects
  11348. //
  11349. // - that the Client was using and
  11350. //
  11351. // - which are to be deleted.
  11352. //
  11353. // Since it's rather a lot of effort to ensure both conditions, we just
  11354. // release all the objects the Client was using i.e. invoking
  11355. // ClearConfirm invalidates ALL object pointers obtained via ObjectRead,
  11356. // as specified in the API:
  11357. //
  11358. ReleaseAllObjects(pUsageRec, pWorkset);
  11359. //
  11360. // If an object which is to be deleted because of the clear has an
  11361. // operation pending on it, the IND event will be swallowed by the
  11362. // HiddenHandler.
  11363. //
  11364. // Note that we cannot call ConfirmAll (to confirm any pending
  11365. // operations on objects in the workset) at this point for the following
  11366. // reasons:
  11367. //
  11368. // - this Clear might not affect the objects on which we were confirming
  11369. // operations
  11370. //
  11371. // - the Client might have received the IND events and try to call a
  11372. // Confirm function in the future, which would cause an assertion
  11373. // failure
  11374. //
  11375. // - if the Client hasn't yet got the IND events it will never get them
  11376. // because the hidden handler will swallow them if this DoClear causes
  11377. // them to be deleted.
  11378. //
  11379. //
  11380. // Here we actually perform the clear:
  11381. //
  11382. // (with multiple local access to workset groups as we may have in R2.0,
  11383. // we can't necessarily clear a workset when just one Client has
  11384. // confirmed; exactly what we will do depends on the design on R2.0).
  11385. //
  11386. WorksetDoClear(pomClient->putTask, pUsageRec->pWSGroup, pWorkset, pPendingOp);
  11387. TRACE_OUT(("Confirmed Clear for workset %u in WSG %d for Client 0x%08x",
  11388. worksetID, hWSGroup, pomClient));
  11389. UT_Unlock(UTLOCK_OM);
  11390. DebugExitVOID(OM_WorksetClearConfirm);
  11391. }
  11392. //
  11393. // OM_ObjectAdd()
  11394. //
  11395. UINT OM_ObjectAdd
  11396. (
  11397. POM_CLIENT pomClient,
  11398. OM_WSGROUP_HANDLE hWSGroup,
  11399. OM_WORKSET_ID worksetID,
  11400. POM_OBJECTDATA * ppData,
  11401. UINT updateSize,
  11402. POM_OBJECT * ppObj,
  11403. OM_POSITION position
  11404. )
  11405. {
  11406. POM_USAGE_REC pUsageRec;
  11407. POM_WSGROUP pWSGroup;
  11408. POM_WORKSET pWorkset;
  11409. POM_OBJECTDATA pData;
  11410. OM_OBJECT_ID newObjectID;
  11411. UINT rc = 0;
  11412. DebugEntry(OM_ObjectAdd);
  11413. UT_Lock(UTLOCK_OM);
  11414. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
  11415. &pUsageRec, &pWorkset);
  11416. pData = *ppData;
  11417. ValidateObjectData(pData);
  11418. TRACE_OUT(("Client 0x%08x adding object to workset %u in WSG %d",
  11419. pomClient, worksetID, hWSGroup));
  11420. TRACE_OUT((" object data is at 0x%08x - size: %u",
  11421. pData, pData->length));
  11422. ASSERT((updateSize < OM_MAX_UPDATE_SIZE));
  11423. //
  11424. // Set up workset group pointer:
  11425. //
  11426. pWSGroup = pUsageRec->pWSGroup;
  11427. //
  11428. // Check workset isn't locked by somebody else (OK if locked by us):
  11429. //
  11430. CHECK_WORKSET_NOT_LOCKED(pWorkset);
  11431. //
  11432. // Check workset is not exhausted:
  11433. //
  11434. CHECK_WORKSET_NOT_EXHAUSTED(pWorkset);
  11435. //
  11436. // Call the internal function to add the object:
  11437. //
  11438. rc = ObjectAdd(pomClient->putTask, g_pomPrimary,
  11439. pWSGroup, pWorkset, pData, updateSize,
  11440. position, &newObjectID, ppObj);
  11441. if (rc != 0)
  11442. {
  11443. DC_QUIT;
  11444. }
  11445. //
  11446. // Remove the object from the unused objects list:
  11447. //
  11448. RemoveFromUnusedList(pUsageRec, pData);
  11449. //
  11450. // If all has gone well, we NULL the Client's pointer to the object
  11451. // data, since we now own the object and the Client is not supposed to
  11452. // refer to it again (unless, of course, it does an OM_ObjectRead).
  11453. //
  11454. *ppData = NULL;
  11455. DC_EXIT_POINT:
  11456. if (rc != 0)
  11457. {
  11458. ERROR_OUT(("ERROR %d adding object to workset %u in WSG %d for Client 0x%08x",
  11459. rc, pWorkset->worksetID, hWSGroup, pomClient));
  11460. }
  11461. UT_Unlock(UTLOCK_OM);
  11462. DebugExitDWORD(OM_ObjectAdd, rc);
  11463. return(rc);
  11464. }
  11465. //
  11466. // OM_ObjectMove()
  11467. //
  11468. UINT OM_ObjectMove
  11469. (
  11470. POM_CLIENT pomClient,
  11471. OM_WSGROUP_HANDLE hWSGroup,
  11472. OM_WORKSET_ID worksetID,
  11473. POM_OBJECT pObj,
  11474. OM_POSITION position
  11475. )
  11476. {
  11477. POM_USAGE_REC pUsageRec;
  11478. POM_WSGROUP pWSGroup;
  11479. POM_WORKSET pWorkset;
  11480. POMNET_OPERATION_PKT pPacket = NULL;
  11481. UINT rc = 0;
  11482. DebugEntry(OM_ObjectMove);
  11483. UT_Lock(UTLOCK_OM);
  11484. ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
  11485. &pUsageRec, &pWorkset);
  11486. TRACE_OUT(("Client 0x%08x moving object 0x%08x in workset %u in WSG %d (position: %s)...",
  11487. pomClient, pObj, worksetID, hWSGroup,
  11488. position == LAST ? "LAST" : "FIRST"));
  11489. //
  11490. // Set up workset group pointer:
  11491. //
  11492. pWSGroup = pUsageRec->pWSGroup;
  11493. //
  11494. // Check workset isn't locked by somebody else (OK if locked by us):
  11495. //
  11496. CHECK_WORKSET_NOT_LOCKED(pWorkset);
  11497. //
  11498. // Check workset is not exhausted:
  11499. //
  11500. CHECK_WORKSET_NOT_EXHAUSTED(pWorkset);
  11501. //
  11502. // Here we generate, process and queue an OBJECT_MOVE message:
  11503. //
  11504. rc = GenerateOpMessage(pWSGroup,
  11505. pWorkset->worksetID,
  11506. &(pObj->objectID),
  11507. NULL, // no object data
  11508. OMNET_OBJECT_MOVE,
  11509. &pPacket);
  11510. if (rc != 0)
  11511. {
  11512. pPacket = NULL;
  11513. DC_QUIT;
  11514. }
  11515. //
  11516. // Generate message doesn't put the position in the <misc1> field, so we
  11517. // do it here:
  11518. //
  11519. pPacket->position = position;
  11520. //
  11521. // QueueMessage may free the packet (if we're not in a call) but we need
  11522. // to process it in a minute so bump the use count:
  11523. //
  11524. UT_BumpUpRefCount(pPacket);
  11525. rc = QueueMessage(pomClient->putTask,
  11526. pWSGroup->pDomain,
  11527. pWSGroup->channelID,
  11528. NET_HIGH_PRIORITY,
  11529. pWSGroup,
  11530. pWorkset,
  11531. pObj,
  11532. (POMNET_PKT_HEADER) pPacket,
  11533. NULL, // no object data for a MOVE
  11534. TRUE);
  11535. if (rc != 0)
  11536. {
  11537. DC_QUIT;
  11538. }
  11539. ProcessObjectMove(pomClient->putTask, pPacket, pWorkset, pObj);
  11540. DC_EXIT_POINT:
  11541. if (pPacket != NULL)
  11542. {
  11543. //
  11544. // Do this on success OR error since we bumped up the ref count above.
  11545. //
  11546. UT_FreeRefCount((void**)&pPacket, FALSE);
  11547. }
  11548. if (rc != 0)
  11549. {
  11550. ERROR_OUT(("ERROR %d moving object 0x%08x in workset %u in WSG %d",
  11551. rc, pObj, worksetID, hWSGroup));
  11552. }
  11553. UT_Unlock(UTLOCK_OM);
  11554. DebugExitDWORD(OM_ObjectMove, rc);
  11555. return(rc);
  11556. }
  11557. //
  11558. // OM_ObjectDelete(...)
  11559. //
  11560. UINT OM_ObjectDelete
  11561. (
  11562. POM_CLIENT pomClient,
  11563. OM_WSGROUP_HANDLE hWSGroup,
  11564. OM_WORKSET_ID worksetID,
  11565. POM_OBJECT pObj
  11566. )
  11567. {
  11568. POM_USAGE_REC pUsageRec;
  11569. POM_WORKSET pWorkset;
  11570. UINT rc = 0;
  11571. DebugEntry(OM_ObjectDelete);
  11572. UT_Lock(UTLOCK_OM);
  11573. ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
  11574. &pUsageRec, &pWorkset);
  11575. TRACE_OUT(("Client 0x%08x requesting to delete object 0x%08x from workset %u in WSG %d",
  11576. pomClient, pObj, worksetID, hWSGroup));
  11577. //
  11578. // Check workset isn't locked by somebody else (OK if locked by us):
  11579. //
  11580. CHECK_WORKSET_NOT_LOCKED(pWorkset);
  11581. //
  11582. // Check workset is not exhausted:
  11583. //
  11584. CHECK_WORKSET_NOT_EXHAUSTED(pWorkset);
  11585. //
  11586. // If there is already a Delete pending for the object, we return an
  11587. // error and do not post the delete indication event.
  11588. //
  11589. // If we returned success, we would then have to post another event,
  11590. // since the Client may wait for it. If we post the event, the Client
  11591. // will probably invoke DeleteConfirm a second time when it is
  11592. // unexpected, thereby causing an assertion failure.
  11593. //
  11594. // Note that we cannot rely on the hidden handler to get us out of this
  11595. // one, since the Client might receive the second event before
  11596. // processing the first one, so the handler would have no way of knowing
  11597. // to trap the event.
  11598. //
  11599. //
  11600. // So, to find out if there's a delete pending, check the flag in the
  11601. // object record:
  11602. //
  11603. if (pObj->flags & PENDING_DELETE)
  11604. {
  11605. TRACE_OUT(("Client tried to delete object already being deleted (0x%08x)",
  11606. pObj));
  11607. rc = OM_RC_OBJECT_DELETED;
  11608. DC_QUIT;
  11609. }
  11610. //
  11611. // Here we call the ObjectDelete function to generate, process and queue
  11612. // an OBJECT_DELETE message:
  11613. //
  11614. rc = ObjectDRU(pomClient->putTask,
  11615. pUsageRec->pWSGroup,
  11616. pWorkset,
  11617. pObj,
  11618. NULL,
  11619. OMNET_OBJECT_DELETE);
  11620. if (rc != 0)
  11621. {
  11622. DC_QUIT;
  11623. }
  11624. //
  11625. // Remember, the delete doesn't actually happen until the local
  11626. // Client(s) have invoked DeleteConfirm().
  11627. //
  11628. DC_EXIT_POINT:
  11629. //
  11630. // SFR5843: Don't trace an error if the object has been deleted - this
  11631. // is just safe race condition.
  11632. //
  11633. if ((rc != 0) && (rc != OM_RC_OBJECT_DELETED))
  11634. {
  11635. ERROR_OUT(("ERROR %d issuing delete for object 0x%08x in WSG %d:%hu",
  11636. rc, pObj, hWSGroup, worksetID));
  11637. }
  11638. UT_Unlock(UTLOCK_OM);
  11639. DebugExitDWORD(OM_ObjectDelete, rc);
  11640. return(rc);
  11641. }
  11642. //
  11643. // OM_ObjectDeleteConfirm
  11644. //
  11645. void OM_ObjectDeleteConfirm
  11646. (
  11647. POM_CLIENT pomClient,
  11648. OM_WSGROUP_HANDLE hWSGroup,
  11649. OM_WORKSET_ID worksetID,
  11650. POM_OBJECT pObj
  11651. )
  11652. {
  11653. POM_WORKSET pWorkset;
  11654. POM_USAGE_REC pUsageRec;
  11655. POM_PENDING_OP pPendingOp;
  11656. POM_PENDING_OP pOtherPendingOp;
  11657. UINT rc = 0;
  11658. DebugEntry(OM_ObjectDeleteConfirm);
  11659. UT_Lock(UTLOCK_OM);
  11660. ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
  11661. &pUsageRec, &pWorkset);
  11662. //
  11663. // To check that there is indeed a Delete pending for the object, we
  11664. // look in the workset's pending operation list.
  11665. //
  11666. FindPendingOp(pWorkset, pObj, OBJECT_DELETE, &pPendingOp);
  11667. //
  11668. // We assert that a relevant pending op was found:
  11669. //
  11670. ASSERT((pPendingOp != NULL));
  11671. //
  11672. // Call ObjectRelease, to release the object (will be a no-op and return
  11673. // NOT_FOUND if the Client hasn't done a Read on it):
  11674. //
  11675. rc = ObjectRelease(pUsageRec, worksetID, pObj);
  11676. ASSERT(((rc == 0) || (rc == OM_RC_OBJECT_NOT_FOUND)));
  11677. //
  11678. // If we are going to confirm the delete, then we must ensure that any
  11679. // pending update or replace is carried out too. There can be only one
  11680. // of each, so check as follows (ther order we do them in is not
  11681. // relevant):
  11682. //
  11683. FindPendingOp(pWorkset, pObj, OBJECT_REPLACE, &pOtherPendingOp);
  11684. if (pOtherPendingOp != NULL)
  11685. {
  11686. ObjectDoReplace(pomClient->putTask,
  11687. pUsageRec->pWSGroup, pWorkset, pObj, pOtherPendingOp);
  11688. }
  11689. FindPendingOp(pWorkset, pObj, OBJECT_UPDATE, &pOtherPendingOp);
  11690. if (pOtherPendingOp != NULL)
  11691. {
  11692. ObjectDoUpdate(pomClient->putTask,
  11693. pUsageRec->pWSGroup, pWorkset, pObj, pOtherPendingOp);
  11694. }
  11695. //
  11696. // Perform the Delete:
  11697. //
  11698. ObjectDoDelete(pomClient->putTask, pUsageRec->pWSGroup, pWorkset, pObj, pPendingOp);
  11699. UT_Unlock(UTLOCK_OM);
  11700. DebugExitVOID(OM_ObjectDeleteConfirm);
  11701. }
  11702. //
  11703. // OM_ObjectReplace(...)
  11704. //
  11705. UINT OM_ObjectReplace
  11706. (
  11707. POM_CLIENT pomClient,
  11708. OM_WSGROUP_HANDLE hWSGroup,
  11709. OM_WORKSET_ID worksetID,
  11710. POM_OBJECT pObj,
  11711. POM_OBJECTDATA * ppData
  11712. )
  11713. {
  11714. POM_USAGE_REC pUsageRec;
  11715. POM_WORKSET pWorkset;
  11716. POM_OBJECTDATA pData;
  11717. UINT rc = 0;
  11718. DebugEntry(OM_ObjectReplace);
  11719. UT_Lock(UTLOCK_OM);
  11720. ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
  11721. &pUsageRec, &pWorkset);
  11722. pData = *ppData;
  11723. ValidateObjectData(pData);
  11724. //
  11725. // Check that the Client is not attempting to replace the object with
  11726. // one smaller that the object's update size (which is the minimum size
  11727. // for a replace):
  11728. //
  11729. ASSERT((pData->length >= pObj->updateSize));
  11730. //
  11731. // Check workset isn't locked by somebody else (OK if locked by us):
  11732. //
  11733. CHECK_WORKSET_NOT_LOCKED(pWorkset);
  11734. //
  11735. // Check workset is not exhausted:
  11736. //
  11737. CHECK_WORKSET_NOT_EXHAUSTED(pWorkset);
  11738. //
  11739. // If the object is in the process of being deleted, we prevent the
  11740. // Replace. This is because if we don't, the Client will get a
  11741. // REPLACE_IND event after it has got (and processed) a DELETE event for
  11742. // the object.
  11743. //
  11744. if (pObj->flags & PENDING_DELETE)
  11745. {
  11746. TRACE_OUT(("Client 0x%08x tried to replace object being deleted (0x%08x)",
  11747. pomClient, pObj));
  11748. rc = OM_RC_OBJECT_DELETED;
  11749. DC_QUIT;
  11750. }
  11751. //
  11752. // When object locking supported, need to prevent object replace when
  11753. // object is locked.
  11754. //
  11755. //
  11756. // Generate, process and queue an OBJECT_REPLACE message:
  11757. //
  11758. rc = ObjectDRU(pomClient->putTask,
  11759. pUsageRec->pWSGroup,
  11760. pWorkset,
  11761. pObj,
  11762. pData,
  11763. OMNET_OBJECT_REPLACE);
  11764. if (rc != 0)
  11765. {
  11766. DC_QUIT;
  11767. }
  11768. //
  11769. // Remove the object from the unused objects list:
  11770. //
  11771. RemoveFromUnusedList(pUsageRec, pData);
  11772. //
  11773. // NULL the Client's pointer to the object:
  11774. //
  11775. *ppData = NULL;
  11776. TRACE_OUT(("Queued replace for object 0x%08x in workset %u for Client 0x%08x",
  11777. pObj, worksetID, pomClient));
  11778. DC_EXIT_POINT:
  11779. //
  11780. // SFR5843: Don't trace an error if the object has been deleted - this
  11781. // is just safe race condition.
  11782. //
  11783. if ((rc != 0) && (rc != OM_RC_OBJECT_DELETED))
  11784. {
  11785. ERROR_OUT(("ERROR %d issuing replace for object 0x%08x in WSG %d:%hu",
  11786. rc, pObj, hWSGroup, worksetID));
  11787. }
  11788. UT_Unlock(UTLOCK_OM);
  11789. DebugExitDWORD(OM_ObjectReplace, rc);
  11790. return(rc);
  11791. }
  11792. //
  11793. // OM_ObjectUpdate
  11794. //
  11795. UINT OM_ObjectUpdate
  11796. (
  11797. POM_CLIENT pomClient,
  11798. OM_WSGROUP_HANDLE hWSGroup,
  11799. OM_WORKSET_ID worksetID,
  11800. POM_OBJECT pObj,
  11801. POM_OBJECTDATA * ppData
  11802. )
  11803. {
  11804. POM_USAGE_REC pUsageRec;
  11805. POM_WORKSET pWorkset;
  11806. POM_OBJECTDATA pData;
  11807. UINT rc = 0;
  11808. DebugEntry(OM_ObjectUpdate);
  11809. UT_Lock(UTLOCK_OM);
  11810. ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
  11811. &pUsageRec, &pWorkset);
  11812. pData = *ppData;
  11813. ValidateObjectData(pData);
  11814. //
  11815. // Check size of update equals the update size for the object:
  11816. //
  11817. ASSERT((pData->length == pObj->updateSize));
  11818. TRACE_OUT(("Update request is for first 0x%08x bytes, starting at 0x%08x",
  11819. pData->length, pData->data));
  11820. //
  11821. // Check workset isn't locked by somebody else (OK if locked by us):
  11822. //
  11823. CHECK_WORKSET_NOT_LOCKED(pWorkset);
  11824. //
  11825. // Check workset is not exhausted:
  11826. //
  11827. CHECK_WORKSET_NOT_EXHAUSTED(pWorkset);
  11828. //
  11829. // If the object is in the process of being deleted, we prevent the
  11830. // Update. This is because if we don't, the Client will get a
  11831. // UPDATE_IND event after it has got (and processed) a DELETE event for
  11832. // the object.
  11833. //
  11834. if (pObj->flags & PENDING_DELETE)
  11835. {
  11836. TRACE_OUT(("Client 0x%08x tried to update object being deleted (0x%08x)",
  11837. pomClient, pObj));
  11838. rc = OM_RC_OBJECT_DELETED;
  11839. DC_QUIT;
  11840. }
  11841. //
  11842. // When object locking supported, need to prevent object update/replace
  11843. // when object is locked.
  11844. //
  11845. //
  11846. // Generate, process and queue an OBJECT_UPDATE message:
  11847. //
  11848. rc = ObjectDRU(pomClient->putTask,
  11849. pUsageRec->pWSGroup,
  11850. pWorkset,
  11851. pObj,
  11852. pData,
  11853. OMNET_OBJECT_UPDATE);
  11854. if (rc != 0)
  11855. {
  11856. DC_QUIT;
  11857. }
  11858. //
  11859. // Remove the object from the unused objects list:
  11860. //
  11861. RemoveFromUnusedList(pUsageRec, pData);
  11862. //
  11863. // NULL the Client's pointer to the object:
  11864. //
  11865. *ppData = NULL;
  11866. TRACE_OUT(("Queued update for object 0x%08x in workset %u for Client 0x%08x",
  11867. pObj, worksetID, pomClient));
  11868. DC_EXIT_POINT:
  11869. //
  11870. // SFR5843: Don't trace an error if the object has been deleted - this
  11871. // is just safe race condition.
  11872. //
  11873. if ((rc != 0) && (rc != OM_RC_OBJECT_DELETED))
  11874. {
  11875. ERROR_OUT(("ERROR %d issuing update for object 0x%08x in WSG %d:%hu",
  11876. rc, pObj, hWSGroup, worksetID));
  11877. }
  11878. UT_Unlock(UTLOCK_OM);
  11879. DebugExitDWORD(OM_ObjectUpdate, rc);
  11880. return(rc);
  11881. }
  11882. //
  11883. // OM_ObjectReplaceConfirm(...)
  11884. //
  11885. void OM_ObjectReplaceConfirm
  11886. (
  11887. POM_CLIENT pomClient,
  11888. OM_WSGROUP_HANDLE hWSGroup,
  11889. OM_WORKSET_ID worksetID,
  11890. POM_OBJECT pObj
  11891. )
  11892. {
  11893. POM_WORKSET pWorkset;
  11894. POM_USAGE_REC pUsageRec;
  11895. POM_PENDING_OP pPendingOp;
  11896. UINT rc = 0;
  11897. DebugEntry(OM_ObjectReplaceConfirm);
  11898. UT_Lock(UTLOCK_OM);
  11899. //
  11900. // Here, we do our usual parameter validation, but we don't want to
  11901. // assert if the object has been delete-confirmed already, so we modify
  11902. // the code from ValidateParams4 a bit:
  11903. //
  11904. ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
  11905. &pUsageRec, &pWorkset);
  11906. //
  11907. // Retrieve the Replace operation from the object's pending op queue (we
  11908. // want the first REPLACE operation on the queue, so we start from the
  11909. // head):
  11910. //
  11911. FindPendingOp(pWorkset, pObj, OBJECT_REPLACE, &pPendingOp);
  11912. ASSERT((pPendingOp != NULL));
  11913. //
  11914. // Call ObjectRelease, to release the object (will be a no-op if the
  11915. // Client hasn't done a Read on it):
  11916. //
  11917. rc = ObjectRelease(pUsageRec, worksetID, pObj);
  11918. ASSERT(((rc == 0) || (rc == OM_RC_OBJECT_NOT_FOUND)));
  11919. //
  11920. // Call the internal function to perform the actual Replace:
  11921. //
  11922. ObjectDoReplace(pomClient->putTask, pUsageRec->pWSGroup, pWorkset, pObj, pPendingOp);
  11923. TRACE_OUT(("Confirmed Replace for object 0x%08x in workset %u for Client 0x%08x",
  11924. pObj, worksetID, pomClient));
  11925. UT_Unlock(UTLOCK_OM);
  11926. DebugExitVOID(OM_ObjectReplaceConfirm);
  11927. }
  11928. //
  11929. // OM_ObjectUpdateConfirm(...)
  11930. //
  11931. void OM_ObjectUpdateConfirm
  11932. (
  11933. POM_CLIENT pomClient,
  11934. OM_WSGROUP_HANDLE hWSGroup,
  11935. OM_WORKSET_ID worksetID,
  11936. POM_OBJECT pObj
  11937. )
  11938. {
  11939. POM_USAGE_REC pUsageRec;
  11940. POM_WORKSET pWorkset;
  11941. POM_PENDING_OP pPendingOp;
  11942. UINT rc = 0;
  11943. DebugEntry(OM_ObjectUpdateConfirm);
  11944. UT_Lock(UTLOCK_OM);
  11945. ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
  11946. &pUsageRec, &pWorkset);
  11947. //
  11948. // Retrieve the Update operation from the object's pending op queue (we
  11949. // want the first UPDATE operation on the queue, so we start from the
  11950. // head):
  11951. //
  11952. FindPendingOp(pWorkset, pObj, OBJECT_UPDATE, &pPendingOp);
  11953. ASSERT((pPendingOp != NULL));
  11954. //
  11955. // Call ObjectRelease, to release the object (will be a no-op if the
  11956. // Client hasn't done a Read on it):
  11957. //
  11958. rc = ObjectRelease(pUsageRec, worksetID, pObj);
  11959. ASSERT(((rc == 0) || (rc == OM_RC_OBJECT_NOT_FOUND)));
  11960. //
  11961. // Call the internal function to perform the actual Update:
  11962. //
  11963. ObjectDoUpdate(pomClient->putTask, pUsageRec->pWSGroup, pWorkset, pObj, pPendingOp);
  11964. TRACE_OUT(("Confirmed Update for object 0x%08x in workset %u for Client 0x%08x",
  11965. pObj, worksetID, pomClient));
  11966. UT_Unlock(UTLOCK_OM);
  11967. DebugExitVOID(OM_ObjectUpdateConfirm);
  11968. }
  11969. //
  11970. // OM_ObjectH()
  11971. // Gets a ptr to the first/next/previous/last object
  11972. //
  11973. UINT OM_ObjectH
  11974. (
  11975. POM_CLIENT pomClient,
  11976. OM_WSGROUP_HANDLE hWSGroup,
  11977. OM_WORKSET_ID worksetID,
  11978. POM_OBJECT pObjOther,
  11979. POM_OBJECT * ppObj,
  11980. OM_POSITION omPos
  11981. )
  11982. {
  11983. POM_USAGE_REC pUsageRec;
  11984. POM_WORKSET pWorkset;
  11985. UINT rc = 0;
  11986. DebugEntry(OM_ObjectH);
  11987. UT_Lock(UTLOCK_OM);
  11988. //
  11989. // Validate params. If no hOtherObject (like in first/last), don't validate hOtherObject
  11990. //
  11991. if ((omPos == FIRST) || (omPos == LAST))
  11992. {
  11993. ASSERT(pObjOther == NULL);
  11994. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY | SECONDARY,
  11995. &pUsageRec, &pWorkset);
  11996. if (omPos == FIRST)
  11997. omPos = AFTER;
  11998. else
  11999. omPos = BEFORE;
  12000. }
  12001. else
  12002. {
  12003. ValidateParams4(pomClient, hWSGroup, worksetID, pObjOther,
  12004. PRIMARY | SECONDARY, &pUsageRec, &pWorkset);
  12005. }
  12006. //
  12007. // Get the object pointer
  12008. //
  12009. //
  12010. // Here we derive a pointer to what is "probably" the object record
  12011. // we're looking for:
  12012. //
  12013. if (pObjOther == NULL)
  12014. {
  12015. //
  12016. // Remember, if *ppObj == 0, then we're looking for the first or
  12017. // last object in the workset:
  12018. //
  12019. if (omPos == AFTER)
  12020. {
  12021. TRACE_OUT(("Getting first object in workset %u", worksetID));
  12022. *ppObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  12023. }
  12024. else
  12025. {
  12026. TRACE_OUT(("Getting last object in workset %u", worksetID));
  12027. *ppObj = (POM_OBJECT)COM_BasedListLast(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  12028. }
  12029. }
  12030. else
  12031. {
  12032. *ppObj = pObjOther;
  12033. if (omPos == AFTER)
  12034. {
  12035. TRACE_OUT(("Getting object after 0x%08x in workset %u",
  12036. pObjOther, worksetID));
  12037. *ppObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObjOther, FIELD_OFFSET(OM_OBJECT, chain));
  12038. }
  12039. else
  12040. {
  12041. TRACE_OUT(("Getting object before 0x%08x in workset %u",
  12042. pObjOther, worksetID));
  12043. *ppObj = (POM_OBJECT)COM_BasedListPrev(&(pWorkset->objects), pObjOther, FIELD_OFFSET(OM_OBJECT, chain));
  12044. }
  12045. }
  12046. //
  12047. // ppObj now has "probably" a pointer to the object we're looking for,
  12048. // but now we need to skip deleted objects.
  12049. //
  12050. while ((*ppObj != NULL) && ((*ppObj)->flags & DELETED))
  12051. {
  12052. ValidateObject(*ppObj);
  12053. if (omPos == AFTER)
  12054. {
  12055. *ppObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), *ppObj, FIELD_OFFSET(OM_OBJECT, chain));
  12056. }
  12057. else
  12058. {
  12059. *ppObj = (POM_OBJECT)COM_BasedListPrev(&(pWorkset->objects), *ppObj, FIELD_OFFSET(OM_OBJECT, chain));
  12060. }
  12061. }
  12062. if (*ppObj == NULL)
  12063. {
  12064. rc = OM_RC_NO_SUCH_OBJECT;
  12065. }
  12066. UT_Unlock(UTLOCK_OM);
  12067. DebugExitDWORD(OM_ObjectH, rc);
  12068. return(rc);
  12069. }
  12070. //
  12071. // OM_ObjectIDToPtr(...)
  12072. //
  12073. UINT OM_ObjectIDToPtr
  12074. (
  12075. POM_CLIENT pomClient,
  12076. OM_WSGROUP_HANDLE hWSGroup,
  12077. OM_WORKSET_ID worksetID,
  12078. OM_OBJECT_ID objectID,
  12079. POM_OBJECT * ppObj
  12080. )
  12081. {
  12082. POM_USAGE_REC pUsageRec;
  12083. POM_WORKSET pWorkset;
  12084. UINT rc = 0;
  12085. DebugEntry(OM_ObjectIDToPtr);
  12086. UT_Lock(UTLOCK_OM);
  12087. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY | SECONDARY,
  12088. &pUsageRec, &pWorkset);
  12089. //
  12090. // Now call the internal function to do the search for the ID:
  12091. //
  12092. rc = ObjectIDToPtr(pWorkset, objectID, ppObj);
  12093. if (rc == OM_RC_OBJECT_DELETED)
  12094. {
  12095. //
  12096. // This internal function returns OBJECT_DELETED if the object record
  12097. // was found but is marked as deleted. We map this to BAD_OBJECT_ID
  12098. // since that's all we externalise to Clients:
  12099. //
  12100. rc = OM_RC_BAD_OBJECT_ID;
  12101. }
  12102. else if (rc == OM_RC_OBJECT_PENDING_DELETE)
  12103. {
  12104. //
  12105. // If we get back PENDING_DELETE, then we map this to OK, since as
  12106. // far as the Client is concerned, the object still exists:
  12107. //
  12108. rc = 0;
  12109. }
  12110. if (rc == OM_RC_BAD_OBJECT_ID)
  12111. {
  12112. WARNING_OUT(("No object found in workset with ID 0x%08x:0x%08x",
  12113. objectID.creator, objectID.sequence));
  12114. }
  12115. else if (rc != 0)
  12116. {
  12117. ERROR_OUT(("ERROR %d converting object ID (0x%08x:0x%08x) to handle",
  12118. rc, objectID.creator, objectID.sequence));
  12119. }
  12120. else
  12121. {
  12122. TRACE_OUT(("Converted object ID (0x%08x:0x%08x) to handle (0x%08x)",
  12123. objectID.creator, objectID.sequence, *ppObj));
  12124. }
  12125. UT_Unlock(UTLOCK_OM);
  12126. DebugExitDWORD(OM_ObjectIDToPtr, rc);
  12127. return(rc);
  12128. }
  12129. //
  12130. // OM_ObjectPtrToID(...)
  12131. //
  12132. void OM_ObjectPtrToID
  12133. (
  12134. POM_CLIENT pomClient,
  12135. OM_WSGROUP_HANDLE hWSGroup,
  12136. OM_WORKSET_ID worksetID,
  12137. POM_OBJECT pObj,
  12138. POM_OBJECT_ID pObjectID
  12139. )
  12140. {
  12141. POM_USAGE_REC pUsageRec;
  12142. POM_WORKSET pWorkset;
  12143. UINT rc = 0;
  12144. DebugEntry(OM_ObjectPtrToID);
  12145. UT_Lock(UTLOCK_OM);
  12146. ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY | SECONDARY,
  12147. &pUsageRec, &pWorkset);
  12148. //
  12149. // Extract ID from object record:
  12150. //
  12151. memcpy(pObjectID, &pObj->objectID, sizeof(OM_OBJECT_ID));
  12152. TRACE_OUT(("Retrieved object ID 0x%08x:0x%08x for object 0x%08x in workset %u",
  12153. pObjectID->creator, pObjectID->sequence, pObj, worksetID));
  12154. UT_Unlock(UTLOCK_OM);
  12155. DebugExitVOID(OM_ObjectHandleToID);
  12156. }
  12157. //
  12158. // OM_ObjectRead(...)
  12159. //
  12160. UINT OM_ObjectRead
  12161. (
  12162. POM_CLIENT pomClient,
  12163. OM_WSGROUP_HANDLE hWSGroup,
  12164. OM_WORKSET_ID worksetID,
  12165. POM_OBJECT pObj,
  12166. POM_OBJECTDATA * ppData
  12167. )
  12168. {
  12169. POM_USAGE_REC pUsageRec;
  12170. POM_WORKSET pWorkset;
  12171. POM_OBJECT_LIST pListEntry;
  12172. UINT rc = 0;
  12173. DebugEntry(OM_ObjectRead);
  12174. UT_Lock(UTLOCK_OM);
  12175. ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY | SECONDARY,
  12176. &pUsageRec, &pWorkset);
  12177. //
  12178. // Check the Client hasn't already read this object without releasing
  12179. // it:
  12180. //
  12181. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pUsageRec->objectsInUse),
  12182. (void**)&pListEntry, FIELD_OFFSET(OM_OBJECT_LIST, chain),
  12183. FIELD_OFFSET(OM_OBJECT_LIST, pObj), (DWORD_PTR)pObj,
  12184. FIELD_SIZE(OM_OBJECT_LIST, pObj));
  12185. ASSERT(pListEntry == NULL);
  12186. //
  12187. // Convert object handle to a pointer to the object data:
  12188. //
  12189. *ppData = pObj->pData;
  12190. if (!*ppData)
  12191. {
  12192. ERROR_OUT(("OM_ObjectRead: Object 0x%08x has no data", pObj));
  12193. rc = UT_RC_NO_MEM;
  12194. DC_QUIT;
  12195. }
  12196. //
  12197. // Bump up the use count of the chunk so it won't be freed until the
  12198. // Client calls OM_ObjectRelease (explicitly or implicitly via e.g
  12199. // DeleteConfirm)
  12200. //
  12201. UT_BumpUpRefCount(*ppData);
  12202. //
  12203. // We need to add this object's handle to the Client's list of
  12204. // objects-in-use, so allocate some memory for the object...
  12205. //
  12206. pListEntry = (POM_OBJECT_LIST)UT_MallocRefCount(sizeof(OM_OBJECT_LIST), TRUE);
  12207. if (!pListEntry)
  12208. {
  12209. rc = UT_RC_NO_MEM;
  12210. DC_QUIT;
  12211. }
  12212. SET_STAMP(pListEntry, OLIST);
  12213. //
  12214. // ...fill in the fields...
  12215. //
  12216. pListEntry->pObj = pObj;
  12217. pListEntry->worksetID = worksetID;
  12218. //
  12219. // ...and insert into the list:
  12220. //
  12221. COM_BasedListInsertBefore(&(pUsageRec->objectsInUse),
  12222. &(pListEntry->chain));
  12223. TRACE_OUT(("Read object at 0x%08x (handle: 0x%08x) for Client 0x%08x",
  12224. *ppData, pObj, pomClient));
  12225. DC_EXIT_POINT:
  12226. if (rc != 0)
  12227. {
  12228. //
  12229. // Cleanup:
  12230. //
  12231. ERROR_OUT(("ERROR %d reading object 0x%08x in workset %u in WSG %d",
  12232. rc, pObj, worksetID, hWSGroup));
  12233. if (pListEntry != NULL)
  12234. {
  12235. UT_FreeRefCount((void**)&pListEntry, FALSE);
  12236. }
  12237. if (*ppData)
  12238. UT_FreeRefCount((void**)ppData, FALSE);
  12239. }
  12240. UT_Unlock(UTLOCK_OM);
  12241. DebugExitDWORD(OM_ObjectRead, rc);
  12242. return(rc);
  12243. }
  12244. //
  12245. // OM_ObjectRelease()
  12246. //
  12247. void OM_ObjectRelease
  12248. (
  12249. POM_CLIENT pomClient,
  12250. OM_WSGROUP_HANDLE hWSGroup,
  12251. OM_WORKSET_ID worksetID,
  12252. POM_OBJECT pObj,
  12253. POM_OBJECTDATA * ppData
  12254. )
  12255. {
  12256. POM_USAGE_REC pUsageRec;
  12257. POM_WORKSET pWorkset;
  12258. UINT rc = 0;
  12259. DebugEntry(OM_ObjectRelease);
  12260. UT_Lock(UTLOCK_OM);
  12261. ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY | SECONDARY,
  12262. &pUsageRec, &pWorkset);
  12263. //
  12264. // Check that the object pointer and object handle match:
  12265. //
  12266. ASSERT(pObj->pData == *ppData);
  12267. //
  12268. // Now try to release the object from the objects-in-use list:
  12269. //
  12270. rc = ObjectRelease(pUsageRec, worksetID, pObj);
  12271. //
  12272. // ObjectRelease will return an error if the object handle wasn't found
  12273. // in the objects-in-use list. As far as we're concerned, this is an
  12274. // assert-level error:
  12275. //
  12276. ASSERT((rc == 0));
  12277. //
  12278. // NULL the Client's pointer:
  12279. //
  12280. *ppData = NULL;
  12281. TRACE_OUT(("Released Client 0x%08x's hold on object 0x%08x in workset %u in WSG %d",
  12282. pomClient, pObj, worksetID, hWSGroup));
  12283. UT_Unlock(UTLOCK_OM);
  12284. DebugExitVOID(OM_ObjectRelease);
  12285. }
//
// OM_ObjectAlloc(...)
//
// Allocates a data chunk of <size> bytes (plus the length header) for a
// future object in <worksetID>, records it in the Client's
// unused-objects list and returns it in *ppData.  The chunk stays on
// that list until Add/Replace/Update claims it or Discard frees it.
//
// Returns: 0 on success, UT_RC_NO_MEM on allocation failure.
//
UINT OM_ObjectAlloc
(
    POM_CLIENT          pomClient,
    OM_WSGROUP_HANDLE   hWSGroup,
    OM_WORKSET_ID       worksetID,
    UINT                size,
    POM_OBJECTDATA *    ppData
)
{
    POM_USAGE_REC       pUsageRec;
    POM_WORKSET         pWorkset;
    POM_OBJECTDATA_LIST pListEntry = NULL;   // NULL so cleanup can test it safely
    UINT                rc = 0;

    DebugEntry(OM_ObjectAlloc);

    UT_Lock(UTLOCK_OM);

    ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
        &pUsageRec, &pWorkset);

    TRACE_OUT(("Client 0x%08x requesting to allocate 0x%08x bytes "
        "for object for workset %u in WSG %d",
        pomClient, size, worksetID, hWSGroup));

    //
    // Check request not too big, and not empty.
    //
    // NOTE(review): sizeof(OM_MAX_OBJECT_SIZE) is the size of the
    // constant's *type* (presumably the 4-byte <size>/length header
    // field), not of the maximum object itself - confirm this was the
    // intent.
    //
    ASSERT((size < OM_MAX_OBJECT_SIZE - sizeof(OM_MAX_OBJECT_SIZE)));
    ASSERT((size > 0));

    //
    // Allocate a chunk of memory for the object (note that we add 4
    // bytes to the size the Client asked for (i.e. the <size> parameter)
    // since the API stipulates that this does not include the <size>
    // field which is at the start of the object.
    //
    *ppData = (POM_OBJECTDATA)UT_MallocRefCount(size + sizeof(OM_MAX_OBJECT_SIZE), FALSE);
    if (! *ppData)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }

    // Only the first min(size, OM_ZERO_OBJECT_SIZE) bytes are zeroed -
    // a deliberate partial init to keep large allocations cheap.
    ZeroMemory(*ppData, min(size, OM_ZERO_OBJECT_SIZE));

    //
    // Now insert a reference to this chunk in the Client's unused-objects
    // list (will be removed by Add, Replace, Update or Discard functions).
    //
    pListEntry = (POM_OBJECTDATA_LIST)UT_MallocRefCount(sizeof(OM_OBJECTDATA_LIST), TRUE);
    if (!pListEntry)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    SET_STAMP(pListEntry, ODLIST);

    pListEntry->pData = *ppData;
    pListEntry->size = size;
    pListEntry->worksetID = worksetID;

    COM_BasedListInsertBefore(&(pUsageRec->unusedObjects),
        &(pListEntry->chain));

    TRACE_OUT(("Allocated object starting at 0x%08x", *ppData));

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // Cleanup: free whichever of the chunk / list entry was actually
        // allocated (UT_FreeRefCount NULLs the pointer it frees).
        //
        ERROR_OUT(("ERROR %d allocating object (size: 0x%08x) for Client 0x%08x",
            rc, size + sizeof(OM_MAX_OBJECT_SIZE), pomClient));
        if (pListEntry != NULL)
        {
            UT_FreeRefCount((void**)&pListEntry, FALSE);
        }
        if (*ppData != NULL)
        {
            UT_FreeRefCount((void**)ppData, FALSE);
        }
    }

    UT_Unlock(UTLOCK_OM);

    DebugExitDWORD(OM_ObjectAlloc, rc);
    return(rc);
}
  12368. //
  12369. // OM_ObjectDiscard(...)
  12370. //
  12371. void OM_ObjectDiscard
  12372. (
  12373. POM_CLIENT pomClient,
  12374. OM_WSGROUP_HANDLE hWSGroup,
  12375. OM_WORKSET_ID worksetID,
  12376. POM_OBJECTDATA * ppData
  12377. )
  12378. {
  12379. POM_USAGE_REC pUsageRec;
  12380. POM_WORKSET pWorkset;
  12381. POM_OBJECTDATA pData;
  12382. UINT rc = 0;
  12383. DebugEntry(OM_ObjectDiscard);
  12384. UT_Lock(UTLOCK_OM);
  12385. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
  12386. &pUsageRec, &pWorkset);
  12387. pData = *ppData;
  12388. //
  12389. // Remove the object from the unused objects list:
  12390. //
  12391. RemoveFromUnusedList(pUsageRec, pData);
  12392. //
  12393. // Free the chunk containing the object, NULLing the caller's pointer at
  12394. // the same time:
  12395. //
  12396. UT_FreeRefCount((void**)ppData, FALSE);
  12397. TRACE_OUT(("Discarded object at 0x%08x in workset %u in WSG %d for Client 0x%08x",
  12398. pData, worksetID, hWSGroup, pomClient));
  12399. UT_Unlock(UTLOCK_OM);
  12400. DebugExitVOID(OM_ObjectDiscard);
  12401. }
  12402. //
  12403. // OM_GetNetworkUserID
  12404. //
  12405. UINT OM_GetNetworkUserID
  12406. (
  12407. POM_CLIENT pomClient,
  12408. OM_WSGROUP_HANDLE hWSGroup,
  12409. NET_UID * pNetUserID
  12410. )
  12411. {
  12412. POM_DOMAIN pDomain;
  12413. POM_USAGE_REC pUsageRec;
  12414. POM_WSGROUP pWSGroup;
  12415. UINT rc = 0;
  12416. DebugEntry(OM_GetNetworkUserID);
  12417. UT_Lock(UTLOCK_OM);
  12418. ValidateParams2(pomClient, hWSGroup, PRIMARY | SECONDARY,
  12419. &pUsageRec, &pWSGroup);
  12420. //
  12421. // Get a pointer to the relevant Domain:
  12422. //
  12423. pDomain = pWSGroup->pDomain;
  12424. if (pDomain->callID == OM_NO_CALL)
  12425. {
  12426. rc = OM_RC_LOCAL_WSGROUP;
  12427. DC_QUIT;
  12428. }
  12429. //
  12430. // Otherwise, everything's OK, so we fill in the caller's pointer and
  12431. // return:
  12432. //
  12433. if (pDomain->userID == 0)
  12434. {
  12435. WARNING_OUT(("Client requesting userID for Domain %u before we've attached",
  12436. pDomain->callID));
  12437. rc = OM_RC_NOT_ATTACHED;
  12438. DC_QUIT;
  12439. }
  12440. *pNetUserID = pDomain->userID;
  12441. TRACE_OUT(("Returned Network user ID (0x%08x) to Client 0x%08x for '0x%08x'",
  12442. *pNetUserID, pomClient, hWSGroup));
  12443. DC_EXIT_POINT:
  12444. UT_Unlock(UTLOCK_OM);
  12445. DebugExitDWORD(OM_GetNetworkUserID, rc);
  12446. return(rc);
  12447. }
  12448. //
  12449. // SetUpUsageRecord(...)
  12450. //
  12451. UINT SetUpUsageRecord
  12452. (
  12453. POM_CLIENT pomClient,
  12454. UINT mode,
  12455. POM_USAGE_REC * ppUsageRec,
  12456. OM_WSGROUP_HANDLE * phWSGroup
  12457. )
  12458. {
  12459. UINT rc = 0;
  12460. DebugEntry(SetUpUsageRecord);
  12461. ValidateOMS(pomClient);
  12462. //
  12463. // Find an unused workset group handle for the Client:
  12464. //
  12465. rc = FindUnusedWSGHandle(pomClient, phWSGroup);
  12466. if (rc != 0)
  12467. {
  12468. DC_QUIT;
  12469. }
  12470. //
  12471. // Client has a spare handle so create a new usage record for this
  12472. // Client's use of the workset group:
  12473. //
  12474. *ppUsageRec = (POM_USAGE_REC)UT_MallocRefCount(sizeof(OM_USAGE_REC), TRUE);
  12475. if (! *ppUsageRec)
  12476. {
  12477. rc = UT_RC_NO_MEM;
  12478. DC_QUIT;
  12479. }
  12480. SET_STAMP((*ppUsageRec), USAGEREC);
  12481. //
  12482. // Next, fill in the fields, but note that:
  12483. //
  12484. // - until the registration gets to pre-Stage1, the only way to abort it
  12485. // from the Client context is to mark the registration CB as invalid.
  12486. // To do this (e.g. in WSGroupDeregister) we need access to the
  12487. // registration CB, so we will put a pointer to it in the usage record
  12488. // below.
  12489. //
  12490. // - the <worksetOpenFlags> field is zero initially (it will be changed
  12491. // when the Client does a WorksetOpen), so we do nothing
  12492. //
  12493. // - the <wsGroupMutex> field also needs to be zero initially (the
  12494. // correct value is inserted by the hidden handler), so we leave this
  12495. // blank too.
  12496. //
  12497. (*ppUsageRec)->mode = (BYTE)mode;
  12498. COM_BasedListInit(&((*ppUsageRec)->unusedObjects));
  12499. COM_BasedListInit(&((*ppUsageRec)->objectsInUse));
  12500. //
  12501. // Put the offset to the usage record in the array of offsets:
  12502. //
  12503. pomClient->apUsageRecs[*phWSGroup] = *ppUsageRec;
  12504. TRACE_OUT(("Set up usage record for Client 0x%08x at 0x%08x (hWSGroup: %hu)",
  12505. pomClient, *ppUsageRec, *phWSGroup));
  12506. DC_EXIT_POINT:
  12507. DebugExitDWORD(SetUpUsageRecord, rc);
  12508. return(rc);
  12509. }
//
// FindUnusedWSGHandle(...)
//
// Finds a free workset group handle for the Client: an index into the
// Client's apUsageRecs array whose slot is NULL.  On success the handle
// is returned in *phWSGroup and the function returns 0; if every slot is
// in use it returns OM_RC_NO_MORE_HANDLES.
//
UINT FindUnusedWSGHandle
(
    POM_CLIENT pomClient,
    OM_WSGROUP_HANDLE * phWSGroup
)
{
    BOOL found;
    OM_WSGROUP_HANDLE hWSGroup;
    UINT rc = 0;

    DebugEntry(FindUnusedWSGHandle);

    ValidateOMS(pomClient);

    //
    // Workset group handles are indexes into an array of offsets to usage
    // records. When one of these offsets is 0, the slot is available for
    // use.
    //
    // We start our loop at 1 because 0 is never used as a workset group
    // handle.
    //
    // NOTE(review): the loop condition is < OMWSG_MAXPERCLIENT, so the
    // usable handles are 1..OMWSG_MAXPERCLIENT-1.  An earlier comment here
    // claimed the loop ended at "MAX + 1" to yield MAX handles; the code
    // does not do that - confirm against the declared size of apUsageRecs.
    //
    found = FALSE;
    for (hWSGroup = 1; hWSGroup < OMWSG_MAXPERCLIENT; hWSGroup++)
    {
        if (pomClient->apUsageRecs[hWSGroup] == NULL)
        {
            found = TRUE;
            TRACE_OUT(("Found unused workset group handle %hu for Client 0x%08x",
                hWSGroup, pomClient));

            // A free slot must not still be flagged valid.
            ASSERT(!pomClient->wsgValid[hWSGroup]);
            break;
        }
    }

    //
    // If there aren't any, quit with an error:
    //
    if (!found)
    {
        WARNING_OUT(("Client 0x%08x has no more workset group handles", pomClient));
        rc = OM_RC_NO_MORE_HANDLES;
        DC_QUIT;
    }
    else
    {
        *phWSGroup = hWSGroup;
    }

DC_EXIT_POINT:
    DebugExitDWORD(FindUnusedWSGHandle, rc);
    return(rc);
}
  12562. //
  12563. // RemoveFromUnusedList()
  12564. //
  12565. void RemoveFromUnusedList
  12566. (
  12567. POM_USAGE_REC pUsageRec,
  12568. POM_OBJECTDATA pData
  12569. )
  12570. {
  12571. POM_OBJECTDATA_LIST pListEntry;
  12572. DebugEntry(RemoveFromUnusedList);
  12573. //
  12574. // Search in the unused-objects list hung off the usage record for an
  12575. // entry whose field is the same as the offset of this object:
  12576. //
  12577. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pUsageRec->unusedObjects),
  12578. (void**)&pListEntry, FIELD_OFFSET(OM_OBJECTDATA_LIST, chain),
  12579. FIELD_OFFSET(OM_OBJECTDATA_LIST, pData), (DWORD_PTR)pData,
  12580. FIELD_SIZE(OM_OBJECTDATA_LIST, pData));
  12581. //
  12582. // This object must have been previously allocated, so it must be in the
  12583. // list. Assert failure if not:
  12584. //
  12585. ASSERT((pListEntry != NULL));
  12586. //
  12587. // Also, we check to make sure the Client hasn't set the <size> field to
  12588. // more memory than we originally allocated for the object:
  12589. //
  12590. if (pData->length != pListEntry->size)
  12591. {
  12592. ASSERT((pData->length < pListEntry->size));
  12593. TRACE_OUT(("Client has shrunk object from %u to %u bytes",
  12594. pListEntry->size, pData->length));
  12595. }
  12596. COM_BasedListRemove(&(pListEntry->chain));
  12597. UT_FreeRefCount((void**)&pListEntry, FALSE);
  12598. DebugExitVOID(RemoveFromUnusedList);
  12599. }
  12600. //
  12601. // ReleaseAllObjects(...)
  12602. //
  12603. void ReleaseAllObjects
  12604. (
  12605. POM_USAGE_REC pUsageRec,
  12606. POM_WORKSET pWorkset
  12607. )
  12608. {
  12609. DebugEntry(ReleaseAllObjects);
  12610. while (ObjectRelease(pUsageRec, pWorkset->worksetID, 0) == 0)
  12611. {
  12612. //
  12613. // Calling ObjectRelease with pObj set to NULL will cause the
  12614. // first object in the objects-in-use list which is in this workset
  12615. // to be released. When there are no more, rc will be set to
  12616. // OM_RC_OBJECT_NOT_FOUND and we will break out of our loop:
  12617. //
  12618. }
  12619. DebugExitVOID(ReleaseAllObjects);
  12620. }
//
// ReleaseAllLocks(...)
//
// Walks the Client's lock stack and releases any lock held on this
// workset, unlinking and freeing each matching lock-stack entry as it
// goes.  Object locks are not supported in this release, so at most the
// workset lock itself is found.
//
void ReleaseAllLocks
(
    POM_CLIENT pomClient,
    POM_USAGE_REC pUsageRec,
    POM_WORKSET pWorkset
)
{
    POM_LOCK pThisLock;
    POM_LOCK pTempLock;

    DebugEntry(ReleaseAllLocks);

    ValidateOMS(pomClient);

    //
    // Here we chain through the Client's lock stack and unlock any locks
    // that relate to this workset.
    //
    // Note that, since object locking is not currently supported, the if
    // statement in the loop will succeed at most once (i.e. if the workset
    // itself is locked). The code is nonetheless implemented as a loop for
    // forward compatibility. If this is deemed to be performance critical,
    // we could put a break statement in.
    //
    pThisLock = (POM_LOCK)COM_BasedListFirst(&(pomClient->locks), FIELD_OFFSET(OM_LOCK, chain));
    while (pThisLock != NULL)
    {
        //
        // Since we will remove and free the entry in the lock stack if we
        // find a match, we must chain to the next item beforehand:
        //
        pTempLock = (POM_LOCK)COM_BasedListNext(&(pomClient->locks), pThisLock, FIELD_OFFSET(OM_LOCK, chain));

        // A lock matches if it is for this workset group AND this workset.
        if ((pThisLock->pWSGroup == pUsageRec->pWSGroup) &&
            (pThisLock->worksetID == pWorkset->worksetID))
        {
            if (OBJECT_ID_IS_NULL(pThisLock->objectID)) // always TRUE in R1.1
            {
                //
                // ...we're dealing with a workset lock:
                //
                WorksetUnlock(pomClient->putTask, pUsageRec->pWSGroup, pWorkset);
            }
            else
            {
                //
                // ...this is an object lock, so call ObjectUnlock (when it's
                // supported!). In the meantime, assert:
                //
                ERROR_OUT(("Object locking not supported in R1.1!!"));
            }

            // Unlink and free the lock-stack entry itself.
            COM_BasedListRemove(&(pThisLock->chain));
            UT_FreeRefCount((void**)&pThisLock, FALSE);

            //
            // Could put the break in here for performance improvement.
            //
        }

        pThisLock = pTempLock;
    }

    DebugExitVOID(ReleaseAllLocks);
}
  12681. //
  12682. // ConfirmAll(...)
  12683. //
  12684. void ConfirmAll
  12685. (
  12686. POM_CLIENT pomClient,
  12687. POM_USAGE_REC pUsageRec,
  12688. POM_WORKSET pWorkset
  12689. )
  12690. {
  12691. POM_PENDING_OP pThisPendingOp;
  12692. POM_OBJECT pObj;
  12693. UINT rc = 0;
  12694. DebugEntry(ConfirmAll);
  12695. ValidateOMS(pomClient);
  12696. //
  12697. // To confirm all outstanding operations for this workset, we search
  12698. // the list of pending ops stored off the workset record:
  12699. //
  12700. //
  12701. // Chain through the workset's list of pending operations and confirm
  12702. // them one by one:
  12703. //
  12704. pThisPendingOp = (POM_PENDING_OP)COM_BasedListFirst(&(pWorkset->pendingOps), FIELD_OFFSET(OM_PENDING_OP, chain));
  12705. while (pThisPendingOp != NULL)
  12706. {
  12707. pObj = pThisPendingOp->pObj;
  12708. switch (pThisPendingOp->type)
  12709. {
  12710. case WORKSET_CLEAR:
  12711. {
  12712. WorksetDoClear(pomClient->putTask,
  12713. pUsageRec->pWSGroup, pWorkset, pThisPendingOp);
  12714. break;
  12715. }
  12716. case OBJECT_DELETE:
  12717. {
  12718. ObjectDoDelete(pomClient->putTask,
  12719. pUsageRec->pWSGroup, pWorkset, pObj, pThisPendingOp);
  12720. break;
  12721. }
  12722. case OBJECT_UPDATE:
  12723. {
  12724. ObjectDoUpdate(pomClient->putTask,
  12725. pUsageRec->pWSGroup, pWorkset, pObj, pThisPendingOp);
  12726. break;
  12727. }
  12728. case OBJECT_REPLACE:
  12729. {
  12730. ObjectDoReplace(pomClient->putTask,
  12731. pUsageRec->pWSGroup, pWorkset, pObj, pThisPendingOp);
  12732. break;
  12733. }
  12734. default:
  12735. {
  12736. ERROR_OUT(("Reached default case in switch statement (value: %hu)",
  12737. pThisPendingOp->type));
  12738. break;
  12739. }
  12740. }
  12741. //
  12742. // The above functions all remove the pending op from the list, so get
  12743. // the new first item
  12744. //
  12745. pThisPendingOp = (POM_PENDING_OP)COM_BasedListFirst(&(pWorkset->pendingOps), FIELD_OFFSET(OM_PENDING_OP, chain));
  12746. }
  12747. DebugExitVOID(ConfirmAll);
  12748. }
  12749. //
  12750. // DiscardAllObjects()
  12751. //
  12752. void DiscardAllObjects
  12753. (
  12754. POM_USAGE_REC pUsageRec,
  12755. POM_WORKSET pWorkset
  12756. )
  12757. {
  12758. POM_OBJECTDATA_LIST pThisEntry;
  12759. POM_OBJECTDATA_LIST pTempEntry;
  12760. POM_OBJECTDATA pData;
  12761. DebugEntry(DiscardAllObjects);
  12762. //
  12763. // Chain through the Client's list of unused objects for this workset
  12764. // group, free any unused objects which were allocated for this workset
  12765. // and remove the entry from the list:
  12766. //
  12767. pThisEntry = (POM_OBJECTDATA_LIST)COM_BasedListFirst(&(pUsageRec->unusedObjects), FIELD_OFFSET(OM_OBJECTDATA_LIST, chain));
  12768. while (pThisEntry != NULL)
  12769. {
  12770. //
  12771. // Since we may be removing and freeing items from the list, we must
  12772. // set up a pointer to the next link in the chain before proceeding:
  12773. //
  12774. pTempEntry = (POM_OBJECTDATA_LIST)COM_BasedListNext(&(pUsageRec->unusedObjects), pThisEntry, FIELD_OFFSET(OM_OBJECTDATA_LIST, chain));
  12775. if (pThisEntry->worksetID == pWorkset->worksetID)
  12776. {
  12777. //
  12778. // OK, this entry in the list is for an object allocated for this
  12779. // workset, so find the object...
  12780. //
  12781. pData = pThisEntry->pData;
  12782. if (!pData)
  12783. {
  12784. ERROR_OUT(("DiscardAllObjects: object 0x%08x has no data", pThisEntry));
  12785. }
  12786. else
  12787. {
  12788. ValidateObjectData(pData);
  12789. //
  12790. // ...free it...
  12791. //
  12792. TRACE_OUT(("Discarding object at 0x%08x", pData));
  12793. UT_FreeRefCount((void**)&pData, FALSE);
  12794. }
  12795. //
  12796. // ...and remove the entry from the list:
  12797. //
  12798. COM_BasedListRemove(&(pThisEntry->chain));
  12799. UT_FreeRefCount((void**)&pThisEntry, FALSE);
  12800. }
  12801. pThisEntry = pTempEntry;
  12802. }
  12803. DebugExitVOID(DiscardAllObjects);
  12804. }
//
// ObjectRelease(...)
//
// Releases the Client's hold on an object's data chunk.  If <pObj> is
// NULL, the first entry in the objects-in-use list belonging to
// <worksetID> is released instead.  Returns 0 on success, or
// OM_RC_OBJECT_NOT_FOUND if no matching objects-in-use entry exists.
//
UINT ObjectRelease
(
    POM_USAGE_REC pUsageRec,
    OM_WORKSET_ID worksetID,
    POM_OBJECT pObj
)
{
    POM_OBJECT_LIST pListEntry;
    POM_OBJECTDATA pData;
    UINT rc = 0;

    DebugEntry(ObjectRelease);

    if (pObj == NULL)
    {
        //
        // If <pObj> is NULL, our caller wants us to release the first
        // object in the objects-in-use list which is in the specified
        // workset:
        //
        COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pUsageRec->objectsInUse),
            (void**)&pListEntry, FIELD_OFFSET(OM_OBJECT_LIST, chain),
            FIELD_OFFSET(OM_OBJECT_LIST, worksetID), (DWORD)worksetID,
            FIELD_SIZE(OM_OBJECT_LIST, worksetID));
    }
    else
    {
        //
        // Otherwise, we do the lookup based on the object handle passed in:
        //
        // Note: since object handles are unique across worksets, we can just
        // do a match on the handle. If the implementation of object handles
        // changes and they become specific to a workset and not globally
        // valid within a machine, we will need to do a double match here.
        //
        COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pUsageRec->objectsInUse),
            (void**)&pListEntry, FIELD_OFFSET(OM_OBJECT_LIST, chain),
            FIELD_OFFSET(OM_OBJECT_LIST, pObj), (DWORD_PTR)pObj,
            FIELD_SIZE(OM_OBJECT_LIST, pObj));
    }

    //
    // If we didn't find a relevant list entry, set rc and quit:
    //
    // NOTE(review): this relies on COM_BasedListFind setting pListEntry to
    // NULL when no match is found, since pListEntry is not initialized
    // locally - confirm against COM_BasedListFind's contract.
    //
    if (pListEntry == NULL)
    {
        rc = OM_RC_OBJECT_NOT_FOUND;
        DC_QUIT;
    }

    //
    // Now set pObj (will be a no-op if it wasn't originally NULL):
    //
    ASSERT((pListEntry->worksetID == worksetID));
    pObj = pListEntry->pObj;
    ValidateObject(pObj);

    pData = pObj->pData;
    if (!pData)
    {
        ERROR_OUT(("ObjectRelease: object 0x%08x has no data", pObj));
    }
    else
    {
        ValidateObjectData(pData);

        //
        // Decrement use count of memory chunk holding object:
        //
        UT_FreeRefCount((void**)&pData, FALSE);
    }

    //
    // Remove the entry for this object from the objects-in-use list:
    //
    COM_BasedListRemove(&(pListEntry->chain));
    UT_FreeRefCount((void**)&pListEntry, FALSE);

DC_EXIT_POINT:
    DebugExitDWORD(ObjectRelease, rc);
    return(rc);
}
  12882. //
  12883. // WorksetClearPending(...)
  12884. //
  12885. BOOL WorksetClearPending
  12886. (
  12887. POM_WORKSET pWorkset,
  12888. POM_OBJECT pObj
  12889. )
  12890. {
  12891. POM_PENDING_OP pPendingOp;
  12892. BOOL rc = FALSE;
  12893. DebugEntry(WorksetClearPending);
  12894. //
  12895. // Try to find a pending workset clear for the given workset.
  12896. //
  12897. // N.B. We can't use FindPendingOp because we may want to check more
  12898. // than just the first pending workset clear.
  12899. //
  12900. pPendingOp = (POM_PENDING_OP)COM_BasedListFirst(&(pWorkset->pendingOps), FIELD_OFFSET(OM_PENDING_OP, chain));
  12901. while (pPendingOp != NULL)
  12902. {
  12903. if (pPendingOp->type == WORKSET_CLEAR)
  12904. {
  12905. ValidateObject(pObj);
  12906. //
  12907. // Check that this clear affects the given object
  12908. //
  12909. if (STAMP_IS_LOWER(pObj->addStamp, pPendingOp->seqStamp))
  12910. {
  12911. TRACE_OUT(("Clear pending which affects object 0x%08x", pObj));
  12912. rc = TRUE;
  12913. DC_QUIT;
  12914. }
  12915. else
  12916. {
  12917. TRACE_OUT(("Clear pending but doesn't affect object 0x%08x", pObj));
  12918. }
  12919. }
  12920. //
  12921. // On to the next pending op...
  12922. //
  12923. pPendingOp = (POM_PENDING_OP)COM_BasedListNext(&(pWorkset->pendingOps), pPendingOp, FIELD_OFFSET(OM_PENDING_OP, chain));
  12924. }
  12925. DC_EXIT_POINT:
  12926. DebugExitDWORD(WorksetClearPending, rc);
  12927. return(rc);
  12928. }
  12929. //
  12930. // ProcessWorksetNew(...)
  12931. //
  12932. UINT ProcessWorksetNew
  12933. (
  12934. PUT_CLIENT putClient,
  12935. POMNET_OPERATION_PKT pPacket,
  12936. POM_WSGROUP pWSGroup
  12937. )
  12938. {
  12939. POM_DOMAIN pDomain;
  12940. POM_WORKSET pWorkset;
  12941. OM_WORKSET_ID worksetID;
  12942. UINT rc = 0;
  12943. DebugEntry(ProcessWorksetNew);
  12944. worksetID = pPacket->worksetID;
  12945. TRACE_OUT(("Creating workset %u in WSG %d", worksetID, pWSGroup->wsg));
  12946. //
  12947. // Allocate some memory for the workset record:
  12948. //
  12949. pWorkset = (POM_WORKSET)UT_MallocRefCount(sizeof(OM_WORKSET), TRUE);
  12950. if (!pWorkset)
  12951. {
  12952. rc = UT_RC_NO_MEM;
  12953. DC_QUIT;
  12954. }
  12955. //
  12956. // Fill in the fields (this chunk is taken from a huge block so we have
  12957. // to set it to zero explicitly):
  12958. //
  12959. // Note: the <position> and <flags> fields of the packet hold a
  12960. // two-byte quantity representing the network priority for the workset.
  12961. //
  12962. SET_STAMP(pWorkset, WORKSET);
  12963. pWorkset->priority = *((NET_PRIORITY *) &(pPacket->position));
  12964. pWorkset->fTemp = *((BOOL *) &(pPacket->objectID));
  12965. pWorkset->worksetID = worksetID;
  12966. pWorkset->lockState = UNLOCKED;
  12967. pWorkset->lockedBy = 0;
  12968. pWorkset->lockCount = 0;
  12969. COM_BasedListInit(&(pWorkset->objects));
  12970. COM_BasedListInit(&(pWorkset->clients));
  12971. COM_BasedListInit(&(pWorkset->pendingOps));
  12972. if (pPacket->header.messageType == OMNET_WORKSET_CATCHUP)
  12973. {
  12974. //
  12975. // For a WORKSET_CATCHUP message, the <userID> field of the
  12976. // <seqStamp> field in the message holds the user ID of the node
  12977. // which holds the workset lock, if it is locked.
  12978. //
  12979. if (pPacket->seqStamp.userID != 0)
  12980. {
  12981. //
  12982. // If the <userID> field is the same as our user ID, then the
  12983. // remote node must think that we've got the workset locked -
  12984. // but we're just catching up, so something is wrong:
  12985. //
  12986. pDomain = pWSGroup->pDomain;
  12987. ASSERT((pPacket->seqStamp.userID != pDomain->userID));
  12988. pWorkset->lockState = LOCK_GRANTED;
  12989. pWorkset->lockedBy = pPacket->seqStamp.userID;
  12990. pWorkset->lockCount = 0;
  12991. TRACE_OUT(("Catching up with workset %u in WSG %d while locked by %hu",
  12992. worksetID, pWSGroup->wsg, pWorkset->lockedBy));
  12993. }
  12994. //
  12995. // In addition, the current generation number for the workset is
  12996. // held in the <genNumber> field of the <seqStamp> field in the
  12997. // message:
  12998. //
  12999. pWorkset->genNumber = pPacket->seqStamp.genNumber;
  13000. }
  13001. //
  13002. // Find the offset within OMWORKSETS of the workset record and put it
  13003. // in the array of offsets in the workset group record:
  13004. //
  13005. pWSGroup->apWorksets[worksetID] = pWorkset;
  13006. //
  13007. // Post a WORKSET_NEW event to all Clients registered with the workset
  13008. // group:
  13009. //
  13010. WSGroupEventPost(putClient,
  13011. pWSGroup,
  13012. PRIMARY | SECONDARY,
  13013. OM_WORKSET_NEW_IND,
  13014. worksetID,
  13015. 0);
  13016. TRACE_OUT(("Processed WORKSET_NEW for workset ID %hu in WSG %d",
  13017. worksetID, pWSGroup->wsg));
  13018. DC_EXIT_POINT:
  13019. if (rc != 0)
  13020. {
  13021. ERROR_OUT(("ERROR %d creating workset %u in workset group '%s'",
  13022. rc, worksetID, pWSGroup->wsg));
  13023. if (pWorkset != NULL)
  13024. {
  13025. UT_FreeRefCount((void**)&pWorkset, FALSE);
  13026. }
  13027. pWSGroup->apWorksets[worksetID] = NULL;
  13028. }
  13029. DebugExitDWORD(ProcessWorksetNew, rc);
  13030. return(rc);
  13031. }
//
// ProcessWorksetClear(...)
//
// Processes a WORKSET_CLEAR operation (local or remote): updates the
// workset's generation number and clear stamp, queues a pending-op CB,
// and posts OM_WORKSET_CLEAR_IND to local primary Clients.  The Clear is
// spoiled (dropped) if a later Clear has already been issued.  If no
// local primary Client has the workset open, the clear is performed
// immediately since no ClearConfirm will ever arrive.
//
// Returns 0 on success or UT_RC_NO_MEM.
//
UINT ProcessWorksetClear
(
    PUT_CLIENT putClient,
    POM_PRIMARY pomPrimary,
    POMNET_OPERATION_PKT pPacket,
    POM_WSGROUP pWSGroup,
    POM_WORKSET pWorkset
)
{
    POM_PENDING_OP pPendingOp = NULL;
    UINT numPosts;
    UINT rc = 0;

    DebugEntry(ProcessWorksetClear);

    //
    // Update the workset generation number:
    //
    UpdateWorksetGeneration(pWorkset, pPacket);

    //
    // See if this Clear operation can be spoiled (it will be spoiled if
    // another Clear operation with a later sequence stamp has already been
    // issued):
    //
    if (STAMP_IS_LOWER(pPacket->seqStamp, pWorkset->clearStamp))
    {
        TRACE_OUT(("Spoiling Clear with stamp 0x%08x:0x%08x ('previous': 0x%08x:0x%08x)",
            pPacket->seqStamp.userID, pPacket->seqStamp.genNumber,
            pWorkset->clearStamp.userID, pWorkset->clearStamp.genNumber));
        DC_QUIT;
    }

    //
    // Update the workset clear stamp:
    //
    COPY_SEQ_STAMP(pWorkset->clearStamp, pPacket->seqStamp);

    //
    // Now create a pending op CB to add to the list:
    //
    // Note: even if there is another Clear outstanding for the workset,
    // we go ahead and put this one in the list and post another event
    // to the Client. If we didn't, then we would expose ourselves
    // to the following situation:
    //
    // 1. Clear issued
    // 1a. Clear indication recd
    // 2. Object added
    // 3. Delete issued
    // 3a. Delete indication recd - not filtered because unaffected
    // by pending clear
    // 4. Clear issued again - "takes over" previous Clear
    // 5. Clear confirmed - causes object added in 2 to be deleted
    // 6. Delete confirmed - assert because the delete WAS affected
    // by the second clear which "took over" earlier one.
    //
    // A Client can still cause an assert by juggling the events and
    // confirms, but we don't care because you're not supposed to
    // reorder ObMan events in any case.
    //
    pPendingOp = (POM_PENDING_OP)UT_MallocRefCount(sizeof(OM_PENDING_OP), FALSE);
    if (!pPendingOp)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }

    // Fill in the pending op: a WORKSET_CLEAR has no associated object
    // or data, only the sequence stamp of the clear.
    SET_STAMP(pPendingOp, PENDINGOP);
    pPendingOp->pObj = 0;
    pPendingOp->pData = NULL;
    pPendingOp->type = WORKSET_CLEAR;
    COPY_SEQ_STAMP(pPendingOp->seqStamp, pPacket->seqStamp);

    COM_BasedListInsertBefore(&(pWorkset->pendingOps), &(pPendingOp->chain));

    //
    // Post a workset clear indication event to the Client:
    //
    numPosts = WorksetEventPost(putClient,
        pWorkset,
        PRIMARY,
        OM_WORKSET_CLEAR_IND,
        0);

    //
    // If there are no primaries present, then we won't be getting any
    // ClearConfirms, so we do it now:
    //
    if (numPosts == 0)
    {
        TRACE_OUT(("No local primary Client has workset %u in WSG %d open - clearing",
            pWorkset->worksetID, pWSGroup->wsg));
        WorksetDoClear(putClient, pWSGroup, pWorkset, pPendingOp);
    }

    TRACE_OUT(("Processed WORKSET_CLEAR for workset %u in WSG %d",
        pWorkset->worksetID, pWSGroup->wsg));

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(("ERROR %d processing clear for workset %u in WSG %d",
            rc, pWorkset->worksetID, pWSGroup->wsg));

        if (pPendingOp != NULL)
        {
            UT_FreeRefCount((void**)&pPendingOp, FALSE);
        }
    }

    DebugExitDWORD(ProcessWorksetClear, rc);
    return(rc);
}
//
// ProcessObjectAdd(...)
//
// Processes an OBJECT_ADD or OBJECT_CATCHUP message: allocates and fills
// in an object record, inserts it into the workset, and (unless the
// object is a catchup of an already-deleted object, or the Add is
// spoiled by a later workset Clear) posts OM_OBJECT_ADD_IND to local
// Clients.  The new object record is returned through ppObj.
//
// Note: pData may be NULL only for a catchup of a deleted object.
//
// Returns 0 on success or UT_RC_NO_MEM.
//
UINT ProcessObjectAdd
(
    PUT_CLIENT putTask,
    POMNET_OPERATION_PKT pPacket,
    POM_WSGROUP pWSGroup,
    POM_WORKSET pWorkset,
    POM_OBJECTDATA pData,
    POM_OBJECT * ppObj
)
{
    POM_OBJECT pObj;
    UINT rc = 0;

    DebugEntry(ProcessObjectAdd);

    //
    // Update the workset generation number:
    //
    UpdateWorksetGeneration(pWorkset, pPacket);

    //
    // Create a new record for the object:
    //
    //
    // Allocate memory for the object record:
    //
    *ppObj = (POM_OBJECT)UT_MallocRefCount(sizeof(OM_OBJECT), FALSE);
    if (! *ppObj)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    pObj = *ppObj;

    //
    // Fill in the fields (remember, pData will be NULL if this is a
    // catchup for a deleted object):
    //
    SET_STAMP(pObj, OBJECT);
    pObj->updateSize = pPacket->updateSize;
    pObj->pData = pData;
    memcpy(&(pObj->objectID), &(pPacket->objectID), sizeof(OM_OBJECT_ID));

    //
    // How to set to the <flags> field and the sequence stamps depends on
    // whether this is a CATCHUP:
    //
    if (pPacket->header.messageType == OMNET_OBJECT_CATCHUP)
    {
        // Catchup: take each stamp (and the flags, which may include
        // DELETED) straight from the remote node's packet.
        COPY_SEQ_STAMP(pObj->addStamp, pPacket->seqStamp);
        COPY_SEQ_STAMP(pObj->positionStamp, pPacket->positionStamp);
        COPY_SEQ_STAMP(pObj->updateStamp, pPacket->updateStamp);
        COPY_SEQ_STAMP(pObj->replaceStamp, pPacket->replaceStamp);
        pObj->flags = pPacket->flags;
    }
    else
    {
        // Fresh Add: all stamps start at the Add's own sequence stamp.
        COPY_SEQ_STAMP(pObj->addStamp, pPacket->seqStamp);
        COPY_SEQ_STAMP(pObj->positionStamp, pPacket->seqStamp);
        COPY_SEQ_STAMP(pObj->updateStamp, pPacket->seqStamp);
        COPY_SEQ_STAMP(pObj->replaceStamp, pPacket->seqStamp);
        pObj->flags = 0;
    }

    //
    // The following fields are not filled in since they are handled
    // by ObjectInsert, when the object is actually inserted into the
    // workset:
    //
    // - chain
    // - position
    //
    //
    // Insert the object into the workset:
    //
    ObjectInsert(pWorkset, pObj, pPacket->position);

    //
    // If the object has been deleted (which will only happen for a Catchup
    // of a deleted object), we don't need to do anything else, so just
    // quit:
    //
    if (pObj->flags & DELETED)
    {
        ASSERT((pPacket->header.messageType == OMNET_OBJECT_CATCHUP));
        TRACE_OUT(("Processing Catchup for deleted object (ID: 0x%08x:0x%08x)",
            pObj->objectID.creator, pObj->objectID.sequence));
        DC_QUIT;
    }

    //
    // Otherwise, we continue...
    //
    // Increment the numObjects field:
    //
    // (we don't do this inside ObjectInsert since that's called when moving
    // objects also)
    //
    pWorkset->numObjects++;
    TRACE_OUT(("Number of objects in workset %u in WSG %d is now %u",
        pWorkset->worksetID, pWSGroup->wsg, pWorkset->numObjects));

    //
    // See if this Add can be spoiled (it is spoilable if the workset has
    // been cleared since the Add was issued):
    //
    // Note: even if the Add is to be spoiled, we must create a record for
    // it and insert it in the workset, for the same reason that we keep
    // records of deleted objects in the workset (i.e. to differentiate
    // between operations which are for deleted objects and those which are
    // for objects not yet arrived).
    //
    if (STAMP_IS_LOWER(pPacket->seqStamp, pWorkset->clearStamp))
    {
        TRACE_OUT(("Spoiling Add with stamp 0x%08x:0x%08x (workset cleared at 0x%08x:0x%08x)",
            pPacket->seqStamp.userID, pPacket->seqStamp.genNumber,
            pWorkset->clearStamp.userID, pWorkset->clearStamp.genNumber));

        //
        // We "spoil" an Add by simply deleting it:
        //
        ObjectDoDelete(putTask, pWSGroup, pWorkset, pObj, NULL);
        DC_QUIT;
    }

    //
    // Post an add indication to all local Clients with the workset open:
    //
    WorksetEventPost(putTask,
        pWorkset,
        PRIMARY | SECONDARY,
        OM_OBJECT_ADD_IND,
        pObj);

    TRACE_OUT(("Added object to workset %u in WSG %d (handle: 0x%08x - ID: 0x%08x:0x%08x)",
        pWorkset->worksetID, pWSGroup->wsg, pObj,
        pObj->objectID.creator, pObj->objectID.sequence));
    TRACE_OUT((" position: %s - data at 0x%08x - size: %u - update size: %u",
        pPacket->position == LAST ? "LAST" : "FIRST", pData,
        pData->length, pPacket->updateSize));

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(("Error 0x%08x processing Add message", rc));
    }

    DebugExitDWORD(ProcessObjectAdd, rc);
    return(rc);
}
//
// ProcessObjectMove(...)
//
// Processes an OBJECT_MOVE operation: re-positions the object within the
// workset (to FIRST or LAST, per the packet's <position> field), updates
// its position stamp, and posts OM_OBJECT_MOVE_IND to local Clients.
// The move is spoiled (dropped) if the object has already been moved by
// a later-stamped operation.
//
void ProcessObjectMove
(
    PUT_CLIENT putTask,
    POMNET_OPERATION_PKT pPacket,
    POM_WORKSET pWorkset,
    POM_OBJECT pObj
)
{
    DebugEntry(ProcessObjectMove);

    //
    // Update the workset generation number:
    //
    UpdateWorksetGeneration(pWorkset, pPacket);

    //
    // See if we can spoil this move:
    //
    if (STAMP_IS_LOWER(pPacket->seqStamp, pObj->positionStamp))
    {
        TRACE_OUT(("Spoiling Move with stamp 0x%08x:0x%08x ('previous': 0x%08x:0x%08x)",
            pPacket->seqStamp.userID,
            pPacket->seqStamp.genNumber,
            pObj->positionStamp.userID,
            pObj->positionStamp.genNumber));
        DC_QUIT;
    }

    //
    // Moving an object in a workset involves
    //
    // 1. removing the object from its current position in the workset,
    //
    // 2. setting its position stamp to the new value
    //
    // 3. inserting it at its new position.
    //
    COM_BasedListRemove(&(pObj->chain));
    COPY_SEQ_STAMP(pObj->positionStamp, pPacket->seqStamp);
    ObjectInsert(pWorkset, pObj, pPacket->position);

    //
    // Post an indication to all local Clients with the workset open:
    //
    WorksetEventPost(putTask,
        pWorkset,
        PRIMARY | SECONDARY,
        OM_OBJECT_MOVE_IND,
        pObj);

DC_EXIT_POINT:
    //
    // NOTE(review): this trace is after the exit point, so it also fires
    // (claiming "Moved object") when the move was spoiled above.
    //
    TRACE_OUT(("Moved object 0x%08x to %s of workset %u",
        pObj, (pPacket->position == LAST ? "end" : "start"),
        pWorkset->worksetID));

    DebugExitVOID(ProcessObjectMove);
}
  13328. //
  13329. // ProcessObjectDRU(...)
  13330. //
  13331. UINT ProcessObjectDRU
  13332. (
  13333. PUT_CLIENT putTask,
  13334. POMNET_OPERATION_PKT pPacket,
  13335. POM_WSGROUP pWSGroup,
  13336. POM_WORKSET pWorkset,
  13337. POM_OBJECT pObj,
  13338. POM_OBJECTDATA pData
  13339. )
  13340. {
  13341. UINT numPosts;
  13342. POM_PENDING_OP pPendingOp = NULL;
  13343. POM_OBJECTDATA pPrevData;
  13344. UINT event = 0; // event to post to Client
  13345. OM_OPERATION_TYPE type = 0; // type for pendingOp struct
  13346. POM_SEQUENCE_STAMP pSeqStamp = NULL; // sequence stamp to update
  13347. void (* fnObjectDoAction)(PUT_CLIENT, POM_WSGROUP, POM_WORKSET,
  13348. POM_OBJECT,
  13349. POM_PENDING_OP) = NULL;
  13350. UINT rc = 0;
  13351. DebugEntry(ProcessObjectDRU);
  13352. //
  13353. // Set up the type variables:
  13354. //
  13355. switch (pPacket->header.messageType)
  13356. {
  13357. case OMNET_OBJECT_DELETE:
  13358. event = OM_OBJECT_DELETE_IND;
  13359. type = OBJECT_DELETE;
  13360. pSeqStamp = NULL;
  13361. fnObjectDoAction = ObjectDoDelete;
  13362. break;
  13363. case OMNET_OBJECT_REPLACE:
  13364. event = OM_OBJECT_REPLACE_IND;
  13365. type = OBJECT_REPLACE;
  13366. pSeqStamp = &(pObj->replaceStamp);
  13367. fnObjectDoAction = ObjectDoReplace;
  13368. break;
  13369. case OMNET_OBJECT_UPDATE:
  13370. event = OM_OBJECT_UPDATE_IND;
  13371. type = OBJECT_UPDATE;
  13372. pSeqStamp = &(pObj->updateStamp);
  13373. fnObjectDoAction = ObjectDoUpdate;
  13374. break;
  13375. default:
  13376. ERROR_OUT(("Reached default case in switch statement (value: %hu)",
  13377. pPacket->header.messageType));
  13378. break;
  13379. }
  13380. //
  13381. // Update the workset generation number:
  13382. //
  13383. UpdateWorksetGeneration(pWorkset, pPacket);
  13384. //
  13385. // Now do some spoiling checks, unless the object is a Delete (Deletes
  13386. // can't be spoiled):
  13387. //
  13388. if (type != OBJECT_DELETE)
  13389. {
  13390. ASSERT(((pSeqStamp != NULL) && (pData != NULL)));
  13391. //
  13392. // The first check is to see if this operation can be spoiled. It
  13393. // will be spoilable if the object has been updated/replaced since
  13394. // the operation took place. Since this function is called
  13395. // synchronously for a local Update/Replace, this will only event
  13396. // happen when a remote Update/Replace arrives "too late".
  13397. //
  13398. // The way we check is to compare the current stamp for the object
  13399. // with the stamp for the operation:
  13400. //
  13401. if (STAMP_IS_LOWER(pPacket->seqStamp, *pSeqStamp))
  13402. {
  13403. TRACE_OUT(("Spoiling with stamp 0x%08x:0x%08x ('previous': 0x%08x:0x%08x)",
  13404. pPacket->seqStamp.userID, pPacket->seqStamp.genNumber,
  13405. (*pSeqStamp).userID, (*pSeqStamp).genNumber));
  13406. UT_FreeRefCount((void**)&pData, FALSE);
  13407. DC_QUIT;
  13408. }
  13409. //
  13410. // Update whichever of the object's stamps is involved by copying
  13411. // in the stamp from the packet:
  13412. //
  13413. COPY_SEQ_STAMP(*pSeqStamp, pPacket->seqStamp);
  13414. //
  13415. // The second check is to see if this operation spoils a previous
  13416. // one. This will happen when a Client does two updates or two
  13417. // replaces in quick succession i.e. does the second
  13418. // update/replace before confirming the first.
  13419. //
  13420. // In this case, we "spoil" the previous operation by removing the
  13421. // previous pending op from the pending op list and inserting this
  13422. // one instead. Note that we do NOT post another event, as to do
  13423. // so without adding net a new pending op would cause the Client to
  13424. // assert on its second call to Confirm().
  13425. //
  13426. // Note: although in general a Replace will spoil a previous
  13427. // Update, it cannot do so in this case because if there is
  13428. // an Update outstanding, the Client will call UpdateConfirm
  13429. // so we must leave the Update pending and post a Replace
  13430. // event also.
  13431. //
  13432. FindPendingOp(pWorkset, pObj, type, &pPendingOp);
  13433. if (pPendingOp != NULL)
  13434. {
  13435. //
  13436. // OK, there is an operation of this type already outstanding
  13437. // for this object. So, we change the entry in the pending op
  13438. // list to refer to this operation instead. Before doing so,
  13439. // however, we must free up the chunk holding the previous
  13440. // (superceded) update/replace:
  13441. //
  13442. pPrevData = pPendingOp->pData;
  13443. if (pPrevData != NULL)
  13444. {
  13445. UT_FreeRefCount((void**)&pPrevData, FALSE);
  13446. }
  13447. //
  13448. // Now put the reference to the new update/replace in the
  13449. // pending op:
  13450. //
  13451. pPendingOp->pData = pData;
  13452. COPY_SEQ_STAMP(pPendingOp->seqStamp, pPacket->seqStamp);
  13453. //
  13454. // The rest of this function inserts the pending op in the
  13455. // list, posts an event to local Client and performs the op if
  13456. // there are none. We know that
  13457. //
  13458. // - the op is in the list
  13459. //
  13460. // - there is an event outstanding because we found a pending
  13461. // op in the list
  13462. //
  13463. // - there are local Clients, for the same reason.
  13464. //
  13465. // Therefore, just quit:
  13466. //
  13467. DC_QUIT;
  13468. }
  13469. else
  13470. {
  13471. //
  13472. // No outstanding operation of this type for this object, so do
  13473. // nothing here and fall through to the standard processing:
  13474. //
  13475. }
  13476. }
  13477. else
  13478. {
  13479. //
  13480. // Sanity check:
  13481. //
  13482. ASSERT((pData == NULL));
  13483. pObj->flags |= PENDING_DELETE;
  13484. }
  13485. //
  13486. // Add this operation to the workset's pending operation list:
  13487. //
  13488. pPendingOp = (POM_PENDING_OP)UT_MallocRefCount(sizeof(OM_PENDING_OP), FALSE);
  13489. if (!pPendingOp)
  13490. {
  13491. rc = UT_RC_NO_MEM;
  13492. DC_QUIT;
  13493. }
  13494. SET_STAMP(pPendingOp, PENDINGOP);
  13495. pPendingOp->type = type;
  13496. pPendingOp->pData = pData;
  13497. pPendingOp->pObj = pObj;
  13498. COPY_SEQ_STAMP(pPendingOp->seqStamp, pPacket->seqStamp);
  13499. TRACE_OUT(("Inserting %d in pending op list for workset %u", type,
  13500. pWorkset->worksetID));
  13501. COM_BasedListInsertBefore(&(pWorkset->pendingOps), &(pPendingOp->chain));
  13502. //
  13503. // Post an indication to all local Clients with the workset open:
  13504. //
  13505. numPosts = WorksetEventPost(putTask,
  13506. pWorkset,
  13507. PRIMARY,
  13508. event,
  13509. pObj);
  13510. //
  13511. // If no one has the workset open, we won't be getting any
  13512. // DeleteConfirms, so we'd better do the delete straight away:
  13513. //
  13514. if (numPosts == 0)
  13515. {
  13516. TRACE_OUT(("Workset %hu in WSG %d not open: performing %d immediately",
  13517. pWorkset->worksetID, pWSGroup->wsg, type));
  13518. fnObjectDoAction(putTask, pWSGroup, pWorkset, pObj, pPendingOp);
  13519. }
  13520. TRACE_OUT(("Processed %d message for object 0x%08x in workset %u in WSG %d",
  13521. type, pObj, pWorkset->worksetID, pWSGroup->wsg));
  13522. DC_EXIT_POINT:
  13523. if (rc != 0)
  13524. {
  13525. //
  13526. // Cleanup:
  13527. //
  13528. ERROR_OUT(("ERROR %d processing WSG %d message", rc, pWSGroup->wsg));
  13529. if (pPendingOp != NULL)
  13530. {
  13531. UT_FreeRefCount((void**)&pPendingOp, FALSE);
  13532. }
  13533. }
  13534. DebugExitDWORD(ProcessObjectDRU, rc);
  13535. return(rc);
  13536. }
  13537. //
  13538. // ObjectInsert(...)
  13539. //
  13540. void ObjectInsert
  13541. (
  13542. POM_WORKSET pWorkset,
  13543. POM_OBJECT pObj,
  13544. OM_POSITION position
  13545. )
  13546. {
  13547. POM_OBJECT pObjTemp;
  13548. PBASEDLIST pChain;
  13549. DebugEntry(ObjectInsert);
  13550. //
  13551. // The algorithm for inserting an object at the start (end) of a workset
  13552. // is as follows:
  13553. //
  13554. // - search forward (back) from the first (last) object until one of the
  13555. // following happens:
  13556. //
  13557. // - we find an object which does not have FIRST (LAST) as a position
  13558. // stamp
  13559. //
  13560. // - we find an object which has a lower (lower) position stamp.
  13561. //
  13562. // - we reach the root of the list of objects in the workset
  13563. //
  13564. // - insert the new object before (after) this object.
  13565. //
  13566. switch (position)
  13567. {
  13568. case FIRST:
  13569. {
  13570. pObjTemp = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  13571. while (pObjTemp != NULL)
  13572. {
  13573. ValidateObject(pObjTemp);
  13574. if ((pObjTemp->position != position) ||
  13575. (STAMP_IS_LOWER(pObjTemp->positionStamp,
  13576. pObj->positionStamp)))
  13577. {
  13578. break;
  13579. }
  13580. pObjTemp = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObjTemp, FIELD_OFFSET(OM_OBJECT, chain));
  13581. }
  13582. break;
  13583. }
  13584. case LAST:
  13585. {
  13586. pObjTemp = (POM_OBJECT)COM_BasedListLast(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  13587. while (pObjTemp != NULL)
  13588. {
  13589. ValidateObject(pObjTemp);
  13590. if ((pObjTemp->position != position) ||
  13591. (STAMP_IS_LOWER(pObjTemp->positionStamp,
  13592. pObj->positionStamp)))
  13593. {
  13594. break;
  13595. }
  13596. pObjTemp = (POM_OBJECT)COM_BasedListPrev(&(pWorkset->objects), pObjTemp, FIELD_OFFSET(OM_OBJECT, chain));
  13597. }
  13598. break;
  13599. }
  13600. default:
  13601. {
  13602. ERROR_OUT(("Reached default case in switch (position: %hu)", position));
  13603. break;
  13604. }
  13605. }
  13606. //
  13607. // OK, we've found the correct position for the object. If we reached
  13608. // the end (start) of the workset, then we want to insert the object
  13609. // before (after) the root, so we set up pChain accordingly:
  13610. //
  13611. if (pObjTemp == NULL)
  13612. {
  13613. pChain = &(pWorkset->objects);
  13614. TRACE_OUT(("Inserting object into workset %u as the %s object",
  13615. pWorkset->worksetID, position == LAST ? "last" : "first"));
  13616. }
  13617. else
  13618. {
  13619. pChain = &(pObjTemp->chain);
  13620. TRACE_OUT(("Inserting object into workset %u %s object "
  13621. "with record at 0x%08x (position stamp: 0x%08x:0x%08x)",
  13622. pWorkset->worksetID,
  13623. (position == LAST ? "after" : "before"),
  13624. pObjTemp, pObjTemp->objectID.creator,
  13625. pObjTemp->objectID.sequence));
  13626. }
  13627. //
  13628. // Now insert the object, either before or after the position we
  13629. // determined above:
  13630. //
  13631. if (position == FIRST)
  13632. {
  13633. COM_BasedListInsertBefore(pChain, &(pObj->chain));
  13634. }
  13635. else
  13636. {
  13637. COM_BasedListInsertAfter(pChain, &(pObj->chain));
  13638. }
  13639. pObj->position = position;
  13640. //
  13641. // Now do a debug-only check to ensure correct order of objects:
  13642. //
  13643. CheckObjectOrder(pWorkset);
  13644. DebugExitVOID(ObjectInsert);
  13645. }
  13646. //
  13647. // ObjectDoDelete(...)
  13648. //
  13649. void ObjectDoDelete
  13650. (
  13651. PUT_CLIENT putTask,
  13652. POM_WSGROUP pWSGroup,
  13653. POM_WORKSET pWorkset,
  13654. POM_OBJECT pObj,
  13655. POM_PENDING_OP pPendingOp
  13656. )
  13657. {
  13658. POM_DOMAIN pDomain;
  13659. DebugEntry(ObjectDoDelete);
  13660. //
  13661. // We should never be called for an object that's already been deleted:
  13662. //
  13663. ValidateObject(pObj);
  13664. ASSERT(!(pObj->flags & DELETED));
  13665. //
  13666. // Derive a pointer to the object itself and then free it:
  13667. //
  13668. if (!pObj->pData)
  13669. {
  13670. ERROR_OUT(("ObjectDoDelete: object 0x%08x has no data", pObj));
  13671. }
  13672. else
  13673. {
  13674. ValidateObjectData(pObj->pData);
  13675. UT_FreeRefCount((void**)&pObj->pData, FALSE);
  13676. }
  13677. //
  13678. // Set the deleted flag in the object record:
  13679. //
  13680. // (note that we don't delete the object record entirely as we need to
  13681. // keep track of deleted objects so that when we get operations from the
  13682. // network for objects not in the workset, we can differentiate between
  13683. // operations on objects
  13684. //
  13685. // - that haven't yet been added at this node (we keep these operations
  13686. // and perform them later) and
  13687. //
  13688. // - that have been deleted (we throw these operations away).
  13689. //
  13690. // A slight space optimisation would be to store the IDs of deleted
  13691. // objects in a separate list, since we don't need any of the other
  13692. // fields in the record.
  13693. //
  13694. pObj->flags |= DELETED;
  13695. pObj->flags &= ~PENDING_DELETE;
  13696. //
  13697. // Remove the pending op from the list, if the pointer passed in is
  13698. // valid (it won't be if we're called from WorksetDoClear, since those
  13699. // deletes have not been "pending").
  13700. //
  13701. // In addition, if pPendingOp is not NULL, we post the DELETED event to
  13702. // registered secondaries:
  13703. //
  13704. if (pPendingOp != NULL)
  13705. {
  13706. COM_BasedListRemove(&(pPendingOp->chain));
  13707. UT_FreeRefCount((void**)&pPendingOp, FALSE);
  13708. WorksetEventPost(putTask,
  13709. pWorkset,
  13710. SECONDARY,
  13711. OM_OBJECT_DELETED_IND,
  13712. pObj);
  13713. }
  13714. //
  13715. // If we are in the local domain, we can safely delete the object rec:
  13716. //
  13717. pDomain = pWSGroup->pDomain;
  13718. if (pDomain->callID == OM_NO_CALL)
  13719. {
  13720. TRACE_OUT(("Freeing pObj at 0x%08x", pObj));
  13721. ValidateObject(pObj);
  13722. COM_BasedListRemove(&(pObj->chain));
  13723. UT_FreeRefCount((void**)&pObj, FALSE);
  13724. }
  13725. //
  13726. // Decrement the number of objects in the workset:
  13727. //
  13728. ASSERT(pWorkset->numObjects > 0);
  13729. pWorkset->numObjects--;
  13730. DebugExitVOID(ObjectDoDelete);
  13731. }
  13732. //
  13733. // ObjectDoReplace(...)
  13734. //
  13735. void ObjectDoReplace
  13736. (
  13737. PUT_CLIENT putTask,
  13738. POM_WSGROUP pWSGroup,
  13739. POM_WORKSET pWorkset,
  13740. POM_OBJECT pObj,
  13741. POM_PENDING_OP pPendingOp
  13742. )
  13743. {
  13744. POM_OBJECTDATA pDataNew;
  13745. POM_OBJECTDATA pDataOld;
  13746. UINT rc = 0;
  13747. DebugEntry(ObjectDoReplace);
  13748. ValidateObject(pObj);
  13749. //
  13750. // If the object has already been deleted for whatever reason, quit:
  13751. //
  13752. if (pObj->flags & DELETED)
  13753. {
  13754. WARNING_OUT(("Asked to do replace for deleted object 0x%08x!", pObj));
  13755. DC_QUIT;
  13756. }
  13757. //
  13758. // Set up some local variables:
  13759. //
  13760. pDataOld = pObj->pData;
  13761. pDataNew = pPendingOp->pData;
  13762. ValidateObjectData(pDataNew);
  13763. pObj->pData = pDataNew;
  13764. //
  13765. // If this object has been updated since this replace was issued, we
  13766. // must ensure that the replace doesn't overwrite the "later" update:
  13767. //
  13768. // Initial object at t=1 AAAAAA
  13769. // Object updated (two bytes) at t=3;
  13770. // Object becomes: CCAAAA
  13771. //
  13772. // Object replaced at t=2: BBBB
  13773. // Must now re-enact the update: CCBB
  13774. //
  13775. // Therefore, if the update stamp for the object is later than the stamp
  13776. // of the replace instruction, we copy the first N bytes back over the
  13777. // new object, where N is the size of the last update:
  13778. //
  13779. if (STAMP_IS_LOWER(pPendingOp->seqStamp, pObj->updateStamp))
  13780. {
  13781. ASSERT((pDataNew->length >= pObj->updateSize));
  13782. memcpy(&(pDataNew->data), &(pDataOld->data), pObj->updateSize);
  13783. }
  13784. TRACE_OUT(("Replacing object 0x%08x with data at 0x%08x (old data at 0x%08x)",
  13785. pObj, pDataNew, pDataOld));
  13786. //
  13787. // We also need to free up the chunk holding the old object:
  13788. //
  13789. if (!pDataOld)
  13790. {
  13791. ERROR_OUT(("ObjectDoReplace: object 0x%08x has no data", pObj));
  13792. }
  13793. else
  13794. {
  13795. UT_FreeRefCount((void**)&pDataOld, FALSE);
  13796. }
  13797. //
  13798. // Now that we've replaced the object, post a REPLACED event to all
  13799. // secondaries:
  13800. //
  13801. WorksetEventPost(putTask,
  13802. pWorkset,
  13803. SECONDARY,
  13804. OM_OBJECT_REPLACED_IND,
  13805. pObj);
  13806. DC_EXIT_POINT:
  13807. //
  13808. // We've either done the replace or abandoned it because the object has
  13809. // been deleted; either way, free up the entry in the pending op list:
  13810. //
  13811. COM_BasedListRemove(&(pPendingOp->chain));
  13812. UT_FreeRefCount((void**)&pPendingOp, FALSE);
  13813. DebugExitVOID(ObjectDoReplace);
  13814. }
  13815. //
  13816. // ObjectDoUpdate(...)
  13817. //
  13818. void ObjectDoUpdate
  13819. (
  13820. PUT_CLIENT putTask,
  13821. POM_WSGROUP pWSGroup,
  13822. POM_WORKSET pWorkset,
  13823. POM_OBJECT pObj,
  13824. POM_PENDING_OP pPendingOp
  13825. )
  13826. {
  13827. POM_OBJECTDATA pDataNew;
  13828. UINT rc = 0;
  13829. DebugEntry(ObjectDoUpdate);
  13830. ValidateObject(pObj);
  13831. //
  13832. // If the object has already been deleted for whatever reason, quit:
  13833. //
  13834. if (pObj->flags & DELETED)
  13835. {
  13836. WARNING_OUT(("Asked to do update for deleted object 0x%08x!", pObj));
  13837. DC_QUIT;
  13838. }
  13839. pDataNew = pPendingOp->pData;
  13840. if (!pObj->pData)
  13841. {
  13842. ERROR_OUT(("ObjectDoUpdate: object 0x%08x has no data", pObj));
  13843. }
  13844. else
  13845. {
  13846. ValidateObjectData(pObj->pData);
  13847. //
  13848. // Updating an object involves copying <length> bytes from the <data>
  13849. // field of the update over the start of the <data> field of the
  13850. // existing object:
  13851. //
  13852. memcpy(&(pObj->pData->data), &(pDataNew->data), pDataNew->length);
  13853. }
  13854. UT_FreeRefCount((void**)&pDataNew, FALSE);
  13855. //
  13856. // Now that we've updated the object, post an UPDATED event to all
  13857. // secondaries:
  13858. //
  13859. WorksetEventPost(putTask,
  13860. pWorkset,
  13861. SECONDARY,
  13862. OM_OBJECT_UPDATED_IND,
  13863. pObj);
  13864. DC_EXIT_POINT:
  13865. //
  13866. // We've done the update, so free up the entry in the pending op list:
  13867. //
  13868. COM_BasedListRemove(&(pPendingOp->chain));
  13869. UT_FreeRefCount((void**)&pPendingOp, FALSE);
  13870. DebugExitVOID(ObjectDoUpdate);
  13871. }
  13872. //
  13873. // ObjectIDToPtr(...)
  13874. //
  13875. UINT ObjectIDToPtr
  13876. (
  13877. POM_WORKSET pWorkset,
  13878. OM_OBJECT_ID objectID,
  13879. POM_OBJECT * ppObj
  13880. )
  13881. {
  13882. POM_OBJECT pObj;
  13883. UINT rc = 0;
  13884. DebugEntry(ObjectIDToPtr);
  13885. //
  13886. // To find the handle, we chain through each of the object records in
  13887. // the workset and compare the id of each one with the required ID:
  13888. //
  13889. TRACE_OUT(("About to search object records looking for ID 0x%08x:0x%08x",
  13890. objectID.creator, objectID.sequence));
  13891. ValidateWorkset(pWorkset);
  13892. pObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  13893. while (pObj != NULL)
  13894. {
  13895. ValidateObject(pObj);
  13896. TRACE_OUT(("Comparing against object at 0x%08x (ID: 0x%08x:0x%08x)",
  13897. pObj,
  13898. pObj->objectID.creator,
  13899. pObj->objectID.sequence));
  13900. if (OBJECT_IDS_ARE_EQUAL(pObj->objectID, objectID))
  13901. {
  13902. break;
  13903. }
  13904. pObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  13905. }
  13906. //
  13907. // If object record not found, warn:
  13908. //
  13909. if (pObj == NULL)
  13910. {
  13911. TRACE_OUT(("Object with ID 0x%08x:0x%08x not found",
  13912. objectID.creator, objectID.sequence));
  13913. rc = OM_RC_BAD_OBJECT_ID;
  13914. DC_QUIT;
  13915. }
  13916. *ppObj = pObj;
  13917. //
  13918. // If object record found but object deleted or pending delete, warn:
  13919. //
  13920. if (pObj->flags & DELETED)
  13921. {
  13922. TRACE_OUT(("Object record found (handle: 0x%08x) for ID 0x%08x:0x%08x "
  13923. "but object deleted",
  13924. *ppObj, objectID.creator, objectID.sequence));
  13925. rc = OM_RC_OBJECT_DELETED;
  13926. DC_QUIT;
  13927. }
  13928. if (pObj->flags & PENDING_DELETE)
  13929. {
  13930. TRACE_OUT(("Object record found (handle: 0x%08x) for ID 0x%08x:0x%08x "
  13931. "but object pending delete",
  13932. *ppObj, objectID.creator, objectID.sequence));
  13933. rc = OM_RC_OBJECT_PENDING_DELETE;
  13934. DC_QUIT;
  13935. }
  13936. DC_EXIT_POINT:
  13937. DebugExitDWORD(ObjectIDToPtr, rc);
  13938. return(rc);
  13939. }
//
// GenerateOpMessage(...)
//
// Allocates and fills in an OMNET_OPERATION_PKT for an operation of type
// <messageType> on workset <worksetID> in <pWSGroup>, returning it in
// *ppPacket.  <pObjectID> and <pData> may be NULL for operations that do
// not involve an object / object data.  Returns 0 on success, or
// UT_RC_NO_MEM if the packet could not be allocated.  On success the
// caller owns the packet.  Note that the <position>, <flags> and
// <updateSize> fields are NOT filled in here (see the comment near the
// end); callers that need them must set them afterwards.
//
UINT GenerateOpMessage
(
    POM_WSGROUP             pWSGroup,
    OM_WORKSET_ID           worksetID,
    POM_OBJECT_ID           pObjectID,
    POM_OBJECTDATA          pData,
    OMNET_MESSAGE_TYPE      messageType,
    POMNET_OPERATION_PKT *  ppPacket
)
{
    POMNET_OPERATION_PKT    pPacket;
    POM_DOMAIN              pDomain;
    POM_WORKSET             pWorkset = NULL;
    UINT                    rc = 0;

    DebugEntry(GenerateOpMessage);

    //
    // Set up Domain record pointer:
    //
    pDomain = pWSGroup->pDomain;

    TRACE_OUT(("Generating message for operation type 0x%08x", messageType));

    //
    // Allocate some memory for the packet:
    //
    pPacket = (POMNET_OPERATION_PKT)UT_MallocRefCount(sizeof(OMNET_OPERATION_PKT), TRUE);
    if (!pPacket)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }

    //
    // Here, we fill in the fields common to all types of messages:
    //
    pPacket->header.sender      = pDomain->userID;
    pPacket->header.messageType = messageType;

    //
    // The <totalSize> field is the number of bytes in the header packet
    // plus the number of associated data bytes, if any.  For the moment, we
    // set it to the size of the header only; we'll add the size of the data
    // later:
    //
    pPacket->totalSize = GetMessageSize(messageType);
    pPacket->wsGroupID = pWSGroup->wsGroupID;
    pPacket->worksetID = worksetID;

    //
    // If this is a WorksetNew operation, there will be no workset yet and
    // thus no valid sequence stamp, so we use a null sequence stamp.
    // Otherwise, we take the value from the workset.
    //
    if (messageType == OMNET_WORKSET_NEW)
    {
        SET_NULL_SEQ_STAMP(pPacket->seqStamp);
    }
    else
    {
        pWorkset = pWSGroup->apWorksets[worksetID];
        ASSERT((pWorkset != NULL));
        GET_CURR_SEQ_STAMP(pPacket->seqStamp, pDomain, pWorkset);
    }

    //
    // If this is a workset operation, <pObjectID> will be NULL, so we set
    // the object ID in the packet to NULL also:
    //
    if (pObjectID == NULL)
    {
        ZeroMemory(&(pPacket->objectID), sizeof(OM_OBJECT_ID));
    }
    else
    {
        memcpy(&(pPacket->objectID), pObjectID, sizeof(OM_OBJECT_ID));
    }

    //
    // If this message is associated with object data, we must add the size
    // of this data (including the size of the <length> field itself).  The
    // test for this is if the <pData> parameter is not NULL:
    //
    if (pData != NULL)
    {
        pPacket->totalSize += pData->length + sizeof(pData->length);
    }

    //
    // For a WORKSET_CATCHUP message, we need to let the other node know if
    // the workset is locked and if so, by whom.
    //
    // NOTE(review): the catchup packet deliberately overloads fields of
    // the generic operation packet - lock owner and generation number go
    // in <seqStamp>, and the workset priority / fTemp flag are written
    // over the <position>/<flags> bytes and <objectID> via casts.  The
    // receiver must unpack them the same way.
    //
    if (messageType == OMNET_WORKSET_CATCHUP)
    {
        //
        // pWorkset should have been set up above (catchup is never a
        // WORKSET_NEW, so the else branch ran):
        //
        ASSERT((pWorkset != NULL));

        //
        // Put the ID of the node which owns the workset lock in the <userID>
        // field of the <seqStamp> field of the packet:
        //
        pPacket->seqStamp.userID = pWorkset->lockedBy;
        TRACE_OUT(("Set <lockedBy> field in WORKSET_CATCHUP to %hu",
            pWorkset->lockedBy));

        //
        // Now we put the current generation number for the workset in the
        // <genNumber> field of the <seqStamp> field of the packet:
        //
        pPacket->seqStamp.genNumber = pWorkset->genNumber;
        TRACE_OUT(("Set generation number field in WORKSET_CATCHUP to %u",
            pPacket->seqStamp.genNumber));

        //
        // Fill in the priority value for the workset, which goes in the two
        // bytes occupied by the <position> and <flags> fields:
        //
        *((NET_PRIORITY *) &(pPacket->position)) = pWorkset->priority;
        *((BOOL *) &(pPacket->objectID)) = pWorkset->fTemp;
    }

    //
    // We do not fill in the following fields:
    //
    //   position
    //   flags
    //   updateSize
    //
    // This is because these are used only in a minority of messages and to
    // add the extra parameters to the GenerateOpMessage function seemed
    // undesirable.  Messages where these fields are used should be filled
    // in by the calling function as appropriate.
    //

    //
    // Set the caller's pointer:
    //
    *ppPacket = pPacket;

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(("ERROR %d generating message of type 0x%08x",
            rc, messageType));
    }

    DebugExitDWORD(GenerateOpMessage, rc);
    return(rc);
}
//
// QueueMessage(...)
//
// Builds an OM_SEND_INST for <pMessage> (plus optional object data) and
// places it on <pDomain>'s send queue at the appropriate priority,
// bumping the ref counts of everything the instruction references and
// kicking the ObMan task with an OMINT_EVENT_SEND_QUEUE event if one is
// not already outstanding.  For the local Domain the message is simply
// freed and discarded.  Returns 0 on success or UT_RC_NO_MEM.
//
// Ownership: on success this function takes over the caller's reference
// to <pMessage> (it is freed when the instruction is processed); the
// other pointers have their use counts bumped here and released later.
//
UINT QueueMessage
(
    PUT_CLIENT          putTask,
    POM_DOMAIN          pDomain,
    NET_CHANNEL_ID      channelID,
    NET_PRIORITY        priority,
    POM_WSGROUP         pWSGroup,
    POM_WORKSET         pWorkset,
    POM_OBJECT          pObj,
    POMNET_PKT_HEADER   pMessage,
    POM_OBJECTDATA      pData,
    BOOL                compressOrNot
)
{
    POM_SEND_INST       pSendInst;
    NET_PRIORITY        queuePriority;
    BOOL                locked = FALSE;     // NOTE(review): appears unused here
    BOOL                bumped = FALSE;
    UINT                rc = 0;

    DebugEntry(QueueMessage);

    //
    // If this is the local Domain, we don't put the op on the send queue;
    // just free the packet and quit:
    //
    if (pDomain->callID == NET_INVALID_DOMAIN_ID)
    {
        TRACE_OUT(("Not queueing message (it's for the local Domain)"));
        UT_FreeRefCount((void**)&pMessage, FALSE);
        DC_QUIT;
    }

    //
    // Allocate some memory in OMGLOBAL for the send instruction:
    //
    pSendInst = (POM_SEND_INST)UT_MallocRefCount(sizeof(OM_SEND_INST), TRUE);
    if (!pSendInst)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    SET_STAMP(pSendInst, SENDINST);

    //
    // Fill in the fields of the send instruction.  Note that
    // DeterminePriority may rewrite <priority> if the caller asked ObMan
    // to choose (OM_OBMAN_CHOOSES_PRIORITY):
    //
    pSendInst->messageSize = (WORD)GetMessageSize(pMessage->messageType);
    DeterminePriority(&priority, pData);
    pSendInst->priority = priority;
    pSendInst->callID = pDomain->callID;
    pSendInst->channel = channelID;
    pSendInst->messageType = pMessage->messageType;
    pSendInst->compressOrNot = compressOrNot;

    //
    // Now calculate the relevant offsets so we can add them to the ObMan
    // base pointers:
    //
    // SFR 2560 { : bump use counts of all non-zero pointers, not just pData
    //
    if (pMessage != NULL)
    {
        pSendInst->pMessage = pMessage;

        //
        // SFR 5488 { : No! Don't bump use count of pMessage - we're the
        // only people using it now so we don't need to. }
        //
    }

    if (pWSGroup != NULL)
    {
        UT_BumpUpRefCount(pWSGroup);
        pSendInst->pWSGroup = pWSGroup;
    }

    if (pWorkset != NULL)
    {
        UT_BumpUpRefCount(pWorkset);
        pSendInst->pWorkset = pWorkset;
    }

    if (pObj != NULL)
    {
        UT_BumpUpRefCount(pObj);
        pSendInst->pObj = pObj;
    }

    if (pData != NULL)
    {
        UT_BumpUpRefCount(pData);
        pSendInst->pDataStart = pData;
        pSendInst->pDataNext = pData;

        //
        // In addition, we set up some send instruction fields which are
        // specific to operations which involve object data:
        //
        pSendInst->dataLeftToGo = pData->length + sizeof(pData->length);

        //
        // Increment the <bytesUnacked> fields in the workset and workset
        // group.  NOTE(review): this assumes pWorkset/pWSGroup are non-NULL
        // whenever pData is - true for current callers, by inspection.
        //
        pWorkset->bytesUnacked += pSendInst->dataLeftToGo;
        pWSGroup->bytesUnacked += pSendInst->dataLeftToGo;

        TRACE_OUT(("Bytes unacked for workset %u in WSG %d now %u "
            "(for wsGroup: %u)", pWorkset->worksetID, pWSGroup->wsg,
            pWorkset->bytesUnacked, pWSGroup->bytesUnacked));
    }

    //
    // Set a flag so we can clean up a bit better on error:
    //
    bumped = TRUE;

    //
    // Unless there's a send event outstanding, post an event to the ObMan
    // task prompting it to examine the send queue.  Providing we have
    // received a Net Attach indication.
    //
    if ( !pDomain->sendEventOutstanding &&
        (pDomain->state > PENDING_ATTACH) )
    {
        TRACE_OUT(("No send event outstanding - posting SEND_QUEUE event"));

        //
        // Bump up the use count of the Domain record (since we're passing it
        // in an event):
        //
        UT_BumpUpRefCount(pDomain);

        //
        // NFC - we used to pass the pDomain pointer as param2 in this
        // event, but the event may get processed in a different process
        // where the pointer is no longer valid, so pass the offset instead.
        //
        ValidateOMP(g_pomPrimary);
        UT_PostEvent(putTask,
                     g_pomPrimary->putTask,
                     0,                         // no delay
                     OMINT_EVENT_SEND_QUEUE,
                     0,
                     (UINT_PTR)pDomain);

        pDomain->sendEventOutstanding = TRUE;
    }
    else
    {
        TRACE_OUT(("Send event outstanding/state %u: not posting SEND_QUEUE event",
            pDomain->state));
    }

    //
    // Place the event at the end of the relevant send queue.  This depends
    // on priority - but remember, the priority value passed in might have
    // the NET_SEND_ALL_PRIORITIES flag set - so clear it when determining
    // the queue.
    //
    // NB: Do this after any possible DC-QUIT so we're not left with a
    //     NULL entry in the list.
    //
    queuePriority = priority;
    queuePriority &= ~NET_SEND_ALL_PRIORITIES;
    COM_BasedListInsertBefore(&(pDomain->sendQueue[queuePriority]),
        &(pSendInst->chain));

    TRACE_OUT((" Queued instruction (type: 0x%08x) at priority %hu "
        "on channel 0x%08x in Domain %u",
        pMessage->messageType, priority, channelID, pDomain->callID));

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // Cleanup: undo the ref-count bumps made above.  (Only reached
        // after the allocation, so pSendInst is initialised on this path.)
        //
        ERROR_OUT(("ERROR %d queueing send instruction (message type: %hu)",
            rc, pMessage->messageType));

        if (pSendInst != NULL)
        {
            UT_FreeRefCount((void**)&pSendInst, FALSE);
        }

        if (bumped == TRUE)
        {
            // SFR 2560 { : Free all non-zero pointers not just pData
            if (pMessage != NULL)
            {
                UT_FreeRefCount((void**)&pMessage, FALSE);
            }

            if (pWSGroup != NULL)
            {
                UT_FreeRefCount((void**)&pWSGroup, FALSE);
            }

            if (pWorkset != NULL)
            {
                UT_FreeRefCount((void**)&pWorkset, FALSE);
            }

            if (pObj != NULL)
            {
                UT_FreeRefCount((void**)&pObj, FALSE);
            }

            if (pData != NULL)
            {
                UT_FreeRefCount((void**)&pData, FALSE);
            }
        }
    }

    DebugExitDWORD(QueueMessage, rc);
    return(rc);
}
  14273. //
  14274. // DeterminePriority(...)
  14275. //
  14276. void DeterminePriority
  14277. (
  14278. NET_PRIORITY * pPriority,
  14279. POM_OBJECTDATA pData
  14280. )
  14281. {
  14282. DebugEntry(DeterminePriority);
  14283. if (OM_OBMAN_CHOOSES_PRIORITY == *pPriority)
  14284. {
  14285. if (pData != NULL)
  14286. {
  14287. if (pData->length < OM_NET_HIGH_PRI_THRESHOLD)
  14288. {
  14289. *pPriority = NET_HIGH_PRIORITY;
  14290. }
  14291. else if (pData->length < OM_NET_MED_PRI_THRESHOLD)
  14292. {
  14293. *pPriority = NET_MEDIUM_PRIORITY;
  14294. }
  14295. else
  14296. {
  14297. *pPriority = NET_LOW_PRIORITY;
  14298. }
  14299. TRACE_OUT(("Priority chosen: %hu (data size: %u)",
  14300. *pPriority, pData->length));
  14301. }
  14302. else
  14303. {
  14304. *pPriority = NET_HIGH_PRIORITY;
  14305. }
  14306. }
  14307. else
  14308. {
  14309. TRACE_OUT(("Priority specified is %hu - not changing", *pPriority));
  14310. }
  14311. DebugExitVOID(DeterminePriority);
  14312. }
  14313. //
  14314. // GetMessageSize(...)
  14315. //
  14316. UINT GetMessageSize
  14317. (
  14318. OMNET_MESSAGE_TYPE messageType
  14319. )
  14320. {
  14321. UINT size;
  14322. DebugEntry(GetMessageSize);
  14323. switch (messageType)
  14324. {
  14325. case OMNET_HELLO:
  14326. case OMNET_WELCOME:
  14327. size = sizeof(OMNET_JOINER_PKT);
  14328. break;
  14329. case OMNET_LOCK_REQ:
  14330. case OMNET_LOCK_GRANT:
  14331. case OMNET_LOCK_DENY:
  14332. case OMNET_LOCK_NOTIFY:
  14333. case OMNET_UNLOCK:
  14334. size = sizeof(OMNET_LOCK_PKT);
  14335. break;
  14336. case OMNET_WSGROUP_SEND_REQ:
  14337. case OMNET_WSGROUP_SEND_MIDWAY:
  14338. case OMNET_WSGROUP_SEND_COMPLETE:
  14339. case OMNET_WSGROUP_SEND_DENY:
  14340. size = sizeof(OMNET_WSGROUP_SEND_PKT);
  14341. break;
  14342. //
  14343. // The remaining messages all use OMNET_OPERATION_PKT packets, but
  14344. // each uses different amounts of the generic packet. Therefore, we
  14345. // can't use sizeof so we've got some defined constants instead:
  14346. //
  14347. case OMNET_WORKSET_NEW:
  14348. size = OMNET_WORKSET_NEW_SIZE;
  14349. break;
  14350. case OMNET_WORKSET_CATCHUP:
  14351. size = OMNET_WORKSET_CATCHUP_SIZE;
  14352. break;
  14353. case OMNET_WORKSET_CLEAR:
  14354. size = OMNET_WORKSET_CLEAR_SIZE;
  14355. break;
  14356. case OMNET_OBJECT_MOVE:
  14357. size = OMNET_OBJECT_MOVE_SIZE;
  14358. break;
  14359. case OMNET_OBJECT_DELETE:
  14360. size = OMNET_OBJECT_DELETE_SIZE;
  14361. break;
  14362. case OMNET_OBJECT_REPLACE:
  14363. size = OMNET_OBJECT_REPLACE_SIZE;
  14364. break;
  14365. case OMNET_OBJECT_UPDATE:
  14366. size = OMNET_OBJECT_UPDATE_SIZE;
  14367. break;
  14368. case OMNET_OBJECT_ADD:
  14369. size = OMNET_OBJECT_ADD_SIZE;
  14370. break;
  14371. case OMNET_OBJECT_CATCHUP:
  14372. size = OMNET_OBJECT_CATCHUP_SIZE;
  14373. break;
  14374. case OMNET_MORE_DATA:
  14375. size = OMNET_MORE_DATA_SIZE;
  14376. break;
  14377. default:
  14378. ERROR_OUT(("Reached default case in switch statement (type: %hu)",
  14379. messageType));
  14380. size = 0;
  14381. break;
  14382. }
  14383. DebugExitDWORD(GetMessageSize, size);
  14384. return(size);
  14385. }
  14386. //
  14387. // WorksetEventPost()
  14388. //
  14389. UINT WorksetEventPost
  14390. (
  14391. PUT_CLIENT putTask,
  14392. POM_WORKSET pWorkset,
  14393. BYTE target,
  14394. UINT event,
  14395. POM_OBJECT pObj
  14396. )
  14397. {
  14398. POM_CLIENT_LIST pClientListEntry;
  14399. OM_EVENT_DATA16 eventData16;
  14400. UINT numPosts;
  14401. DebugEntry(WorksetEventPost);
  14402. //
  14403. // Need to post the event to each Client which has the workset open, so
  14404. // we chain through the list of Clients stored in the workset record:
  14405. //
  14406. numPosts = 0;
  14407. pClientListEntry = (POM_CLIENT_LIST)COM_BasedListFirst(&(pWorkset->clients), FIELD_OFFSET(OM_CLIENT_LIST, chain));
  14408. while (pClientListEntry != NULL)
  14409. {
  14410. //
  14411. // <target> specifies which type of Client we are to post events to
  14412. // and is PRIMARY and/or SECONDARY (ORed together if both). Check
  14413. // against this Client's registration mode:
  14414. //
  14415. if (target & pClientListEntry->mode)
  14416. {
  14417. //
  14418. // If the pObj was not NULL, bump the use count for the object
  14419. // record. If this fails, give up:
  14420. //
  14421. if (pObj != NULL)
  14422. {
  14423. ValidateObject(pObj);
  14424. UT_BumpUpRefCount(pObj);
  14425. }
  14426. //
  14427. // Fill in the fields of the event parameter, using the workset
  14428. // group handle as found in the Client list and the workset ID as
  14429. // found in the workset record:
  14430. //
  14431. eventData16.hWSGroup = pClientListEntry->hWSGroup;
  14432. eventData16.worksetID = pWorkset->worksetID;
  14433. UT_PostEvent(putTask,
  14434. pClientListEntry->putTask,
  14435. 0,
  14436. event,
  14437. *(PUINT) &eventData16,
  14438. (UINT_PTR)pObj);
  14439. numPosts++;
  14440. }
  14441. pClientListEntry = (POM_CLIENT_LIST)COM_BasedListNext(&(pWorkset->clients), pClientListEntry,
  14442. FIELD_OFFSET(OM_CLIENT_LIST, chain));
  14443. }
  14444. TRACE_OUT(("Posted event 0x%08x to %hu Clients (those with workset %u open)",
  14445. event, numPosts, pWorkset->worksetID));
  14446. DebugExitDWORD(WorksetEventPost, numPosts);
  14447. return(numPosts);
  14448. }
  14449. //
  14450. // WSGroupEventPost(...)
  14451. //
  14452. UINT WSGroupEventPost
  14453. (
  14454. PUT_CLIENT putFrom,
  14455. POM_WSGROUP pWSGroup,
  14456. BYTE target,
  14457. UINT event,
  14458. OM_WORKSET_ID worksetID,
  14459. UINT_PTR param2
  14460. )
  14461. {
  14462. POM_CLIENT_LIST pClientListEntry;
  14463. OM_EVENT_DATA16 eventData16;
  14464. UINT numPosts;
  14465. UINT rc = 0;
  14466. DebugEntry(WSGroupEventPost);
  14467. //
  14468. // Need to post the event to each Client which is registered with the
  14469. // workset group, so we chain through the list of Clients stored in the
  14470. // workset group record:
  14471. //
  14472. numPosts = 0;
  14473. pClientListEntry = (POM_CLIENT_LIST)COM_BasedListFirst(&(pWSGroup->clients), FIELD_OFFSET(OM_CLIENT_LIST, chain));
  14474. while (pClientListEntry != NULL)
  14475. {
  14476. //
  14477. // <target> specifies which type of Client we are to post events to
  14478. // and is PRIMARY and/or SECONDARY (ORed together if both). Check
  14479. // against this Client's registration mode:
  14480. //
  14481. if (target & pClientListEntry->mode)
  14482. {
  14483. //
  14484. // Fill in the fields of the event parameter, using the workset
  14485. // group handle as found in the Client list and the workset ID
  14486. // passed in:
  14487. //
  14488. eventData16.hWSGroup = pClientListEntry->hWSGroup;
  14489. eventData16.worksetID = worksetID;
  14490. TRACE_OUT(("Posting event 0x%08x to 0x%08x (hWSGroup: %hu - worksetID: %hu)",
  14491. event, pClientListEntry->putTask, eventData16.hWSGroup,
  14492. eventData16.worksetID));
  14493. UT_PostEvent(putFrom,
  14494. pClientListEntry->putTask,
  14495. 0,
  14496. event,
  14497. *(PUINT) &eventData16,
  14498. param2);
  14499. numPosts++;
  14500. }
  14501. pClientListEntry = (POM_CLIENT_LIST)COM_BasedListNext(&(pWSGroup->clients), pClientListEntry, FIELD_OFFSET(OM_CLIENT_LIST, chain));
  14502. }
  14503. TRACE_OUT(("Posted event 0x%08x to %hu Clients (all registered with '0x%08x')",
  14504. event, numPosts, pWSGroup->wsg));
  14505. DebugExitDWORD(WSGroupEventPost, numPosts);
  14506. return(numPosts);
  14507. }
  14508. //
  14509. // WorksetDoClear(...)
  14510. //
  14511. void WorksetDoClear
  14512. (
  14513. PUT_CLIENT putTask,
  14514. POM_WSGROUP pWSGroup,
  14515. POM_WORKSET pWorkset,
  14516. POM_PENDING_OP pPendingOp
  14517. )
  14518. {
  14519. POM_OBJECT pObj;
  14520. POM_OBJECT pObj2;
  14521. BOOL locked = FALSE;
  14522. DebugEntry(WorksetDoClear);
  14523. //
  14524. // To clear a workset, we chain through each object in the workset and
  14525. // compare its addition stamp to the stamp of the clear operation we're
  14526. // performing. If the object was added before the workset clear was
  14527. // issued, we delete the object. Otherwise, we ignore it.
  14528. //
  14529. TRACE_OUT(("Clearing workset %u...", pWorkset->worksetID));
  14530. pObj = (POM_OBJECT)COM_BasedListLast(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  14531. while (pObj != NULL)
  14532. {
  14533. ValidateObject(pObj);
  14534. pObj2 = (POM_OBJECT)COM_BasedListPrev(&(pWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  14535. if (pObj->flags & DELETED)
  14536. {
  14537. //
  14538. // Do nothing
  14539. //
  14540. }
  14541. else
  14542. {
  14543. if (STAMP_IS_LOWER(pObj->addStamp, pPendingOp->seqStamp))
  14544. {
  14545. TRACE_OUT(("Object 0x%08x added before workset cleared, deleting",
  14546. pObj));
  14547. PurgePendingOps(pWorkset, pObj);
  14548. ObjectDoDelete(putTask, pWSGroup, pWorkset, pObj, NULL);
  14549. }
  14550. }
  14551. // restore the previous one
  14552. pObj = pObj2;
  14553. }
  14554. //
  14555. // This operation isn't pending anymore, so we remove it from the
  14556. // pending operation list and free the memory:
  14557. //
  14558. COM_BasedListRemove(&(pPendingOp->chain));
  14559. UT_FreeRefCount((void**)&pPendingOp, FALSE);
  14560. //
  14561. // Now that we've cleared the workset, post a CLEARED event to all
  14562. // secondaries:
  14563. //
  14564. WorksetEventPost(putTask,
  14565. pWorkset,
  14566. SECONDARY,
  14567. OM_WORKSET_CLEARED_IND,
  14568. 0);
  14569. TRACE_OUT(("Cleared workset %u", pWorkset->worksetID));
  14570. DebugExitVOID(WorksetDoClear);
  14571. }
//
// WorksetCreate(...)
//
// Create workset <worksetID> in <pWSGroup>: generate an
// OMNET_WORKSET_NEW message, process it locally as if it had just
// arrived from the network, then broadcast it (except for the
// checkpointing dummy workset).  <fTemp> and <priority> are overlaid on
// spare fields of the generic operation packet.
//
// Returns 0 on success, else the error from message generation,
// processing, or queueing.
//
UINT WorksetCreate
(
    PUT_CLIENT      putTask,
    POM_WSGROUP     pWSGroup,
    OM_WORKSET_ID   worksetID,
    BOOL            fTemp,
    NET_PRIORITY    priority
)
{
    POMNET_OPERATION_PKT pPacket;
    UINT rc = 0;

    DebugEntry(WorksetCreate);

    //
    // Here we create the new workset by generating the message to be
    // broadcast, processing it as if it had just arrived, and then
    // queueing it to be sent:
    //
    rc = GenerateOpMessage(pWSGroup,
                           worksetID,
                           NULL, // no object ID
                           NULL, // no object
                           OMNET_WORKSET_NEW,
                           &pPacket);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Fill in the priority value for the workset, which goes in the two
    // bytes occupied by the <position> and <flags> fields:
    //
    // NOTE(review): <fTemp> is similarly overlaid on the <objectID>
    // field - receivers presumably decode these the same way; the
    // casts reinterpret the packet fields in place.
    //
    *((NET_PRIORITY *) &(pPacket->position)) = priority;
    *((BOOL *) &(pPacket->objectID)) = fTemp;

    // Apply the WORKSET_NEW locally first, exactly as a remote node would.
    rc = ProcessWorksetNew(putTask, pPacket, pWSGroup);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // NEW FOR R2.0
    //
    // In R2.0, the checkpointing mechanism used by a helper to get up to
    // date before sending a workset group to a late joiner relies on
    // locking a "dummy" workset (#255) in the workset group in question.
    // So, if the workset ID is 255, this is the dummy workset.  We do not
    // broadcast the WORKSET_NEW for this dummy workset, for two reasons:
    //
    // - it will confuse R1.1 systems
    //
    // - all other R2.0 systems will create it locally just as we have, so
    //   there isn't any need.
    //
    // So, do a check and free up the send packet if necessary; otherwise
    // queue the message as normal:
    //
    if (worksetID == OM_CHECKPOINT_WORKSET)
    {
        TRACE_OUT(("WORKSET_NEW for checkpointing dummy workset - not queueing"));
        UT_FreeRefCount((void**)&pPacket, FALSE);
    }
    else
    {
        // QueueMessage takes ownership of the packet on success.
        rc = QueueMessage(putTask,
                          pWSGroup->pDomain,
                          pWSGroup->channelID,
                          priority,
                          pWSGroup,
                          NULL,
                          NULL,                        // no object
                          (POMNET_PKT_HEADER) pPacket,
                          NULL,                        // no object data
                          TRUE);
        if (rc != 0)
        {
            DC_QUIT;
        }
    }

    TRACE_OUT(("Created workset ID %hu in WSG %d for TASK 0x%08x",
        worksetID, pWSGroup->wsg, putTask));

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // Cleanup:
        //
        ERROR_OUT(("Error 0x%08x creating workset ID %hu in WSG %d for TASK 0x%08x",
            rc, worksetID, pWSGroup->wsg, putTask));
    }

    DebugExitDWORD(WorksetCreate, rc);
    return(rc);
}
//
// ObjectAdd(...)
//
// Add object <pData> to <pWorkset> at <position>: allocate a new object
// ID, generate an OMNET_OBJECT_ADD message, process it locally (creating
// the object record), then queue the message for broadcast.  On success,
// *pObjectID receives the new ID and *ppObj the new object record.  On a
// queueing failure the local add is unwound so the object does not exist
// on this node only.
//
// Returns 0 on success, else an error code; on failure the caller's
// <pData> is still valid.
//
UINT ObjectAdd
(
    PUT_CLIENT      putTask,
    POM_PRIMARY     pomPrimary,
    POM_WSGROUP     pWSGroup,
    POM_WORKSET     pWorkset,
    POM_OBJECTDATA  pData,
    UINT            updateSize,
    OM_POSITION     position,
    OM_OBJECT_ID *  pObjectID,
    POM_OBJECT *    ppObj
)
{
    POM_OBJECT pObj;
    POMNET_OPERATION_PKT pPacket;
    POM_DOMAIN pDomain;
    UINT rc = 0;

    DebugEntry(ObjectAdd);

    TRACE_OUT(("Adding object to workset %u in WSG %d",
        pWorkset->worksetID, pWSGroup->wsg));

    //
    // Allocate a new ID for this object:
    //
    pDomain = pWSGroup->pDomain;
    GET_NEXT_OBJECT_ID(*pObjectID, pDomain, pomPrimary);

    //
    // Generate the OMNET_OBJECT_ADD message:
    //
    rc = GenerateOpMessage(pWSGroup,
                           pWorkset->worksetID,
                           pObjectID,
                           pData,
                           OMNET_OBJECT_ADD,
                           &pPacket);
    if (rc != 0)
    {
        // NULL the pointer so the cleanup code doesn't free it.
        pPacket = NULL;
        DC_QUIT;
    }

    //
    // Generate message doesn't fill in the <updateSize> or <position>
    // fields (as they are specific to ObjectAdd) so we do them here:
    //
    pPacket->updateSize = updateSize;
    pPacket->position = position;

    //
    // This processes the message, as if it has just been received from the
    // network (i.e. allocates the record, sets up the object handle,
    // inserts the object in the workset, etc.)
    //
    rc = ProcessObjectAdd(putTask, pPacket, pWSGroup,
                          pWorkset, pData, ppObj);
    if (rc != 0)
    {
        DC_QUIT;
    }
    pObj = *ppObj;

    //
    // This queues the OMNET_OBJECT_ADD message on the send queue for this
    // Domain and priority:
    //
    rc = QueueMessage(putTask,
                      pWSGroup->pDomain,
                      pWSGroup->channelID,
                      pWorkset->priority,
                      pWSGroup,
                      pWorkset,
                      pObj,
                      (POMNET_PKT_HEADER) pPacket,
                      pData,
                      TRUE);
    if (rc != 0)
    {
        ValidateObject(pObj);

        //
        // If we failed to queue the message, we must unwind by deleting the
        // object and its record from the workset (since otherwise it would
        // be present on this node and no another, which we want to avoid):
        //
        // We don't want to call ObjectDoDelete since that frees the object
        // data (which our caller will expect still to be valid if the
        // function fails).  We could, of course, bump the use count and then
        // call ObjectDoDelete but if we fail on the bump, what next?
        //
        // Instead, we
        //
        // - set the DELETED flag so the hidden handler will swallow the
        //   Add event
        //
        // - decrement the numObjects field in the workset
        //
        // - free the object record after removing it from the workset.
        //
        pObj->flags |= DELETED;
        pWorkset->numObjects--;

        TRACE_OUT(("Freeing object record at 0x%08x)", pObj));
        COM_BasedListRemove(&(pObj->chain));
        UT_FreeRefCount((void**)&pObj, FALSE);
        DC_QUIT;
    }

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(("ERROR %d adding object to workset %u in WSG %d for TASK 0x%08x",
            rc, pWorkset->worksetID, pWSGroup->wsg, putTask));

        // Free the packet only if it was successfully generated above.
        if (pPacket != NULL)
        {
            UT_FreeRefCount((void**)&pPacket, FALSE);
        }
    }

    DebugExitDWORD(ObjectAdd, rc);
    return(rc);
}
//
// ObjectDRU(...)
//
// Issue a Delete/Replace/Update operation (<type>) on <pObj> in
// <pWorkset>: generate the operation message, queue it for broadcast,
// then process it locally.  The packet's use count is bumped before
// queueing because QueueMessage may free it (e.g. when not in a call)
// and we still need it for local processing.
//
// Returns 0 on success, else an error code.
//
UINT ObjectDRU
(
    PUT_CLIENT          putTask,
    POM_WSGROUP         pWSGroup,
    POM_WORKSET         pWorkset,
    POM_OBJECT          pObj,
    POM_OBJECTDATA      pData,
    OMNET_MESSAGE_TYPE  type
)
{
    POMNET_OPERATION_PKT pPacket;
    UINT rc = 0;

    DebugEntry(ObjectDRU);

    TRACE_OUT(("Issuing operation type 0x%08x for object 0x%08x in workset %u in WSG %d",
        type, pData, pWorkset->worksetID, pWSGroup->wsg));

    rc = GenerateOpMessage(pWSGroup,
                           pWorkset->worksetID,
                           &(pObj->objectID),
                           pData,
                           type,
                           &pPacket);
    if (rc != 0)
    {
        // NULL the pointer so the cleanup code doesn't free it.
        pPacket = NULL;
        DC_QUIT;
    }

    //
    // QueueMessage may free the packet (if we're not in a call) but we need
    // to process it in a minute so bump the use count:
    //
    UT_BumpUpRefCount(pPacket);

    rc = QueueMessage(putTask,
                      pWSGroup->pDomain,
                      pWSGroup->channelID,
                      pWorkset->priority,
                      pWSGroup,
                      pWorkset,
                      pObj,
                      (POMNET_PKT_HEADER) pPacket,
                      pData,
                      TRUE);
    if (rc != 0)
    {
        DC_QUIT;
    }

    // Apply the operation locally, exactly as a remote node would.
    rc = ProcessObjectDRU(putTask,
                          pPacket,
                          pWSGroup,
                          pWorkset,
                          pObj,
                          pData);

DC_EXIT_POINT:
    //
    // Now free the packet since we bumped its use count above:
    //
    if (pPacket != NULL)
    {
        UT_FreeRefCount((void**)&pPacket, FALSE);
    }

    if (rc != 0)
    {
        ERROR_OUT(("ERROR %d issuing D/R/U (type %hu) for object 0x%08x "
            "in workset %u in WSG %d",
            rc, type, pData, pWorkset->worksetID, pWSGroup->wsg));
    }

    DebugExitDWORD(ObjectDRU, rc);
    return(rc);
}
  14853. //
  14854. // FindPendingOp(...)
  14855. //
  14856. void FindPendingOp
  14857. (
  14858. POM_WORKSET pWorkset,
  14859. POM_OBJECT pObj,
  14860. OM_OPERATION_TYPE type,
  14861. POM_PENDING_OP * ppPendingOp
  14862. )
  14863. {
  14864. POM_PENDING_OP pPendingOp;
  14865. DebugEntry(FindPendingOp);
  14866. pPendingOp = (POM_PENDING_OP)COM_BasedListFirst(&(pWorkset->pendingOps), FIELD_OFFSET(OM_PENDING_OP, chain));
  14867. while (pPendingOp != NULL)
  14868. {
  14869. if ((pPendingOp->type == type) && (pPendingOp->pObj == pObj))
  14870. {
  14871. break;
  14872. }
  14873. pPendingOp = (POM_PENDING_OP)COM_BasedListNext(&(pWorkset->pendingOps), pPendingOp, FIELD_OFFSET(OM_PENDING_OP, chain));
  14874. }
  14875. if (pPendingOp == NULL)
  14876. {
  14877. TRACE_OUT(("No pending op of type %hu found for object 0x%08x",
  14878. type, pObj));
  14879. }
  14880. *ppPendingOp = pPendingOp;
  14881. DebugExitVOID(FindPendingOp);
  14882. }
  14883. //
  14884. // AddClientToWsetList(...)
  14885. //
  14886. UINT AddClientToWsetList
  14887. (
  14888. PUT_CLIENT putTask,
  14889. POM_WORKSET pWorkset,
  14890. OM_WSGROUP_HANDLE hWSGroup,
  14891. UINT mode,
  14892. POM_CLIENT_LIST * ppClientListEntry
  14893. )
  14894. {
  14895. UINT rc = 0;
  14896. DebugEntry(AddClientToWsetList);
  14897. //
  14898. // Adding a task to a workset's client list means that that task will
  14899. // get events relating to that workset.
  14900. //
  14901. TRACE_OUT((" Adding TASK 0x%08x to workset's client list"));
  14902. *ppClientListEntry = (POM_CLIENT_LIST)UT_MallocRefCount(sizeof(OM_CLIENT_LIST), FALSE);
  14903. if (! *ppClientListEntry)
  14904. {
  14905. rc = UT_RC_NO_MEM;
  14906. DC_QUIT;
  14907. }
  14908. SET_STAMP((*ppClientListEntry), CLIENTLIST);
  14909. (*ppClientListEntry)->putTask = putTask;
  14910. (*ppClientListEntry)->hWSGroup = hWSGroup;
  14911. (*ppClientListEntry)->mode = (WORD)mode;
  14912. //
  14913. // Now insert the entry into the list:
  14914. //
  14915. COM_BasedListInsertBefore(&(pWorkset->clients),
  14916. &((*ppClientListEntry)->chain));
  14917. TRACE_OUT((" Inserted Client list item into workset's Client list"));
  14918. DC_EXIT_POINT:
  14919. DebugExitDWORD(AddClientToWsetList, rc);
  14920. return(rc);
  14921. }
  14922. //
  14923. // RemoveClientFromWSGList(...)
  14924. //
  14925. void RemoveClientFromWSGList
  14926. (
  14927. PUT_CLIENT putUs,
  14928. PUT_CLIENT putTask,
  14929. POM_WSGROUP pWSGroup
  14930. )
  14931. {
  14932. POM_CLIENT_LIST pClientListEntry;
  14933. BOOL locked = FALSE;
  14934. DebugEntry(RemoveClientFromWSGList);
  14935. TRACE_OUT(("Searching for Client TASK 0x%08x in WSG %d",
  14936. putTask, pWSGroup->wsg));
  14937. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pWSGroup->clients),
  14938. (void**)&pClientListEntry, FIELD_OFFSET(OM_CLIENT_LIST, chain),
  14939. FIELD_OFFSET(OM_CLIENT_LIST, putTask), (DWORD_PTR)putTask,
  14940. FIELD_SIZE(OM_CLIENT_LIST, putTask));
  14941. //
  14942. // If it's not there, the Client may have already deregistered itself:
  14943. //
  14944. if (pClientListEntry == NULL)
  14945. {
  14946. WARNING_OUT(("Client TASK 0x%08x not found in list for WSG %d",
  14947. putTask, pWSGroup->wsg));
  14948. DC_QUIT;
  14949. }
  14950. //
  14951. // Remove the Client from the list and free the memory:
  14952. //
  14953. COM_BasedListRemove(&(pClientListEntry->chain));
  14954. UT_FreeRefCount((void**)&pClientListEntry, FALSE);
  14955. //
  14956. // If there are now no local Clients registered with the workset group,
  14957. // post an event to ObMan so it can discard the workset group (unless
  14958. // the workset group is marked non-discardable e.g the ObManControl
  14959. // workset group)
  14960. //
  14961. // The event parameter is the offset of the workset group record.
  14962. //
  14963. // Note: this discard is done asynchronously since it may involve
  14964. // allocating resources (broadcasting to other nodes that
  14965. // we've deregistered), and we want this function to always
  14966. // succeed.
  14967. //
  14968. // However, we clear the <valid> flag synchronously so that
  14969. // ObMan will not try to process events etc. which arrive
  14970. // for it.
  14971. //
  14972. if (COM_BasedListIsEmpty(&(pWSGroup->clients)))
  14973. {
  14974. pWSGroup->toBeDiscarded = TRUE;
  14975. pWSGroup->valid = FALSE;
  14976. TRACE_OUT(("Last local Client deregistered from WSG %d, "
  14977. "marking invalid and posting DISCARD event", pWSGroup->wsg));
  14978. ValidateOMP(g_pomPrimary);
  14979. UT_PostEvent(putUs,
  14980. g_pomPrimary->putTask,
  14981. 0, // no delay
  14982. OMINT_EVENT_WSGROUP_DISCARD,
  14983. 0,
  14984. (UINT_PTR)pWSGroup);
  14985. }
  14986. else
  14987. {
  14988. TRACE_OUT(("Clients still registered with WSG %d", pWSGroup->wsg));
  14989. }
  14990. DC_EXIT_POINT:
  14991. DebugExitVOID(RemoveClientFromWSGList);
  14992. }
  14993. //
  14994. // FindInfoObject(...)
  14995. //
  14996. void FindInfoObject
  14997. (
  14998. POM_DOMAIN pDomain,
  14999. OM_WSGROUP_ID wsGroupID,
  15000. OMWSG wsg,
  15001. OMFP fpHandler,
  15002. POM_OBJECT * ppObjInfo
  15003. )
  15004. {
  15005. POM_WORKSET pOMCWorkset;
  15006. POM_OBJECT pObj;
  15007. POM_WSGROUP_INFO pInfoObject;
  15008. DebugEntry(FindInfoObject);
  15009. TRACE_OUT(("FindInfoObject: FP %d WSG %d ID %d, domain %u",
  15010. fpHandler, wsg, wsGroupID, pDomain->callID));
  15011. //
  15012. // In this function, we search workset #0 in ObManControl for a
  15013. // Function Profile/workset group name combination which matches the
  15014. // ones specified
  15015. //
  15016. // So, we need to start with a pointer to this workset:
  15017. //
  15018. pOMCWorkset = GetOMCWorkset(pDomain, OM_INFO_WORKSET);
  15019. //
  15020. // Now chain through each of the objects in the workset to look for a
  15021. // match.
  15022. //
  15023. pObj = (POM_OBJECT)COM_BasedListLast(&(pOMCWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  15024. while (pObj != NULL)
  15025. {
  15026. ValidateObject(pObj);
  15027. //
  15028. // If the object has not been deleted...
  15029. //
  15030. if (pObj->flags & DELETED)
  15031. {
  15032. }
  15033. else if (!pObj->pData)
  15034. {
  15035. ERROR_OUT(("FindInfoObject: object 0x%08x has no data", pObj));
  15036. }
  15037. else
  15038. {
  15039. ValidateObjectData(pObj->pData);
  15040. pInfoObject = (POM_WSGROUP_INFO)pObj->pData;
  15041. //
  15042. // ...and if it is an INFO object...
  15043. //
  15044. if (pInfoObject->idStamp == OM_WSGINFO_ID_STAMP)
  15045. {
  15046. // If no FP provided, check the group IDs match
  15047. if (fpHandler == OMFP_MAX)
  15048. {
  15049. //
  15050. // ...and the ID matches, we've got what we want:
  15051. //
  15052. if (wsGroupID == pInfoObject->wsGroupID)
  15053. {
  15054. break;
  15055. }
  15056. }
  15057. //
  15058. // ...but otherwise, try match on functionProfile...
  15059. //
  15060. else
  15061. {
  15062. if (!lstrcmp(pInfoObject->functionProfile,
  15063. OMMapFPToName(fpHandler)))
  15064. {
  15065. //
  15066. // ...and also on WSG unless it is not provided
  15067. //
  15068. if ((wsg == OMWSG_MAX) ||
  15069. (!lstrcmp(pInfoObject->wsGroupName,
  15070. OMMapWSGToName(wsg))))
  15071. {
  15072. break;
  15073. }
  15074. }
  15075. }
  15076. }
  15077. }
  15078. pObj = (POM_OBJECT)COM_BasedListPrev(&(pOMCWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  15079. }
  15080. TRACE_OUT(("%s info object in Domain %u",
  15081. pObj == NULL ? "Didn't find" : "Found", pDomain->callID));
  15082. //
  15083. // Set up the caller's pointer:
  15084. //
  15085. *ppObjInfo = pObj;
  15086. DebugExitVOID(FindInfoObject);
  15087. }
  15088. //
  15089. // PostAddEvents(...)
  15090. //
  15091. UINT PostAddEvents
  15092. (
  15093. PUT_CLIENT putFrom,
  15094. POM_WORKSET pWorkset,
  15095. OM_WSGROUP_HANDLE hWSGroup,
  15096. PUT_CLIENT putTo
  15097. )
  15098. {
  15099. OM_EVENT_DATA16 eventData16;
  15100. POM_OBJECT pObj;
  15101. UINT rc = 0;
  15102. DebugEntry(PostAddEvents);
  15103. eventData16.hWSGroup = hWSGroup;
  15104. eventData16.worksetID = pWorkset->worksetID;
  15105. if (pWorkset->numObjects != 0)
  15106. {
  15107. TRACE_OUT(("Workset has %u objects - posting OBJECT_ADD events",
  15108. pWorkset->numObjects));
  15109. pObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  15110. while (pObj != NULL)
  15111. {
  15112. ValidateObject(pObj);
  15113. //
  15114. // Don't post events for DELETED objects:
  15115. //
  15116. if (!(pObj->flags & DELETED))
  15117. {
  15118. //
  15119. // We're posting an event with an pObj in it, so bump the
  15120. // use count of the object record it refers to:
  15121. //
  15122. UT_BumpUpRefCount(pObj);
  15123. UT_PostEvent(putFrom, putTo,
  15124. 0, // no delay
  15125. OM_OBJECT_ADD_IND,
  15126. *(PUINT) &eventData16,
  15127. (UINT_PTR)pObj);
  15128. }
  15129. pObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  15130. }
  15131. }
  15132. else
  15133. {
  15134. TRACE_OUT(("No objects in workset"));
  15135. }
  15136. DebugExitDWORD(PostAddEvents, rc);
  15137. return(rc);
  15138. }
  15139. //
  15140. // PurgePendingOps(...)
  15141. //
  15142. void PurgePendingOps
  15143. (
  15144. POM_WORKSET pWorkset,
  15145. POM_OBJECT pObj
  15146. )
  15147. {
  15148. POM_PENDING_OP pPendingOp;
  15149. POM_PENDING_OP pTempPendingOp;
  15150. DebugEntry(PurgePendingOps);
  15151. //
  15152. // Chain through the workset's list of pending operations and confirm
  15153. // them one by one:
  15154. //
  15155. pPendingOp = (POM_PENDING_OP)COM_BasedListFirst(&(pWorkset->pendingOps), FIELD_OFFSET(OM_PENDING_OP, chain));
  15156. while (pPendingOp != NULL)
  15157. {
  15158. pTempPendingOp = (POM_PENDING_OP)COM_BasedListNext(&(pWorkset->pendingOps), pPendingOp, FIELD_OFFSET(OM_PENDING_OP, chain));
  15159. if (pPendingOp->pObj == pObj)
  15160. {
  15161. TRACE_OUT(("Purging operation type %hd", pPendingOp->type));
  15162. COM_BasedListRemove(&(pPendingOp->chain));
  15163. UT_FreeRefCount((void**)&pPendingOp, FALSE);
  15164. }
  15165. pPendingOp = pTempPendingOp;
  15166. }
  15167. DebugExitVOID(PurgePendingOps);
  15168. }
//
// WorksetLockReq(...)
//
// Request the lock on <pWorkset> for <putTask>.  The outcome is always
// delivered asynchronously via WorksetLockResult, so this function
// returns void; *pCorrelator receives the correlator the caller uses to
// match the eventual success/failure event.
//
// Depending on the workset's current lock state:
//  - LOCKING/LOCKED:  piggy-back as a SECONDARY lock (bump lock count);
//    succeed immediately if already LOCKED.
//  - LOCK_GRANTED (held by another node):  fail immediately.
//  - UNLOCKED:  become the prospective owner; grant immediately if no
//    remote nodes exist, otherwise broadcast OMNET_LOCK_REQ and start a
//    retry timeout.
//
void WorksetLockReq
(
    PUT_CLIENT          putTask,
    POM_PRIMARY         pomPrimary,
    POM_WSGROUP         pWSGroup,
    POM_WORKSET         pWorkset,
    OM_WSGROUP_HANDLE   hWSGroup,
    OM_CORRELATOR *     pCorrelator
)
{
    POM_DOMAIN      pDomain;
    POM_LOCK_REQ    pLockReq = NULL;
    POMNET_LOCK_PKT pLockReqPkt = NULL;
    UINT rc = 0;

    DebugEntry(WorksetLockReq);

    TRACE_OUT(("TASK 0x%08x requesting to lock workset %u in WSG %d",
        putTask, pWorkset->worksetID, hWSGroup));

    //
    // The caller will need a correlator value to correlate the eventual
    // lock success/failure event:
    //
    *pCorrelator = NextCorrelator(pomPrimary);

    //
    // Set up a pointer to the Domain record:
    //
    pDomain = pWSGroup->pDomain;

    //
    // Allocate some memory for the lock request control block:
    //
    pLockReq = (POM_LOCK_REQ)UT_MallocRefCount(sizeof(OM_LOCK_REQ), TRUE);
    if (!pLockReq)
    {
        rc = OM_RC_OUT_OF_RESOURCES;
        DC_QUIT;
    }
    SET_STAMP(pLockReq, LREQ);

    //
    // Set up the fields:
    //
    pLockReq->putTask = putTask;
    pLockReq->correlator = *pCorrelator;
    pLockReq->wsGroupID = pWSGroup->wsGroupID;
    pLockReq->worksetID = pWorkset->worksetID;
    pLockReq->hWSGroup = hWSGroup;
    pLockReq->type = LOCK_PRIMARY;
    pLockReq->retriesToGo = OM_LOCK_RETRY_COUNT_DFLT;
    pLockReq->pWSGroup = pWSGroup;
    COM_BasedListInit(&(pLockReq->nodes));

    //
    // Insert this lock request in the Domain's list of pending lock
    // requests:
    //
    COM_BasedListInsertBefore(&(pDomain->pendingLocks), &(pLockReq->chain));

    //
    // Now examine the workset lock state to see if we can grant the lock
    // immediately:
    //
    // (No default case: lockState only takes the four values below.)
    //
    TRACE_OUT(("Lock state for workset %u in WSG %d is %hu",
        pWorkset->worksetID, hWSGroup, pWorkset->lockState));
    switch (pWorkset->lockState)
    {
        case LOCKING:
        case LOCKED:
        {
            TRACE_OUT((
                "Workset %hu in WSG %d already locked/locking - bumping count",
                pWorkset->worksetID, hWSGroup));
            pLockReq->type = LOCK_SECONDARY;
            pWorkset->lockCount++;
            if (pWorkset->lockState == LOCKED)
            {
                //
                // If we've already got the lock, post success immediately:
                //
                WorksetLockResult(putTask, &pLockReq, 0);
            }
            else
            {
                //
                // Otherwise, this request will be handled when the primary
                // request completes, so do nothing for now.
                //
            }
        }
        break;

        case LOCK_GRANTED:
        {
            //
            // We've already granted the lock to another node so we fail
            // our local client's request for it:
            //
            WorksetLockResult(putTask, &pLockReq, OM_RC_WORKSET_LOCK_GRANTED);
        }
        break;

        case UNLOCKED:
        {
            //
            // Build up a list of other nodes using the workset group:
            //
            rc = BuildNodeList(pDomain, pLockReq);
            if (rc != 0)
            {
                DC_QUIT;
            }

            // Tentatively take the lock; confirmed when all replies arrive.
            pWorkset->lockState = LOCKING;
            pWorkset->lockCount++;
            pWorkset->lockedBy = pDomain->userID;

            //
            // If the list is empty, we have got the lock:
            //
            if (COM_BasedListIsEmpty(&pLockReq->nodes))
            {
                TRACE_OUT(("No remote nodes, granting lock immediately"));
                pWorkset->lockState = LOCKED;
                WorksetLockResult(putTask, &pLockReq, 0);
            }
            //
            // Otherwise, we need to broadcast a lock request CB:
            //
            else
            {
                pLockReqPkt = (POMNET_LOCK_PKT)UT_MallocRefCount(sizeof(OMNET_LOCK_PKT), TRUE);
                if (!pLockReqPkt)
                {
                    rc = UT_RC_NO_MEM;
                    DC_QUIT;
                }
                pLockReqPkt->header.messageType = OMNET_LOCK_REQ;
                pLockReqPkt->header.sender = pDomain->userID;
                pLockReqPkt->data1 = pLockReq->correlator;
                pLockReqPkt->wsGroupID = pLockReq->wsGroupID;
                pLockReqPkt->worksetID = pLockReq->worksetID;

                //
                // Lock messages go at the priority of the workset
                // involved.  If this is OBMAN_CHOOSES_PRIORITY, then
                // all bets are off and we send them TOP_PRIORITY.
                //
                rc = QueueMessage(putTask,
                                  pDomain,
                                  pWSGroup->channelID,
                                  (NET_PRIORITY)((pWorkset->priority == OM_OBMAN_CHOOSES_PRIORITY) ?
                                      NET_TOP_PRIORITY : pWorkset->priority),
                                  NULL,
                                  NULL,
                                  NULL,
                                  (POMNET_PKT_HEADER) pLockReqPkt,
                                  NULL,
                                  TRUE);
                if (rc != 0)
                {
                    DC_QUIT;
                }

                //
                // Post a timeout event to the ObMan task so that we don't hang around
                // forever waiting for the lock replies:
                //
                UT_PostEvent(putTask,
                             pomPrimary->putTask, // ObMan's utH
                             OM_LOCK_RETRY_DELAY_DFLT,
                             OMINT_EVENT_LOCK_TIMEOUT,
                             pLockReq->correlator,
                             pDomain->callID);
            }
        }
        break;
    }

DC_EXIT_POINT:
    //
    // For the checkpointing dummy workset, we always "forget" our lock
    // state so that subsequent requests to lock it will result in the
    // required end-to-end ping:
    //
    if (pWorkset->worksetID == OM_CHECKPOINT_WORKSET)
    {
        TRACE_OUT(("Resetting lock state of checkpoint workset in WSG %d",
            hWSGroup));
        pWorkset->lockState = UNLOCKED;
        pWorkset->lockCount = 0;
        pWorkset->lockedBy = 0;
    }

    if (rc != 0)
    {
        if (pLockReqPkt != NULL)
        {
            UT_FreeRefCount((void**)&pLockReqPkt, FALSE);
        }

        //
        // This function never returns an error to its caller directly;
        // instead, we call WorksetLockResult which will post a failure
        // event to the calling task (this means the caller doesn't have to
        // have two error processing paths)
        //
        if (pLockReq != NULL)
        {
            WorksetLockResult(putTask, &pLockReq, rc);
        }
        else
        {
            WARNING_OUT(("ERROR %d requesting lock for workset %u in WSG %d ",
                rc, pWorkset->worksetID, hWSGroup));
        }
    }

    DebugExitVOID(WorksetLockReq);
}
  15376. //
  15377. // BuildNodeList(...)
  15378. //
  15379. UINT BuildNodeList
  15380. (
  15381. POM_DOMAIN pDomain,
  15382. POM_LOCK_REQ pLockReq
  15383. )
  15384. {
  15385. NET_PRIORITY priority;
  15386. POM_WORKSET pOMCWorkset;
  15387. POM_OBJECT pObj;
  15388. POM_WSGROUP_REG_REC pPersonObject;
  15389. POM_NODE_LIST pNodeEntry;
  15390. NET_UID ownUserID;
  15391. BOOL foundOurRegObject;
  15392. UINT rc = 0;
  15393. DebugEntry(BuildNodeList);
  15394. //
  15395. // OK, we're about to broadcast a lock request throughout this Domain
  15396. // on this workset group's channel. Before we do so, however, we build
  15397. // up a list of the nodes we expect to respond to the request. As the
  15398. // replies come in we tick them off against this list; when all of them
  15399. // have been received, the lock is granted.
  15400. //
  15401. // SFR 6117: Since the lock replies will come back on all priorities
  15402. // (to correctly flush the channel), we add 4 items for each remote
  15403. // node - one for each priority.
  15404. //
  15405. // So, we examine the control workset for this workset group, adding
  15406. // items to our list for each person object we find in it (except our
  15407. // own, of course).
  15408. //
  15409. //
  15410. // First, get a pointer to the relevant control workset:
  15411. //
  15412. pOMCWorkset = GetOMCWorkset(pDomain, pLockReq->wsGroupID);
  15413. ASSERT((pOMCWorkset != NULL));
  15414. //
  15415. // We want to ignore our own registration object, so make a note of our
  15416. // user ID:
  15417. //
  15418. ownUserID = pDomain->userID;
  15419. //
  15420. // Now chain through the workset:
  15421. //
  15422. foundOurRegObject = FALSE;
  15423. pObj = (POM_OBJECT)COM_BasedListFirst(&(pOMCWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  15424. while (pObj != NULL)
  15425. {
  15426. ValidateObject(pObj);
  15427. if (pObj->flags & DELETED)
  15428. {
  15429. //
  15430. // Do nothing
  15431. //
  15432. }
  15433. else if (!pObj->pData)
  15434. {
  15435. ERROR_OUT(("BuildNodeList: object 0x%08x has no data", pObj));
  15436. }
  15437. else
  15438. {
  15439. ValidateObjectData(pObj->pData);
  15440. pPersonObject = (POM_WSGROUP_REG_REC)pObj->pData;
  15441. if (pPersonObject->idStamp != OM_WSGREGREC_ID_STAMP)
  15442. {
  15443. TRACE_OUT(("Not a person object, skipping"));
  15444. }
  15445. else
  15446. {
  15447. if (pPersonObject->userID == ownUserID)
  15448. {
  15449. if (foundOurRegObject)
  15450. {
  15451. ERROR_OUT(("Duplicate person object in workset %u",
  15452. pOMCWorkset->worksetID));
  15453. }
  15454. else
  15455. {
  15456. TRACE_OUT(("Found own person object, skipping"));
  15457. foundOurRegObject = TRUE;
  15458. }
  15459. }
  15460. else
  15461. {
  15462. //
  15463. // Add an item to our expected respondents list (this
  15464. // memory is freed in each case when the remote node
  15465. // replies, or the timer expires and we notice that the
  15466. // node has disappeared).
  15467. //
  15468. // SFR 6117: We add one item for each priority value, since
  15469. // the lock replies will come back on all priorities.
  15470. //
  15471. for (priority = NET_TOP_PRIORITY;
  15472. priority <= NET_LOW_PRIORITY;
  15473. priority++)
  15474. {
  15475. TRACE_OUT(("Adding node 0x%08x to node list at priority %hu",
  15476. pPersonObject->userID, priority));
  15477. pNodeEntry = (POM_NODE_LIST)UT_MallocRefCount(sizeof(OM_NODE_LIST), TRUE);
  15478. if (!pNodeEntry)
  15479. {
  15480. rc = UT_RC_NO_MEM;
  15481. DC_QUIT;
  15482. }
  15483. SET_STAMP(pNodeEntry, NODELIST);
  15484. pNodeEntry->userID = pPersonObject->userID;
  15485. COM_BasedListInsertAfter(&(pLockReq->nodes),
  15486. &(pNodeEntry->chain));
  15487. //
  15488. // BUT! We only do this for R20 and later (i.e.
  15489. // anything over real MCS). For R11 calls, just put
  15490. // one entry on the list.
  15491. //
  15492. // ALSO! For ObManControl worksets, we only expect one
  15493. // lock reply (at TOP_PRIORITY) - this is to speed up
  15494. // processing of registration attempts. So, if this is
  15495. // for ObManControl, don't go around this loop again -
  15496. // just get out.
  15497. //
  15498. if (pLockReq->wsGroupID == WSGROUPID_OMC)
  15499. {
  15500. break;
  15501. }
  15502. }
  15503. }
  15504. }
  15505. }
  15506. pObj = (POM_OBJECT)COM_BasedListNext(&(pOMCWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  15507. }
  15508. DC_EXIT_POINT:
  15509. if (rc != 0)
  15510. {
  15511. ERROR_OUT(("ERROR %d trying to build node list", rc));
  15512. }
  15513. DebugExitDWORD(BuildNodeList, rc);
  15514. return(rc);
  15515. }
  15516. //
  15517. // WorksetLockResult(...)
  15518. //
  15519. void WorksetLockResult
  15520. (
  15521. PUT_CLIENT putTask,
  15522. POM_LOCK_REQ * ppLockReq,
  15523. UINT result
  15524. )
  15525. {
  15526. POM_LOCK_REQ pLockReq;
  15527. POM_WSGROUP pWSGroup;
  15528. POM_WORKSET pWorkset;
  15529. OM_EVENT_DATA16 eventData16;
  15530. OM_EVENT_DATA32 eventData32;
  15531. POM_NODE_LIST pNodeEntry;
  15532. DebugEntry(WorksetLockResult);
  15533. //
  15534. // First some sanity checks:
  15535. //
  15536. ASSERT((ppLockReq != NULL));
  15537. ASSERT((*ppLockReq != NULL));
  15538. pLockReq = *ppLockReq;
  15539. //
  15540. // Set up a local pointer to the workset:
  15541. //
  15542. pWSGroup = pLockReq->pWSGroup;
  15543. pWorkset = pWSGroup->apWorksets[pLockReq->worksetID];
  15544. ASSERT((pWorkset != NULL));
  15545. TRACE_OUT(("Lock %s: lock state: %hu - locked by: 0x%08x - lock count: %hu",
  15546. (result == 0) ? "succeded" : "failed",
  15547. pWorkset->lockState, pWorkset->lockedBy, pWorkset->lockCount));
  15548. //
  15549. // We merge the LOCKED and LOCK_GRANTED return codes at the API level:
  15550. //
  15551. if (result == OM_RC_WORKSET_LOCK_GRANTED)
  15552. {
  15553. result = OM_RC_WORKSET_LOCKED;
  15554. }
  15555. //
  15556. // Fill in fields of event parameter and post the result:
  15557. //
  15558. eventData16.hWSGroup = pLockReq->hWSGroup;
  15559. eventData16.worksetID = pLockReq->worksetID;
  15560. eventData32.correlator = pLockReq->correlator;
  15561. eventData32.result = (WORD)result;
  15562. UT_PostEvent(putTask,
  15563. pLockReq->putTask, // task that wants the lock
  15564. 0, // i.e. ObMan or Client
  15565. OM_WORKSET_LOCK_CON,
  15566. *((PUINT) &eventData16),
  15567. *((LPUINT) &eventData32));
  15568. //
  15569. // Remove any node entries left hanging off the lockReqCB:
  15570. //
  15571. pNodeEntry = (POM_NODE_LIST)COM_BasedListFirst(&(pLockReq->nodes), FIELD_OFFSET(OM_NODE_LIST, chain));
  15572. while (pNodeEntry != NULL)
  15573. {
  15574. COM_BasedListRemove(&pNodeEntry->chain);
  15575. UT_FreeRefCount((void**)&pNodeEntry, FALSE);
  15576. pNodeEntry = (POM_NODE_LIST)COM_BasedListFirst(&(pLockReq->nodes), FIELD_OFFSET(OM_NODE_LIST, chain));
  15577. }
  15578. //
  15579. // Remove the lock request itself from the list and free the memory:
  15580. //
  15581. COM_BasedListRemove(&pLockReq->chain);
  15582. UT_FreeRefCount((void**)&pLockReq, FALSE);
  15583. *ppLockReq = NULL;
  15584. DebugExitVOID(WorksetLockResult);
  15585. }
  15586. //
  15587. // WorksetUnlock(...)
  15588. //
  15589. void WorksetUnlock
  15590. (
  15591. PUT_CLIENT putTask,
  15592. POM_WSGROUP pWSGroup,
  15593. POM_WORKSET pWorkset
  15594. )
  15595. {
  15596. DebugEntry(WorksetUnlock);
  15597. TRACE_OUT(("Unlocking workset %u in WSG %d for TASK 0x%08x",
  15598. pWorkset->worksetID, pWSGroup->wsg, putTask));
  15599. TRACE_OUT((" lock state: %hu - locked by: 0x%08x - lock count: %hu",
  15600. pWorkset->lockState, pWorkset->lockedBy, pWorkset->lockCount));
  15601. //
  15602. // Check the workset lock state
  15603. //
  15604. if ((pWorkset->lockState != LOCKED) &&
  15605. (pWorkset->lockState != LOCKING))
  15606. {
  15607. ERROR_OUT(("Unlock error for workset %u in WSG %d - not locked",
  15608. pWorkset->worksetID, pWSGroup->wsg));
  15609. DC_QUIT;
  15610. }
  15611. //
  15612. // If this workset is "multiply locked" (i.e. locked more than one
  15613. // time by the same task), then all we want to do is decrement the lock
  15614. // count. Otherwise, we want to release the lock.
  15615. //
  15616. pWorkset->lockCount--;
  15617. if (pWorkset->lockCount == 0)
  15618. {
  15619. TRACE_OUT(("Lock count now 0 - really unlocking"));
  15620. WorksetUnlockLocal(putTask, pWorkset);
  15621. QueueUnlock(putTask, pWSGroup->pDomain,
  15622. pWSGroup->wsGroupID,
  15623. pWorkset->worksetID,
  15624. pWSGroup->channelID,
  15625. pWorkset->priority);
  15626. }
  15627. DC_EXIT_POINT:
  15628. DebugExitVOID(WorksetUnlock);
  15629. }
  15630. //
  15631. // WorksetUnlockLocal(...)
  15632. //
  15633. void WorksetUnlockLocal
  15634. (
  15635. PUT_CLIENT putTask,
  15636. POM_WORKSET pWorkset
  15637. )
  15638. {
  15639. DebugEntry(WorksetUnlockLocal);
  15640. //
  15641. // To unlock a workset, we
  15642. //
  15643. // - check that it's not already unlocked
  15644. //
  15645. // - check that the lock count is zero, so we can now unlock it
  15646. //
  15647. // - set the lock fields in the workset record
  15648. //
  15649. // - post an OM_WORKSET_UNLOCK_IND to all Clients with the workset
  15650. // open.
  15651. //
  15652. if (pWorkset->lockState == UNLOCKED)
  15653. {
  15654. WARNING_OUT(("Workset %hu is already UNLOCKED!", pWorkset->worksetID));
  15655. DC_QUIT;
  15656. }
  15657. ASSERT((pWorkset->lockCount == 0));
  15658. pWorkset->lockedBy = 0;
  15659. pWorkset->lockState = UNLOCKED;
  15660. WorksetEventPost(putTask,
  15661. pWorkset,
  15662. PRIMARY | SECONDARY,
  15663. OM_WORKSET_UNLOCK_IND,
  15664. 0);
  15665. DC_EXIT_POINT:
  15666. DebugExitVOID(WorksetUnlockLocal);
  15667. }
  15668. //
  15669. // QueueUnlock(...)
  15670. //
  15671. UINT QueueUnlock
  15672. (
  15673. PUT_CLIENT putTask,
  15674. POM_DOMAIN pDomain,
  15675. OM_WSGROUP_ID wsGroupID,
  15676. OM_WORKSET_ID worksetID,
  15677. NET_UID destination,
  15678. NET_PRIORITY priority
  15679. )
  15680. {
  15681. POMNET_LOCK_PKT pUnlockPkt;
  15682. UINT rc = 0;
  15683. DebugEntry(QueueUnlock);
  15684. //
  15685. // Allocate memory for the message, fill in the fields and queue it:
  15686. //
  15687. pUnlockPkt = (POMNET_LOCK_PKT)UT_MallocRefCount(sizeof(OMNET_LOCK_PKT), TRUE);
  15688. if (!pUnlockPkt)
  15689. {
  15690. rc = UT_RC_NO_MEM;
  15691. DC_QUIT;
  15692. }
  15693. pUnlockPkt->header.messageType = OMNET_UNLOCK;
  15694. pUnlockPkt->header.sender = pDomain->userID;
  15695. pUnlockPkt->wsGroupID = wsGroupID;
  15696. pUnlockPkt->worksetID = worksetID;
  15697. //
  15698. // Unlock messages go at the priority of the workset involved. If this
  15699. // is OBMAN_CHOOSES_PRIORITY, then all bets are off and we send them
  15700. // TOP_PRIORITY.
  15701. //
  15702. if (priority == OM_OBMAN_CHOOSES_PRIORITY)
  15703. {
  15704. priority = NET_TOP_PRIORITY;
  15705. }
  15706. rc = QueueMessage(putTask,
  15707. pDomain,
  15708. destination,
  15709. priority,
  15710. NULL,
  15711. NULL,
  15712. NULL, // no object
  15713. (POMNET_PKT_HEADER) pUnlockPkt,
  15714. NULL, // no object data
  15715. TRUE);
  15716. if (rc != 0)
  15717. {
  15718. DC_QUIT;
  15719. }
  15720. DC_EXIT_POINT:
  15721. if (rc != 0)
  15722. {
  15723. ERROR_OUT(("ERROR %d in FindInfoObject"));
  15724. if (pUnlockPkt != NULL)
  15725. {
  15726. UT_FreeRefCount((void**)&pUnlockPkt, FALSE);
  15727. }
  15728. }
  15729. DebugExitDWORD(QueueUnlock, rc);
  15730. return(rc);
  15731. }
  15732. //
  15733. //
  15734. // DEBUG ONLY FUNCTIONS
  15735. //
  15736. // These functions are debug code only - for normal compilations, they are
  15737. // #defined to nothing.
  15738. //
  15739. #ifdef _DEBUG
  15740. //
  15741. // CheckObjectCount(...)
  15742. //
  15743. void CheckObjectCount
  15744. (
  15745. POM_WSGROUP pWSGroup,
  15746. POM_WORKSET pWorkset
  15747. )
  15748. {
  15749. POM_OBJECT pObj;
  15750. UINT count;
  15751. DebugEntry(CheckObjectCount);
  15752. count = 0;
  15753. pObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  15754. while (pObj != NULL)
  15755. {
  15756. ValidateObject(pObj);
  15757. if (!(pObj->flags & DELETED))
  15758. {
  15759. count++;
  15760. }
  15761. pObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  15762. }
  15763. ASSERT((count == pWorkset->numObjects));
  15764. TRACE_OUT(("Counted %u items in workset %u in WSG %d, agrees with numObjects",
  15765. count, pWorkset->worksetID, pWSGroup->wsg));
  15766. DebugExitVOID(CheckObjectCount);
  15767. }
  15768. //
  15769. // CheckObjectOrder(...)
  15770. //
  15771. void CheckObjectOrder
  15772. (
  15773. POM_WORKSET pWorkset
  15774. )
  15775. {
  15776. POM_OBJECT pObjThis;
  15777. POM_OBJECT pObjNext;
  15778. BOOL orderIsGood = TRUE;
  15779. DebugEntry(CheckObjectOrder);
  15780. //
  15781. // This function checks that objects in the specified workset have been
  15782. // correctly positioned. The correct order of objects is one where
  15783. //
  15784. // - all FIRST objects are before all LAST objects
  15785. //
  15786. // - the position stamps of the FIRST objects decrease monotonically
  15787. // from the start of the workset onwards
  15788. //
  15789. // - the position stamps of the LAST objects decrease monotonically
  15790. // from the end of the workset backwards.
  15791. //
  15792. //
  15793. //
  15794. // This can be represented grahpically as follows:
  15795. //
  15796. // * *
  15797. // * * * *
  15798. // * * * * * *
  15799. // * * * * * * * *
  15800. // * * * * * * * * * *
  15801. // * * * * * * * * * * * *
  15802. //
  15803. // F F F F F F L L L L L L
  15804. //
  15805. // ...where taller columns indicate later sequence stamps and 'F' and
  15806. // 'L' indicate the FIRST or LAST objects.
  15807. //
  15808. //
  15809. //
  15810. // The way we test for correct order is to compare each adjacent pair of
  15811. // objects. If the overall order is correct, the for each pair of
  15812. // objects where A immediately precedes B, one of the following is true:
  15813. //
  15814. // - both are FIRST and B has a lower sequence stamp than A
  15815. //
  15816. // - A is FIRST and B is LAST
  15817. //
  15818. // - both are LAST and A has a lower sequence stamp than B.
  15819. //
  15820. pObjThis = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  15821. if (!pObjThis)
  15822. {
  15823. //
  15824. // Hitting the end of the workset at any stage means order is
  15825. // correct, so quit:
  15826. //
  15827. DC_QUIT;
  15828. }
  15829. pObjNext = pObjThis;
  15830. orderIsGood = TRUE;
  15831. while (orderIsGood)
  15832. {
  15833. pObjNext = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObjNext, FIELD_OFFSET(OM_OBJECT, chain));
  15834. if (!pObjNext)
  15835. {
  15836. DC_QUIT;
  15837. }
  15838. switch (pObjThis->position)
  15839. {
  15840. case FIRST: // condition 3 has failed
  15841. if (pObjNext->position == FIRST) // condition 2 has failed
  15842. {
  15843. if (!STAMP_IS_LOWER(pObjNext->positionStamp,
  15844. pObjThis->positionStamp))
  15845. {
  15846. ERROR_OUT(("Object order check failed (1)"));
  15847. orderIsGood = FALSE; // final condition (1) has failed
  15848. DC_QUIT;
  15849. }
  15850. }
  15851. break;
  15852. case LAST: // conditions 1 and 2 have failed
  15853. if ((pObjNext->position != LAST) ||
  15854. (!STAMP_IS_LOWER(pObjThis->positionStamp,
  15855. pObjNext->positionStamp)))
  15856. {
  15857. ERROR_OUT(("Object order check failed (2)"));
  15858. orderIsGood = FALSE; // final condition (3) has failed
  15859. DC_QUIT;
  15860. }
  15861. break;
  15862. default:
  15863. ERROR_OUT(("Reached default case in switch statement (value: %hu)",
  15864. pObjThis->position));
  15865. break;
  15866. }
  15867. pObjThis = pObjNext;
  15868. }
  15869. DC_EXIT_POINT:
  15870. if (!orderIsGood)
  15871. {
  15872. ERROR_OUT(("This object (handle: 0x%08x - ID: 0x%08x:0x%08x) "
  15873. "has position stamp 0x%08x:0x%08x (position %s)",
  15874. pObjThis,
  15875. pObjThis->objectID.creator, pObjThis->objectID.sequence,
  15876. pObjThis->positionStamp.userID,
  15877. pObjThis->positionStamp.genNumber,
  15878. (pObjThis->position == LAST) ? "LAST" : "FIRST"));
  15879. ERROR_OUT(("This object (handle: 0x%08x - ID: 0x%08x:0x%08x) "
  15880. "has position stamp 0x%08x:0x%08x (position %s)",
  15881. pObjNext,
  15882. pObjNext->objectID.creator, pObjNext->objectID.sequence,
  15883. pObjNext->positionStamp.userID,
  15884. pObjNext->positionStamp.genNumber,
  15885. (pObjNext->position == LAST) ? "LAST" : "FIRST"));
  15886. ERROR_OUT(("Object order check failed for workset %u. "
  15887. "See trace for more details",
  15888. pWorkset->worksetID));
  15889. }
  15890. TRACE_OUT(("Object order in workset %u is correct",
  15891. pWorkset->worksetID));
  15892. DebugExitVOID(CheckObjectOrder);
  15893. }
  15894. #endif // _DEBUG
  15895. //
  15896. // OMMapNameToFP()
  15897. //
  15898. OMFP OMMapNameToFP(LPCSTR szFunctionProfile)
  15899. {
  15900. int fp;
  15901. DebugEntry(OMMapNameToFP);
  15902. for (fp = OMFP_FIRST; fp < OMFP_MAX; fp++)
  15903. {
  15904. if (!lstrcmp(szFunctionProfile, c_aFpMap[fp].szName))
  15905. {
  15906. // Found it
  15907. break;
  15908. }
  15909. }
  15910. //
  15911. // Note that OMFP_MAX means "not found"
  15912. //
  15913. DebugExitDWORD(OMMapNameToFP, fp);
  15914. return((OMFP)fp);
  15915. }
  15916. //
  15917. // OMMapFPToName()
  15918. //
  15919. // This returns a data pointer of the FP name to the caller. The caller
  15920. // can only copy it or compare it; it may not write into or otherwise
  15921. // modify/hang on to the pointer.
  15922. //
  15923. LPCSTR OMMapFPToName(OMFP fp)
  15924. {
  15925. LPCSTR szFunctionProfile;
  15926. DebugEntry(OMMapFPToName);
  15927. ASSERT(fp >= OMFP_FIRST);
  15928. ASSERT(fp < OMFP_MAX);
  15929. szFunctionProfile = c_aFpMap[fp].szName;
  15930. DebugExitPVOID(OMMapFPToName, (PVOID)szFunctionProfile);
  15931. return(szFunctionProfile);
  15932. }
  15933. //
  15934. // OMMapNameToWSG()
  15935. //
  15936. OMWSG OMMapNameToWSG(LPCSTR szWSGName)
  15937. {
  15938. int wsg;
  15939. DebugEntry(OMMapNameToWSG);
  15940. for (wsg = OMWSG_FIRST; wsg < OMWSG_MAX; wsg++)
  15941. {
  15942. if (!lstrcmp(szWSGName, c_aWsgMap[wsg].szName))
  15943. {
  15944. // Found it
  15945. break;
  15946. }
  15947. }
  15948. //
  15949. // Note that OMWSG_MAX means "not found"
  15950. //
  15951. DebugExitDWORD(OMMapNameToWSG, wsg);
  15952. return((OMWSG)wsg);
  15953. }
  15954. //
  15955. // OMMapWSGToName()
  15956. //
  15957. LPCSTR OMMapWSGToName(OMWSG wsg)
  15958. {
  15959. LPCSTR szWSGName;
  15960. DebugEntry(OMMapWSGToName);
  15961. ASSERT(wsg >= OMWSG_FIRST);
  15962. ASSERT(wsg < OMWSG_MAX);
  15963. szWSGName = c_aWsgMap[wsg].szName;
  15964. DebugExitPVOID(OMMapWSGToName, (PVOID)szWSGName);
  15965. return(szWSGName);
  15966. }