Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

19018 lines
517 KiB

  1. #include "precomp.h"
  2. //
  3. // OM.CPP
  4. // Object Manager
  5. //
  6. // Copyright(c) Microsoft 1997-
  7. //
  8. #define MLZ_FILE_ZONE ZONE_OM
  9. //
  10. // Function profile ID <--> name mapping
  11. //
typedef struct tagOMFP_MAP
{
    char szName[16];    // fixed-size buffer holding the function profile name
}
OMFP_MAP;

//
// Function-profile ID -> name lookup table, indexed by the OMFP_
// enumeration (size OMFP_MAX), so the initializer order below must
// match the enum order.  Entries are the AL, OM and WB profile name
// strings (presumably App Loading, Object Manager and Whiteboard --
// TODO confirm against the header that defines *_FP_NAME).
//
const OMFP_MAP c_aFpMap[OMFP_MAX] =
{
    { AL_FP_NAME },
    { OM_FP_NAME },
    { WB_FP_NAME }
};
  23. //
  24. // Workset Group ID <--> name mapping
  25. //
typedef struct tagOMWSG_MAP
{
    char szName[16];    // fixed-size buffer holding the workset group name
}
OMWSG_MAP;

//
// Workset-group ID -> name lookup table, indexed by the OMWSG_
// enumeration (size OMWSG_MAX); initializer order must match the enum.
// Note the order here (OMC, AL, WB) differs from c_aFpMap above.
//
const OMWSG_MAP c_aWsgMap[OMWSG_MAX] =
{
    { OMC_WSG_NAME },
    { AL_WSG_NAME },
    { WB_WSG_NAME }
};
  37. //
  38. // OMP_Init()
  39. //
  40. BOOL OMP_Init(BOOL * pfCleanup)
  41. {
  42. BOOL fInit = FALSE;
  43. DebugEntry(OMP_Init);
  44. UT_Lock(UTLOCK_OM);
  45. //
  46. // Register the OM service
  47. //
  48. if (g_putOM || g_pomPrimary)
  49. {
  50. *pfCleanup = FALSE;
  51. ERROR_OUT(("Can't start OM primary task; already running"));
  52. DC_QUIT;
  53. }
  54. *pfCleanup = TRUE;
  55. if (!UT_InitTask(UTTASK_OM, &g_putOM))
  56. {
  57. ERROR_OUT(("Failed to start OM task"));
  58. DC_QUIT;
  59. }
  60. g_pomPrimary = (POM_PRIMARY)UT_MallocRefCount(sizeof(OM_PRIMARY), TRUE);
  61. if (!g_pomPrimary)
  62. {
  63. ERROR_OUT(("Failed to allocate OM memory block"));
  64. DC_QUIT;
  65. }
  66. SET_STAMP(g_pomPrimary, OPRIMARY);
  67. g_pomPrimary->putTask = g_putOM;
  68. g_pomPrimary->correlator = 1;
  69. COM_BasedListInit(&(g_pomPrimary->domains));
  70. UT_RegisterExit(g_putOM, OMPExitProc, g_pomPrimary);
  71. g_pomPrimary->exitProcReg = TRUE;
  72. UT_RegisterEvent(g_putOM, OMPEventsHandler, g_pomPrimary, UT_PRIORITY_NORMAL);
  73. g_pomPrimary->eventProcReg = TRUE;
  74. if (!MG_Register(MGTASK_OM, &(g_pomPrimary->pmgClient), g_putOM))
  75. {
  76. ERROR_OUT(("Couldn't register OM with the MG layer"));
  77. DC_QUIT;
  78. }
  79. if (!CMS_Register(g_putOM, CMTASK_OM, &(g_pomPrimary->pcmClient)))
  80. {
  81. ERROR_OUT(("Couldn't register OM as call secondary"));
  82. DC_QUIT;
  83. }
  84. //
  85. // Allocate our GDC buffer.
  86. //
  87. g_pomPrimary->pgdcWorkBuf = new BYTE[GDC_WORKBUF_SIZE];
  88. if (!g_pomPrimary->pgdcWorkBuf)
  89. {
  90. ERROR_OUT(("SendMessagePkt: can't allocate GDC work buf, not compressing"));
  91. DC_QUIT;
  92. }
  93. fInit = TRUE;
  94. DC_EXIT_POINT:
  95. UT_Unlock(UTLOCK_OM);
  96. DebugExitBOOL(OMP_Init, fInit);
  97. return(fInit);
  98. }
  99. //
  100. // OMP_Term()
  101. //
//
// OMP_Term()
//
// Tears down the OM primary task: deregisters from the Call Manager and
// MG layers, runs the exit procedure to free all OM state, then
// terminates the OM task.  Safe to call even if OMP_Init failed part
// way through (every step is guarded).
//
void OMP_Term(void)
{
    DebugEntry(OMP_Term);

    UT_Lock(UTLOCK_OM);

    if (g_pomPrimary)
    {
        ValidateOMP(g_pomPrimary);

        //
        // Deregister from Call Manager
        //
        if (g_pomPrimary->pcmClient)
        {
            CMS_Deregister(&g_pomPrimary->pcmClient);
        }

        //
        // Deregister from MG
        //
        if (g_pomPrimary->pmgClient)
        {
            MG_Deregister(&g_pomPrimary->pmgClient);
        }

        // OMPExitProc frees every domain/workset group/client record and
        // releases g_pomPrimary itself (via UT_FreeRefCount), so nothing
        // here may touch g_pomPrimary after this call.
        OMPExitProc(g_pomPrimary);
    }

    UT_TermTask(&g_putOM);

    UT_Unlock(UTLOCK_OM);

    DebugExitVOID(OMP_Term);
}
  129. //
  130. // OMPExitProc()
  131. //
//
// OMPExitProc()
//
// Exit procedure for the OM primary task.  Deregisters the exit and
// event procedures (if still registered), frees every domain -- along
// with each domain's workset groups and their client lists -- releases
// the GDC work buffer, and finally frees the global OM_PRIMARY block.
//
// uData is the OM_PRIMARY pointer passed at registration time; it must
// equal g_pomPrimary (asserted below).
//
void CALLBACK OMPExitProc(LPVOID uData)
{
    POM_PRIMARY pomPrimary = (POM_PRIMARY)uData;
    POM_DOMAIN pDomain;
    POM_WSGROUP pWSGroup;
    POM_CLIENT_LIST pClient;

    DebugEntry(OMPExitProc);

    UT_Lock(UTLOCK_OM);

    ValidateOMP(pomPrimary);
    ASSERT(pomPrimary == g_pomPrimary);

    // Deregister our callbacks exactly once; the flags were set by
    // OMP_Init when the registrations succeeded.
    if (pomPrimary->exitProcReg)
    {
        UT_DeregisterExit(pomPrimary->putTask, OMPExitProc, pomPrimary);
        pomPrimary->exitProcReg = FALSE;
    }

    if (pomPrimary->eventProcReg)
    {
        UT_DeregisterEvent(pomPrimary->putTask, OMPEventsHandler, pomPrimary);
        pomPrimary->eventProcReg = FALSE;
    }

    //
    // Free domains
    //
    // Re-fetch the first domain each iteration: freeing a domain removes
    // it from the list, so the loop terminates when the list is empty.
    //
    while (pDomain = (POM_DOMAIN)COM_BasedListFirst(&(pomPrimary->domains),
            FIELD_OFFSET(OM_DOMAIN, chain)))
    {
        TRACE_OUT(("OMPExitProc: Freeing domain 0x%08x call ID 0x%08x",
            pDomain, pDomain->callID));

        //
        // Free workset groups
        // NOTE:
        //      WSGDiscard() may destroy the domain, hence the weird
        //      loop: only ONE workset group is discarded per outer
        //      iteration, and the domain pointer is re-fetched before
        //      being touched again.
        //
        if (pWSGroup = (POM_WSGROUP)COM_BasedListFirst(&(pDomain->wsGroups),
                FIELD_OFFSET(OM_WSGROUP, chain)))
        {
            TRACE_OUT(("OMPExitProc: Freeing wsg 0x%08x domain 0x%08x",
                pWSGroup, pDomain));

            //
            // Free clients registered with this workset group first.
            //
            while (pClient = (POM_CLIENT_LIST)COM_BasedListFirst(&(pWSGroup->clients),
                    FIELD_OFFSET(OM_CLIENT_LIST, chain)))
            {
                TRACE_OUT(("OMPExitProc: Freeing client 0x%08x wsg 0x%08x",
                    pClient, pWSGroup));

                COM_BasedListRemove(&(pClient->chain));
                UT_FreeRefCount((void**)&pClient, FALSE);
            }

            WSGDiscard(pomPrimary, pDomain, pWSGroup, TRUE);
        }
        else
        {
            // No workset groups left: the domain record itself can go.
            FreeDomainRecord(&pDomain);
        }
    }

    if (pomPrimary->pgdcWorkBuf)
    {
        delete[] pomPrimary->pgdcWorkBuf;
        pomPrimary->pgdcWorkBuf = NULL;
    }

    // Frees the OM_PRIMARY block and NULLs the global pointer.
    UT_FreeRefCount((void**)&g_pomPrimary, TRUE);

    UT_Unlock(UTLOCK_OM);

    DebugExitVOID(OMPExitProc);
}
  198. //
  199. // OMPEventsHandler(...)
  200. //
//
// OMPEventsHandler(...)
//
// Central event dispatcher for the OM primary task.  Events are checked
// against three ranges in turn -- Call Manager (CM_*), ObMan internal /
// client (OM*) and network (NET_*) -- and dispatched by the matching
// switch; the goto chain falls through to the next range check when the
// event is outside the current range.
//
// Parameters:
//   uData  - the OM_PRIMARY pointer registered with UT_RegisterEvent
//   event  - the event code
//   param1 - event-specific (often packed 16-bit fields, see
//            POM_EVENT_DATA16 casts below)
//   param2 - event-specific (call ID, domain pointer, or event buffer
//            pointer depending on the event)
//
// Returns TRUE if the event fell in one of the three ranges we handle,
// FALSE otherwise.
//
BOOL CALLBACK OMPEventsHandler
(
    LPVOID uData,
    UINT event,
    UINT_PTR param1,
    UINT_PTR param2
)
{
    POM_PRIMARY pomPrimary = (POM_PRIMARY)uData;
    POM_DOMAIN pDomain = NULL;
    BOOL fProcessed = TRUE;

    DebugEntry(OMPEventsHandler);

    UT_Lock(UTLOCK_OM);

    ValidateOMP(pomPrimary);

    //
    // Check event is in the range we deal with:
    //
    if ((event < CM_BASE_EVENT) || (event > CM_LAST_EVENT))
    {
        goto CHECK_OM_EVENTS;
    }

    switch (event)
    {
        case CMS_NEW_CALL:
        {
            TRACE_OUT(( "CMS_NEW_CALL"));

            //
            // We ignore the return code - it will have been handled lower
            // down.  param2 is the new call's ID.
            //
            DomainRecordFindOrCreate(pomPrimary, (UINT)param2, &pDomain);
        }
        break;

        case CMS_END_CALL:
        {
            TRACE_OUT(( "CMS_END_CALL"));

            // Look the call ID (param2) up in our domain list.
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID), (DWORD)param2,
                FIELD_SIZE(OM_DOMAIN, callID));

            if (pDomain == NULL)
            {
                //
                // We don't have a record for this Domain so either we
                // never attached or we've already detached.  Do nothing.
                //
                TRACE_OUT(( "No record for Domain %u found", param2));
            }
            else
            {
                ProcessOwnDetach(pomPrimary, pDomain);
            }
        }
        break;

        case CMS_TOKEN_ASSIGN_CONFIRM:
        {
            TRACE_OUT(( "CMS_TOKEN_ASSIGN_CONFIRM"));

            //
            // There is a flaw in the CMS_ASSIGN_TOKEN_CONFIRM API in that
            // it does not tell us which domain it refers to.  So, we
            // operate under the assumption that this event relates to the
            // most recent domain we created i.e. the first one in the
            // list (they go in at the beginning).
            //
            pDomain = (POM_DOMAIN)COM_BasedListFirst(&(pomPrimary->domains),
                FIELD_OFFSET(OM_DOMAIN, chain));
            if (pDomain != NULL)
            {
                // param1 != 0 indicates success; LOWORD(param2) is the
                // assigned token ID.
                ProcessCMSTokenAssign(pomPrimary,
                                      pDomain,
                                      (param1 != 0),
                                      LOWORD(param2));
            }
            else
            {
                WARNING_OUT(( "No domain found for CMS_TOKEN_ASSIGN_CONFIRM"));
            }
        }
        break;
    }

    TRACE_OUT(( "Processed Call Manager event %#x", event));
    DC_QUIT;

CHECK_OM_EVENTS:

    //
    // Check event is in the range we deal with:
    //
    if ((event < OM_BASE_EVENT) || (event > OM_LAST_EVENT))
    {
        goto CHECK_NET_EVENTS;
    }

    switch (event)
    {
        case OMINT_EVENT_LOCK_TIMEOUT:
        {
            ProcessLockTimeout(pomPrimary, (UINT)param1, (UINT)param2);
        }
        break;

        case OMINT_EVENT_SEND_QUEUE:
        {
            //
            // Param2 is the domain record.
            //
            pDomain = (POM_DOMAIN)param2;

            ProcessSendQueue(pomPrimary, pDomain, TRUE);
        }
        break;

        case OMINT_EVENT_PROCESS_MESSAGE:
        {
            ProcessBouncedMessages(pomPrimary, (POM_DOMAIN) param2);
        }
        break;

        case OMINT_EVENT_WSGROUP_DISCARD:
        {
            ProcessWSGDiscard(pomPrimary, (POM_WSGROUP)param2);
        }
        break;

        case OMINT_EVENT_WSGROUP_MOVE:
        case OMINT_EVENT_WSGROUP_REGISTER:
        {
            // param2 is a registration control block shared by the move
            // and register paths.
            ProcessWSGRegister(pomPrimary, (POM_WSGROUP_REG_CB)param2);
        }
        break;

        case OMINT_EVENT_WSGROUP_REGISTER_CONT:
        {
            WSGRegisterStage1(pomPrimary, (POM_WSGROUP_REG_CB) param2);
        }
        break;

        //
        // The remaining events are ones we get by virtue of being
        // considered as a client of the ObManControl workset group
        //
        case OM_WORKSET_LOCK_CON:
        {
            // param1 packs the workset group handle and workset ID;
            // param2 packs the lock correlator and result.
            switch (((POM_EVENT_DATA16)&param1)->worksetID)
            {
                case OM_INFO_WORKSET:
                    ProcessOMCLockConfirm(pomPrimary,
                        ((POM_EVENT_DATA32) &param2)->correlator,
                        ((POM_EVENT_DATA32) &param2)->result);
                    break;

                case OM_CHECKPOINT_WORKSET:
                    ProcessCheckpoint(pomPrimary,
                        ((POM_EVENT_DATA32) &param2)->correlator,
                        ((POM_EVENT_DATA32) &param2)->result);
                    break;
            }
        }
        break;

        case OM_WORKSET_NEW_IND:
        {
            ProcessOMCWorksetNew(pomPrimary,
                ((POM_EVENT_DATA16) &param1)->hWSGroup,
                ((POM_EVENT_DATA16) &param1)->worksetID);
        }
        break;

        case OM_PERSON_JOINED_IND:
        case OM_PERSON_LEFT_IND:
        case OM_PERSON_DATA_CHANGED_IND:
        case OM_WSGROUP_MOVE_IND:
        case OM_WORKSET_UNLOCK_IND:
        {
            //
            // We ignore these events.
            //
        }
        break;

        case OM_OBJECT_ADD_IND:
        case OM_OBJECT_REPLACED_IND:
        case OM_OBJECT_UPDATED_IND:
        case OM_OBJECT_DELETED_IND:
        {
            ProcessOMCObjectEvents(pomPrimary,
                                   event,
                                   ((POM_EVENT_DATA16) &param1)->hWSGroup,
                                   ((POM_EVENT_DATA16) &param1)->worksetID,
                                   (POM_OBJECT) param2);
        }
        break;

        default:
        {
            ERROR_OUT(( "Unexpected ObMan event 0x%08x", event));
        }
    }

    TRACE_OUT(( "Processed ObMan event %x", event));
    DC_QUIT;

CHECK_NET_EVENTS:

    //
    // This function is only for network layer events so we quit if we've
    // got something else:
    //
    if ((event < NET_BASE_EVENT) || (event > NET_LAST_EVENT))
    {
        fProcessed = FALSE;
        DC_QUIT;
    }

    //
    // Now switch on the event type.  For most NET events, param2 is the
    // call ID used to find the owning domain; events carrying a buffer
    // (CHANNEL_JOIN, DATA_RECEIVED) pass the buffer in param2 and the
    // call ID inside it, and the buffer is freed here after dispatch.
    //
    switch (event)
    {
        case NET_EVENT_USER_ATTACH:
        {
            //
            // Find the domain data for this call
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                param2, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                // LOWORD(param1) is the user ID, HIWORD the result.
                ProcessNetAttachUser(pomPrimary, pDomain, LOWORD(param1),
                    HIWORD(param1));
            }
            break;
        }

        case NET_EVENT_USER_DETACH:
        {
            //
            // Find the domain data for this call
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                param2, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                ProcessNetDetachUser(pomPrimary, pDomain, LOWORD(param1));
            }
            break;
        }

        case NET_EVENT_CHANNEL_LEAVE:
        {
            //
            // Find the domain data for this call
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                param2, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                ProcessNetLeaveChannel(pomPrimary, pDomain, LOWORD(param1));
            }
            break;
        }

        case NET_EVENT_TOKEN_GRAB:
        {
            //
            // Find the domain data for this call
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                param2, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                ProcessNetTokenGrab(pomPrimary, pDomain, LOWORD(param1));
            }
            break;
        }

        case NET_EVENT_TOKEN_INHIBIT:
        {
            //
            // Find the domain data for this call
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                param2, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                ProcessNetTokenInhibit(pomPrimary, pDomain, LOWORD(param1));
            }
            break;
        }

        case NET_EVENT_CHANNEL_JOIN:
        {
            PNET_JOIN_CNF_EVENT pEvent = (PNET_JOIN_CNF_EVENT)param2;

            //
            // Find the domain data for this call
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                pEvent->callID, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                ProcessNetJoinChannel(pomPrimary, pDomain, pEvent);
            }

            // Event buffer is ours to free, whether or not it matched.
            MG_FreeBuffer(pomPrimary->pmgClient, (void **)&pEvent);
            break;
        }

        case NET_EVENT_DATA_RECEIVED:
        {
            PNET_SEND_IND_EVENT pEvent = (PNET_SEND_IND_EVENT)param2;

            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID),
                pEvent->callID, FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                ProcessNetData(pomPrimary, pDomain, pEvent);
            }

            // Event buffer is ours to free, whether or not it matched.
            MG_FreeBuffer(pomPrimary->pmgClient, (void**)&pEvent);
            break;
        }

        case NET_FEEDBACK:
        {
            //
            // A NET_FEEDBACK event includes the pmgUser which identifies
            // the send pool from which the buffer has been freed.  We use
            // it to find the Domain:
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
                (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
                FIELD_OFFSET(OM_DOMAIN, callID), (DWORD)param2,
                FIELD_SIZE(OM_DOMAIN, callID));
            if (pDomain)
            {
                //
                // Generating a FEEDBACK event doesn't cause the use count
                // of the Domain record to be bumped, so set the
                // <domainRecBumped> flag to FALSE on the call to
                // ProcessSendQueue:
                //
                ProcessSendQueue(pomPrimary, pDomain, FALSE);
            }
            break;
        }

        case NET_FLOW:
        {
            ERROR_OUT(("OMPEventsHandler received NET_FLOW; shouldn't have"));
            break;
        }
    }

DC_EXIT_POINT:
    UT_Unlock(UTLOCK_OM);

    DebugExitBOOL(OMPEventsHandler, fProcessed);
    return(fProcessed);
}
  542. //
  543. // DomainRecordFindOrCreate(...)
  544. //
  545. UINT DomainRecordFindOrCreate
  546. (
  547. POM_PRIMARY pomPrimary,
  548. UINT callID,
  549. POM_DOMAIN * ppDomain
  550. )
  551. {
  552. POM_DOMAIN pDomain;
  553. UINT rc = 0;
  554. DebugEntry(DomainRecordFindOrCreate);
  555. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
  556. (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
  557. FIELD_OFFSET(OM_DOMAIN, callID),
  558. (DWORD)callID, FIELD_SIZE(OM_DOMAIN, callID));
  559. if (pDomain == NULL)
  560. {
  561. //
  562. // We don't have a record for this Domain so create one:
  563. //
  564. rc = DomainAttach(pomPrimary, callID, &pDomain);
  565. if (rc != 0)
  566. {
  567. DC_QUIT;
  568. }
  569. }
  570. *ppDomain = pDomain;
  571. DC_EXIT_POINT:
  572. DebugExitDWORD(DomainRecordFindOrCreate, rc);
  573. return(rc);
  574. }
  575. //
  576. // DomainAttach(...)
  577. //
//
// DomainAttach(...)
//
// Begins the multi-stage attach to a domain (call).  Creates the domain
// record, then either fakes a successful token grab (for the local
// domain, OM_NO_CALL) or starts a real MG attach; the attach continues
// asynchronously when the NET_ATTACH event arrives.  On any failure the
// partially-built domain is torn down via ProcessOwnDetach.
//
// Returns 0 on success, otherwise an error code; *ppDomain is set only
// on success.
//
UINT DomainAttach
(
    POM_PRIMARY pomPrimary,
    UINT callID,
    POM_DOMAIN * ppDomain
)
{
    POM_DOMAIN pDomain = NULL;
    NET_FLOW_CONTROL netFlow;
    UINT rc = 0;

    DebugEntry(DomainAttach);

    TRACE_OUT(( "Attaching to Domain 0x%08x...", callID));

    // For a real call, refuse to attach unless the meeting permits the
    // old whiteboard OM at all.
    if (callID != OM_NO_CALL)
    {
        CM_STATUS status;
        CMS_GetStatus(&status);

        if (!(status.attendeePermissions & NM_PERMIT_USEOLDWBATALL))
        {
            WARNING_OUT(("Joining Meeting with no OLDWB OM at all"));
            rc = NET_RC_MGC_NOT_CONNECTED;
            DC_QUIT;
        }
    }

    //
    // This function does the following:
    //
    // - create a new Domain record
    //
    // - if the Domain is our local Domain (OM_NO_CALL) call
    //   ObManControlInit
    //
    // - else call MG_AttachUser to start attaching to the Domain.
    //
    rc = NewDomainRecord(pomPrimary,
                         callID,
                         &pDomain);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // What we do now depends on whether this is our "local" Domain (i.e.
    // callID == OM_NO_CALL):
    //
    if (callID == OM_NO_CALL)
    {
        TRACE_OUT(( "Is local domain - skipping forward"));

        //
        // This is our "local" Domain, so don't call MG_AttachUser.
        // Instead, we fake up a successful token grab event and rejoin the
        // domain attach processing there:
        //
        TRACE_OUT(( "Faking successful token grab for local domain"));
        pDomain->state = PENDING_TOKEN_GRAB;

        rc = ProcessNetTokenGrab(pomPrimary, pDomain, NET_RESULT_OK);
        if (rc != 0)
        {
            DC_QUIT;
        }
    }
    else
    {
        TRACE_OUT(( "Is real domain - attaching"));

        //
        // Set up our target latencies.  Don't bother restricting the max
        // stream sizes.  (Top priority gets no latency target.)
        //
        ZeroMemory(&netFlow, sizeof(netFlow));

        netFlow.latency[NET_TOP_PRIORITY] = 0;
        netFlow.latency[NET_HIGH_PRIORITY] = 2000L;
        netFlow.latency[NET_MEDIUM_PRIORITY] = 5000L;
        netFlow.latency[NET_LOW_PRIORITY] = 10000L;

        rc = MG_Attach(pomPrimary->pmgClient, callID, &netFlow);
        if (rc != 0)
        {
            DC_QUIT;
        }

        //
        // Set up the remaining fields of the Domain record:
        //
        pDomain->state = PENDING_ATTACH;

        //
        // The <userID> field is set when the NET_ATTACH event arrives.
        //

        //
        // The next stage in the Domain attach process is when the
        // NET_ATTACH event arrives.  This will cause the
        // ProcessNetAttachUser function to be called.
        //
    }

    //
    // Finally, set caller's pointer:
    //
    *ppDomain = pDomain;

DC_EXIT_POINT:

    if (rc != 0)
    {
        //
        // Do not trace an error if we get NOT_CONNECTED - it is a valid
        // race condition (but we still must do the cleanup below).
        //
        if (rc != NET_RC_MGC_NOT_CONNECTED)
        {
            // lonchanc: rc=0x706 can happen here, bug #942.
            // this was ERROR_OUT
            WARNING_OUT(( "Error %d attaching to Domain %u", rc, callID));
        }

        // Tear down whatever part of the domain got built.
        if (pDomain != NULL)
        {
            ProcessOwnDetach(pomPrimary, pDomain);
        }
    }

    DebugExitDWORD(DomainAttach, rc);
    return(rc);
}
  693. //
  694. // DomainDetach(...)
  695. //
  696. void DomainDetach
  697. (
  698. POM_PRIMARY pomPrimary,
  699. POM_DOMAIN * ppDomain,
  700. BOOL fExit
  701. )
  702. {
  703. POM_DOMAIN pDomain;
  704. DebugEntry(DomainDetach);
  705. ASSERT(ppDomain != NULL);
  706. pDomain = *ppDomain;
  707. //
  708. // This function does all the network cleanup required, then calls on
  709. // to discard the ObMan memory etc associated with the domain. Note
  710. // that we don't bother releasing tokens, leaving channels, etc since
  711. // the network layer will do this for us automatically.
  712. //
  713. if (!fExit &&
  714. (pDomain->callID != OM_NO_CALL) &&
  715. (pDomain->state >= PENDING_ATTACH))
  716. {
  717. MG_Detach(pomPrimary->pmgClient);
  718. }
  719. TRACE_OUT(( "Detached from Domain %u", pDomain->callID));
  720. FreeDomainRecord(ppDomain);
  721. DebugExitVOID(DomainDetach);
  722. }
  723. //
  724. // NewDomainRecord(...)
  725. //
  726. UINT NewDomainRecord
  727. (
  728. POM_PRIMARY pomPrimary,
  729. UINT callID,
  730. POM_DOMAIN* ppDomain
  731. )
  732. {
  733. POM_WSGROUP pOMCWSGroup = NULL;
  734. POM_DOMAIN pDomain;
  735. BOOL noCompression;
  736. BOOL inserted = FALSE;
  737. UINT rc = 0;
  738. DebugEntry(NewDomainRecord);
  739. //
  740. // Allocate Domain record:
  741. //
  742. pDomain = (POM_DOMAIN)UT_MallocRefCount(sizeof(OM_DOMAIN), TRUE);
  743. if (!pDomain)
  744. {
  745. rc = UT_RC_NO_MEM;
  746. DC_QUIT;
  747. }
  748. SET_STAMP(pDomain, DOMAIN);
  749. //
  750. // Fill in the fields:
  751. //
  752. pDomain->callID = callID;
  753. pDomain->valid = TRUE;
  754. //
  755. // Set up our maximum compression caps. They are subsequently
  756. // negotiated as follows:
  757. //
  758. // - if there are any other nodes out there, we will negotiate down
  759. // when we receive a WELCOME message from one of them
  760. //
  761. // - if any other nodes join subsequently, we will negotiate down when
  762. // we receive their HELLO message.
  763. //
  764. COM_ReadProfInt(DBG_INI_SECTION_NAME, OM_INI_NOCOMPRESSION, FALSE,
  765. &noCompression);
  766. if (noCompression)
  767. {
  768. WARNING_OUT(("NewDomainRecord: compression off"));
  769. pDomain->compressionCaps = OM_CAPS_NO_COMPRESSION;
  770. }
  771. else
  772. {
  773. pDomain->compressionCaps = OM_CAPS_PKW_COMPRESSION;
  774. }
  775. //
  776. // This will be ObMan's workset group handle for the ObManControl
  777. // workset group in this domain. Since we know that domain handles are
  778. // only ever -1 or 0, we just cast the domain handle down to 8 bits to
  779. // give the hWSGroup. If the way domain handles are allocated changes,
  780. // will need to do something cleverer here.
  781. //
  782. pDomain->omchWSGroup = (BYTE) callID;
  783. COM_BasedListInit(&(pDomain->wsGroups));
  784. COM_BasedListInit(&(pDomain->pendingRegs));
  785. COM_BasedListInit(&(pDomain->pendingLocks));
  786. COM_BasedListInit(&(pDomain->receiveList));
  787. COM_BasedListInit(&(pDomain->bounceList));
  788. COM_BasedListInit(&(pDomain->helperCBs));
  789. COM_BasedListInit(&(pDomain->sendQueue[ NET_TOP_PRIORITY ]));
  790. COM_BasedListInit(&(pDomain->sendQueue[ NET_HIGH_PRIORITY ]));
  791. COM_BasedListInit(&(pDomain->sendQueue[ NET_MEDIUM_PRIORITY ]));
  792. COM_BasedListInit(&(pDomain->sendQueue[ NET_LOW_PRIORITY ]));
  793. //
  794. // Insert the record for this new Domain in the list hung off the root
  795. // data structure:
  796. //
  797. TRACE_OUT((" Inserting record for Domain %u in global list", callID));
  798. COM_BasedListInsertAfter(&(pomPrimary->domains), &(pDomain->chain));
  799. inserted = TRUE;
  800. //
  801. // Here we create a record for the ObManControl workset group and cause
  802. // it to be inserted in the list hung off the Domain record:
  803. //
  804. // Note that this does not involve sending any data; it merely creates
  805. // the record locally.
  806. //
  807. rc = WSGRecordCreate(pomPrimary,
  808. pDomain,
  809. OMWSG_OM,
  810. OMFP_OM,
  811. &pOMCWSGroup);
  812. if (rc != 0)
  813. {
  814. DC_QUIT;
  815. }
  816. //
  817. // Create a single, empty workset (this function broadcasts the
  818. // creation throughout the Domain):
  819. //
  820. rc = WorksetCreate(pomPrimary->putTask,
  821. pOMCWSGroup,
  822. OM_INFO_WORKSET,
  823. FALSE,
  824. NET_TOP_PRIORITY);
  825. if (rc != 0)
  826. {
  827. DC_QUIT;
  828. }
  829. //
  830. // Fill in the fixed workset group ID (normally, we would call
  831. // WSGGetNewID to allocate an unused one).
  832. //
  833. pOMCWSGroup->wsGroupID = WSGROUPID_OMC;
  834. //
  835. // We fill in the channel ID when we get the result from JoinByKey
  836. //
  837. //
  838. // Add ObMan's putTask to the workset group's client list, so it will
  839. // get events posted to it.
  840. //
  841. rc = AddClientToWSGList(pomPrimary->putTask,
  842. pOMCWSGroup,
  843. pDomain->omchWSGroup,
  844. PRIMARY);
  845. if (rc != 0)
  846. {
  847. DC_QUIT;
  848. }
  849. *ppDomain = pDomain;
  850. DC_EXIT_POINT:
  851. if (rc != 0)
  852. {
  853. ERROR_OUT(( "Error %d creating record for domain %u", callID));
  854. if (pOMCWSGroup != NULL)
  855. {
  856. COM_BasedListRemove(&(pOMCWSGroup->chain));
  857. UT_FreeRefCount((void**)&pOMCWSGroup, FALSE);
  858. }
  859. if (inserted)
  860. {
  861. COM_BasedListRemove(&(pDomain->chain));
  862. }
  863. if (pDomain != NULL)
  864. {
  865. UT_FreeRefCount((void**)&pDomain, FALSE);
  866. }
  867. }
  868. DebugExitDWORD(NewDomainRecord, rc);
  869. return(rc);
  870. }
  871. //
  872. // FreeDomainRecord(...)
  873. //
  874. void FreeDomainRecord
  875. (
  876. POM_DOMAIN * ppDomain
  877. )
  878. {
  879. POM_DOMAIN pDomain;
  880. NET_PRIORITY priority;
  881. POM_SEND_INST pSendInst;
  882. DebugEntry(FreeDomainRecord);
  883. //
  884. // This function
  885. //
  886. // - frees any outstanding send requests (and their associated CBs)
  887. //
  888. // - invalidates, removes from the global list and frees the Domain
  889. // record.
  890. //
  891. pDomain = *ppDomain;
  892. //
  893. // Free all the send instructions queued in the domain:
  894. //
  895. for (priority = NET_TOP_PRIORITY;priority <= NET_LOW_PRIORITY;priority++)
  896. {
  897. for (; ; )
  898. {
  899. pSendInst = (POM_SEND_INST)COM_BasedListFirst(&(pDomain->sendQueue[priority]),
  900. FIELD_OFFSET(OM_SEND_INST, chain));
  901. if (pSendInst == NULL)
  902. {
  903. break;
  904. }
  905. TRACE_OUT(( "Freeing send instruction at priority %u", priority));
  906. FreeSendInst(pSendInst);
  907. }
  908. }
  909. pDomain->valid = FALSE;
  910. COM_BasedListRemove(&(pDomain->chain));
  911. UT_FreeRefCount((void**)ppDomain, FALSE);
  912. DebugExitVOID(FreeDomainRecord);
  913. }
  914. //
  915. // ProcessNetAttachUser(...)
  916. //
  917. void ProcessNetAttachUser
  918. (
  919. POM_PRIMARY pomPrimary,
  920. POM_DOMAIN pDomain,
  921. NET_UID userId,
  922. NET_RESULT result
  923. )
  924. {
  925. NET_CHANNEL_ID channelCorrelator;
  926. UINT rc = 0;
  927. DebugEntry(ProcessNetAttachUser);
  928. TRACE_OUT(( "Got NET_ATTACH for Domain %u (userID: %hu, result: %hu)",
  929. pDomain->callID, userId, result));
  930. //
  931. // Check that this Domain is in the pending attach state:
  932. //
  933. if (pDomain->state != PENDING_ATTACH)
  934. {
  935. WARNING_OUT(( "Unexpected NET_ATTACH - Domain %u is in state %hu)",
  936. pDomain->callID, pDomain->state));
  937. DC_QUIT;
  938. }
  939. //
  940. // If we failed to attach, set the retCode so we tidy up below:
  941. //
  942. if (result != NET_RESULT_OK)
  943. {
  944. ERROR_OUT(( "Failed to attach to Domain %u; cleaning up...",
  945. pDomain->callID));
  946. rc = result;
  947. DC_QUIT;
  948. }
  949. //
  950. // Otherwise, record our user ID for this Domain and then join our user
  951. // ID channel:
  952. //
  953. pDomain->userID = userId;
  954. TRACE_OUT(("Asking to join own channel %hu", pDomain->userID));
  955. rc = MG_ChannelJoin(pomPrimary->pmgClient,
  956. &channelCorrelator,
  957. pDomain->userID);
  958. if (rc != 0)
  959. {
  960. DC_QUIT;
  961. }
  962. //
  963. // Set the Domain <state>:
  964. //
  965. pDomain->state = PENDING_JOIN_OWN;
  966. //
  967. // The next step in the Domain attach process happens when the NET_JOIN
  968. // event arrives for the channel we've just joined. This event causes
  969. // the ProcessNetJoinChannel function to be called.
  970. //
  971. DC_EXIT_POINT:
  972. if (rc != 0)
  973. {
  974. WARNING_OUT(("Error %d joining own user channel %hu",
  975. rc, pDomain->userID));
  976. ProcessOwnDetach(pomPrimary, pDomain);
  977. }
  978. DebugExitVOID(ProcessNetAttachUser);
  979. }
  980. //
  981. // ProcessNetJoinChannel(...)
  982. //
  983. void ProcessNetJoinChannel
  984. (
  985. POM_PRIMARY pomPrimary,
  986. POM_DOMAIN pDomain,
  987. PNET_JOIN_CNF_EVENT pNetJoinCnf
  988. )
  989. {
  990. POM_WSGROUP pOMCWSGroup;
  991. NET_CHANNEL_ID channelCorrelator;
  992. POM_WSGROUP_REG_CB pRegistrationCB = NULL;
  993. BOOL success = TRUE;
  994. DebugEntry(ProcessNetJoinChannel);
  995. TRACE_OUT(( "JOIN_CON - channel %hu - result %hu",
  996. pNetJoinCnf->channel, pNetJoinCnf->result));
  997. switch (pDomain->state)
  998. {
  999. case PENDING_JOIN_OWN:
  1000. {
  1001. //
  1002. // This event is in response to us trying to join our own user
  1003. // channel, as part of the mutli-stage Domain attach process.
  1004. // The next step is to join the ObManControl channel.
  1005. //
  1006. //
  1007. // First check that the join was successful:
  1008. //
  1009. if (pNetJoinCnf->result != NET_RESULT_OK)
  1010. {
  1011. ERROR_OUT(("Failed to join own user ID channel (reason: %hu)",
  1012. pNetJoinCnf->result));
  1013. success = FALSE;
  1014. DC_QUIT;
  1015. }
  1016. //
  1017. // Verify that this is a join event for the correct channel
  1018. //
  1019. ASSERT(pNetJoinCnf->channel == pDomain->userID);
  1020. //
  1021. // The next step in the process of attaching to a Domain is to
  1022. // join the ObManControl channel; we set the state accordingly:
  1023. //
  1024. TRACE_OUT(( "Asking to join ObManControl channel using key"));
  1025. if (MG_ChannelJoinByKey(pomPrimary->pmgClient,
  1026. &channelCorrelator,
  1027. GCC_OBMAN_CHANNEL_KEY) != 0)
  1028. {
  1029. success = FALSE;
  1030. DC_QUIT;
  1031. }
  1032. pDomain->state = PENDING_JOIN_OMC;
  1033. //
  1034. // The next stage in the Domain attach process happens when the
  1035. // NET_JOIN event arrives for the ObManControl channel. This
  1036. // will cause this function to be executed again, but this time
  1037. // the next case statement will be executed.
  1038. //
  1039. }
  1040. break;
  1041. case PENDING_JOIN_OMC:
  1042. {
  1043. //
  1044. // This event is in response to us trying to join the
  1045. // ObManControl workset group channel, as part of the
  1046. // multi-stage Domain attach process.
  1047. //
  1048. //
  1049. // Check that the join was successful:
  1050. //
  1051. if (pNetJoinCnf->result != NET_RESULT_OK)
  1052. {
  1053. WARNING_OUT(( "Bad result %#hx joining ObManControl channel",
  1054. pNetJoinCnf->result));
  1055. success = FALSE;
  1056. DC_QUIT;
  1057. }
  1058. //
  1059. // If so, store the value returned in the domain record:
  1060. //
  1061. pDomain->omcChannel = pNetJoinCnf->channel;
  1062. pOMCWSGroup = GetOMCWsgroup(pDomain);
  1063. pOMCWSGroup->channelID = pDomain->omcChannel;
  1064. //
  1065. // We need a token to determine which ObMan is going to
  1066. // initialise the ObManControl workset group. Get GCC to
  1067. // assign us one (this returns a static value for R1.1 calls).
  1068. //
  1069. if (!CMS_AssignTokenId(pomPrimary->pcmClient, GCC_OBMAN_TOKEN_KEY))
  1070. {
  1071. success = FALSE;
  1072. DC_QUIT;
  1073. }
  1074. pDomain->state = PENDING_TOKEN_ASSIGN;
  1075. }
  1076. break;
  1077. case DOMAIN_READY:
  1078. {
  1079. //
  1080. // This should be a join event for a regular workset group
  1081. // channel. We check that we have indeed set up a workset
  1082. // group registration CB containing the channel correlator
  1083. // associated with this event:
  1084. //
  1085. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->pendingRegs),
  1086. (void**)&pRegistrationCB, FIELD_OFFSET(OM_WSGROUP_REG_CB, chain),
  1087. FIELD_OFFSET(OM_WSGROUP_REG_CB, channelCorrelator),
  1088. pNetJoinCnf->correlator,
  1089. FIELD_SIZE(OM_WSGROUP_REG_CB, channelCorrelator));
  1090. if (pRegistrationCB == NULL)
  1091. {
  1092. ERROR_OUT((
  1093. "Unexpected JOIN for channel %hu - no reg CB found",
  1094. pNetJoinCnf->channel));
  1095. DC_QUIT;
  1096. }
  1097. //
  1098. // Check that the join was successful:
  1099. //
  1100. if (pNetJoinCnf->result != NET_RESULT_OK)
  1101. {
  1102. //
  1103. // If not, trace then try again:
  1104. //
  1105. WARNING_OUT(("Failure 0x%08x joining channel %hu for WSG %d, trying again",
  1106. pNetJoinCnf->result,
  1107. pNetJoinCnf->channel,
  1108. pRegistrationCB->wsg));
  1109. pRegistrationCB->pWSGroup->state = INITIAL;
  1110. WSGRegisterRetry(pomPrimary, pRegistrationCB);
  1111. DC_QUIT;
  1112. }
  1113. //
  1114. // Otherwise, call WSGRegisterStage3 to continue the
  1115. // registration process:
  1116. //
  1117. WSGRegisterStage3(pomPrimary,
  1118. pDomain,
  1119. pRegistrationCB,
  1120. pNetJoinCnf->channel);
  1121. }
  1122. break;
  1123. case PENDING_ATTACH:
  1124. case PENDING_WELCOME:
  1125. case GETTING_OMC:
  1126. {
  1127. //
  1128. // Shouldn't get any join indications in these states.
  1129. //
  1130. ERROR_OUT(( "Unexpected JOIN in domain state %hu",
  1131. pDomain->state));
  1132. }
  1133. break;
  1134. default:
  1135. {
  1136. //
  1137. // This is also an error:
  1138. //
  1139. ERROR_OUT(( "Invalid state %hu for domain %u",
  1140. pDomain->state, pDomain->callID));
  1141. }
  1142. }
  1143. DC_EXIT_POINT:
  1144. if (!success)
  1145. {
  1146. //
  1147. // For any error here, we react as if we've been kicked out of the
  1148. // domain:
  1149. //
  1150. ProcessOwnDetach(pomPrimary, pDomain);
  1151. }
  1152. DebugExitVOID(ProcessNetJoinChannel);
  1153. }
//
// ProcessCMSTokenAssign(...)
//
// Handles the TOKEN_ASSIGN_CONFIRM event raised after we asked CMS to
// assign the ObMan token ID (requested during the PENDING_JOIN_OMC
// stage of the domain attach sequence).  On success we record the token
// ID and try to grab the token; the grab result arrives later and is
// handled in ProcessNetTokenGrab.
//
void ProcessCMSTokenAssign
(
    POM_PRIMARY pomPrimary,     // ObMan primary task context
    POM_DOMAIN pDomain,         // domain being attached to
    BOOL success,               // did CMS assign us a token ID?
    NET_TOKEN_ID tokenID        // the assigned token ID (valid if success)
)
{
    DebugEntry(ProcessCMSTokenAssign);

    TRACE_OUT(( "TOKEN_ASSIGN_CONFIRM: result %hu, token ID %#hx",
        success, tokenID));

    //
    // Ignore a confirm arriving in any state other than the one in which
    // we requested it (stale or duplicate event):
    //
    if (pDomain->state != PENDING_TOKEN_ASSIGN)
    {
        WARNING_OUT(("Got TOKEN_ASSIGN_CONFIRM in state %hu",
            pDomain->state));
        DC_QUIT;
    }

    if (!success)
    {
        //
        // Nothing to do - the domain attach process will time out.
        //
        ERROR_OUT(( "Failed to get token assigned"));
        DC_QUIT;
    }

    pDomain->tokenID = tokenID;

    //
    // Now that we know what the token ID is, try to grab it.  The node
    // which wins the grab becomes responsible for initialising the
    // ObManControl workset group (see ProcessNetTokenGrab):
    //
    if (MG_TokenGrab(pomPrimary->pmgClient,
        pDomain->tokenID) != 0)
    {
        ERROR_OUT(( "Failed to grab token"));
        DC_QUIT;
    }

    pDomain->state = PENDING_TOKEN_GRAB;

DC_EXIT_POINT:
    DebugExitVOID(ProcessCMSTokenAssign);
}
//
// ProcessNetTokenGrab(...)
//
// Handles the TOKEN_GRAB_CONFIRM event during the domain attach
// sequence.  Winning the grab makes us the "top ObMan": we initialise
// the ObManControl workset group and will welcome later joiners.
// Losing it means another node got there first, so we broadcast a HELLO
// to get a copy of ObManControl from an existing member.
//
// Returns 0 on success, or an OM error code.
//
UINT ProcessNetTokenGrab
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    NET_RESULT result       // NET_RESULT_OK => we grabbed the token
)
{
    //
    // NOTE(review): pOMCWSGroup is never assigned anywhere in this
    // function, so the cleanup block in the error path at DC_EXIT_POINT
    // below can never execute - it looks like a leftover from an
    // earlier version.  Confirm before "fixing"; as the code stands, an
    // error here simply leaves the domain attach to time out.
    //
    POM_WSGROUP pOMCWSGroup = NULL;
    UINT rc = 0;

    DebugEntry(ProcessNetTokenGrab);

    TRACE_OUT(( "Got token grab confirm - result = %hu", result));

    //
    // A grab confirm is only expected while we are waiting for one:
    //
    if (pDomain->state != PENDING_TOKEN_GRAB)
    {
        ERROR_OUT(( "Got TOKEN_GRAB_CONFIRM in state %hu",
            pDomain->state));
        rc = OM_RC_NETWORK_ERROR;
        DC_QUIT;
    }

    //
    // What to do here depends on whether we've succeeded in grabbing the
    // token:
    //
    if (result == NET_RESULT_OK)
    {
        //
        // We're the "top ObMan" in the Domain, so it's up to us to
        // initialise the ObManControl workset group and welcome any others
        // into the Domain (the Welcome message is broadcast on the
        // ObManControl channel):
        //
        rc = ObManControlInit(pomPrimary, pDomain);
        if (rc != 0)
        {
            DC_QUIT;
        }

        //
        // If we get here, then the Domain attach process has finished.
        // Phew!  Any workset group registration attempts in progress will
        // be processed shortly, next time the bouncing
        // OMINT_EVENT_WSG_REGISTER_CONT event is processed
        //
    }
    else
    {
        //
        // Someone else is in charge, so we need to get a copy of
        // ObManControl from them (or anyone else who's prepared to give it
        // to us).  So, we need to discover the user ID of one of them so
        // we can send our request there (if we just broadcasted our
        // request, then each node would reply, flooding the Domain)
        //
        rc = SayHello(pomPrimary, pDomain);
        if (rc != 0)
        {
            DC_QUIT;
        }

        //
        // The next step in the Domain attach process happens when one of
        // the other nodes out there replies to our HELLO with a WELCOME
        // message.  Execution continues in the ProcessWelcome function.
        //
    }

DC_EXIT_POINT:
    if (rc != 0)
    {
        if (pOMCWSGroup != NULL)
        {
            //
            // This will remove the ObManControl workset group from the
            // Domain and subsequently call DomainDetach to detach from the
            // Domain and free the Domain record:
            //
            DeregisterLocalClient(pomPrimary, &pDomain, pOMCWSGroup, FALSE);

            UT_FreeRefCount((void**)&pOMCWSGroup, FALSE);

            ASSERT((pDomain == NULL));
        }
    }

    DebugExitDWORD(ProcessNetTokenGrab, rc);
    return(rc);
}
  1283. //
  1284. //
  1285. //
  1286. // ProcessNetTokenInhibit(...)
  1287. //
  1288. //
  1289. //
  1290. UINT ProcessNetTokenInhibit(POM_PRIMARY pomPrimary,
  1291. POM_DOMAIN pDomain,
  1292. NET_RESULT result)
  1293. {
  1294. UINT rc = 0;
  1295. DebugEntry(ProcessNetTokenInhibit);
  1296. TRACE_OUT(( "Got token inhibit confirm - result = %hu", result));
  1297. if (result == NET_RESULT_OK)
  1298. {
  1299. //
  1300. // Now send a Welcome message on the ObManControl channel. It is
  1301. // crucial that this happens at the same time as we set the Domain
  1302. // state to READY, because if another node is joining the call at
  1303. // the same time it will send a Hello message:
  1304. //
  1305. // - if the message has already arrived, we will have thrown it
  1306. // away
  1307. // because the Domain state was not READY, so we must send it now
  1308. //
  1309. // - if it has yet to arrive, then setting the Domain state to
  1310. // READY
  1311. // now means we'll respond with another Welcome when it does
  1312. // arrive.
  1313. //
  1314. pDomain->state = DOMAIN_READY;
  1315. rc = SayWelcome(pomPrimary, pDomain, pDomain->omcChannel);
  1316. if (rc != 0)
  1317. {
  1318. DC_QUIT;
  1319. }
  1320. //
  1321. // OK, the domain attach process has finished. We need to take no
  1322. // further action other than setting the state. Any pending
  1323. // workset group registrations will continue back at the
  1324. // WSGRegisterStage1 function, where hopefully the bounced
  1325. // OMINT_EVENT_WSGROUP_REGISTER event is just about to arrive...
  1326. //
  1327. }
  1328. else
  1329. {
  1330. //
  1331. // Again, no action. We cannot join the domain, but the workset
  1332. // group registrations will time out in due course.
  1333. //
  1334. WARNING_OUT(( "Token inhibit failed!"));
  1335. }
  1336. DC_EXIT_POINT:
  1337. DebugExitDWORD(ProcessNetTokenInhibit, rc);
  1338. return(rc);
  1339. }
//
// ObManControlInit(...)
//
// Called when we win the ObMan token grab: initialises the ObManControl
// workset group for the domain (its identification object plus our own
// registration object), then inhibits the token so no other node can
// repeat the initialisation.  For the local domain the inhibit confirm
// is faked synchronously; otherwise it arrives later and is handled in
// ProcessNetTokenInhibit.
//
// Returns 0 on success, or an OM error code.
//
UINT ObManControlInit(POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain)
{
    POM_WSGROUP pOMCWSGroup;
    UINT rc = 0;

    DebugEntry(ObManControlInit);

    //
    // First, set up a pointer to the ObManControl workset group, which
    // should already have been put in the Domain record:
    //
    pOMCWSGroup = GetOMCWsgroup(pDomain);

    //
    // Initialising the ObManControl workset group involves
    //
    // - adding a WSGROUP_INFO object to it, which identifies ObManControl
    // itself.
    //
    TRACE_OUT(( "Initialising ObManControl in Domain %u",
        pDomain->callID));

    //
    // Now we must add a workset group identification object, identifying
    // ObManControl, to workset #0 in ObManControl.
    //
    // Slightly circular, but we try to treat ObManControl as a regular
    // workset group as much as possible; if we didn't add this
    // identification object then when a Client (e.g. AppLoader) tries to
    // register with ObManControl, we would look in workset #0 for a
    // reference to it, not find one and then create it again!
    //
    rc = CreateAnnounce(pomPrimary, pDomain, pOMCWSGroup);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // In addition, we add our registration object to ObManControl workset
    // #0 and update it immediately to status READY_TO_SEND:
    //
    rc = RegAnnounceBegin(pomPrimary,
        pDomain,
        pOMCWSGroup,
        pDomain->userID,
        &(pOMCWSGroup->pObjReg));
    if (rc != 0)
    {
        DC_QUIT;
    }

    rc = RegAnnounceComplete(pomPrimary, pDomain, pOMCWSGroup);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // OK, we've initialised ObManControl for this call - inhibit the token
    // so that no one else can do the same (if this is the local domain,
    // just fake up an inhibit confirm):
    //
    if (pDomain->callID == OM_NO_CALL)
    {
        TRACE_OUT(( "Faking successful token inhibit for local domain"));
        rc = ProcessNetTokenInhibit(pomPrimary, pDomain, NET_RESULT_OK);
        if (rc != 0)
        {
            DC_QUIT;
        }
    }
    else
    {
        rc = MG_TokenInhibit(pomPrimary->pmgClient,
            pDomain->tokenID);
        if (rc != 0)
        {
            DC_QUIT;
        }

        // The attach sequence completes when the inhibit confirm arrives:
        pDomain->state = PENDING_TOKEN_INHIBIT;
    }

DC_EXIT_POINT:
    if (rc != 0)
    {
        WARNING_OUT(("Error %d initialising ObManControl WSG for Domain %u",
            rc, pDomain->callID));
    }

    DebugExitDWORD(ObManControlInit, rc);
    return(rc);
}
  1432. //
  1433. //
  1434. //
  1435. // SayHello(...)
  1436. //
  1437. //
  1438. //
  1439. UINT SayHello(POM_PRIMARY pomPrimary,
  1440. POM_DOMAIN pDomain)
  1441. {
  1442. POMNET_JOINER_PKT pHelloPkt;
  1443. UINT rc = 0;
  1444. DebugEntry(SayHello);
  1445. //
  1446. // Generate and queue an OMNET_HELLO message:
  1447. //
  1448. TRACE_OUT(( "Saying hello in Domain %u", pDomain->callID));
  1449. pHelloPkt = (POMNET_JOINER_PKT)UT_MallocRefCount(sizeof(OMNET_JOINER_PKT), TRUE);
  1450. if (!pHelloPkt)
  1451. {
  1452. rc = UT_RC_NO_MEM;
  1453. DC_QUIT;
  1454. }
  1455. pHelloPkt->header.sender = pDomain->userID;
  1456. pHelloPkt->header.messageType = OMNET_HELLO;
  1457. //
  1458. // All fields in the joiner packet after <capsLen> are capabilities. To
  1459. // calculate the size of these capabilities, we use the offset and size
  1460. // of the caps len field itself to determine the amount of data after
  1461. // it.
  1462. //
  1463. pHelloPkt->capsLen = sizeof(OMNET_JOINER_PKT) -
  1464. (offsetof(OMNET_JOINER_PKT, capsLen) + sizeof(pHelloPkt->capsLen));
  1465. TRACE_OUT(( "Our caps len is 0x%08x", pHelloPkt->capsLen));
  1466. //
  1467. // Take our compression caps from the domain record:
  1468. //
  1469. pHelloPkt->compressionCaps = pDomain->compressionCaps;
  1470. TRACE_OUT(( "Broadcasting compression caps 0x%08x in HELLO",
  1471. pHelloPkt->compressionCaps));
  1472. rc = QueueMessage(pomPrimary->putTask,
  1473. pDomain,
  1474. pDomain->omcChannel,
  1475. NET_TOP_PRIORITY,
  1476. NULL, // no wsgroup
  1477. NULL, // no workset
  1478. NULL, // no object
  1479. (POMNET_PKT_HEADER) pHelloPkt,
  1480. NULL, // no associated object data
  1481. FALSE);
  1482. if (rc != 0)
  1483. {
  1484. DC_QUIT;
  1485. }
  1486. //
  1487. // When the associated response (OMNET_WELCOME) is received from another
  1488. // node, we will ask that node for a copy of the ObManControl workset
  1489. // group. In the meantime, there's nothing else to do.
  1490. //
  1491. pDomain->state = PENDING_WELCOME;
  1492. DC_EXIT_POINT:
  1493. if (rc != 0)
  1494. {
  1495. ERROR_OUT(( "Error %d saying hello in Domain %u", rc, pDomain->callID));
  1496. }
  1497. DebugExitDWORD(SayHello, rc);
  1498. return(rc);
  1499. }
  1500. //
  1501. //
  1502. //
  1503. // ProcessHello(...)
  1504. //
  1505. //
  1506. //
  1507. UINT ProcessHello(POM_PRIMARY pomPrimary,
  1508. POM_DOMAIN pDomain,
  1509. POMNET_JOINER_PKT pHelloPkt,
  1510. UINT lengthOfPkt)
  1511. {
  1512. NET_CHANNEL_ID lateJoiner;
  1513. UINT rc = 0;
  1514. DebugEntry(ProcessHello);
  1515. lateJoiner = pHelloPkt->header.sender;
  1516. //
  1517. // A late joiner has said hello. If we are not fully attached yet, we
  1518. // trace and quit:
  1519. //
  1520. if (pDomain->state != DOMAIN_READY)
  1521. {
  1522. WARNING_OUT(( "Can't process HELLO on channel %#hx - domain state %hu",
  1523. lateJoiner, pDomain->state));
  1524. DC_QUIT;
  1525. }
  1526. //
  1527. // Merge in the late joiner's capabilities with our view of the
  1528. // domain-wide caps.
  1529. //
  1530. MergeCaps(pDomain, pHelloPkt, lengthOfPkt);
  1531. //
  1532. // Now send a welcome message to the late joiner.
  1533. //
  1534. rc = SayWelcome(pomPrimary, pDomain, lateJoiner);
  1535. if (rc != 0)
  1536. {
  1537. DC_QUIT;
  1538. }
  1539. DC_EXIT_POINT:
  1540. if (rc != 0)
  1541. {
  1542. ERROR_OUT(( "Error %d processing hello from node %#hx in Domain %u",
  1543. rc, lateJoiner, pDomain->callID));
  1544. }
  1545. DebugExitDWORD(ProcessHello, rc);
  1546. return(rc);
  1547. } // ProcessHello
  1548. //
  1549. //
  1550. //
  1551. // MergeCaps(...)
  1552. //
  1553. //
  1554. //
  1555. void MergeCaps(POM_DOMAIN pDomain,
  1556. POMNET_JOINER_PKT pJoinerPkt,
  1557. UINT lengthOfPkt)
  1558. {
  1559. NET_CHANNEL_ID sender;
  1560. UINT compressionCaps;
  1561. DebugEntry(MergeCaps);
  1562. sender = pJoinerPkt->header.sender;
  1563. compressionCaps = 0;
  1564. //
  1565. // We have received a HELLO or WELCOME packet from another node.
  1566. //
  1567. // - For a HELLO packet, these caps will be the caps of a late joiner.
  1568. //
  1569. // - For a WELCOME packet, these caps will be the domain-wide caps as
  1570. // viewed by our helper node.
  1571. //
  1572. // Either way, we need to merge in the capabilities from the packet into
  1573. // our view of the domain-wide capabilities.
  1574. //
  1575. // Note that in some backlevel calls, the joiner packet will not contain
  1576. // capabilities - so check the length of the packet first
  1577. //
  1578. if (lengthOfPkt >= (offsetof(OMNET_JOINER_PKT, capsLen) +
  1579. sizeof(pJoinerPkt->capsLen)))
  1580. {
  1581. //
  1582. // OK, this packet contains a capsLen field. See if it contains
  1583. // compression capabilities (these immediately follow the capsLen
  1584. // field and are four bytes long).
  1585. //
  1586. TRACE_OUT(( "Caps len from node 0x%08x is 0x%08x",
  1587. sender, pJoinerPkt->capsLen));
  1588. if (pJoinerPkt->capsLen >= 4)
  1589. {
  1590. //
  1591. // Packet contains compression caps - record them:
  1592. //
  1593. compressionCaps = pJoinerPkt->compressionCaps;
  1594. TRACE_OUT(( "Compression caps in joiner packet from 0x%08x: 0x%08x",
  1595. sender, compressionCaps));
  1596. }
  1597. else
  1598. {
  1599. //
  1600. // If not specified, assume NO compression is supported. This
  1601. // should never happen in practice, because if someone supports
  1602. // any capabilities at all, they should support compression
  1603. // capabilities.
  1604. //
  1605. compressionCaps = OM_CAPS_NO_COMPRESSION;
  1606. ERROR_OUT(( "Party 0x%08x supports caps but not compression caps",
  1607. sender));
  1608. }
  1609. }
  1610. else
  1611. {
  1612. //
  1613. // If no capabilities specified at all, assume PKW compression plus
  1614. // no compression (since that is how LSP20 behaves).
  1615. //
  1616. compressionCaps = (OM_CAPS_PKW_COMPRESSION | OM_CAPS_NO_COMPRESSION);
  1617. TRACE_OUT(( "No caps in joiner pkt - assume PKW + NO compress (0x%08x)",
  1618. compressionCaps));
  1619. }
  1620. //
  1621. // OK, we've determined the capabilities from the packet. Now merge
  1622. // them into our view of the domain-wide caps:
  1623. //
  1624. pDomain->compressionCaps &= compressionCaps;
  1625. TRACE_OUT(( "Domain-wide compression caps now 0x%08x",
  1626. pDomain->compressionCaps));
  1627. DebugExitVOID(MergeCaps);
  1628. } // MergeCaps
  1629. //
  1630. //
  1631. //
  1632. // SayWelcome(...)
  1633. //
  1634. //
  1635. //
  1636. UINT SayWelcome(POM_PRIMARY pomPrimary,
  1637. POM_DOMAIN pDomain,
  1638. NET_CHANNEL_ID channel)
  1639. {
  1640. POMNET_JOINER_PKT pWelcomePkt;
  1641. UINT rc = 0;
  1642. DebugEntry(SayWelcome);
  1643. //
  1644. // The <channel> passed in is one of the following:
  1645. //
  1646. // - the channel of a late-joiner which just sent us a HELLO message, or
  1647. //
  1648. // - the broadcast ObManControl channel, in the case where this is a
  1649. // Welcome we're sending at start of day.
  1650. //
  1651. TRACE_OUT(( "Sending welcome on channel %hu ", channel));
  1652. pWelcomePkt = (POMNET_JOINER_PKT)UT_MallocRefCount(sizeof(OMNET_JOINER_PKT), TRUE);
  1653. if (!pWelcomePkt)
  1654. {
  1655. rc = UT_RC_NO_MEM;
  1656. DC_QUIT;
  1657. }
  1658. pWelcomePkt->header.sender = pDomain->userID; // own user ID
  1659. pWelcomePkt->header.messageType = OMNET_WELCOME;
  1660. //
  1661. // All fields in the joiner packet after <capsLen> are capabilities. To
  1662. // calculate the size of these capabilities, we use the offset and size
  1663. // of the <capsLen> field itself to determine the amount of data after
  1664. // it.
  1665. //
  1666. pWelcomePkt->capsLen = sizeof(OMNET_JOINER_PKT) -
  1667. (offsetof(OMNET_JOINER_PKT, capsLen) + sizeof(pWelcomePkt->capsLen));
  1668. //
  1669. // The value we use for the compressionCaps is our current view of the
  1670. // domain-wide compression capabilities.
  1671. //
  1672. pWelcomePkt->compressionCaps = pDomain->compressionCaps;
  1673. TRACE_OUT(( "Sending caps 0x%08x in WELCOME on channel 0x%08x",
  1674. pWelcomePkt->compressionCaps, channel));
  1675. rc = QueueMessage(pomPrimary->putTask,
  1676. pDomain,
  1677. channel,
  1678. NET_TOP_PRIORITY,
  1679. NULL, // no wsgroup
  1680. NULL, // no workset
  1681. NULL, // no object
  1682. (POMNET_PKT_HEADER) pWelcomePkt,
  1683. NULL, // no object data
  1684. FALSE);
  1685. if (rc != 0)
  1686. {
  1687. DC_QUIT;
  1688. }
  1689. //
  1690. // When this WELCOME message is received at the other end, the
  1691. // ProcessWelcome function is invoked.
  1692. //
  1693. DC_EXIT_POINT:
  1694. if (rc != 0)
  1695. {
  1696. ERROR_OUT(( "Error %d sending welcome on channel 0x%08x in Domain %u",
  1697. rc, channel, pDomain->callID));
  1698. }
  1699. DebugExitDWORD(SayWelcome, rc);
  1700. return(rc);
  1701. } // SayWelcome
//
// ProcessWelcome(...)
//
// Handles an OMNET_WELCOME reply to the HELLO we broadcast while
// attaching to a domain.  Only the first WELCOME matters: it gives us
// the domain-wide caps and identifies a node we can ask for a copy of
// the ObManControl workset group.  Subsequent WELCOMEs (every node in
// the domain replies to a HELLO) are ignored via the state check.
//
// Returns 0 on success, or an OM error code.
//
UINT ProcessWelcome(POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    POMNET_JOINER_PKT pWelcomePkt,
    UINT lengthOfPkt)
{
    POM_WSGROUP pOMCWSGroup;
    UINT rc = 0;

    DebugEntry(ProcessWelcome);

    //
    // This function is called when a remote instance of ObMan has replied
    // to an OMNET_HELLO message which we sent.
    //
    // We sent the HELLO message as part of the procedure to get a copy of
    // the ObManControl workset group; now we know someone who has it, we
    // send them an OMNET_WSGROUP_SEND_REQ on their single-user channel,
    // enclosing our own single-user channel ID for the response.
    //
    // However, every node in the Domain will respond to our initial HELLO,
    // but we only need to ask the first respondent for the workset group.
    // So, we check the Domain state and then change it so we will ignore
    // future WELCOMES for this Domain:
    //
    // (No mutex required for this test-and-set since only ever executed in
    // ObMan task).
    //
    if (pDomain->state == PENDING_WELCOME)
    {
        //
        // OK, this is the first WELCOME we've got since we broadcast the
        // HELLO.  So, we reply to it with a SEND_REQUEST for ObManControl.
        //
        TRACE_OUT((
            "Got first WELCOME message in Domain %u, from node 0x%08x",
            pDomain->callID, pWelcomePkt->header.sender));

        //
        // Merge in the capabilities which our helper node has told us
        // about:
        //
        MergeCaps(pDomain, pWelcomePkt, lengthOfPkt);

        pOMCWSGroup = GetOMCWsgroup(pDomain);

        //
        // ...and call the IssueSendReq function specifying the sender of
        // the WELCOME message as the node to get the workset group from:
        //
        rc = IssueSendReq(pomPrimary,
            pDomain,
            pOMCWSGroup,
            pWelcomePkt->header.sender);
        if (rc != 0)
        {
            ERROR_OUT(( "Error %d requesting OMC from 0x%08x in Domain %u",
                rc, pWelcomePkt->header.sender, pDomain->callID));
            DC_QUIT;
        }

        pDomain->state = GETTING_OMC;

        //
        // Next, the remote node which welcomed us will send us the
        // contents of the ObManControl workset group.  When it has
        // finished, it will send an OMNET_WSGROUP_SEND_COMPLETE message,
        // which is where we take up the next step of the multi-stage
        // Domain attach process.
        //
    }
    else
    {
        //
        // OK, we're in some other state i.e. not waiting for a WELCOME
        // message - so just ignore it.
        //
        TRACE_OUT(( "Ignoring WELCOME from 0x%08x - in state %hu",
            pWelcomePkt->header.sender, pDomain->state));
    }

    TRACE_OUT(( "Processed WELCOME message from node 0x%08x in Domain %u",
        pWelcomePkt->header.sender, pDomain->callID));

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(( "Error %d processing WELCOME message from "
            "node 0x%08x in Domain %u",
            rc, pWelcomePkt->header.sender, pDomain->callID));
    }

    DebugExitDWORD(ProcessWelcome, rc);
    return(rc);
}
  1793. //
  1794. // ProcessNetDetachUser()
  1795. //
  1796. void ProcessNetDetachUser
  1797. (
  1798. POM_PRIMARY pomPrimary,
  1799. POM_DOMAIN pDomain,
  1800. NET_UID detachedUserID
  1801. )
  1802. {
  1803. DebugEntry(ProcessNetDetachUser);
  1804. //
  1805. // There are two cases here:
  1806. //
  1807. // 1. this is a detach indication for ourselves i.e. we have been
  1808. // booted off the network by MCS for some reason
  1809. //
  1810. // 2. this is a detach indication for someone else i.e. another user
  1811. // has left (or been booted off) the MCS Domain.
  1812. //
  1813. // We differentiate the two cases by checking the ID of the detached
  1814. // user against our own.
  1815. //
  1816. if (detachedUserID == pDomain->userID)
  1817. {
  1818. //
  1819. // It's for us, so call the ProcessOwnDetach function:
  1820. //
  1821. ProcessOwnDetach(pomPrimary, pDomain);
  1822. }
  1823. else
  1824. {
  1825. //
  1826. // It's someone else, so we call the ProcessOtherDetach function:
  1827. //
  1828. ProcessOtherDetach(pomPrimary, pDomain, detachedUserID);
  1829. }
  1830. DebugExitVOID(ProcessNetDetachUser);
  1831. }
  1832. //
  1833. // ProcessOtherDetach(...)
  1834. //
  1835. UINT ProcessOtherDetach
  1836. (
  1837. POM_PRIMARY pomPrimary,
  1838. POM_DOMAIN pDomain,
  1839. NET_UID detachedUserID
  1840. )
  1841. {
  1842. POM_WSGROUP pOMCWSGroup;
  1843. POM_WORKSET pOMCWorkset;
  1844. OM_WORKSET_ID worksetID;
  1845. UINT rc = 0;
  1846. DebugEntry(ProcessOtherDetach);
  1847. TRACE_OUT(( "DETACH_IND for user 0x%08x in domain %u",
  1848. detachedUserID, pDomain->callID));
  1849. //
  1850. // Someone else has left the Domain. What this means is that we must
  1851. //
  1852. // - release any locks they may have acquired for worksets/objects in
  1853. // this Domain
  1854. //
  1855. // - remove any registration objects they might have added to worksets
  1856. // in ObManControl
  1857. //
  1858. // - remove any objects they have added to non-persistent worksets
  1859. //
  1860. // - if we are catching up from them then select another node to catch
  1861. // up from or stop catch up if no one else is left.
  1862. //
  1863. //
  1864. // The processing is as follows:
  1865. //
  1866. // FOR each registration workset in ObManControl which is in use
  1867. //
  1868. // FOR each object in the workset
  1869. //
  1870. // IF it relates to the node which has just/has just been
  1871. // detached, then that node was registered with the
  1872. // workset group, so
  1873. //
  1874. // - delete the object and post a DELETE_IND to
  1875. // any local Clients which have the workset open
  1876. // - search this workset group for any locks held by this
  1877. // node and release them.
  1878. //
  1879. //
  1880. // OK, to work: first we derive a pointer to the ObManControl workset
  1881. // group:
  1882. //
  1883. pOMCWSGroup = GetOMCWsgroup(pDomain);
  1884. //
  1885. // Now begin the outer FOR loop:
  1886. //
  1887. for (worksetID = 0;
  1888. worksetID < OM_MAX_WORKSETS_PER_WSGROUP;
  1889. worksetID++)
  1890. {
  1891. //
  1892. // Get a pointer to the workset:
  1893. //
  1894. pOMCWorkset = pOMCWSGroup->apWorksets[worksetID];
  1895. if (pOMCWorkset == NULL)
  1896. {
  1897. //
  1898. // There is no workset with this ID so we skip to the next one:
  1899. //
  1900. continue;
  1901. }
  1902. ValidateWorkset(pOMCWorkset);
  1903. //
  1904. // OK, worksetID corresponds to the ID of an actual workset group
  1905. // in the domain. These functions will do any clearup on behalf of
  1906. // the detached node.
  1907. //
  1908. RemovePersonObject(pomPrimary,
  1909. pDomain,
  1910. (OM_WSGROUP_ID) worksetID,
  1911. detachedUserID);
  1912. ReleaseAllNetLocks(pomPrimary,
  1913. pDomain,
  1914. (OM_WSGROUP_ID) worksetID,
  1915. detachedUserID);
  1916. PurgeNonPersistent(pomPrimary,
  1917. pDomain,
  1918. (OM_WSGROUP_ID) worksetID,
  1919. detachedUserID);
  1920. //
  1921. // Finished this workset so go on to the next.
  1922. //
  1923. }
  1924. //
  1925. // Well, that's it:
  1926. //
  1927. TRACE_OUT(( "Cleaned up after node 0x%08x detached from Domain %u",
  1928. detachedUserID, pDomain->callID));
  1929. DebugExitDWORD(ProcessOtherDetach, rc);
  1930. return(rc);
  1931. }
//
// ProcessOwnDetach(..)
//
// Called when WE have been detached from a domain (booted by MCS, the
// call ended, etc.).  Cleans up all trace of the remote nodes, then
// moves this domain's pending lock requests and workset groups into the
// "local" domain record (OM_NO_CALL) so that local clients keep valid
// handles; outstanding registrations are failed with a network error.
//
// Returns 0 on success, or an OM error code.
//
UINT ProcessOwnDetach
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain
)
{
    POM_DOMAIN pLocalDomainRec;
    POM_WSGROUP pWSGroup;
    POM_LOCK_REQ pLockReq;
    POM_LOCK_REQ pTempLockReq;
    POM_WSGROUP pTempWSGroup;
    POM_WSGROUP_REG_CB pRegistrationCB;
    POM_WSGROUP_REG_CB pTempRegCB;
    UINT callID;
    UINT rc = 0;

    DebugEntry(ProcessOwnDetach);

    //
    // First of all, remove all traces of everybody else (because the call
    // may have ended already, we may not get explicit DETACH_INDICATIONs
    // for them):
    //
    ProcessOtherDetach(pomPrimary, pDomain, NET_ALL_REMOTES);

    //
    // We proceed as follows:
    //
    // - get a pointer to the record for the "local" Domain (or create it
    // if it doesn't exist)
    //
    // - move all the pending lock requests, registrations and workset
    // groups in this Domain into the local Domain.
    //
    callID = pDomain->callID;

    //
    // A detach for the local domain itself cannot be "moved" anywhere -
    // just free the record and bail out (avoids recursing below):
    //
    if (callID == OM_NO_CALL)
    {
        WARNING_OUT(( "Detach for local domain - avoiding recursive cleanup"));
        FreeDomainRecord(&pDomain);
        DC_QUIT;
    }

    TRACE_OUT(( "Processing own detach/end call etc. for Domain %u",
        callID));

    rc = DomainRecordFindOrCreate(pomPrimary, OM_NO_CALL, &pLocalDomainRec);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Move the pending lock requests (need the pTemp... variables since we
    // need to chain from the old position):
    //
    pLockReq = (POM_LOCK_REQ)COM_BasedListFirst(&(pDomain->pendingLocks), FIELD_OFFSET(OM_LOCK_REQ, chain));
    while (pLockReq != NULL)
    {
        TRACE_OUT((" Moving lock for workset %hu in WSG ID %hu",
            pLockReq->worksetID, pLockReq->wsGroupID));

        pTempLockReq = (POM_LOCK_REQ)COM_BasedListNext(&(pDomain->pendingLocks), pLockReq,
            FIELD_OFFSET(OM_LOCK_REQ, chain));

        COM_BasedListRemove(&(pLockReq->chain));

        COM_BasedListInsertBefore(&(pLocalDomainRec->pendingLocks),
            &(pLockReq->chain));

        pLockReq = pTempLockReq;
    }

    //
    // Now cancel any outstanding registrations:
    //
    pRegistrationCB = (POM_WSGROUP_REG_CB)COM_BasedListFirst(&(pDomain->pendingRegs),
        FIELD_OFFSET(OM_WSGROUP_REG_CB, chain));
    while (pRegistrationCB != NULL)
    {
        TRACE_OUT(("Aborting registration for WSG %d", pRegistrationCB->wsg));

        pTempRegCB = (POM_WSGROUP_REG_CB)COM_BasedListNext(&(pDomain->pendingRegs),
            pRegistrationCB, FIELD_OFFSET(OM_WSGROUP_REG_CB, chain));

        WSGRegisterResult(pomPrimary, pRegistrationCB, OM_RC_NETWORK_ERROR);

        pRegistrationCB = pTempRegCB;
    }

    //
    // Move the workset groups.
    //
    // Note that we will move the ObManControl workset group for the Domain
    // we've detached from into the local Domain as well; it does not
    // replace the OMC workset group for the local Domain, but we can't just
    // throw it away since the Application Loader Primary and Secondaries
    // still have valid workset group handles for it.  They will eventually
    // deregister from it and it will be thrown away.
    //
    // Since WSGMove relies on the fact that there is an OMC workset group
    // in the Domain out of which workset groups are being moved, we must
    // move the OMC workset group last.
    //
    // So, start at the end and work backwards:
    //
    pWSGroup = (POM_WSGROUP)COM_BasedListLast(&(pDomain->wsGroups), FIELD_OFFSET(OM_WSGROUP, chain));
    while (pWSGroup != NULL)
    {
        //
        // Move each one into the local Domain.  We need pTempWSGroup
        // since we have to do the chaining before calling WSGroupMove.
        // That function removes the workset group from the list.
        //
        pTempWSGroup = (POM_WSGROUP)COM_BasedListPrev(&(pDomain->wsGroups), pWSGroup,
            FIELD_OFFSET(OM_WSGROUP, chain));

        WSGMove(pomPrimary, pLocalDomainRec, pWSGroup);

        pWSGroup = pTempWSGroup;
    }

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(( "Error %d processing NET_DETACH for self in Domain %u",
            rc, callID));
    }

    DebugExitDWORD(ProcessOwnDetach, rc);
    return(rc);
}
  2047. //
  2048. //
  2049. //
  2050. // ProcessNetLeaveChannel(...)
  2051. //
  2052. //
  2053. //
  2054. UINT ProcessNetLeaveChannel
  2055. (
  2056. POM_PRIMARY pomPrimary,
  2057. POM_DOMAIN pDomain,
  2058. NET_CHANNEL_ID channel
  2059. )
  2060. {
  2061. POM_DOMAIN pLocalDomainRec;
  2062. POM_WSGROUP pWSGroup;
  2063. UINT callID;
  2064. UINT rc = 0;
  2065. DebugEntry(ProcessNetLeaveChannel);
  2066. callID = pDomain->callID;
  2067. //
  2068. // We've been forced out of the channel by MCS. We don't try to rejoin
  2069. // as this usually indicates a serious error. Instead, we treat this
  2070. // as a move of the associated workset group into the local Domain
  2071. // (unless it's our own user ID channel or the ObManControl channel, in
  2072. // which case we can't really do anything useful in this Domain, so we
  2073. // detach completely).
  2074. //
  2075. if ((channel == pDomain->userID) ||
  2076. (channel == pDomain->omcChannel))
  2077. {
  2078. //
  2079. // This is our own user ID channel, so we behave as if we were
  2080. // booted out by MCS:
  2081. //
  2082. rc = ProcessOwnDetach(pomPrimary, pDomain);
  2083. if (rc != 0)
  2084. {
  2085. DC_QUIT;
  2086. }
  2087. }
  2088. else
  2089. {
  2090. //
  2091. // Not our own single-user channel or the ObManControl channel, so
  2092. // we don't need to take such drastic action. Instead, we process
  2093. // it as if it's a regular move of a workset group into the "local"
  2094. // Domain (i.e. NET_INVALID_DOMAIN_ID).
  2095. //
  2096. // SFR ? { Purge our list of outstanding receives for channel
  2097. PurgeReceiveCBs(pDomain, channel);
  2098. //
  2099. // So, find the workset group which is involved...
  2100. //
  2101. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
  2102. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  2103. FIELD_OFFSET(OM_WSGROUP, channelID), (DWORD)channel,
  2104. FIELD_SIZE(OM_WSGROUP, channelID));
  2105. if (pWSGroup == NULL)
  2106. {
  2107. ERROR_OUT((
  2108. "Got NET_LEAVE for channel %hu but no workset group!",
  2109. channel));
  2110. DC_QUIT;
  2111. }
  2112. //
  2113. // ...and move it into the local Domain:
  2114. //
  2115. rc = DomainRecordFindOrCreate(pomPrimary,
  2116. OM_NO_CALL,
  2117. &pLocalDomainRec);
  2118. if (rc != 0)
  2119. {
  2120. DC_QUIT;
  2121. }
  2122. WSGMove(pomPrimary, pLocalDomainRec, pWSGroup);
  2123. }
  2124. TRACE_OUT(( "Processed NET_LEAVE for channel %hu in Domain %u",
  2125. channel, callID));
  2126. DC_EXIT_POINT:
  2127. if (rc != 0)
  2128. {
  2129. ERROR_OUT(( "Error %d processing NET_LEAVE for %hu in Domain %u",
  2130. rc, channel, callID));
  2131. }
  2132. DebugExitDWORD(ProcessNetLeaveChannel, rc);
  2133. return(rc);
  2134. }
  2135. //
  2136. //
  2137. // LOCKING - OVERVIEW
  2138. //
  2139. // Workset locking operates on a request/reply protocol, which means that
  2140. // when we want a lock, we ask everyone else on the channel if we can have
  2141. // it. If they all say yes, we get it; otherwise we don't.
  2142. //
  2143. // This is non-trivial. Some nodes might disappear before they send us
// their reply, while some might disappear after they've sent their reply.
  2145. // Others might just be far away and take a long time to reply. In
  2146. // addition, new nodes can join the channel at any time.
  2147. //
  2148. // To cope with all this, to lock a workset we build up a list of the
  2149. // remote nodes in the call which are using the workset group (the
  2150. // "expected respondents" list) and if the list is non-empty, we broadcast
  2151. // an OMNET_LOCK_REQ message on the channel for the workset group which
  2152. // contains the workset
  2153. //
  2154. // As each reply comes in, we check it off against the list of expected
  2155. // respondents. If we weren't expecting a reply from that node we ignore
  2156. // it. Otherwise, if the reply is a GRANT, we remove that node from the
  2157. // list and continue waiting for the others. If the reply is a DENY, we
  2158. // give up, discard all the memory allocated for the lock request and its
  2159. // associated CBs and post a failure event to the client.
  2160. //
  2161. // If the list of expected respondents becomes empty because everyone has
  2162. // replied with a GRANT, we again free up any memory used and post an event
  2163. // to the client.
  2164. //
  2165. // While all this is going on, we have a timer running in the background.
  2166. // It ticks every second for ten seconds (both configurable via .INI file)
  2167. // and when it does, we re-examine our list of expected respondents to see
  2168. // if any of them have deregistered from the workset group (or detached
  2169. // from the domain, which implies the former). If they have, we fake up a
  2170. // GRANT message from them, thus potentially triggering the success event
  2171. // to our local client.
  2172. //
  2173. // If anyone ever requests a lock while we have the lock, we DENY them the
  2174. // lock. If anyone ever requests a lock while we are also requesting the
  2175. // lock, we compare their MCS user IDs. If the other node has a higher
  2176. // numerical value, we abort our attempt in favour of them and send back a
  2177. // GRANT; otherwise we DENY the lock.
  2178. //
  2179. // If ever a node detaches when it has a lock, we trap this in
  2180. // ReleaseAllNetLocks, which compares the ID of the lock owner against the
  2181. // ID of the detached node and unlocks the workset if they match. For this
  2182. // reason, it is vital that we always know exactly who has the lock. We
  2183. // achieve this by, whenever we grant the lock to someone, we record their
  2184. // user ID.
  2185. //
  2186. // So, if we ever abort the locking of a workset in favour of someone else,
  2187. // we must broadcast this info to everyone else (since they must be told
  2188. // who really has the lock, and they will think that we have the lock if we
  2189. // don't tell them otherwise). We use a LOCK_NOTIFY message for this.
  2190. //
  2191. //
  2192. //
  2193. // ProcessLockRequest(...)
  2194. //
//
// Handles an incoming OMNET_LOCK_REQ from <pLockReqPkt->header.sender>.
// Decides, based on this node's view of the workset's lock state,
// whether to reply with OMNET_LOCK_GRANT or OMNET_LOCK_DENY, and always
// queues exactly one reply (via QueueLockReply at the exit point).
//
void ProcessLockRequest
(
    POM_PRIMARY         pomPrimary,
    POM_DOMAIN          pDomain,
    POMNET_LOCK_PKT     pLockReqPkt
)
{
    POM_WSGROUP         pWSGroup;
    POM_WORKSET         pWorkset;
    NET_UID             sender;
    OM_WORKSET_ID       worksetID;
    OMNET_MESSAGE_TYPE  reply = OMNET_LOCK_DENY;   // default: deny
    UINT                rc = 0;

    DebugEntry(ProcessLockRequest);

    sender = pLockReqPkt->header.sender;
    worksetID = pLockReqPkt->worksetID;

    //
    // Find the workset group and workset this lock request relates to:
    //
    rc = PreProcessMessage(pDomain,
                           pLockReqPkt->wsGroupID,
                           worksetID,
                           NULL,
                           pLockReqPkt->header.messageType,
                           &pWSGroup,
                           &pWorkset,
                           NULL);
    switch (rc)
    {
        case 0:
        {
            //
            // Fine, this is what we want - both found.
            //
        }
        break;

        case OM_RC_WSGROUP_NOT_FOUND:
        {
            //
            // We shouldn't be getting network events for this workset
            // group if we don't have a workset group record for it!
            //
            WARNING_OUT(( "Got LOCK_REQUEST for unknown workset group %hu",
                pLockReqPkt->wsGroupID));

            //
            // Grant the lock anyway - we have no stake in a workset
            // group we know nothing about:
            //
            reply = OMNET_LOCK_GRANT;
            DC_QUIT;
        }
        break;

        case OM_RC_WORKSET_NOT_FOUND:
        {
            //
            // If we don't have this workset, the lock request has got
            // here before the WORKSET_NEW event for the workset, i.e.
            // we're in the early stages of registering with the workset
            // group and somebody else is trying to lock the workset.
            // So, we create the workset now and continue as normal.
            //
            // In the absence of any other information, we create the
            // workset with TOP_PRIORITY and PERSISTENT - it will be set
            // to the correct priority when the WORKSET_CATCHUP/NEW
            // arrives.
            //
            WARNING_OUT(( "Lock req for unknown WSG %d workset %d - creating",
                pWSGroup->wsg, worksetID));
            rc = WorksetCreate(pomPrimary->putTask,
                               pWSGroup,
                               worksetID,
                               FALSE,
                               NET_TOP_PRIORITY);
            if (rc != 0)
            {
                // Couldn't create it - safest answer is a deny.
                reply = OMNET_LOCK_DENY;
                DC_QUIT;
            }
            pWorkset = pWSGroup->apWorksets[worksetID];
        }
        break;

        default:
        {
            ERROR_OUT(( "Error %d from PreProcessMessage", rc));
            reply = OMNET_LOCK_DENY;
            DC_QUIT;
        }
    }

    //
    // Whether we grant this lock to the remote node depends on whether
    // we're trying to lock it for ourselves, so switch according to the
    // workset's lock state:
    //
    ValidateWorkset(pWorkset);
    switch (pWorkset->lockState)
    {
        case LOCKING:
        {
            //
            // We're trying to lock it ourselves, so compare MCS user
            // IDs to resolve the conflict (higher numerical ID wins -
            // see the LOCKING overview above):
            //
            if (pDomain->userID > sender)
            {
                //
                // We win, so deny the lock:
                //
                reply = OMNET_LOCK_DENY;
            }
            else
            {
                //
                // The other node wins, so grant the lock to the node
                // which requested it (marking it as granted to that
                // node) and cancel our own attempt to get it:
                //
                WARNING_OUT(( "Aborting attempt to lock workset %u in WSG %d "
                    "in favour of node 0x%08x",
                    pWorkset->worksetID, pWSGroup->wsg, sender));
                reply = OMNET_LOCK_GRANT;

                //
                // To cancel our own attempt, we must find the lock
                // request CBs set up when we sent our own
                // OMNET_LOCK_REQ. HandleMultLockReq finds and completes
                // all the pending requests for this workset (here with
                // OM_RC_WORKSET_LOCK_GRANTED, i.e. "granted to another
                // node"):
                //
                pWorkset->lockState = LOCK_GRANTED;
                pWorkset->lockCount = 0;
                pWorkset->lockedBy = sender;
                HandleMultLockReq(pomPrimary,
                                  pDomain,
                                  pWSGroup,
                                  pWorkset,
                                  OM_RC_WORKSET_LOCK_GRANTED);

                //
                // Since we are aborting in favour of another node, we
                // need to broadcast a LOCK_NOTIFY so that everyone else
                // stays in sync with who's got the lock (other nodes
                // would otherwise think WE have it).
                //
                // Note: not part of the ObMan R1.1 protocol, so not
                // done in R1.1 calls.
                //
                QueueLockNotify(pomPrimary,
                                pDomain,
                                pWSGroup,
                                pWorkset,
                                sender);
            }
        }
        break;

        case LOCKED:
        {
            //
            // We already have the workset locked so we deny the lock:
            //
            reply = OMNET_LOCK_DENY;
        }
        break;

        case LOCK_GRANTED:
        {
            //
            // If the state is LOCK_GRANTED, we allow this node to have
            // the lock - the other node to which it was previously
            // granted may refuse, but that's not our problem. We don't
            // change the <lockedBy> field - if the node we think has
            // the lock grants it to the other one, we will receive a
            // LOCK_NOTIFY in due course.
            //
            reply = OMNET_LOCK_GRANT;
        }
        break;

        case UNLOCKED:
        {
            //
            // If the state is UNLOCKED, the other node can have the
            // lock; we don't care, but make sure to record the ID of
            // the node we're granting the lock to:
            //
            reply = OMNET_LOCK_GRANT;

            //
            // SFR5900: Only change the internal state if this is not a
            // check point workset (checkpoint worksets never hold
            // internal lock state).
            //
            if (pWorkset->worksetID != OM_CHECKPOINT_WORKSET)
            {
                pWorkset->lockState = LOCK_GRANTED;
                pWorkset->lockCount = 0;
                pWorkset->lockedBy = sender;
            }
        }
        break;

        default:
        {
            //
            // We should have covered all the options so if we get here
            // there's something wrong.
            //
            ERROR_OUT(("Reached default case in workset lock switch (state: %hu)",
                pWorkset->lockState));
        }
    }

DC_EXIT_POINT:
    //
    // Always send exactly one reply, whatever path got us here:
    //
    QueueLockReply(pomPrimary, pDomain, reply, sender, pLockReqPkt);
    DebugExitVOID(ProcessLockRequest);
}
  2401. //
  2402. // QueueLockReply(...)
  2403. //
  2404. void QueueLockReply
  2405. (
  2406. POM_PRIMARY pomPrimary,
  2407. POM_DOMAIN pDomain,
  2408. OMNET_MESSAGE_TYPE message,
  2409. NET_CHANNEL_ID channel,
  2410. POMNET_LOCK_PKT pLockReqPkt
  2411. )
  2412. {
  2413. POMNET_LOCK_PKT pLockReplyPkt;
  2414. NET_PRIORITY priority;
  2415. DebugEntry(QueueLockReply);
  2416. //
  2417. // The reply is identical to the request with the exception of the
  2418. // <messageType> and <sender> fields. However, we can't just queue the
  2419. // same chunk of memory to be sent, because pLockReqPkt points to a NET
  2420. // buffer which will be freed soon. So, we allocate some new memory,
  2421. // copy the data across and set the fields:
  2422. //
  2423. pLockReplyPkt = (POMNET_LOCK_PKT)UT_MallocRefCount(sizeof(OMNET_LOCK_PKT), TRUE);
  2424. if (!pLockReplyPkt)
  2425. {
  2426. ERROR_OUT(("Out of memory for QueueLockReply"));
  2427. DC_QUIT;
  2428. }
  2429. pLockReplyPkt->header.sender = pDomain->userID;
  2430. pLockReplyPkt->header.messageType = message;
  2431. pLockReplyPkt->wsGroupID = pLockReqPkt->wsGroupID;
  2432. pLockReplyPkt->worksetID = pLockReqPkt->worksetID;
  2433. //
  2434. // The <data1> field of the lock packet is the correlator the requester
  2435. // put in the original LOCK_REQUEST packet.
  2436. //
  2437. pLockReplyPkt->data1 = pLockReqPkt->data1;
  2438. //
  2439. // Lock replies normally go LOW_PRIORITY (with NET_SEND_ALL_PRIORITIES)
  2440. // so that they do not overtake any data queued at this node.
  2441. //
  2442. // However, if they're for ObManControl we send them TOP_PRIORITY
  2443. // (WITHOUT NET_SEND_ALL_PRIORITIES). This is safe because _all_
  2444. // ObManControl data is sent TOP_PRIORITY so there's no fear of a lock
  2445. // reply overtaking a data packet.
  2446. //
  2447. // Correspondingly, when we request a lock, we expect one reply at each
  2448. // priority unless it is for ObManControl.
  2449. //
  2450. if (pLockReqPkt->wsGroupID == WSGROUPID_OMC)
  2451. {
  2452. priority = NET_TOP_PRIORITY;
  2453. }
  2454. else
  2455. {
  2456. priority = NET_LOW_PRIORITY | NET_SEND_ALL_PRIORITIES;
  2457. }
  2458. if (QueueMessage(pomPrimary->putTask,
  2459. pDomain,
  2460. channel,
  2461. priority,
  2462. NULL,
  2463. NULL,
  2464. NULL,
  2465. (POMNET_PKT_HEADER) pLockReplyPkt,
  2466. NULL,
  2467. TRUE) != 0)
  2468. {
  2469. ERROR_OUT(("Error queueing lock reply for workset %hu, WSG %hu",
  2470. pLockReqPkt->worksetID, pLockReqPkt->wsGroupID));
  2471. UT_FreeRefCount((void**)&pLockReplyPkt, FALSE);
  2472. }
  2473. DC_EXIT_POINT:
  2474. DebugExitVOID(QueueLockReply);
  2475. }
  2476. //
  2477. // QueueLockNotify(...)
  2478. //
  2479. void QueueLockNotify
  2480. (
  2481. POM_PRIMARY pomPrimary,
  2482. POM_DOMAIN pDomain,
  2483. POM_WSGROUP pWSGroup,
  2484. POM_WORKSET pWorkset,
  2485. NET_UID sender
  2486. )
  2487. {
  2488. POMNET_LOCK_PKT pLockNotifyPkt;
  2489. NET_PRIORITY priority;
  2490. DebugEntry(QueueLockNotify);
  2491. ValidateWorkset(pWorkset);
  2492. pLockNotifyPkt = (POMNET_LOCK_PKT)UT_MallocRefCount(sizeof(OMNET_LOCK_PKT), TRUE);
  2493. if (!pLockNotifyPkt)
  2494. {
  2495. ERROR_OUT(("Out of memory for QueueLockNotify"));
  2496. DC_QUIT;
  2497. }
  2498. //
  2499. // For a LOCK_NOTIFY, the <data1> field is the user ID of the node
  2500. // we've granted the lock to.
  2501. //
  2502. pLockNotifyPkt->header.sender = pDomain->userID;
  2503. pLockNotifyPkt->header.messageType = OMNET_LOCK_NOTIFY;
  2504. pLockNotifyPkt->wsGroupID = pWSGroup->wsGroupID;
  2505. pLockNotifyPkt->worksetID = pWorkset->worksetID;
  2506. pLockNotifyPkt->data1 = sender;
  2507. //
  2508. // LOCK_NOTIFY messages go at the priority of the workset involved. If
  2509. // this is OBMAN_CHOOSES_PRIORITY, then all bets are off and we send
  2510. // them TOP_PRIORITY.
  2511. //
  2512. priority = pWorkset->priority;
  2513. if (priority == OM_OBMAN_CHOOSES_PRIORITY)
  2514. {
  2515. priority = NET_TOP_PRIORITY;
  2516. }
  2517. if (QueueMessage(pomPrimary->putTask,
  2518. pDomain,
  2519. pWSGroup->channelID,
  2520. priority,
  2521. NULL,
  2522. NULL,
  2523. NULL,
  2524. (POMNET_PKT_HEADER) pLockNotifyPkt,
  2525. NULL,
  2526. TRUE) != 0)
  2527. {
  2528. ERROR_OUT(("Error queueing lock notify for workset %hu in WSG %hu",
  2529. pWorkset->worksetID, pWSGroup->wsGroupID));
  2530. UT_FreeRefCount((void**)&pLockNotifyPkt, FALSE);
  2531. }
  2532. DC_EXIT_POINT:
  2533. DebugExitVOID(QueueLockNotify);
  2534. }
  2535. //
  2536. // ProcessLockNotify(...)
  2537. //
  2538. void ProcessLockNotify
  2539. (
  2540. POM_PRIMARY pomPrimary,
  2541. POM_DOMAIN pDomain,
  2542. POM_WSGROUP pWSGroup,
  2543. POM_WORKSET pWorkset,
  2544. NET_UID owner
  2545. )
  2546. {
  2547. POM_WORKSET pOMCWorkset;
  2548. POM_OBJECT pObjPerson;
  2549. DebugEntry(ProcessLockNotify);
  2550. ValidateWSGroup(pWSGroup);
  2551. ValidateWorkset(pWorkset);
  2552. //
  2553. // This message is sent when one remote node has granted the lock to
  2554. // another. We use it to update our view of who has got the lock.
  2555. //
  2556. TRACE_OUT(("Got LOCK_NOTIFY for workset %u in WSG %d - node 0x%08x has the lock",
  2557. pWorkset->worksetID, pWSGroup->wsg, owner));
  2558. //
  2559. // Check the lock state for the workset:
  2560. //
  2561. switch (pWorkset->lockState)
  2562. {
  2563. case LOCKED:
  2564. {
  2565. //
  2566. // A remote node has just told us that another remote node has
  2567. // got this workset lock - but we think we've got it!
  2568. //
  2569. ERROR_OUT(( "Bad LOCK_NOTIFY for WSG %d workset %d, owner 0x%08x",
  2570. pWSGroup->wsg, pWorkset->worksetID, owner));
  2571. DC_QUIT;
  2572. }
  2573. break;
  2574. case LOCKING:
  2575. {
  2576. //
  2577. // We should get a LOCK_DENY or a LOCK_GRANT later - do nothing
  2578. // now.
  2579. //
  2580. DC_QUIT;
  2581. }
  2582. break;
  2583. case LOCK_GRANTED:
  2584. case UNLOCKED:
  2585. {
  2586. //
  2587. // One remote node has granted the lock to another. Check the
  2588. // latter is still attached, by looking in the control workset:
  2589. //
  2590. pOMCWorkset = GetOMCWorkset(pDomain, pWSGroup->wsGroupID);
  2591. FindPersonObject(pOMCWorkset,
  2592. owner,
  2593. FIND_THIS,
  2594. &pObjPerson);
  2595. if (pObjPerson != NULL)
  2596. {
  2597. ValidateObject(pObjPerson);
  2598. //
  2599. // If our internal state is LOCK_GRANTED and we have just
  2600. // received a LOCK_NOTIFY from another node then we can
  2601. // just ignore it - it is for a lock request that we have
  2602. // just abandoned.
  2603. //
  2604. if ( (pWorkset->lockState == LOCK_GRANTED) &&
  2605. (owner == pDomain->userID) )
  2606. {
  2607. TRACE_OUT(( "Ignoring LOCK_NOTIFY for ourselves"));
  2608. DC_QUIT;
  2609. }
  2610. //
  2611. // Only store the new ID it is greater than the last ID we
  2612. // were notified of - it is possible for LOCK_NOTIFIES to
  2613. // get crossed on the wire. Consider the following
  2614. // scenario:
  2615. //
  2616. // Machines 1, 2, 3 and 4 are all in a call and all try and
  2617. // lock at the same time.
  2618. //
  2619. // - 2 grants to 3 and sends a LOCK_NOTIFY saying that 3
  2620. // has the lock.
  2621. //
  2622. // - 3 grants to 4 and sends a LOCK_NOTIFY saying that 4
  2623. // has the lock
  2624. //
  2625. // 4 actually has the lock at this point.
  2626. //
  2627. // Machine 1 gets the lock notification from 3 and sets its
  2628. // 'lockedBy' field to 4.
  2629. // Machine 1 then gets the lock notification from 2 and
  2630. // resets the 'lockedBy' field to 3.
  2631. //
  2632. // 4 then unlocks and sends the unlock notification. When
  2633. // 1 gets the unlock, it does not recognise the ID of the
  2634. // unlocking machine (it thinks 3 has the lock) so doesnt
  2635. // bother to reset the local locked state. Any subsequent
  2636. // attempts to lock the workset on 1 fail because it still
  2637. // still thinks 3 has the lock.
  2638. //
  2639. if (owner > pWorkset->lockedBy)
  2640. {
  2641. pWorkset->lockedBy = owner;
  2642. TRACE_OUT(( "Node ID 0x%08x has the lock (?)",
  2643. pWorkset->lockedBy));
  2644. }
  2645. }
  2646. else
  2647. {
  2648. //
  2649. // If not, we assume that this node was granted the lock
  2650. // but then went away. If we did think the workset was
  2651. // locked, mark it as unlocked and post an unlock event.
  2652. //
  2653. if (pWorkset->lockState == LOCK_GRANTED)
  2654. {
  2655. TRACE_OUT(("node 0x%08x had lock on workset %d in WSG %d but has left",
  2656. owner, pWorkset->worksetID, pWSGroup->wsg));
  2657. WorksetUnlockLocal(pomPrimary->putTask, pWorkset);
  2658. }
  2659. }
  2660. }
  2661. break;
  2662. default:
  2663. {
  2664. //
  2665. // We should have covered all the options so if we get here
  2666. // there's something wrong.
  2667. //
  2668. ERROR_OUT(("Reached deafult case in workset lock switch (state: %hu)",
  2669. pWorkset->lockState));
  2670. }
  2671. }
  2672. DC_EXIT_POINT:
  2673. DebugExitVOID(ProcessLockNotify);
  2674. }
  2675. //
  2676. // ProcessLockReply(...)
  2677. //
//
// Handles an incoming OMNET_LOCK_GRANT or OMNET_LOCK_DENY from <sender>
// in reply to a lock request we broadcast earlier (identified by
// <correlator>). Checks the sender off our expected-respondents list;
// a DENY fails the lock immediately, while the final GRANT completes it.
//
void ProcessLockReply
(
    POM_PRIMARY         pomPrimary,
    POM_DOMAIN          pDomain,
    NET_UID             sender,
    OM_CORRELATOR       correlator,
    OMNET_MESSAGE_TYPE  replyType)
{
    POM_WSGROUP         pWSGroup = NULL;
    POM_WORKSET         pWorkset;
    POM_LOCK_REQ        pLockReq;
    POM_NODE_LIST       pNodeEntry;

    DebugEntry(ProcessLockReply);

    //
    // Search the domain's list of pending locks for one which matches
    // the correlator (we do it this way rather than using the workset
    // group ID and workset ID to ensure that we don't get confused
    // between successive lock requests for the same workset).
    //
    TRACE_OUT(( "Searching domain %u's list for lock corr %hu",
        pDomain->callID, correlator));
    COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->pendingLocks),
        (void**)&pLockReq, FIELD_OFFSET(OM_LOCK_REQ, chain),
        FIELD_OFFSET(OM_LOCK_REQ, correlator), (DWORD)correlator,
        FIELD_SIZE(OM_LOCK_REQ, correlator));
    if (pLockReq == NULL)
    {
        //
        // Could be any of the following:
        //
        // - This reply is from a node we were never expecting a lock
        //   reply from in the first place, and we've got all the other
        //   replies so we've thrown away the lock request.
        //
        // - Someone else has denied us the lock so we've given up.
        //
        // - The node was too slow to reply and we've given up on the
        //   lock request.
        //
        // - We've left the domain and so moved all the pending lock
        //   requests into the local domain.
        //
        // - A logic error.
        //
        // The only thing we can do here is quit.
        //
        WARNING_OUT(( "Unexpected lock correlator 0x%08x (domain %u)",
            correlator, pDomain->callID));
        DC_QUIT;
    }

    //
    // Otherwise, we search the list of expected respondents looking for
    // the node which has just replied:
    //
    COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pLockReq->nodes),
        (void**)&pNodeEntry, FIELD_OFFSET(OM_NODE_LIST, chain),
        FIELD_OFFSET(OM_NODE_LIST, userID), (DWORD)sender,
        FIELD_SIZE(OM_NODE_LIST, userID));
    if (pNodeEntry == NULL)
    {
        //
        // Could be any of the following:
        //
        // - We removed the node from the list because it had
        //   deregistered when the timeout expired (will only happen
        //   when delete of person object overtakes lock reply and
        //   timeout expires locally between the two).
        //
        // - The node joined since we compiled the list.
        //
        // - A logic error.
        //
        TRACE_OUT(("Recd unexpected lock reply from node 0x%08x in Domain %u",
            sender, pDomain->callID));
        DC_QUIT;
    }

    //
    // Otherwise, this is a normal lock reply so we just remove the node
    // from the list and free up its chunk of memory.
    //
    COM_BasedListRemove(&(pNodeEntry->chain));
    UT_FreeRefCount((void**)&pNodeEntry, FALSE);

    pWSGroup = pLockReq->pWSGroup;

    //
    // If the client has just deregistered from the workset group, we'll
    // be throwing it away soon, so don't do any more processing:
    //
    if (!pWSGroup->valid)
    {
        WARNING_OUT(("Ignoring lock reply for discarded WSG %d", pWSGroup->wsg));
        DC_QUIT;
    }

    // Worksets are only discarded with their whole workset group, so
    // this lookup cannot fail while the group is valid.
    pWorkset = pWSGroup->apWorksets[pLockReq->worksetID];
    ASSERT((pWorkset != NULL));

    //
    // Now check the workset's lock state: if we're not/no longer trying
    // to lock it, quit.
    //
    // Note, however, that checkpointing worksets are never marked as
    // LOCKING, even when we're locking them, so exclude them from the
    // test:
    //
    if ((pWorkset->lockState != LOCKING) &&
        (pWorkset->worksetID != OM_CHECKPOINT_WORKSET))
    {
        WARNING_OUT(( "Recd unwanted lock reply from %hu for workset %d WSG %d",
            sender, pWorkset->worksetID, pWSGroup->wsg));
        DC_QUIT;
    }

    //
    // If this is a negative reply, then we have failed to get the lock
    // so inform our local client and then quit:
    //
    if (replyType == OMNET_LOCK_DENY)
    {
        //
        // We do not expect this for a CHECKPOINT_WORKSET:
        //
        ASSERT((pWorkset->worksetID != OM_CHECKPOINT_WORKSET));
        WARNING_OUT(( "node 0x%08x has denied the lock for workset %u in WSG %d",
            sender, pWorkset->worksetID, pWSGroup->wsg));
        pWorkset->lockState = UNLOCKED;
        pWorkset->lockCount = 0;

        //
        // Complete all pending lock requests for this workset with
        // OM_RC_WORKSET_LOCK_GRANTED (i.e. granted to the denying
        // node):
        //
        HandleMultLockReq(pomPrimary,
                          pDomain,
                          pWSGroup,
                          pWorkset,
                          OM_RC_WORKSET_LOCK_GRANTED);

        //
        // Since we have given up our lock request in favour of another
        // node, need to broadcast a LOCK_NOTIFY so that everyone else
        // stays in sync with who's got the lock.
        //
        QueueLockNotify(pomPrimary, pDomain, pWSGroup, pWorkset, sender);
        DC_QUIT;
    }

    TRACE_OUT(( "Affirmative lock reply received from node 0x%08x", sender));

    //
    // Check if the list of expected respondents is now empty:
    //
    if (COM_BasedListIsEmpty(&(pLockReq->nodes)))
    {
        //
        // List is now empty, so all nodes have replied to the request,
        // therefore lock has succeeded:
        //
        TRACE_OUT(( "Got all LOCK_GRANT replies for workset %u in WSG %d",
            pWorkset->worksetID, pWSGroup->wsg));
        if (pWorkset->worksetID == OM_CHECKPOINT_WORKSET)
        {
            //
            // This is a checkpointing workset. We do not set the state
            // to LOCKED (we never do for these worksets) and we only
            // process the particular pending lock request which this
            // packet came in reply to - otherwise we couldn't guarantee
            // an end-to-end ping on each checkpoint:
            //
            WorksetLockResult(pomPrimary->putTask, &pLockReq, 0);
        }
        else
        {
            //
            // This is not a checkpointing workset, so set the state to
            // LOCKED and process ALL pending locks for this workset:
            //
            pWorkset->lockState = LOCKED;
            HandleMultLockReq(pomPrimary, pDomain, pWSGroup, pWorkset, 0);
        }
    }
    else
    {
        //
        // Otherwise, still awaiting some replies, so we do nothing more
        // for the moment except trace.
        //
        TRACE_OUT(( "Still need lock replies for workset %u in WSG %d",
            pLockReq->worksetID, pWSGroup->wsg));
    }

DC_EXIT_POINT:
    DebugExitVOID(ProcessLockReply);
}
  2859. //
  2860. // PurgeLockRequests(...)
  2861. //
  2862. void PurgeLockRequests
  2863. (
  2864. POM_DOMAIN pDomain,
  2865. POM_WSGROUP pWSGroup
  2866. )
  2867. {
  2868. POM_LOCK_REQ pLockReq;
  2869. POM_LOCK_REQ pNextLockReq;
  2870. POM_NODE_LIST pNodeEntry;
  2871. DebugEntry(PurgeLockRequests);
  2872. //
  2873. // Search this domain's list of lock requests looking for a match on
  2874. // workset group ID:
  2875. //
  2876. pLockReq = (POM_LOCK_REQ)COM_BasedListFirst(&(pDomain->pendingLocks), FIELD_OFFSET(OM_LOCK_REQ, chain));
  2877. while (pLockReq != NULL)
  2878. {
  2879. //
  2880. // This loop might remove pLockReq from the list, so chain first:
  2881. //
  2882. pNextLockReq = (POM_LOCK_REQ)COM_BasedListNext(&(pDomain->pendingLocks), pLockReq,
  2883. FIELD_OFFSET(OM_LOCK_REQ, chain));
  2884. //
  2885. // For each match...
  2886. //
  2887. if (pLockReq->wsGroupID == pWSGroup->wsGroupID)
  2888. {
  2889. TRACE_OUT(( "'%s' still has lock req oustanding - discarding"));
  2890. //
  2891. // Discard any node list entries remaining...
  2892. //
  2893. pNodeEntry = (POM_NODE_LIST)COM_BasedListFirst(&(pLockReq->nodes), FIELD_OFFSET(OM_NODE_LIST, chain));
  2894. while (pNodeEntry != NULL)
  2895. {
  2896. COM_BasedListRemove(&(pNodeEntry->chain));
  2897. UT_FreeRefCount((void**)&pNodeEntry, FALSE);
  2898. pNodeEntry = (POM_NODE_LIST)COM_BasedListFirst(&(pLockReq->nodes), FIELD_OFFSET(OM_NODE_LIST, chain));
  2899. }
  2900. //
  2901. // ...and discard the lock request itself:
  2902. //
  2903. COM_BasedListRemove(&(pLockReq->chain));
  2904. UT_FreeRefCount((void**)&pLockReq, FALSE);
  2905. }
  2906. pLockReq = pNextLockReq;
  2907. }
  2908. DebugExitVOID(PurgeLockRequests);
  2909. }
  2910. //
  2911. // ProcessLockTimeout(...)
  2912. //
  2913. void ProcessLockTimeout
  2914. (
  2915. POM_PRIMARY pomPrimary,
  2916. UINT retriesToGo,
  2917. UINT callID
  2918. )
  2919. {
  2920. POM_DOMAIN pDomain;
  2921. POM_WSGROUP pWSGroup;
  2922. POM_WORKSET pWorkset;
  2923. POM_LOCK_REQ pLockReq = NULL;
  2924. POM_WORKSET pOMCWorkset;
  2925. POM_OBJECT pObj;
  2926. POM_NODE_LIST pNodeEntry;
  2927. POM_NODE_LIST pNextNodeEntry;
  2928. DebugEntry(ProcessLockTimeout);
  2929. //
  2930. // When we broadcast a lock request, we start a timer going so that we
  2931. // don't hang around for ever waiting for replies from nodes which have
  2932. // gone away. This timer has now popped, so we validate our list of
  2933. // expected respondents by checking that each entry relates to a node
  2934. // still in the domain.
  2935. //
  2936. //
  2937. // First, find the lock request CB by looking in each domain and then
  2938. // at the correlators of each pending lock request:
  2939. //
  2940. pDomain = (POM_DOMAIN)COM_BasedListFirst(&(pomPrimary->domains), FIELD_OFFSET(OM_DOMAIN, chain));
  2941. while (pDomain != NULL)
  2942. {
  2943. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->pendingLocks),
  2944. (void**)&pLockReq, FIELD_OFFSET(OM_LOCK_REQ, chain),
  2945. FIELD_OFFSET(OM_LOCK_REQ, retriesToGo), (DWORD)retriesToGo,
  2946. FIELD_SIZE(OM_LOCK_REQ, retriesToGo));
  2947. if (pLockReq != NULL)
  2948. {
  2949. TRACE_OUT(( "Found correlated lock request"));
  2950. break;
  2951. }
  2952. //
  2953. // Didn't find anything in this domain - go on to the next:
  2954. //
  2955. pDomain = (POM_DOMAIN)COM_BasedListNext(&(pomPrimary->domains), pDomain,
  2956. FIELD_OFFSET(OM_DOMAIN, chain));
  2957. }
  2958. if (pLockReq == NULL)
  2959. {
  2960. TRACE_OUT(( "Lock timeout expired after lock granted/refused"));
  2961. DC_QUIT;
  2962. }
  2963. pWSGroup = pLockReq->pWSGroup;
  2964. //
  2965. // If the client has just deregistered from the workset group, we'll
  2966. // be throwing it away soon, so don't do any more processing:
  2967. //
  2968. if (!pWSGroup->valid)
  2969. {
  2970. WARNING_OUT(( "Ignoring lock timeout for discarded WSG %d",
  2971. pWSGroup->wsg));
  2972. DC_QUIT;
  2973. }
  2974. //
  2975. // We know the workset must still exist because worksets don't get
  2976. // discarded unless the whole workset group is being discarded.
  2977. //
  2978. pWorkset = pWSGroup->apWorksets[pLockReq->worksetID];
  2979. ASSERT((pWorkset != NULL));
  2980. //
  2981. // The workset must be in the LOCKING state because if it is LOCKED or
  2982. // UNLOCKED, then we shouldn't have found a lock request CB for it
  2983. // (unless of course it's a checkpointing workset):
  2984. //
  2985. if (pWorkset->lockState != LOCKING)
  2986. {
  2987. if (pWorkset->worksetID != OM_CHECKPOINT_WORKSET)
  2988. {
  2989. WARNING_OUT((
  2990. "Got lock timeout for workset %u in WSG %d but state is %u",
  2991. pWorkset->worksetID, pWSGroup->wsg,
  2992. pWorkset->lockState));
  2993. DC_QUIT;
  2994. }
  2995. }
  2996. //
  2997. // Go through the relevant control workset to see if any of the
  2998. // expected respondents have disappeared.
  2999. //
  3000. pOMCWorkset = GetOMCWorkset(pDomain, pLockReq->wsGroupID);
  3001. ASSERT((pOMCWorkset != NULL));
  3002. //
  3003. // Chain through each of the objects in our expected respondents list
  3004. // as follows:
  3005. //
  3006. // FOR each object in the expected respondents list
  3007. //
  3008. // FOR each person object in the relevant ObManControl workset
  3009. //
  3010. // IF they match on user ID, this node is still around so
  3011. // don't delete it
  3012. //
  3013. // IF no match found then node has gone away so remove it from
  3014. // expected respondents list.
  3015. //
  3016. //
  3017. pNodeEntry = (POM_NODE_LIST)COM_BasedListFirst(&(pLockReq->nodes), FIELD_OFFSET(OM_NODE_LIST, chain));
  3018. while (pNodeEntry != NULL)
  3019. {
  3020. //
  3021. // We might free up pNodeEntry on a pass through the loop (in
  3022. // ProcessLockReply), but we will need to be able to chain from it
  3023. // all the same. So, we chain at the START of the loop, putting a
  3024. // pointer to the next item in pTempNodeEntry; at the end of the
  3025. // loop, we assign this value to pNodeEntry:
  3026. //
  3027. pNextNodeEntry = (POM_NODE_LIST)COM_BasedListNext(&(pLockReq->nodes), pNodeEntry,
  3028. FIELD_OFFSET(OM_NODE_LIST, chain));
  3029. //
  3030. // Now, search for this user's person object:
  3031. //
  3032. FindPersonObject(pOMCWorkset,
  3033. pNodeEntry->userID,
  3034. FIND_THIS,
  3035. &pObj);
  3036. if (pObj == NULL)
  3037. {
  3038. //
  3039. // We didn't find this node in the workset, so it must have
  3040. // disappeared. Therefore, we fake a LOCK_GRANT message from
  3041. // it. ProcessLockReply will duplicate some of the processing
  3042. // we've done but it saves duplicating code.
  3043. //
  3044. WARNING_OUT((
  3045. "node 0x%08x has disappeared - faking LOCK_GRANT message",
  3046. pNodeEntry->userID));
  3047. ProcessLockReply(pomPrimary,
  3048. pDomain,
  3049. pNodeEntry->userID,
  3050. pLockReq->correlator,
  3051. OMNET_LOCK_GRANT);
  3052. }
  3053. //
  3054. // Now, go on to the next item in the expected respondents list:
  3055. //
  3056. pNodeEntry = pNextNodeEntry;
  3057. }
  3058. //
  3059. // ProcessLockReply may have determined, with the faked messages we
  3060. // gave it, that the lock attempt has succeeded completely. If so, the
  3061. // workset's lock state will now be LOCKED. If it isn't, we'll need to
  3062. // post another timeout event.
  3063. //
  3064. if (pWorkset->lockState == LOCKING)
  3065. {
  3066. TRACE_OUT(( "Replies to lock request still expected"));
  3067. if (pLockReq->retriesToGo == 0)
  3068. {
  3069. //
  3070. // We've run out of retries so give up now:
  3071. //
  3072. WARNING_OUT(( "Timed out trying to lock workset %u in WSG %d",
  3073. pLockReq->worksetID, pWSGroup->wsg));
  3074. pWorkset->lockState = UNLOCKED;
  3075. pWorkset->lockedBy = 0;
  3076. pWorkset->lockCount = 0;
  3077. HandleMultLockReq(pomPrimary,
  3078. pDomain,
  3079. pWSGroup,
  3080. pWorkset,
  3081. OM_RC_OUT_OF_RESOURCES);
  3082. //
  3083. // Now send an unlock message to all nodes, so that they don't
  3084. // think we still have it locked.
  3085. //
  3086. if (QueueUnlock(pomPrimary->putTask,
  3087. pDomain,
  3088. pWSGroup->wsGroupID,
  3089. pWorkset->worksetID,
  3090. pWSGroup->channelID,
  3091. pWorkset->priority) != 0)
  3092. {
  3093. DC_QUIT;
  3094. }
  3095. }
  3096. else // retriesToGo == 0
  3097. {
  3098. pLockReq->retriesToGo--;
  3099. UT_PostEvent(pomPrimary->putTask,
  3100. pomPrimary->putTask,
  3101. OM_LOCK_RETRY_DELAY_DFLT,
  3102. OMINT_EVENT_LOCK_TIMEOUT,
  3103. retriesToGo,
  3104. callID);
  3105. }
  3106. }
  3107. DC_EXIT_POINT:
  3108. DebugExitVOID(ProcessLockTimeout);
  3109. }
  3110. //
  3111. // HandleMultLockReq
  3112. //
  3113. void HandleMultLockReq
  3114. (
  3115. POM_PRIMARY pomPrimary,
  3116. POM_DOMAIN pDomain,
  3117. POM_WSGROUP pWSGroup,
  3118. POM_WORKSET pWorkset,
  3119. UINT result
  3120. )
  3121. {
  3122. POM_LOCK_REQ pLockReq;
  3123. DebugEntry(HandleMultLockReq);
  3124. //
  3125. // We need to search this Domain's list of lock requests for every one
  3126. // which matches the workset group and workset specified in the
  3127. // parameter list. Find the primary record first as a sanity check:
  3128. //
  3129. FindLockReq(pDomain, pWSGroup, pWorkset, &pLockReq, LOCK_PRIMARY);
  3130. if (pLockReq == NULL)
  3131. {
  3132. ERROR_OUT(( "No primary lock request CB found for workset %u!",
  3133. pWorkset->worksetID));
  3134. DC_QUIT;
  3135. }
  3136. while (pLockReq != NULL)
  3137. {
  3138. WorksetLockResult(pomPrimary->putTask, &pLockReq, result);
  3139. FindLockReq(pDomain, pWSGroup, pWorkset,
  3140. &pLockReq, LOCK_SECONDARY);
  3141. }
  3142. DC_EXIT_POINT:
  3143. DebugExitVOID(HandleMultLockReq);
  3144. }
  3145. //
  3146. //
  3147. //
  3148. // FindLockReq
  3149. //
  3150. //
  3151. //
  3152. void FindLockReq(POM_DOMAIN pDomain,
  3153. POM_WSGROUP pWSGroup,
  3154. POM_WORKSET pWorkset,
  3155. POM_LOCK_REQ * ppLockReq,
  3156. BYTE lockType)
  3157. {
  3158. POM_LOCK_REQ pLockReq;
  3159. DebugEntry(FindLockReq);
  3160. //
  3161. // We need to search this Domain's list of lock requests for every one
  3162. // which matches the workset group, workset and lock type specified in
  3163. // the parameter list.
  3164. //
  3165. // So, we search the list to find a match on workset group ID, then
  3166. // compare the workset ID. If that doesn't match, we continue down the
  3167. // list:
  3168. //
  3169. pLockReq = (POM_LOCK_REQ)COM_BasedListFirst(&(pDomain->pendingLocks), FIELD_OFFSET(OM_LOCK_REQ, chain));
  3170. while (pLockReq != NULL)
  3171. {
  3172. if ((pLockReq->wsGroupID == pWSGroup->wsGroupID) &&
  3173. (pLockReq->worksetID == pWorkset->worksetID) &&
  3174. (pLockReq->type == lockType))
  3175. {
  3176. break;
  3177. }
  3178. pLockReq = (POM_LOCK_REQ)COM_BasedListNext(&(pDomain->pendingLocks), pLockReq,
  3179. FIELD_OFFSET(OM_LOCK_REQ, chain));
  3180. }
  3181. *ppLockReq = pLockReq;
  3182. DebugExitVOID(FindLockReq);
  3183. }
  3184. //
  3185. // ProcessUnlock(...)
  3186. //
  3187. void ProcessUnlock
  3188. (
  3189. POM_PRIMARY pomPrimary,
  3190. POM_WORKSET pWorkset,
  3191. NET_UID sender
  3192. )
  3193. {
  3194. DebugEntry(ProcessUnlock);
  3195. //
  3196. // Check the workset was locked by the node that's now unlocking it:
  3197. //
  3198. if (pWorkset->lockedBy != sender)
  3199. {
  3200. WARNING_OUT(( "Unexpected UNLOCK from node 0x%08x for %hu!",
  3201. sender, pWorkset->worksetID));
  3202. }
  3203. else
  3204. {
  3205. TRACE_OUT(( "Unlocking:%hu for node 0x%08x",
  3206. pWorkset->worksetID, sender));
  3207. WorksetUnlockLocal(pomPrimary->putTask, pWorkset);
  3208. }
  3209. DebugExitVOID(ProcessUnlock);
  3210. }
  3211. //
  3212. // ReleaseAllNetLocks(...)
  3213. //
  3214. void ReleaseAllNetLocks
  3215. (
  3216. POM_PRIMARY pomPrimary,
  3217. POM_DOMAIN pDomain,
  3218. OM_WSGROUP_ID wsGroupID,
  3219. NET_UID userID
  3220. )
  3221. {
  3222. POM_WSGROUP pWSGroup;
  3223. POM_WORKSET pWorkset;
  3224. OM_WORKSET_ID worksetID;
  3225. DebugEntry(ReleaseAllNetLocks);
  3226. //
  3227. // Find the workset group:
  3228. //
  3229. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
  3230. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  3231. FIELD_OFFSET(OM_WSGROUP, wsGroupID), (DWORD)wsGroupID,
  3232. FIELD_SIZE(OM_WSGROUP, wsGroupID));
  3233. if (pWSGroup == NULL)
  3234. {
  3235. //
  3236. // This will happen for a workset group which the other node is
  3237. // registered with but we're not, so just trace and quit:
  3238. //
  3239. TRACE_OUT(("No record found for WSG ID %hu", wsGroupID));
  3240. DC_QUIT;
  3241. }
  3242. TRACE_OUT(( "Releasing all locks held by node 0x%08x in WSG %d",
  3243. userID, pWSGroup->wsg));
  3244. //
  3245. // For each workset in it, if the lock has been granted to the detached
  3246. // node, unlock it:
  3247. //
  3248. for (worksetID = 0;
  3249. worksetID < OM_MAX_WORKSETS_PER_WSGROUP;
  3250. worksetID++)
  3251. {
  3252. pWorkset = pWSGroup->apWorksets[worksetID];
  3253. if (pWorkset == NULL)
  3254. {
  3255. continue;
  3256. }
  3257. //
  3258. // If this workset is locked by someone other than us...
  3259. //
  3260. if (pWorkset->lockState == LOCK_GRANTED)
  3261. {
  3262. //
  3263. // ...and if it is locked by the departed node (or if everyone
  3264. // has been detached)...
  3265. //
  3266. if ((userID == pWorkset->lockedBy) ||
  3267. (userID == NET_ALL_REMOTES))
  3268. {
  3269. //
  3270. // ...unlock it.
  3271. //
  3272. TRACE_OUT((
  3273. "Unlocking workset %u in WSG %d for detached node 0x%08x",
  3274. worksetID, pWSGroup->wsg, userID));
  3275. WorksetUnlockLocal(pomPrimary->putTask, pWorkset);
  3276. }
  3277. }
  3278. }
  3279. DC_EXIT_POINT:
  3280. DebugExitVOID(ReleaseAllNetLocks);
  3281. }
  3282. //
  3283. // ProcessWSGRegister(...)
  3284. //
  3285. void ProcessWSGRegister
  3286. (
  3287. POM_PRIMARY pomPrimary,
  3288. POM_WSGROUP_REG_CB pRegistrationCB
  3289. )
  3290. {
  3291. POM_DOMAIN pDomain;
  3292. POM_WSGROUP pWSGroup;
  3293. POM_USAGE_REC pUsageRec = NULL;
  3294. POM_CLIENT_LIST pClientListEntry;
  3295. UINT mode;
  3296. UINT type;
  3297. UINT rc = 0;
  3298. DebugEntry(ProcessWSGRegister);
  3299. //
  3300. // Check if this registration has been aborted already:
  3301. //
  3302. if (!pRegistrationCB->valid)
  3303. {
  3304. WARNING_OUT(( "Reg CB for WSG %d no longer valid - aborting registration",
  3305. pRegistrationCB->wsg));
  3306. UT_FreeRefCount((void**)&pRegistrationCB, FALSE);
  3307. DC_QUIT;
  3308. }
  3309. //
  3310. // Determine whether we're doing a REGISTER or a MOVE (we use the
  3311. // string values for tracing):
  3312. //
  3313. mode = pRegistrationCB->mode;
  3314. type = pRegistrationCB->type;
  3315. TRACE_OUT(( "Processing %d request (pre-Stage1) for WSG %d",
  3316. pRegistrationCB->wsg));
  3317. //
  3318. // Find the Domain record (in the case of a MOVE, this will be the
  3319. // record for the Domain INTO WHICH the Client wants to move the WSG).
  3320. //
  3321. // Note that this process will cause us to attach to the Domain if
  3322. // we're not already attached.
  3323. //
  3324. rc = DomainRecordFindOrCreate(pomPrimary,
  3325. pRegistrationCB->callID,
  3326. &pDomain);
  3327. if (rc != 0)
  3328. {
  3329. DC_QUIT;
  3330. }
  3331. //
  3332. // Save the pointer to the Domain record because we'll need it later:
  3333. //
  3334. pRegistrationCB->pDomain = pDomain;
  3335. //
  3336. // Put the registration CB in the list hung off the Domain record:
  3337. //
  3338. COM_BasedListInsertAfter(&(pDomain->pendingRegs),
  3339. &(pRegistrationCB->chain));
  3340. //
  3341. // OK, now we need to look for the workset group.
  3342. //
  3343. // If this is a MOVE, we can find the workset group record immediately
  3344. // using the offset stored in the request CB.
  3345. //
  3346. // If this is a REGISTER, we need to look for the record in the list
  3347. // hung off the Domain record, and, if none is found, create one:
  3348. //
  3349. if (type == WSGROUP_REGISTER)
  3350. {
  3351. WSGRecordFind(pDomain, pRegistrationCB->wsg, pRegistrationCB->fpHandler,
  3352. &pWSGroup);
  3353. if (pWSGroup == NULL)
  3354. {
  3355. //
  3356. // The workset group was not found in the list hung off the
  3357. // Domain record, which means that there is no workset group
  3358. // with this name/FP combination present ON THIS MACHINE for
  3359. // this Domain.
  3360. //
  3361. rc = WSGRecordCreate(pomPrimary,
  3362. pDomain,
  3363. pRegistrationCB->wsg,
  3364. pRegistrationCB->fpHandler,
  3365. &pWSGroup);
  3366. if (rc != 0)
  3367. {
  3368. DC_QUIT;
  3369. }
  3370. }
  3371. //
  3372. // Now that we've got a pointer to the workset group, we put a
  3373. // Client pointer to it into the usage record.
  3374. //
  3375. // We use the <clientPRootData> field of the registration CB as the
  3376. // base and to it we add the offset of the workset group we've just
  3377. // found/created.
  3378. //
  3379. // First, however, to get access to the usage record we need to
  3380. // generate an ObMan pointer to it:
  3381. //
  3382. pUsageRec = pRegistrationCB->pUsageRec;
  3383. //
  3384. // ...and add it to the Client pointer to the root of OMGLOBAL,
  3385. // putting the result in the relevant field in the usage record:
  3386. //
  3387. pUsageRec->pWSGroup = pWSGroup;
  3388. pUsageRec->flags &= ~PWSGROUP_IS_PREGCB;
  3389. //
  3390. // Now add this Client to the workset group's client list (as a
  3391. // PRIMARY):
  3392. //
  3393. rc = AddClientToWSGList(pRegistrationCB->putTask,
  3394. pWSGroup,
  3395. pRegistrationCB->hWSGroup,
  3396. PRIMARY);
  3397. if (rc != 0)
  3398. {
  3399. DC_QUIT;
  3400. }
  3401. pUsageRec->flags |= ADDED_TO_WSGROUP_LIST;
  3402. }
  3403. else // type == WSGROUP_MOVE
  3404. {
  3405. //
  3406. // Get pointer to WSGroup from the offset stored in the
  3407. // Registration CB:
  3408. //
  3409. pWSGroup = pRegistrationCB->pWSGroup;
  3410. //
  3411. // If it has become invalid, then all local Clients must have
  3412. // deregistered from it in the time it took for this event to to be
  3413. // processed. This is unusual, but not wrong, so we alert:
  3414. //
  3415. if (!pWSGroup->valid)
  3416. {
  3417. WARNING_OUT(( "Aborting Move req for WSG %d - record is invalid",
  3418. pWSGroup->wsg));
  3419. DC_QUIT;
  3420. }
  3421. }
  3422. //
  3423. // So, whatever just happened above, we should now have a valid pointer
  3424. // to a valid workset group record which is the one the Client wanted
  3425. // to move/register with in the first place.
  3426. //
  3427. //
  3428. // This workset group might be marked TO_BE_DISCARDED, if the last
  3429. // local Client deregistered from it a while ago but it hasn't actually
  3430. // been discarded. We don't want it discardable any more:
  3431. //
  3432. if (pWSGroup->toBeDiscarded)
  3433. {
  3434. WARNING_OUT(("WSG %d marked TO_BE_DISCARDED - clearing flag for new registration",
  3435. pWSGroup->wsg));
  3436. pWSGroup->toBeDiscarded = FALSE;
  3437. }
  3438. //
  3439. // We'll need the ObMan-context pointer to the workset group later, so
  3440. // store it in the CB:
  3441. //
  3442. pRegistrationCB->pWSGroup = pWSGroup;
  3443. //
  3444. // OK, now we've set up the various records and put the necessary
  3445. // pointers in the registration CB, so start the workset group
  3446. // registration/move process in earnest. To do this, we post another
  3447. // event to the ObMan task which will result in WSGRegisterStage1 being
  3448. // called.
  3449. //
  3450. // The reason we don't call the function directly is that this event
  3451. // may have to be bounced, and if so, we want to restart the
  3452. // registration process at the beginning of WSGRegisterStage1 (rather
  3453. // than the beginning of this function).
  3454. //
  3455. // Before we post the event, bump up the use counts of the Domain
  3456. // record and workset group, since the CB holds references to them and
  3457. // they may be freed by something else before we process the event.
  3458. //
  3459. // In addition, bump up the use count of the registration CB because if
  3460. // the call goes down before the event is processed, the reg CB will
  3461. // have been freed.
  3462. //
  3463. UT_BumpUpRefCount(pDomain);
  3464. UT_BumpUpRefCount(pWSGroup);
  3465. UT_BumpUpRefCount(pRegistrationCB);
  3466. pRegistrationCB->flags |= BUMPED_CBS;
  3467. UT_PostEvent(pomPrimary->putTask,
  3468. pomPrimary->putTask,
  3469. 0, // no delay
  3470. OMINT_EVENT_WSGROUP_REGISTER_CONT,
  3471. 0, // no param1
  3472. (UINT_PTR) pRegistrationCB);
  3473. TRACE_OUT(( "Processed initial request for WSG %d TASK 0x%08x",
  3474. pRegistrationCB->wsg, pRegistrationCB->putTask));
  3475. DC_EXIT_POINT:
  3476. if (rc != 0)
  3477. {
  3478. //
  3479. // We hit an error, so let the Client know:
  3480. //
  3481. WSGRegisterResult(pomPrimary, pRegistrationCB, rc);
  3482. // lonchanc: bug #942 happened here
  3483. // this was ERROR_OUT
  3484. WARNING_OUT(( "Error %d processing WSG %d",
  3485. rc, pRegistrationCB->wsg));
  3486. //
  3487. // Calling WSGRegisterResult above will have dealt with our bad
  3488. // return code, so we don't need to return it to our caller. So,
  3489. // swallow:
  3490. //
  3491. rc = 0;
  3492. }
  3493. DebugExitVOID(ProcessWSGRegister);
  3494. }
  3495. //
  3496. //
  3497. //
  3498. // WSGRegisterAbort(...)
  3499. //
  3500. //
  3501. //
  3502. void WSGRegisterAbort(POM_PRIMARY pomPrimary,
  3503. POM_DOMAIN pDomain,
  3504. POM_WSGROUP_REG_CB pRegistrationCB)
  3505. {
  3506. DebugEntry(WSGRegisterAbort);
  3507. //
  3508. // This function can be called at any stage of the workset group
  3509. // registration process if for some reason the registration has to be
  3510. // aborted.
  3511. //
  3512. //
  3513. // Now remove this Client from the list of Clients registered with the
  3514. // workset group and if there are none left, discard the workset group:
  3515. //
  3516. RemoveClientFromWSGList(pomPrimary->putTask,
  3517. pRegistrationCB->putTask,
  3518. pRegistrationCB->pWSGroup);
  3519. //
  3520. // Now post failure to the Client and finish up the cleanup:
  3521. //
  3522. WSGRegisterResult(pomPrimary, pRegistrationCB, OM_RC_OUT_OF_RESOURCES);
  3523. DebugExitVOID(WSGRegisterAbort);
  3524. }
  3525. //
  3526. // WSGRecordCreate(...)
  3527. //
//
// WSGRecordCreate(...)
//
// Allocates and initialises a workset group record for <wsg>/<fpHandler>,
// links it into <pDomain>'s workset group list, and creates the dummy
// checkpoint workset used for late-joiner catch-up.  On success *ppWSGroup
// receives the new record and 0 is returned; on failure the record (if
// allocated) is unlinked and freed, and a non-zero rc is returned.
//
UINT WSGRecordCreate
(
    POM_PRIMARY     pomPrimary,
    POM_DOMAIN      pDomain,
    OMWSG           wsg,
    OMFP            fpHandler,
    POM_WSGROUP *   ppWSGroup
)
{
    POM_WSGROUP     pWSGroup;
    BOOL            opened = FALSE;   // NOTE(review): never used in this function
    UINT            rc = 0;

    DebugEntry(WSGRecordCreate);

    //
    // Allocate the record zero-filled (second arg TRUE => zero memory):
    //
    pWSGroup = (POM_WSGROUP)UT_MallocRefCount(sizeof(OM_WSGROUP), TRUE);
    if (!pWSGroup)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    SET_STAMP(pWSGroup, WSGROUP);

    //
    // Basic field initialisation; the record starts life in the INITIAL
    // state of the registration state machine:
    //
    pWSGroup->pDomain = pDomain;
    pWSGroup->valid = TRUE;
    pWSGroup->wsg = wsg;
    pWSGroup->fpHandler = fpHandler;
    COM_BasedListInit(&(pWSGroup->clients));
    pWSGroup->state = INITIAL;

    //
    // Finally insert the new WSG record into the domain's list.  We insert
    // at the end of the list so if we get forced out of a channel
    // (a LEAVE_IND event) and the channel happens to be reused by MCS
    // for another WSG before we have a chance to process the LEAVE_IND,
    // the record for the old WSG will be found first.
    //
    COM_BasedListInsertBefore(&(pDomain->wsGroups),
            &(pWSGroup->chain));

    //
    // *** NEW FOR MULTI-PARTY ***
    //
    // The checkpointing process used when helping a late joiner catch up
    // uses a dummy workset (#255) in each workset group.  Create this now:
    //
    rc = WorksetCreate(pomPrimary->putTask,
                       pWSGroup,
                       OM_CHECKPOINT_WORKSET,
                       FALSE,
                       NET_TOP_PRIORITY);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Set up caller's pointer:
    //
    *ppWSGroup = pWSGroup;

    TRACE_OUT(( "Created record for WSG %d FP %d in Domain %u",
        wsg, fpHandler, pDomain->callID));

DC_EXIT_POINT:
    //
    // Cleanup.  Note that by the time WorksetCreate (the only failure
    // after allocation) can fail, the record has already been inserted in
    // the domain's list, so the unconditional ListRemove below is safe:
    //
    if (rc != 0)
    {
        ERROR_OUT(( "Error %d creating record for WSG %d FP %d in Domain %u",
            rc, wsg, fpHandler, pDomain->callID));
        if (pWSGroup != NULL)
        {
            COM_BasedListRemove(&(pWSGroup->chain));
            UT_FreeRefCount((void**)&pWSGroup, FALSE);
        }
    }

    DebugExitDWORD(WSGRecordCreate, rc);
    return(rc);
}
  3601. //
  3602. //
  3603. //
  3604. // WSGRegisterStage1(...)
  3605. //
  3606. //
  3607. //
//
// WSGRegisterStage1(...)
//
// Second stage of workset group registration/move, reached via the
// OMINT_EVENT_WSGROUP_REGISTER_CONT event posted by ProcessWSGRegister.
// Validates the Domain and workset group records, then dispatches on the
// workset group's state: either retries later, reports immediate success,
// or starts the ObManControl locking sequence which continues in
// WSGRegisterStage2.
//
void WSGRegisterStage1(POM_PRIMARY pomPrimary,
                       POM_WSGROUP_REG_CB pRegistrationCB)
{
    POM_DOMAIN      pDomain;
    POM_WSGROUP     pWSGroup;
    UINT            type;

    DebugEntry(WSGRegisterStage1);

    //
    // If the registration CB has been marked invalid, then just quit
    // (don't have to do any abort processing since that will have been
    // done by whatever marked the CB invalid):
    //
    if (!pRegistrationCB->valid )
    {
        WARNING_OUT(( "Reg CB for WSG %d marked invalid, quitting",
            pRegistrationCB->wsg));
        DC_QUIT;
    }

    //
    // Determine whether we're doing a REGISTER or a MOVE (we use the
    // string values for tracing):
    //
    type = pRegistrationCB->type;
    TRACE_OUT(( "Processing %d request (Stage1) for WSG %d",
        type, pRegistrationCB->wsg));

    //
    // Set up pointers
    //
    pDomain = pRegistrationCB->pDomain;
    pWSGroup = pRegistrationCB->pWSGroup;

    //
    // Check they're still valid:
    //
    if (!pDomain->valid)
    {
        WARNING_OUT(( "Record for Domain %u not valid, aborting registration",
            pDomain->callID));
        WSGRegisterAbort(pomPrimary, pDomain, pRegistrationCB);
        DC_QUIT;
    }

    ValidateWSGroup(pWSGroup);
    if (!pWSGroup->valid)
    {
        WARNING_OUT(( "Record for WSG %d in Domain %u not valid, aborting",
            pWSGroup->wsg, pDomain->callID));
        WSGRegisterAbort(pomPrimary, pDomain, pRegistrationCB);
        DC_QUIT;
    }

    //
    // Now examine the Domain state.  If it is
    //
    // - READY, then this is a Domain that we are fully attached to
    //
    // - anything else, then we are some way through the process of
    //   attaching to the Domain (in some other part of the code).
    //
    // We react to each situation as follows:
    //
    // - continue with the workset group registration/move
    //
    // - repost the event with a delay to retry the registration/move in a
    //   short while.
    //
    if (pDomain->state != DOMAIN_READY)
    {
        //
        // Since we are in the process of attaching to the Domain, we can
        // do nothing else at the moment.  Therefore, we bounce this event
        // back to our event queue, with a delay.
        //
        TRACE_OUT(( "State for Domain %u is %hu",
            pDomain->callID, pDomain->state));
        WSGRegisterRetry(pomPrimary, pRegistrationCB);
        DC_QUIT;
    }

    //
    // OK, so the Domain is in the READY state.  What we do next depends on
    // two things:
    //
    // - whether this is a WSGMove or a WSGRegister
    //
    // - what state the workset group is in.
    //
    //
    // If this is a REGISTER, then if the workset group state is
    //
    // - READY, then there's another local Client registered with the
    //   workset, and everything is all set up so we just call
    //   WSGRegisterSuccess straight away.
    //
    // - INITIAL, then this is the first time we've been here for this
    //   workset group, so we start the process of locking
    //   ObManControl etc. (see below)
    //
    // - anything else, then we're somewhere in between the two:
    //   another reqeust to register with the workset group is in
    //   progress so we repost the event with a delay; by the time it
    //   comes back to us the workset group should be in the READY
    //   state.
    //
    //
    // If this is a MOVE, then if the workset group state is
    //
    // - READY, then the workset group is fully set up in whatever
    //   Domain it's in at the moment so we allow the move to proceed
    //
    // - anything else, then we're somewhere in the middle of the
    //   registration process for the workset group.  We do not want
    //   to interfere with the registration by trying to do a move
    //   simultaneously (for the simple reason that it introduces far
    //   more complexity into the state machine) so we bounce the
    //   event (i.e. we only process a MOVE when the workset group
    //   is fully set up).
    //
    TRACE_OUT(( "State for WSG %d is %u", pWSGroup->wsg, pWSGroup->state));
    switch (pWSGroup->state)
    {
        case INITIAL:
        {
            //
            // Workset group record has just been created, but nothing else
            // has been done.
            //
            // OK, proceed with processing the Client's move/registration
            // attempt.  Whichever is involved, we start by locking the
            // ObManControl workset group; when that completes, we continue
            // in WSGRegisterStage2.
            //
            // Note: this function returns a lock correlator which
            //       will be the same as the correlator returned in
            //       the WORKSET_LOCK_CON event.  We will use this
            //       correlator to look up the registration CB, so
            //       stuff the return value from the function in it
            //
            // Note: in the case of a move, we will only ever get
            //       here because we had to retry the move from the
            //       top after failing to lock ObManControl
            //
            LockObManControl(pomPrimary,
                             pDomain,
                             &(pRegistrationCB->lockCorrelator));
            pRegistrationCB->flags |= LOCKED_OMC;
            pWSGroup->state = LOCKING_OMC;
        }
        break;

        case LOCKING_OMC:
        case PENDING_JOIN:
        case PENDING_SEND_MIDWAY:
        {
            //
            // We're already in the process of either registering another
            // Client with this workset group, or moving the workset group
            // into a new Domain, so we delay this Client's
            // registration/move attempt for the moment:
            //
            // Don't expect to get here - remove if error not hit
            //
            // CMF 21/11/95
            ERROR_OUT(( "Should not be here"));
            WSGRegisterRetry(pomPrimary, pRegistrationCB);
            DC_QUIT;
        }
        break;

        case PENDING_SEND_COMPLETE:
        {
            //
            // WSG Already exists locally, and is fully set up.
            //
            if (type == WSGROUP_REGISTER)
            {
                //
                // If we're doing a REGISTER, this means that some other
                // Client must be registered with it.  If we've passed the
                // Clients-per-wsgroup check in ProcessWSGRegister, we must
                // be OK, so we post a result straight away (0 indicates
                // success):
                //
                WSGRegisterResult(pomPrimary, pRegistrationCB, 0);
            }
            else // type == WSGROUP_MOVE
            {
                //
                // We prohibit moves until we're fully caught up:
                //
                // Don't expect to get here - remove if error not hit
                //
                // CMF 21/11/95
                ERROR_OUT(( "Should not be here"));
                WSGRegisterRetry(pomPrimary, pRegistrationCB);
                DC_QUIT;
            }
        }
        break;

        case WSGROUP_READY:
        {
            if (type == WSGROUP_REGISTER)
            {
                //
                // As above:
                //
                WSGRegisterResult(pomPrimary, pRegistrationCB, 0);
            }
            else // type == WSGROUP_MOVE
            {
                //
                // If we're doing a MOVE, then we start by locking
                // ObManControl, just as above:
                //
                LockObManControl(pomPrimary,
                                 pDomain,
                                 &(pRegistrationCB->lockCorrelator));
                pRegistrationCB->flags |= LOCKED_OMC;
                pWSGroup->state = LOCKING_OMC;
            }
        }
        break;

        default:
        {
            ERROR_OUT(("Invalid state %u for WSG %d",
                pWSGroup->state, pWSGroup->wsg));
        }
    }

    TRACE_OUT(( "Completed Stage 1 of %d for WSG %d",
        type, pRegistrationCB->wsg));

DC_EXIT_POINT:
    //
    // We bumped up the use count of the registration CB when we posted the
    // REGISTER_CONT event which got us here, so now free the CB to
    // decrement the use count.  Unless it's already been freed (e.g.
    // because the call went down and the registration was cancelled) it
    // will still be around so future stages of the registration process
    // will be able to use it.
    //
    // NB: Although future stages of the registration process are
    //     asynchronous, they will abort if they cannot find the reg CB in
    //     the Domain list, so we don't have to worry about bumping it for
    //     them (since if it is finally freed, then it must have been
    //     removed from the Domain list).
    //
    UT_FreeRefCount((void**)&pRegistrationCB, FALSE);

    DebugExitVOID(WSGRegisterStage1);
}
  3851. //
  3852. // LockObManControl(...)
  3853. //
  3854. void LockObManControl(POM_PRIMARY pomPrimary,
  3855. POM_DOMAIN pDomain,
  3856. OM_CORRELATOR * pLockCorrelator)
  3857. {
  3858. POM_WSGROUP pOMCWSGroup;
  3859. POM_WORKSET pOMCWorkset;
  3860. UINT rc = 0;
  3861. DebugEntry(LockObManControl);
  3862. //
  3863. // Get pointers to the ObManControl workset group and workset #0 in it:
  3864. //
  3865. pOMCWSGroup = GetOMCWsgroup(pDomain);
  3866. pOMCWorkset = pOMCWSGroup->apWorksets[0];
  3867. //
  3868. // Start the lock procedure to lock the workset:
  3869. //
  3870. WorksetLockReq(pomPrimary->putTask,
  3871. pomPrimary,
  3872. pOMCWSGroup,
  3873. pOMCWorkset,
  3874. 0,
  3875. pLockCorrelator);
  3876. TRACE_OUT(( "Requested lock for ObManControl in Domain %u",
  3877. pDomain->callID));
  3878. DebugExitVOID(LockObManControl);
  3879. }
  3880. //
  3881. //
  3882. //
  3883. // MaybeUnlockObManControl(...)
  3884. //
  3885. //
  3886. //
  3887. void MaybeUnlockObManControl(POM_PRIMARY pomPrimary,
  3888. POM_WSGROUP_REG_CB pRegistrationCB)
  3889. {
  3890. POM_WSGROUP pOMCWSGroup;
  3891. POM_WORKSET pOMCWorkset;
  3892. DebugEntry(MaybeUnlockObManControl);
  3893. //
  3894. // If we've got ObManControl locked for THIS registration, unlock it
  3895. //
  3896. if (pRegistrationCB->flags & LOCKED_OMC)
  3897. {
  3898. pOMCWSGroup = GetOMCWsgroup(pRegistrationCB->pDomain);
  3899. pOMCWorkset = pOMCWSGroup->apWorksets[0];
  3900. TRACE_OUT(( "Unlocking OMC for %d in WSG %d",
  3901. pRegistrationCB->type,
  3902. pRegistrationCB->wsg));
  3903. WorksetUnlock(pomPrimary->putTask, pOMCWSGroup, pOMCWorkset);
  3904. pRegistrationCB->flags &= ~LOCKED_OMC;
  3905. }
  3906. DebugExitVOID(MaybeUnlockObManControl);
  3907. }
  3908. //
  3909. // ProcessOMCLockConfirm(...)
  3910. //
  3911. void ProcessOMCLockConfirm
  3912. (
  3913. POM_PRIMARY pomPrimary,
  3914. OM_CORRELATOR correlator,
  3915. UINT result
  3916. )
  3917. {
  3918. POM_WSGROUP_REG_CB pRegistrationCB = NULL;
  3919. POM_DOMAIN pDomain;
  3920. DebugEntry(ProcessOMCLockConfirm);
  3921. TRACE_OUT(( "Got LOCK_CON with result = 0x%08x and correlator = %hu",
  3922. result, correlator));
  3923. //
  3924. // Next step is to find the registration attempt this lock relates to.
  3925. // It could be in any domain, so search through all of them:
  3926. //
  3927. pDomain = (POM_DOMAIN)COM_BasedListFirst(&(pomPrimary->domains), FIELD_OFFSET(OM_DOMAIN, chain));
  3928. while (pDomain != NULL)
  3929. {
  3930. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->pendingRegs),
  3931. (void**)&pRegistrationCB, FIELD_OFFSET(OM_WSGROUP_REG_CB, chain),
  3932. FIELD_OFFSET(OM_WSGROUP_REG_CB, lockCorrelator),
  3933. (DWORD)correlator, FIELD_SIZE(OM_WSGROUP_REG_CB, lockCorrelator));
  3934. if (pRegistrationCB != NULL)
  3935. {
  3936. TRACE_OUT(( "Found correlated reg CB in domain %u, for WSG %d",
  3937. pDomain->callID, pRegistrationCB->wsg));
  3938. break;
  3939. }
  3940. //
  3941. // Didn't find anything in this domain - go on to the next:
  3942. //
  3943. pDomain = (POM_DOMAIN)COM_BasedListNext(&(pomPrimary->domains), pDomain,
  3944. FIELD_OFFSET(OM_DOMAIN, chain));
  3945. }
  3946. //
  3947. // If we didn't find it in any of the Domains, it's probably because
  3948. // we've detached from the Domain and thrown away its pending
  3949. // registrations CBs. So trace and quit:
  3950. //
  3951. if (pRegistrationCB == NULL)
  3952. {
  3953. TRACE_OUT(( "Got LOCK_CON event (correlator: 0x%08x) but no reg CB found",
  3954. correlator));
  3955. DC_QUIT;
  3956. }
  3957. //
  3958. // Now check whether the lock succeeded:
  3959. //
  3960. if (result != 0)
  3961. {
  3962. //
  3963. // Failed to get the lock on ObManControl for some reason. This
  3964. // could be because of contention, or else a more general problem.
  3965. // In any event, we call WSGRegisterRetry which will retry (or call
  3966. // WSGRegisterResult if we've run out of retries).
  3967. //
  3968. // Note: since WSGRegisterRetry handles move requests as well, we
  3969. // don't need to check here which type of request it is:
  3970. //
  3971. pRegistrationCB->flags &= ~LOCKED_OMC;
  3972. WSGRegisterRetry(pomPrimary, pRegistrationCB);
  3973. }
  3974. else
  3975. {
  3976. //
  3977. // We've got the lock on ObManControl workset #0, so now we proceed
  3978. // to the next step of the registration process.
  3979. //
  3980. // As above, this function handles both MOVE and REGISTER attempts.
  3981. //
  3982. WSGRegisterStage2(pomPrimary, pRegistrationCB);
  3983. }
  3984. DC_EXIT_POINT:
  3985. DebugExitVOID(ProcessOMCLockConfirm);
  3986. }
//
// ProcessCheckpoint(...)
//
// Handles the confirm for a checkpoint "lock" taken while helping a late
// joiner catch up.  <correlator> identifies the helper CB created when we
// agreed to help; <result> is the checkpoint outcome (0 = success).
//
// On success the workset group is sent to the late joiner; on failure
// (checkpoint failed, or the workset group was discarded in the meantime)
// a SEND_DENY is issued instead.  Whenever a helper CB was found, it is
// freed on the way out.
//
void ProcessCheckpoint
(
    POM_PRIMARY pomPrimary,
    OM_CORRELATOR correlator,
    UINT result
)
{
    POM_DOMAIN pDomain;
    POM_WSGROUP pWSGroup;
    POM_HELPER_CB pHelperCB = NULL;

    DebugEntry(ProcessCheckpoint);

    //
    // Next step is to find the helper CB this lock relates to. It could
    // be in any domain, so search through all of them:
    //
    // NOTE(review): this walks the domain list back-to-front
    // (Last/Prev), whereas ProcessOMCLockConfirm walks front-to-back -
    // presumably the order is irrelevant because correlators are unique;
    // confirm if ever relying on it.
    //
    pDomain = (POM_DOMAIN)COM_BasedListLast(&(pomPrimary->domains), FIELD_OFFSET(OM_DOMAIN, chain));
    while (pDomain != NULL)
    {
        COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->helperCBs),
            (void**)&pHelperCB, FIELD_OFFSET(OM_HELPER_CB, chain),
            FIELD_OFFSET(OM_HELPER_CB, lockCorrelator),
            (DWORD)correlator, FIELD_SIZE(OM_HELPER_CB, lockCorrelator));
        if (pHelperCB != NULL)
        {
            TRACE_OUT(( "Found correlated helper CB, for WSG %d",
                pHelperCB->pWSGroup->wsg));
            break;
        }

        //
        // Didn't find anything in this domain - go on to the next:
        //
        pDomain = (POM_DOMAIN)COM_BasedListPrev(&(pomPrimary->domains), pDomain,
            FIELD_OFFSET(OM_DOMAIN, chain));
    }

    //
    // If we didn't find it in any of the Domains, it's probably because
    // we've detached from the Domain and thrown away its pending helper
    // CBs.  So trace and quit:
    //
    if (pHelperCB == NULL)
    {
        WARNING_OUT(( "No helper CB found with lock correlator 0x%08x!", correlator));
        DC_QUIT;
    }

    //
    // Set up local pointers:
    //
    pWSGroup = pHelperCB->pWSGroup;
    ValidateWSGroup(pWSGroup);

    //
    // If the "lock" failed, we send a SEND_DENY message to the late
    // joiner.
    //
    if (result != 0)
    {
        WARNING_OUT(( "Failed to checkpoint WSG %d for %u - giving up",
            pWSGroup->wsg,
            pHelperCB->lateJoiner));
        IssueSendDeny(pomPrimary,
            pDomain,
            pWSGroup->wsGroupID,
            pHelperCB->lateJoiner,
            pHelperCB->remoteCorrelator);
        DC_QUIT;
    }

    //
    // The lock succeeded, so check to see if the workset group pointer we
    // stored is still valid (the group may have been discarded while the
    // checkpoint was in flight):
    //
    if (!pWSGroup->valid)
    {
        WARNING_OUT(("Discarded WSG %d while checkpointing it for %hu",
            pWSGroup->wsg,
            pHelperCB->lateJoiner));
        IssueSendDeny(pomPrimary,
            pDomain,
            pWSGroup->wsGroupID,
            pHelperCB->lateJoiner,
            pHelperCB->remoteCorrelator);
        DC_QUIT;
    }

    //
    // All is well - go ahead and send the workset group to the late
    // joiner:
    //
    TRACE_OUT(("Checkpoint succeeded for WSG %d - sending to late joiner %hu",
        pWSGroup->wsg, pHelperCB->lateJoiner));
    SendWSGToLateJoiner(pomPrimary,
        pDomain,
        pWSGroup,
        pHelperCB->lateJoiner,
        pHelperCB->remoteCorrelator);

DC_EXIT_POINT:
    //
    // If we found a helper CB, then we just discard it now (this also
    // releases the reference it held on the workset group - see
    // FreeHelperCB):
    //
    if (pHelperCB != NULL)
    {
        FreeHelperCB(&pHelperCB);
    }

    DebugExitVOID(ProcessCheckpoint);
}
  4092. //
  4093. // NewHelperCB(...)
  4094. //
  4095. BOOL NewHelperCB
  4096. (
  4097. POM_DOMAIN pDomain,
  4098. POM_WSGROUP pWSGroup,
  4099. NET_UID lateJoiner,
  4100. OM_CORRELATOR remoteCorrelator,
  4101. POM_HELPER_CB * ppHelperCB
  4102. )
  4103. {
  4104. POM_HELPER_CB pHelperCB;
  4105. BOOL rc = FALSE;
  4106. DebugEntry(NewHelperCB);
  4107. //
  4108. // This function
  4109. //
  4110. // - allocates a new helper CB
  4111. //
  4112. // - fills in the fields
  4113. //
  4114. // - stores it in the domain's list of helper CBs
  4115. //
  4116. // - bumps the use count of the workset group referenced.
  4117. //
  4118. pHelperCB = (POM_HELPER_CB)UT_MallocRefCount(sizeof(OM_HELPER_CB), TRUE);
  4119. if (!pHelperCB)
  4120. {
  4121. ERROR_OUT(("Out of memory in NewHelperCB"));
  4122. DC_QUIT;
  4123. }
  4124. UT_BumpUpRefCount(pWSGroup);
  4125. SET_STAMP(pHelperCB, HELPERCB);
  4126. pHelperCB->pWSGroup = pWSGroup;
  4127. pHelperCB->lateJoiner = lateJoiner;
  4128. pHelperCB->remoteCorrelator = remoteCorrelator;
  4129. //
  4130. // The lock correlator field is filled in later.
  4131. //
  4132. COM_BasedListInsertBefore(&(pDomain->helperCBs), &(pHelperCB->chain));
  4133. rc = TRUE;
  4134. DC_EXIT_POINT:
  4135. *ppHelperCB = pHelperCB;
  4136. DebugExitBOOL(NewHelperCB, rc);
  4137. return(rc);
  4138. }
  4139. //
  4140. // FreeHelperCB(...)
  4141. //
  4142. void FreeHelperCB
  4143. (
  4144. POM_HELPER_CB * ppHelperCB
  4145. )
  4146. {
  4147. DebugEntry(FreeHelperCB);
  4148. //
  4149. // This function
  4150. //
  4151. // - frees the workset group referenced in the helper CB
  4152. //
  4153. // - removes the helper CB from the domain's list
  4154. //
  4155. // - frees the helper CB.
  4156. //
  4157. UT_FreeRefCount((void**)&((*ppHelperCB)->pWSGroup), FALSE);
  4158. COM_BasedListRemove(&((*ppHelperCB)->chain));
  4159. UT_FreeRefCount((void**)ppHelperCB, FALSE);
  4160. DebugExitVOID(FreeHelperCB);
  4161. }
  4162. //
  4163. // WSGRegisterStage2(...)
  4164. //
  4165. void WSGRegisterStage2
  4166. (
  4167. POM_PRIMARY pomPrimary,
  4168. POM_WSGROUP_REG_CB pRegistrationCB
  4169. )
  4170. {
  4171. POM_DOMAIN pDomain;
  4172. POM_WSGROUP pWSGroup;
  4173. POM_OBJECT pObjInfo;
  4174. POM_WSGROUP_INFO pInfoObject;
  4175. NET_CHANNEL_ID channelID;
  4176. UINT type;
  4177. UINT rc = 0;
  4178. DebugEntry(WSGRegisterStage2);
  4179. //
  4180. // Determine whether we're doing a REGISTER or a MOVE (we use the string
  4181. // value for tracing):
  4182. //
  4183. type = pRegistrationCB->type;
  4184. TRACE_OUT(( "Processing %d request (Stage2) for WSG %d",
  4185. type, pRegistrationCB->wsg));
  4186. //
  4187. // We'll need these below:
  4188. //
  4189. pDomain = pRegistrationCB->pDomain;
  4190. pWSGroup = pRegistrationCB->pWSGroup;
  4191. //
  4192. // Check they're still valid:
  4193. //
  4194. if (!pDomain->valid)
  4195. {
  4196. WARNING_OUT(( "Record for Domain %u not valid, aborting registration",
  4197. pDomain->callID));
  4198. WSGRegisterAbort(pomPrimary, pDomain, pRegistrationCB);
  4199. DC_QUIT;
  4200. }
  4201. if (!pWSGroup->valid)
  4202. {
  4203. WARNING_OUT(( "Record for WSG %d in Domain %u not valid, "
  4204. "aborting registration",
  4205. pWSGroup->wsg, pDomain->callID));
  4206. WSGRegisterAbort(pomPrimary, pDomain, pRegistrationCB);
  4207. DC_QUIT;
  4208. }
  4209. //
  4210. // Sanity check:
  4211. //
  4212. ASSERT(pWSGroup->state == LOCKING_OMC);
  4213. //
  4214. // Now find the information object in workset #0 of ObManControl which
  4215. // matches the WSG name/FP that the Client requested to register with:
  4216. //
  4217. FindInfoObject(pDomain,
  4218. 0, // don't know the ID yet
  4219. pWSGroup->wsg,
  4220. pWSGroup->fpHandler,
  4221. &pObjInfo);
  4222. if (pObjInfo == NULL)
  4223. {
  4224. //
  4225. // The workset group doesn't already exist in the Domain.
  4226. //
  4227. // If this is a REGISTER, this means we must create it. If this is a
  4228. // MOVE, then we can move it into the Domain, which is essentially
  4229. // creating it in the Domain with pre-existing contents.
  4230. //
  4231. // So, for both types of operation, our behaviour is the same at this
  4232. // point; we've already created the workset group record so what we
  4233. // do now is
  4234. //
  4235. // 1. get the Network layer to allocate a new channel ID,
  4236. //
  4237. // 2. allocate a new workset group ID and
  4238. //
  4239. // 3. announce the new workset group to the rest of the Domain.
  4240. //
  4241. // However, the network layer will not assign us a new channel ID
  4242. // synchronously, so steps 2 and 3 must be delayed until we receive
  4243. // the Join event.
  4244. //
  4245. // So, now we set the channel to be joined to 0 (this tells the
  4246. // Network layer to join us to a currently unused channel).
  4247. //
  4248. channelID = 0;
  4249. }
  4250. else
  4251. {
  4252. //
  4253. // Otherwise, the workset group already exists.
  4254. //
  4255. ValidateObject(pObjInfo);
  4256. if (type == WSGROUP_REGISTER)
  4257. {
  4258. //
  4259. // We're registering the Client with an existing workset group, so
  4260. // set the workset group ID to the existing value, and ditto for
  4261. // the channel ID:
  4262. //
  4263. pInfoObject = (POM_WSGROUP_INFO) pObjInfo->pData;
  4264. if (!pInfoObject)
  4265. {
  4266. ERROR_OUT(("WSGRegisterStage2 object 0x%08x has no data", pObjInfo));
  4267. rc = OM_RC_OBJECT_DELETED;
  4268. DC_QUIT;
  4269. }
  4270. ValidateObjectDataWSGINFO(pInfoObject);
  4271. channelID = pInfoObject->channelID;
  4272. }
  4273. else // type == WSGROUP_MOVE
  4274. {
  4275. //
  4276. // We can't move a workset group into a Domain where there already
  4277. // exists a workest group with the same name/FP, so we abort our
  4278. // move attempt at this point (we set the workset group sate back
  4279. // to READY, since that is its state in the Domain it was
  4280. // originally in):
  4281. //
  4282. WARNING_OUT(( "Cannot move WSG %d into Domain %u - WSG/FP clash",
  4283. pWSGroup->wsg, pDomain->callID));
  4284. pWSGroup->state = WSGROUP_READY;
  4285. rc = OM_RC_CANNOT_MOVE_WSGROUP;
  4286. DC_QUIT;
  4287. }
  4288. }
  4289. //
  4290. // Now join the relevant channel (possibly a new one, if <channel> was
  4291. // set to 0 above) and stuff the correlator in the <channelCorrelator>
  4292. // field of the registration CB (when the Join event arrives,
  4293. // ProcessNetJoinChannel will search for the registration CB by channel
  4294. // correlator)
  4295. //
  4296. // Note: if this is our "local" Domain, we skip this step.
  4297. //
  4298. if (pDomain->callID != NET_INVALID_DOMAIN_ID)
  4299. {
  4300. TRACE_OUT(( "Joining channel %hu, Domain %u",
  4301. channelID, pDomain->callID));
  4302. rc = MG_ChannelJoin(pomPrimary->pmgClient,
  4303. &(pRegistrationCB->channelCorrelator),
  4304. channelID);
  4305. if (rc != 0)
  4306. {
  4307. DC_QUIT;
  4308. }
  4309. pWSGroup->state = PENDING_JOIN;
  4310. //
  4311. // OK, that's it for the moment. The saga of workset group
  4312. // move/registration will be picked up by the ProcessNetJoinChannel
  4313. // function, which will invoke the WSGRegisterStage3 function.
  4314. //
  4315. }
  4316. else
  4317. {
  4318. //
  4319. // Since we didn't do a join just now, we won't be getting a JOIN
  4320. // event from the Network layer, so we better call WSGRegisterStage3
  4321. // directly:
  4322. //
  4323. pWSGroup->state = PENDING_JOIN;
  4324. // channel ID not relevant here so use zero
  4325. WSGRegisterStage3(pomPrimary, pDomain, pRegistrationCB, 0);
  4326. }
  4327. TRACE_OUT(( "Completed Register/Move Stage 2 for WSG %d", pWSGroup->wsg));
  4328. DC_EXIT_POINT:
  4329. if (rc != 0)
  4330. {
  4331. //
  4332. // Cleanup:
  4333. //
  4334. ERROR_OUT(( "Error %d at Stage 2 of %d for WSG %d",
  4335. rc, pWSGroup->wsg));
  4336. WSGRegisterResult(pomPrimary, pRegistrationCB, rc);
  4337. }
  4338. DebugExitVOID(WSGRegisterStage2);
  4339. }
  4340. //
  4341. // WSGRegisterStage3(...)
  4342. //
  4343. void WSGRegisterStage3
  4344. (
  4345. POM_PRIMARY pomPrimary,
  4346. POM_DOMAIN pDomain,
  4347. POM_WSGROUP_REG_CB pRegistrationCB,
  4348. NET_CHANNEL_ID channelID
  4349. )
  4350. {
  4351. POM_WSGROUP pWSGroup;
  4352. POM_WSGROUP pOMCWSGroup;
  4353. POM_WORKSET pOMCWorkset;
  4354. POM_OBJECT pObjInfo;
  4355. POM_OBJECT pObjReg;
  4356. POM_WSGROUP_INFO pInfoObject = NULL;
  4357. UINT type;
  4358. BOOL catchUpReqd = FALSE;
  4359. BOOL success = FALSE; // SFR 2744
  4360. UINT rc = 0;
  4361. DebugEntry(WSGRegisterStage3);
  4362. //
  4363. // We get here when a Join event has been received containing a channel
  4364. // correlator for a channel which is a regular workset group channel.
  4365. //
  4366. //
  4367. // Determine whether we're doing a REGISTER or a MOVE (we use the
  4368. // string values for tracing):
  4369. //
  4370. type = pRegistrationCB->type;
  4371. TRACE_OUT(( "Processing %d request (Stage3) for WSG %d",
  4372. type, pRegistrationCB->wsg));
  4373. //
  4374. // Get a pointer to the workset group:
  4375. //
  4376. pWSGroup = pRegistrationCB->pWSGroup;
  4377. //
  4378. // Check it's still valid:
  4379. //
  4380. if (!pWSGroup->valid)
  4381. {
  4382. WARNING_OUT(("WSG %d' discarded from domain %u - aborting registration",
  4383. pWSGroup->wsg, pDomain->callID));
  4384. WSGRegisterAbort(pomPrimary, pDomain, pRegistrationCB);
  4385. DC_QUIT;
  4386. }
  4387. //
  4388. // Check that this workset group is pending join:
  4389. //
  4390. if (pWSGroup->state != PENDING_JOIN)
  4391. {
  4392. WARNING_OUT(( "Received unexpected Join indication for WSG (state: %hu)",
  4393. pWSGroup->state));
  4394. rc = OM_RC_NETWORK_ERROR;
  4395. DC_QUIT;
  4396. }
  4397. //
  4398. // Now set the channel ID value in the workset group record:
  4399. //
  4400. pWSGroup->channelID = channelID;
  4401. TRACE_OUT(( "Channel ID for WSG %d in Domain %u is %hu",
  4402. pWSGroup->wsg, pDomain->callID, channelID));
  4403. //
  4404. // We'll need this below:
  4405. //
  4406. pOMCWSGroup = GetOMCWsgroup(pDomain);
  4407. //
  4408. // What we do next depends on whether we just created the workset
  4409. // group:
  4410. //
  4411. // - if it already existed, we need to catch up by asking another node
  4412. // for a copy
  4413. //
  4414. // - if we've just created it, we need to allocate a new workset group
  4415. // ID and add an INFO object to workset #0 in ObManControl.
  4416. //
  4417. // So, we search workset #0 for an INFO object to see if the workset
  4418. // group exists.
  4419. //
  4420. // Note: we did a similar search in Stage2 to find out the channel to
  4421. // join for the workset group. The reason we search again here
  4422. // is that the workset group could have been discarded by the
  4423. // other node in the time taken for the join to complete.
  4424. //
  4425. FindInfoObject(pDomain,
  4426. 0, // don't know the ID yet
  4427. pWSGroup->wsg,
  4428. pWSGroup->fpHandler,
  4429. &pObjInfo);
  4430. if (!pObjInfo || !pObjInfo->pData)
  4431. {
  4432. //
  4433. // Doesn't already exist, so no catch-up required:
  4434. //
  4435. catchUpReqd = FALSE;
  4436. }
  4437. else
  4438. {
  4439. //
  4440. // OK, so we found an INFO object, but there might not be any
  4441. // registration record objects in the relevant registration
  4442. // workset, so check:
  4443. //
  4444. ValidateObject(pObjInfo);
  4445. pInfoObject = (POM_WSGROUP_INFO) pObjInfo->pData;
  4446. ValidateObjectDataWSGINFO(pInfoObject);
  4447. pOMCWorkset = pOMCWSGroup->apWorksets[pInfoObject->wsGroupID];
  4448. if (pOMCWorkset == NULL)
  4449. {
  4450. catchUpReqd = TRUE;
  4451. }
  4452. else
  4453. {
  4454. FindPersonObject(pOMCWorkset,
  4455. pDomain->userID,
  4456. FIND_OTHERS,
  4457. &pObjReg);
  4458. if (pObjReg == NULL)
  4459. {
  4460. //
  4461. // This will happen when the remote node has deleted its
  4462. // registration record object but hasn't yet deleted the
  4463. // info object. Because the reg rec object is gone, we
  4464. // can't catch up from that node (or any node):
  4465. //
  4466. TRACE_OUT(( "INFO object found but no reg object - creating"));
  4467. catchUpReqd = FALSE;
  4468. }
  4469. else
  4470. {
  4471. ValidateObject(pObjReg);
  4472. catchUpReqd = TRUE;
  4473. }
  4474. }
  4475. }
  4476. //
  4477. // We should never try to catch up in the local Domain:
  4478. //
  4479. if (catchUpReqd && (pDomain->callID == OM_NO_CALL))
  4480. {
  4481. ERROR_OUT(( "Nearly tried to catch up in local Domain!"));
  4482. catchUpReqd = FALSE;
  4483. }
  4484. if (catchUpReqd)
  4485. {
  4486. //
  4487. // The workset group already exists, so we need to
  4488. //
  4489. // - set the workset group ID to the value in the INFO object, and
  4490. //
  4491. // - start the catch up process.
  4492. //
  4493. // Note: this will only happen in the case of a REGISTER, so we
  4494. // assert
  4495. //
  4496. ASSERT((pRegistrationCB->type == WSGROUP_REGISTER));
  4497. ASSERT((pInfoObject != NULL));
  4498. pWSGroup->wsGroupID = pInfoObject->wsGroupID;
  4499. rc = WSGCatchUp(pomPrimary, pDomain, pWSGroup);
  4500. if (rc == OM_RC_NO_NODES_READY)
  4501. {
  4502. //
  4503. // We get this return code when there are nodes out there with
  4504. // a copy but none of them are ready to send us the workset
  4505. // group.
  4506. //
  4507. // The correct thing to do is to give up for the moment and try
  4508. // again:
  4509. //
  4510. WSGRegisterRetry(pomPrimary, pRegistrationCB);
  4511. rc = 0;
  4512. DC_QUIT;
  4513. }
  4514. //
  4515. // Any other error is more serious:
  4516. //
  4517. if (rc != 0)
  4518. {
  4519. DC_QUIT;
  4520. }
  4521. //
  4522. // We won't be ready to send the workset group to a late-joiner
  4523. // node until we've caught up ourselves; when we have, the
  4524. // ProcessSendComplete function will call RegAnnounceComplete to
  4525. // update the reg object added for us by our helper node.
  4526. //
  4527. }
  4528. else
  4529. {
  4530. if (type == WSGROUP_MOVE)
  4531. {
  4532. //
  4533. // If this is a MOVE, pWSGroup refers to a workset group record
  4534. // which currently belongs to its "old" Domain. Since we're
  4535. // just about to announce the workset group's presence in its
  4536. // new Domain, this is the time to do the move:
  4537. //
  4538. WSGRecordMove(pomPrimary, pRegistrationCB->pDomain, pWSGroup);
  4539. //
  4540. // This will have reset the channel ID in the workset group
  4541. // record so we set it again here (yeah, it's naff):
  4542. //
  4543. pWSGroup->channelID = channelID;
  4544. }
  4545. //
  4546. // We've either just created a new workset group, or moved one into
  4547. // a new Domain, so we need to create a new ID for it in this
  4548. // Domain:
  4549. //
  4550. rc = WSGGetNewID(pomPrimary, pDomain, &(pWSGroup->wsGroupID));
  4551. if (rc != 0)
  4552. {
  4553. DC_QUIT;
  4554. }
  4555. TRACE_OUT(( "Workset group ID for WSG %d in Domain %u is %hu",
  4556. pWSGroup->wsg, pDomain->callID, pWSGroup->wsGroupID));
  4557. //
  4558. // Now call CreateAnnounce to add a WSG_INFO object to workset #0
  4559. // in ObManControl.
  4560. //
  4561. rc = CreateAnnounce(pomPrimary, pDomain, pWSGroup);
  4562. if (rc != 0)
  4563. {
  4564. DC_QUIT;
  4565. }
  4566. //
  4567. // Since we have completed our registration with the workset group,
  4568. // we announce to the world that we have a copy and will send it to
  4569. // others on request:
  4570. //
  4571. rc = RegAnnounceBegin(pomPrimary,
  4572. pDomain,
  4573. pWSGroup,
  4574. pDomain->userID,
  4575. &(pWSGroup->pObjReg));
  4576. if (rc != 0)
  4577. {
  4578. DC_QUIT;
  4579. }
  4580. rc = SetPersonData(pomPrimary, pDomain, pWSGroup);
  4581. if (rc != 0)
  4582. {
  4583. DC_QUIT;
  4584. }
  4585. rc = RegAnnounceComplete(pomPrimary, pDomain, pWSGroup);
  4586. if (rc != 0)
  4587. {
  4588. DC_QUIT;
  4589. }
  4590. //
  4591. // If we're not catching up, we call Result immediately (if we are
  4592. // catching up, Result will be called when we get the SEND_MIDWAY
  4593. // message):
  4594. //
  4595. // SFR 2744 : Can't call result here because we refer to the reg
  4596. // CB below. So, just set a flag and act on it below.
  4597. //
  4598. success = TRUE;
  4599. }
  4600. TRACE_OUT(( "Completed Register/Move Stage 3 for WSG %d",
  4601. pWSGroup->wsg));
  4602. DC_EXIT_POINT:
  4603. //
  4604. // OK, the critical test-and-set on the ObManControl workset group is
  4605. // finished, so we unlock workset #0 in ObManControl:
  4606. //
  4607. MaybeUnlockObManControl(pomPrimary, pRegistrationCB);
  4608. // SFR 2744 { : Call WSGRegResult AFTER checks on the flags in reg CB
  4609. if (success == TRUE)
  4610. {
  4611. WSGRegisterResult(pomPrimary, pRegistrationCB, 0);
  4612. }
  4613. // SFR 2744 }
  4614. if (rc != 0)
  4615. {
  4616. WARNING_OUT(( "Error %d at Stage 3 of %d with WSG %d",
  4617. rc, type, pWSGroup->wsg));
  4618. WSGRegisterResult(pomPrimary, pRegistrationCB, rc);
  4619. rc = 0;
  4620. }
  4621. DebugExitVOID(WSGRegisterStage2);
  4622. }
  4623. //
  4624. // WSGGetNewID(...)
  4625. //
  4626. UINT WSGGetNewID
  4627. (
  4628. POM_PRIMARY pomPrimary,
  4629. POM_DOMAIN pDomain,
  4630. POM_WSGROUP_ID pWSGroupID
  4631. )
  4632. {
  4633. POM_WSGROUP pOMCWSGroup;
  4634. POM_WORKSET pOMCWorkset;
  4635. POM_OBJECT pObj;
  4636. POM_WSGROUP_INFO pInfoObject;
  4637. OM_WSGROUP_ID wsGroupID;
  4638. BOOL found;
  4639. BYTE wsGroupIDsInUse[OM_MAX_WSGROUPS_PER_DOMAIN];
  4640. UINT rc = 0;
  4641. DebugEntry(WSGGetNewID);
  4642. TRACE_OUT(( "Searching for new WSG ID in Domain %u", pDomain->callID));
  4643. ZeroMemory(wsGroupIDsInUse, sizeof(wsGroupIDsInUse));
  4644. //
  4645. // Need to pick a workset group ID so far unused in this Domain to
  4646. // identify this new workset group. So, we build up a list of the IDs
  4647. // currently in use (by examining the INFO objects in workset #0) and
  4648. // then choose one that's not in use.
  4649. //
  4650. pOMCWSGroup = GetOMCWsgroup(pDomain);
  4651. pOMCWorkset = pOMCWSGroup->apWorksets[0];
  4652. pObj = (POM_OBJECT)COM_BasedListFirst(&(pOMCWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  4653. while (pObj != NULL)
  4654. {
  4655. ValidateObject(pObj);
  4656. if (pObj->flags & DELETED)
  4657. {
  4658. //
  4659. // Do nothing
  4660. //
  4661. }
  4662. else if (!pObj->pData)
  4663. {
  4664. //
  4665. // Do nothing
  4666. //
  4667. ERROR_OUT(("WSGGetNewID: object 0x%08x has no data", pObj));
  4668. }
  4669. else
  4670. {
  4671. ValidateObjectData(pObj->pData);
  4672. pInfoObject = (POM_WSGROUP_INFO)pObj->pData;
  4673. if (pInfoObject->idStamp != OM_WSGINFO_ID_STAMP)
  4674. {
  4675. //
  4676. // Do nothing
  4677. //
  4678. }
  4679. else
  4680. {
  4681. //
  4682. // OK, we've found a WSGROUP_INFO object, so cross off the
  4683. // workset group ID which its workset group is using:
  4684. //
  4685. wsGroupID = pInfoObject->wsGroupID;
  4686. wsGroupIDsInUse[wsGroupID] = TRUE;
  4687. }
  4688. }
  4689. pObj = (POM_OBJECT)COM_BasedListNext(&(pOMCWorkset->objects), pObj,
  4690. FIELD_OFFSET(OM_OBJECT, chain));
  4691. }
  4692. //
  4693. // Now go through the array to find an ID that wasn't marked as being in
  4694. // use:
  4695. //
  4696. found = FALSE;
  4697. for (wsGroupID = 0; wsGroupID < OM_MAX_WSGROUPS_PER_DOMAIN; wsGroupID++)
  4698. {
  4699. if (!wsGroupIDsInUse[wsGroupID])
  4700. {
  4701. TRACE_OUT(( "Workset group ID %hu is not in use, using", wsGroupID));
  4702. found = TRUE;
  4703. break;
  4704. }
  4705. }
  4706. //
  4707. // We checked earlier that the number of workset groups in the Domain
  4708. // hadn't exceeded the maximum (in WSGRecordCreate).
  4709. //
  4710. // However, if the Domain has run out of workset groups in the period
  4711. // since then, we won't have found any:
  4712. //
  4713. if (found == FALSE)
  4714. {
  4715. WARNING_OUT(( "No more workset group IDs for Domain %u!",
  4716. pDomain->callID));
  4717. rc = OM_RC_TOO_MANY_WSGROUPS;
  4718. DC_QUIT;
  4719. }
  4720. //
  4721. // If this is the first time that this ID has been used, then the
  4722. // associated registration workset won't exist. In this case, we create
  4723. // it now.
  4724. //
  4725. // If the ID has been used before, it will exist but it should be empty.
  4726. // In this case, we check that it really is empty.
  4727. //
  4728. pOMCWorkset = pOMCWSGroup->apWorksets[wsGroupID];
  4729. if (pOMCWorkset == NULL)
  4730. {
  4731. TRACE_OUT(( "Registration workset %u not used yet, creating", wsGroupID));
  4732. rc = WorksetCreate(pomPrimary->putTask,
  4733. pOMCWSGroup,
  4734. wsGroupID,
  4735. FALSE,
  4736. NET_TOP_PRIORITY);
  4737. if (rc != 0)
  4738. {
  4739. DC_QUIT;
  4740. }
  4741. }
  4742. else
  4743. {
  4744. ASSERT((pOMCWorkset->numObjects == 0));
  4745. TRACE_OUT(( "Registration workset %u previously used, re-using",
  4746. wsGroupID));
  4747. }
  4748. //
  4749. // Set the caller's pointer:
  4750. //
  4751. *pWSGroupID = wsGroupID;
  4752. DC_EXIT_POINT:
  4753. if (rc != 0)
  4754. {
  4755. //
  4756. // Cleanup:
  4757. //
  4758. ERROR_OUT(( "Error %d allocating ID for new workset group", rc));
  4759. }
  4760. DebugExitDWORD(WSGGetNewID, rc);
  4761. return(rc);
  4762. }
//
// CreateAnnounce(...)
//
// Announces the creation of <pWSGroup> in <pDomain> by adding a
// WSGROUP_INFO object (name, function profile, channel, creator, group
// ID) to workset #0 of the Domain's ObManControl workset group.
//
// Returns 0 on success, UT_RC_NO_MEM or an ObjectAdd error code on
// failure.
//
UINT CreateAnnounce
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    POM_WSGROUP pWSGroup
)
{
    POM_WSGROUP pOMCWSGroup;
    POM_WORKSET pOMCWorkset;
    POM_WSGROUP_INFO pInfoObject;
    POM_OBJECT pObj;
    OM_OBJECT_ID infoObjectID;
    UINT rc = 0;

    DebugEntry(CreateAnnounce);

    TRACE_OUT(("Announcing creation of WSG %d in Domain %u",
        pWSGroup->wsg, pDomain->callID));

    //
    // Announcing a new workset group involves adding an object which
    // defines the workset group to workset #0 in ObManControl.
    //
    // So, we derive a pointer to the workset...
    //
    pOMCWSGroup = GetOMCWsgroup(pDomain);
    pOMCWorkset = pOMCWSGroup->apWorksets[0];
    ASSERT((pOMCWorkset != NULL));

    //
    // ...create a definition object...
    //
    pInfoObject = (POM_WSGROUP_INFO)UT_MallocRefCount(sizeof(OM_WSGROUP_INFO), TRUE);
    if (!pInfoObject)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }

    //
    // ...fill in the fields...
    //
    // (length = sizeof - 4 since value of length field doesn't include the
    // size of the length field itself).
    //
    // NOTE(review): the code subtracts sizeof(OM_MAX_OBJECT_SIZE), i.e.
    // the size of that constant's type, not the constant's value - this
    // only equals the intended 4 if the constant is int-sized; confirm
    // against the OM_WSGROUP_INFO wire layout before changing.
    //
    pInfoObject->length = sizeof(OM_WSGROUP_INFO) -
        sizeof(OM_MAX_OBJECT_SIZE);
    pInfoObject->idStamp = OM_WSGINFO_ID_STAMP;
    pInfoObject->channelID = pWSGroup->channelID;
    pInfoObject->creator = pDomain->userID;
    pInfoObject->wsGroupID = pWSGroup->wsGroupID;

    //
    // NOTE(review): lstrcpy is unbounded - assumes the mapped name and
    // function profile strings always fit the fixed-size fields; the
    // sources are the internal c_aWsgMap/c_aFpMap tables, so this holds
    // as long as those table entries stay within the field sizes.
    //
    lstrcpy(pInfoObject->wsGroupName, OMMapWSGToName(pWSGroup->wsg));
    lstrcpy(pInfoObject->functionProfile, OMMapFPToName(pWSGroup->fpHandler));

    //
    // ...and add the object to the workset...
    //
    rc = ObjectAdd(pomPrimary->putTask,
        pomPrimary,
        pOMCWSGroup,
        pOMCWorkset,
        (POM_OBJECTDATA) pInfoObject,
        0,                       // update size == 0
        LAST,
        &infoObjectID,
        &pObj);
    if (rc != 0)
    {
        DC_QUIT;
    }

    TRACE_OUT(( "Announced new WSG %d in Domain %u",
        pWSGroup->wsg, pDomain->callID));

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // Cleanup:
        //
        ERROR_OUT(("Error %d announcing new WSG %d in Domain %u",
            rc, pWSGroup->wsg, pDomain->callID));
    }

    DebugExitDWORD(CreateAnnounce, rc);
    return(rc);
}
  4844. //
  4845. // WSGCatchUp(...)
  4846. //
  4847. UINT WSGCatchUp
  4848. (
  4849. POM_PRIMARY pomPrimary,
  4850. POM_DOMAIN pDomain,
  4851. POM_WSGROUP pWSGroup)
  4852. {
  4853. POM_WORKSET pOMCWorkset;
  4854. POM_OBJECT pObj;
  4855. POM_WSGROUP_REG_REC pRegObject;
  4856. NET_UID remoteUserID;
  4857. UINT rc = 0;
  4858. DebugEntry(WSGCatchUp);
  4859. TRACE_OUT(( "Starting catch-up for WSG %d in Domain %u",
  4860. pWSGroup->wsg, pDomain->callID));
  4861. //
  4862. // This should never be for the "local" Domain:
  4863. //
  4864. ASSERT((pDomain->callID != NET_INVALID_DOMAIN_ID));
  4865. //
  4866. // The catch-up procedure is as follows:
  4867. //
  4868. // - look in ObManControl workset group for the ID of an instance of
  4869. // ObMan which has a copy of this workset group
  4870. //
  4871. // - send it an OMNET_WSGROUP_SEND_REQ message
  4872. //
  4873. // So, start by getting a pointer to the relevant workset:
  4874. //
  4875. pOMCWorkset = GetOMCWorkset(pDomain, pWSGroup->wsGroupID);
  4876. ValidateWorkset(pOMCWorkset);
  4877. //
  4878. // Now we chain through the workset looking for a reg object which has
  4879. // status READY_TO_SEND:
  4880. //
  4881. pObj = (POM_OBJECT)COM_BasedListFirst(&(pOMCWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  4882. remoteUserID = 0;
  4883. while (pObj != NULL)
  4884. {
  4885. ValidateObject(pObj);
  4886. if (pObj->flags & DELETED)
  4887. {
  4888. //
  4889. // Skip this one
  4890. //
  4891. }
  4892. else if (!pObj->pData)
  4893. {
  4894. //
  4895. // Skip this one
  4896. //
  4897. ERROR_OUT(("WSGCatchUp: object 0x%08x has no data", pObj));
  4898. }
  4899. else
  4900. {
  4901. pRegObject = (POM_WSGROUP_REG_REC)pObj->pData;
  4902. ValidateObjectDataWSGREGREC(pRegObject);
  4903. if ((pRegObject->status == READY_TO_SEND) &&
  4904. (pRegObject->userID != pDomain->userID))
  4905. {
  4906. //
  4907. // OK, this node has a full copy, so we'll try to get it from
  4908. // there:
  4909. //
  4910. remoteUserID = pRegObject->userID;
  4911. break;
  4912. }
  4913. }
  4914. pObj = (POM_OBJECT)COM_BasedListNext(&(pOMCWorkset->objects), pObj,
  4915. FIELD_OFFSET(OM_OBJECT, chain));
  4916. }
  4917. //
  4918. // ...check that we did actually find a node to get the data from:
  4919. //
  4920. if (remoteUserID == 0)
  4921. {
  4922. WARNING_OUT(( "No node in Domain %u is ready to send WSG %d - retrying",
  4923. pDomain->callID, pWSGroup->wsg));
  4924. rc = OM_RC_NO_NODES_READY;
  4925. DC_QUIT;
  4926. }
  4927. //
  4928. // ...then send that node a request to send us the workset group:
  4929. //
  4930. rc = IssueSendReq(pomPrimary,
  4931. pDomain,
  4932. pWSGroup,
  4933. remoteUserID);
  4934. DC_EXIT_POINT:
  4935. if ((rc != 0) && (rc != OM_RC_NO_NODES_READY))
  4936. {
  4937. ERROR_OUT(( "Error %d starting catch-up for WSG %d in Domain %u",
  4938. rc, pWSGroup->wsg, pDomain->callID));
  4939. }
  4940. DebugExitDWORD(WSGCatchUp, rc);
  4941. return(rc);
  4942. }
  4943. //
  4944. // IssueSendDeny(...)
  4945. //
  4946. void IssueSendDeny
  4947. (
  4948. POM_PRIMARY pomPrimary,
  4949. POM_DOMAIN pDomain,
  4950. OM_WSGROUP_ID wsGroupID,
  4951. NET_UID sender,
  4952. OM_CORRELATOR remoteCorrelator
  4953. )
  4954. {
  4955. POMNET_WSGROUP_SEND_PKT pWSGSendPkt;
  4956. DebugEntry(IssueSendDeny);
  4957. //
  4958. // Now issue the SEND_DENY.
  4959. //
  4960. TRACE_OUT(( "Sending SEND_DENY message to late joiner 0x%08x", sender));
  4961. //
  4962. // We start by allocating some memory:
  4963. //
  4964. pWSGSendPkt = (POMNET_WSGROUP_SEND_PKT)UT_MallocRefCount(sizeof(OMNET_WSGROUP_SEND_PKT), TRUE);
  4965. if (!pWSGSendPkt)
  4966. {
  4967. ERROR_OUT(("Out of memory in IssueSendDeny"));
  4968. DC_QUIT;
  4969. }
  4970. //
  4971. // Now fill in the fields:
  4972. //
  4973. pWSGSendPkt->header.sender = pDomain->userID;
  4974. pWSGSendPkt->header.messageType = OMNET_WSGROUP_SEND_DENY;
  4975. pWSGSendPkt->wsGroupID = wsGroupID;
  4976. //
  4977. // SFR 7124. Return the correlator for this catchup.
  4978. //
  4979. pWSGSendPkt->correlator = remoteCorrelator;
  4980. //
  4981. // Queue the message to be sent.
  4982. //
  4983. QueueMessage(pomPrimary->putTask,
  4984. pDomain,
  4985. sender,
  4986. NET_TOP_PRIORITY,
  4987. NULL, // no WSG
  4988. NULL, // no workset
  4989. NULL, // no object
  4990. (POMNET_PKT_HEADER) pWSGSendPkt,
  4991. NULL, // no object data
  4992. TRUE);
  4993. DC_EXIT_POINT:
  4994. DebugExitVOID(IssueSendDeny);
  4995. }
  4996. //
  4997. //
  4998. //
  4999. // IssueSendReq(...)
  5000. //
  5001. //
  5002. //
  5003. UINT IssueSendReq(POM_PRIMARY pomPrimary,
  5004. POM_DOMAIN pDomain,
  5005. POM_WSGROUP pWSGroup,
  5006. NET_UID helperNode)
  5007. {
  5008. POMNET_WSGROUP_SEND_PKT pWSGSendPkt;
  5009. UINT rc = 0;
  5010. DebugEntry(IssueSendReq);
  5011. //
  5012. // We start by allocating some memory for the OMNET_SEND_REQ message:
  5013. //
  5014. pWSGSendPkt = (POMNET_WSGROUP_SEND_PKT)UT_MallocRefCount(sizeof(OMNET_WSGROUP_SEND_PKT), TRUE);
  5015. if (!pWSGSendPkt)
  5016. {
  5017. rc = UT_RC_NO_MEM;
  5018. DC_QUIT;
  5019. }
  5020. //
  5021. // Now fill in the fields:
  5022. //
  5023. // SFR 7124. Generate a correlator so we can match
  5024. // SEND_MIDWAY,SEND_COMPLETE and SEND_DENY messages to this catchup.
  5025. //
  5026. pWSGSendPkt->header.sender = pDomain->userID;
  5027. pWSGSendPkt->header.messageType = OMNET_WSGROUP_SEND_REQ;
  5028. pWSGSendPkt->wsGroupID = pWSGroup->wsGroupID;
  5029. pWSGroup->catchupCorrelator = NextCorrelator(pomPrimary);
  5030. pWSGSendPkt->correlator = pWSGroup->catchupCorrelator;
  5031. //
  5032. // The <helperNode> parameter is the node which the calling function
  5033. // has identified as a remote node which is capable of sending us the
  5034. // workset group we want. So, we send that instance of ObMan an
  5035. // OMNET_WSGROUP_SEND_REQ on its single-user channel, enclosing our own
  5036. // single-user channel ID for the response:
  5037. //
  5038. // Note: the SEND_REQ must not overtake any data on its way from us to
  5039. // the remote node (e.g. if we've just added an object,
  5040. // deregistered and then reregistered). Therefore, set the
  5041. // NET_SEND_ALL_PRIORITIES flag.
  5042. //
  5043. // SFR 6117: Don't believe this is a problem for R2.0, so just send at
  5044. // low priority.
  5045. //
  5046. rc = QueueMessage(pomPrimary->putTask,
  5047. pDomain,
  5048. helperNode,
  5049. NET_LOW_PRIORITY,
  5050. pWSGroup,
  5051. NULL, // no workset
  5052. NULL, // no object
  5053. (POMNET_PKT_HEADER) pWSGSendPkt,
  5054. NULL, // no object data
  5055. TRUE);
  5056. if (rc != 0)
  5057. {
  5058. DC_QUIT;
  5059. }
  5060. //
  5061. // Set the workset group state, and record the number of SEND_MIDWAY
  5062. // and SEND_COMPLETE messages we're expecting (one for R11, one per
  5063. // priority for R20).
  5064. //
  5065. // Note: we set the counts up here because we may get some of the
  5066. // SEND_COMPLETEs before we get all the SEND_MIDWAYs, so to set the
  5067. // count in ProcessSendMidway would be too late.
  5068. //
  5069. pWSGroup->state = PENDING_SEND_MIDWAY;
  5070. pWSGroup->sendMidwCount = NET_NUM_PRIORITIES;
  5071. pWSGroup->sendCompCount = NET_NUM_PRIORITIES;
  5072. //
  5073. // Store the helper node ID in the WSG structure.
  5074. //
  5075. pWSGroup->helperNode = helperNode;
  5076. DC_EXIT_POINT:
  5077. if (rc != 0)
  5078. {
  5079. //
  5080. // Cleanup:
  5081. //
  5082. ERROR_OUT(( "Error %d requesting send from node 0x%08x "
  5083. "for WSG %d in Domain %u",
  5084. rc, pWSGroup->wsg, helperNode, pDomain->callID));
  5085. }
  5086. else
  5087. {
  5088. //
  5089. // Success:
  5090. //
  5091. TRACE_OUT(("Requested copy of WSG %d' from node 0x%08x (in Domain %u), correlator %hu",
  5092. pWSGroup->wsg, helperNode, pDomain->callID,
  5093. pWSGroup->catchupCorrelator));
  5094. }
  5095. DebugExitDWORD(IssueSendReq, rc);
  5096. return(rc);
  5097. }
  5098. //
  5099. // ProcessSendReq(...)
  5100. //
  5101. void ProcessSendReq
  5102. (
  5103. POM_PRIMARY pomPrimary,
  5104. POM_DOMAIN pDomain,
  5105. POMNET_WSGROUP_SEND_PKT pSendReqPkt
  5106. )
  5107. {
  5108. POM_WSGROUP pWSGroup;
  5109. POM_WORKSET pWorkset;
  5110. POM_HELPER_CB pHelperCB;
  5111. NET_UID sender;
  5112. BOOL sendDeny = FALSE;
  5113. DebugEntry(ProcessSendReq);
  5114. //
  5115. // This is the user ID of the late joiner:
  5116. //
  5117. sender = pSendReqPkt->header.sender;
  5118. //
  5119. // We start by finding our copy of the workset group:
  5120. //
  5121. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
  5122. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  5123. FIELD_OFFSET(OM_WSGROUP, wsGroupID),
  5124. (DWORD)pSendReqPkt->wsGroupID, FIELD_SIZE(OM_WSGROUP, wsGroupID));
  5125. //
  5126. // Quit and deny the send if workset group not found:
  5127. //
  5128. if (pWSGroup == NULL)
  5129. {
  5130. WARNING_OUT(( "Don't have workset group %hu to send to node 0x%08x",
  5131. pSendReqPkt->wsGroupID, sender));
  5132. sendDeny = TRUE;
  5133. DC_QUIT;
  5134. }
  5135. //
  5136. // Quit and deny the send if we don't have ALL the workset group:
  5137. //
  5138. if (pWSGroup->state != WSGROUP_READY)
  5139. {
  5140. WARNING_OUT(("WSG %d is in state %hu - can't send to node 0x%08x",
  5141. pWSGroup->wsg, pWSGroup->state, sender));
  5142. sendDeny = TRUE;
  5143. DC_QUIT;
  5144. }
  5145. TRACE_OUT(( "Processing SEND_REQUEST from node 0x%08x for WSG %d, correlator %hu",
  5146. sender, pWSGroup->wsg, pSendReqPkt->correlator));
  5147. //
  5148. // Right, we're fully registered with the workset group, so we will be
  5149. // its helper node. First, allocate a helper CB to keep track of the
  5150. // process:
  5151. //
  5152. if (!NewHelperCB(pDomain,
  5153. pWSGroup,
  5154. sender,
  5155. pSendReqPkt->correlator,
  5156. &pHelperCB))
  5157. {
  5158. //
  5159. // Deny the workset send request
  5160. //
  5161. sendDeny = TRUE;
  5162. WARNING_OUT(( "Failed to allocate helper CB - issuing SEND_DENY"));
  5163. DC_QUIT;
  5164. }
  5165. //
  5166. // Before we can send the contents of the workset group to the late
  5167. // joiner, we must ensure that our view of the contents is up to date.
  5168. // We do this by checkpointing the workset group, which means locking
  5169. // the dummy workset which exists in all workset groups. Do this now:
  5170. //
  5171. pWorkset = pWSGroup->apWorksets[OM_CHECKPOINT_WORKSET];
  5172. WorksetLockReq(pomPrimary->putTask, pomPrimary,
  5173. pWSGroup,
  5174. pWorkset,
  5175. 0,
  5176. &(pHelperCB->lockCorrelator));
  5177. //
  5178. // We will shortly get a WORKSET_LOCK_CON event containing the
  5179. // correlator just stored in the helper CB. We will look this up and
  5180. // continue the catch-up process then.
  5181. //
  5182. DC_EXIT_POINT:
  5183. //
  5184. // If we set the sendDeny flag above then now send the SEND_DENY
  5185. // message to the late joiner.
  5186. //
  5187. if (sendDeny)
  5188. {
  5189. IssueSendDeny(pomPrimary,
  5190. pDomain,
  5191. pSendReqPkt->wsGroupID,
  5192. sender,
  5193. pSendReqPkt->correlator);
  5194. }
  5195. DebugExitVOID(ProcessSendReq);
  5196. }
  5197. //
  5198. // SendWSGToLateJoiner(...)
  5199. //
//
// Send the full contents of <pWSGroup> to late joiner <lateJoiner>.
//
// Protocol sequence (all correlated by <remoteCorrelator> from the late
// joiner's SEND_REQ):
//   1. announce the late joiner's registration (RegAnnounceBegin),
//   2. start flow control on its low-priority channel,
//   3. one WORKSET_CATCHUP per workset,
//   4. a WSG_SEND_MIDWAY marker (all priorities),
//   5. one OBJECT_CATCHUP per object (including deleted ones),
//   6. a WSG_SEND_COMPLETE marker (all priorities).
//
// On any failure a SEND_DENY is issued to the late joiner instead.
//
void SendWSGToLateJoiner
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    POM_WSGROUP pWSGroup,
    NET_UID lateJoiner,
    OM_CORRELATOR remoteCorrelator
)
{
    POM_WORKSET pWorkset;
    POMNET_OPERATION_PKT pPacket;
    POM_OBJECT pObj;
    POMNET_WSGROUP_SEND_PKT pSendMidwayPkt;
    POMNET_WSGROUP_SEND_PKT pSendCompletePkt;
    POM_OBJECTDATA pData;
    OM_WORKSET_ID worksetID;
    UINT maxSeqUsed = 0;        // highest object-ID sequence the late joiner ever used here
    NET_PRIORITY catchupPriority = 0;
    UINT rc = 0;

    DebugEntry(SendWSGToLateJoiner);

    //
    // The first thing to do is to announce that the remote node is
    // registering with the workset group:
    //
    rc = RegAnnounceBegin(pomPrimary,
                          pDomain,
                          pWSGroup,
                          lateJoiner,
                          &pObj);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // We then start flow control on the user channel of the node that we
    // are sending the data to.  We only start flow control on the low
    // priority channel and don't bother to restrict the maximum stream
    // size.  If flow control is already started on this stream then this
    // call will have no effect.  Note that flow control will automatically
    // be stopped when the call ends.
    //
    MG_FlowControlStart(pomPrimary->pmgClient,
                        lateJoiner,
                        NET_LOW_PRIORITY,
                        0,
                        8192);

    //
    // Now, cycle through each of the worksets and generate and send
    //
    // - WORKSET_NEW messages for each workset,
    //
    // - a WSG_SEND_MIDWAY message to indicate we've sent all the worksets
    //
    // - OBJECT_ADD messages for each of the objects in each of the
    //   worksets.
    //
    // - a WSG_SEND_COMPLETE message to indicate we've sent all the
    //   objects.
    //
    // NOTE:  We do not send CHECKPOINT worksets, so the for loop should
    //        stop before it gets 255.
    //
    for (worksetID = 0; worksetID < OM_MAX_WORKSETS_PER_WSGROUP; worksetID++)
    {
        pWorkset = pWSGroup->apWorksets[worksetID];
        if (!pWorkset)
        {
            // Unused workset slot - nothing to announce
            continue;
        }

        TRACE_OUT(( "Sending WORKSET_CATCHUP for workset %u", worksetID));

        rc = GenerateOpMessage(pWSGroup,
                               worksetID,
                               NULL,             // no object ID
                               NULL,             // no object data
                               OMNET_WORKSET_CATCHUP,
                               &pPacket);
        if (rc != 0)
        {
            DC_QUIT;
        }

        rc = QueueMessage(pomPrimary->putTask,
                          pWSGroup->pDomain,
                          lateJoiner,
                          NET_TOP_PRIORITY,
                          pWSGroup,
                          pWorkset,
                          NULL,                  // no object
                          (POMNET_PKT_HEADER) pPacket,
                          NULL,                  // no object data
                          TRUE);
        if (rc != 0)
        {
            DC_QUIT;
        }
    }

    //
    // Now send the SEND_MIDWAY message to indicate that all the
    // WORKSET_NEW messages have been sent:
    //
    pSendMidwayPkt = (POMNET_WSGROUP_SEND_PKT)UT_MallocRefCount(sizeof(OMNET_WSGROUP_SEND_PKT), TRUE);
    if (!pSendMidwayPkt)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }

    pSendMidwayPkt->header.sender      = pDomain->userID;
    pSendMidwayPkt->header.messageType = OMNET_WSGROUP_SEND_MIDWAY;
    pSendMidwayPkt->wsGroupID          = pWSGroup->wsGroupID;
    pSendMidwayPkt->correlator         = remoteCorrelator;

    //
    // The next field is the ID of the reg object which we added above.
    // So, convert the handle of the reg object returned by RegAnnouncBegin
    // to a pointer to the object record and then copy the object ID into
    // the message packet:
    //
    memcpy(&(pSendMidwayPkt->objectID), &(pObj->objectID), sizeof(OM_OBJECT_ID));

    //
    // The last field, which is the highest object ID sequence number
    // previously used by the late joiner in this workset group, is not yet
    // known; it will be filled in below.  However (see note below), we
    // queue the message now to ensure it doesn't get stuck behind lots of
    // objects:
    //
    TRACE_OUT(("Queueing WSG_SEND_MIDWAY message to node 0x%08x for WSG %d, correlator %hu",
        lateJoiner, pWSGroup->wsg, remoteCorrelator));

    rc = QueueMessage(pomPrimary->putTask,
                      pWSGroup->pDomain,
                      lateJoiner,
                      NET_TOP_PRIORITY | NET_SEND_ALL_PRIORITIES,
                      pWSGroup,
                      NULL,                      // no workset
                      NULL,                      // no object
                      (POMNET_PKT_HEADER) pSendMidwayPkt,
                      NULL,                      // no object data
                      TRUE);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // If the workset group is ObMan control then we should send it at top
    // priority to ensure that it can overtake any slower pending sends to
    // other nodes.  Otherwise we send the data at the lowest priority.
    //
    if (pWSGroup->wsGroupID == WSGROUPID_OMC)
    {
        catchupPriority = NET_TOP_PRIORITY;
    }
    else
    {
        catchupPriority = NET_LOW_PRIORITY;
    }

    TRACE_OUT(( "Sending catchup data at priority %hu for 0x%08x",
        catchupPriority,
        lateJoiner));

    //
    // Now start the loop which does the OBJECT_ADDs:
    //
    for (worksetID = 0; worksetID < OM_MAX_WORKSETS_PER_WSGROUP; worksetID++)
    {
        pWorkset = pWSGroup->apWorksets[worksetID];
        if (pWorkset == NULL)
        {
            continue;
        }

        TRACE_OUT(( "Sending OBJECT_CATCHUPs for workset %u", worksetID));

        //
        // Note that we must send deleted objects too, since late-joiners
        // have just as much need as we do to detect out of date
        // operations:
        //
        pObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
        while (pObj != NULL)
        {
            ValidateObject(pObj);

            //
            // The workset group that the late joiner is catching up with
            // may contain objects which it has added in a previous call
            // (with the same network user ID).  Since that call is over,
            // it may reuse IDs present in this workset group - to prevent
            // this, we must tell it the highest sequence count it used for
            // object IDs for this workset group, so while we're going
            // through the objects, keep a count:
            //
            if (pObj->objectID.creator == lateJoiner)
            {
                maxSeqUsed = max(maxSeqUsed, pObj->objectID.sequence);
            }

            if (pObj->flags & PENDING_DELETE)
            {
                //
                // If the object is pending delete at this node, we do not
                // send the object data.  The way to avoid this is to set
                // pData to NULL (must be done before call to
                // GenerateOpMessage):
                //
                pData = NULL;
            }
            else
            {
                pData = pObj->pData;
                if (pData)
                {
                    ValidateObjectData(pData);
                }
            }

            //
            // Now generate the message packet:
            //
            rc = GenerateOpMessage(pWSGroup,
                                   worksetID,
                                   &(pObj->objectID),
                                   pData,
                                   OMNET_OBJECT_CATCHUP,
                                   &pPacket);
            if (rc != 0)
            {
                DC_QUIT;
            }

            //
            // Now fill in the catchup-specific fields (note that the
            // <seqStamp> will already have been filled in, but with the
            // current sequence stamp for the workset; for a CatchUp
            // message, this should be the add stamp for the object):
            //
            pPacket->position   = pObj->position;
            pPacket->flags      = pObj->flags;
            pPacket->updateSize = pObj->updateSize;

            if (pObj->flags & PENDING_DELETE)
            {
                //
                // If the object is pending delete at this node, we send it
                // as if it has been delete-confirmed (since local
                // delete-confirms or their DC_ABSence should have no effect
                // outside this box).  To do this, we just set the DELETED
                // flag in the packet:
                //
                pPacket->flags &= ~PENDING_DELETE;
                pPacket->flags |= DELETED;
            }

            COPY_SEQ_STAMP(pPacket->seqStamp,      pObj->addStamp);
            COPY_SEQ_STAMP(pPacket->positionStamp, pObj->positionStamp);
            COPY_SEQ_STAMP(pPacket->updateStamp,   pObj->updateStamp);
            COPY_SEQ_STAMP(pPacket->replaceStamp,  pObj->replaceStamp);

            //
            // ...and queue the message:
            //
            rc = QueueMessage(pomPrimary->putTask,
                              pWSGroup->pDomain,
                              lateJoiner,
                              catchupPriority,
                              pWSGroup,
                              pWorkset,
                              NULL,              // no object
                              (POMNET_PKT_HEADER) pPacket,
                              pData,
                              TRUE);
            if (rc != 0)
            {
                DC_QUIT;
            }

            //
            // Now go around the loop again:
            //
            pObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObj,
                FIELD_OFFSET(OM_OBJECT, chain));
        }
    }

    //
    // Now that we know the max sequence number used by this user ID in
    // this workset group, we can set the field in the SEND_MIDWAY packet:
    //
    // NOTE: because the ObMan task is single threaded (in the DC_ABSence of
    //       assertion failure which cause a sort of multithreading while
    //       the assert box is up) it is safe to alter this value AFTER the
    //       message has been queued because we know that the queue will
    //       not have been serviced yet.
    //
    pSendMidwayPkt->maxObjIDSeqUsed = maxSeqUsed;

    //
    // Now we send the OMNET_SEND_COMPLETE message.  First, allocate some
    // memory...
    //
    pSendCompletePkt = (POMNET_WSGROUP_SEND_PKT)UT_MallocRefCount(sizeof(OMNET_WSGROUP_SEND_PKT), TRUE);
    if (!pSendCompletePkt)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }

    //
    // ...fill in the fields...
    //
    pSendCompletePkt->header.sender      = pDomain->userID;
    pSendCompletePkt->header.messageType = OMNET_WSGROUP_SEND_COMPLETE;
    pSendCompletePkt->wsGroupID          = pWSGroup->wsGroupID;
    pSendCompletePkt->correlator         = remoteCorrelator;

    //
    // ...and queue the message for sending (it mustn't overtake any of the
    // data so send it at all priorities):
    //
    TRACE_OUT(( "Sending WSG_SEND_COMPLETE message, correlator %hu",
        remoteCorrelator));

    rc = QueueMessage(pomPrimary->putTask,
                      pWSGroup->pDomain,
                      lateJoiner,
                      NET_LOW_PRIORITY | NET_SEND_ALL_PRIORITIES,
                      pWSGroup,
                      NULL,                      // no workset
                      NULL,                      // no object
                      (POMNET_PKT_HEADER) pSendCompletePkt,
                      NULL,                      // no object data
                      TRUE);
    if (rc != 0)
    {
        DC_QUIT;
    }

    TRACE_OUT(( "Processed send request from node 0x%08x for WSG %d",
        lateJoiner, pWSGroup->wsg));

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // An error occurred.  We must issue a SEND_DENY message to the
        // remote node.
        //
        ERROR_OUT(( "Error %d sending WSG %d to node 0x%08x",
            rc, pWSGroup->wsg, lateJoiner));

        IssueSendDeny(pomPrimary,
                      pDomain,
                      pWSGroup->wsGroupID,
                      lateJoiner,
                      remoteCorrelator);
    }

    DebugExitVOID(SendWSGToLateJoiner);
}
  5536. //
  5537. // ProcessSendMidway(...)
  5538. //
  5539. void ProcessSendMidway
  5540. (
  5541. POM_PRIMARY pomPrimary,
  5542. POM_DOMAIN pDomain,
  5543. POMNET_WSGROUP_SEND_PKT pSendMidwayPkt
  5544. )
  5545. {
  5546. POM_WORKSET pOMCWorkset;
  5547. POM_WSGROUP_REG_CB pRegistrationCB = NULL;
  5548. POM_WSGROUP pWSGroup;
  5549. BOOL fSetPersonData;
  5550. NET_UID sender;
  5551. POM_OBJECT pObjReg;
  5552. UINT rc = 0;
  5553. DebugEntry(ProcessSendMidway);
  5554. sender = pSendMidwayPkt->header.sender;
  5555. //
  5556. // OK, this is an message indicating that the helper node has sent us
  5557. // all the WORKSET_CATCHUPs in the workset group we're catching up with
  5558. // (but note that the objects haven't yet been sent).
  5559. //
  5560. // So, search the list of pending registrations using the correlator
  5561. // value in the packet (we can't use the workset group ID since if it
  5562. // is zero i.e. ObManControl, we'll match on workset groups which
  5563. // haven't yet had their IDs determined (since they are initially
  5564. // zero).
  5565. //
  5566. if (pSendMidwayPkt->wsGroupID == WSGROUPID_OMC)
  5567. {
  5568. //
  5569. // This is a SEND_MIDWAY message for ObManControl.
  5570. //
  5571. pWSGroup = GetOMCWsgroup(pDomain);
  5572. fSetPersonData = FALSE;
  5573. }
  5574. else
  5575. {
  5576. //
  5577. // Not for ObManControl so we search the list of pending
  5578. // registrations.
  5579. //
  5580. pRegistrationCB = (POM_WSGROUP_REG_CB)COM_BasedListFirst(&(pDomain->pendingRegs),
  5581. FIELD_OFFSET(OM_WSGROUP_REG_CB, chain));
  5582. while ((pRegistrationCB != NULL) && (pRegistrationCB->pWSGroup->wsGroupID != pSendMidwayPkt->wsGroupID))
  5583. {
  5584. pRegistrationCB = (POM_WSGROUP_REG_CB)COM_BasedListNext(&(pDomain->pendingRegs),
  5585. pRegistrationCB, FIELD_OFFSET(OM_WSGROUP_REG_CB, chain));
  5586. }
  5587. if (pRegistrationCB == NULL)
  5588. {
  5589. WARNING_OUT(( "Unexpected SEND_MIDWAY for WSG %hu from 0x%08x",
  5590. pSendMidwayPkt->wsGroupID, sender));
  5591. DC_QUIT;
  5592. }
  5593. pWSGroup = pRegistrationCB->pWSGroup;
  5594. fSetPersonData = TRUE;
  5595. }
  5596. if (!pWSGroup->valid)
  5597. {
  5598. WARNING_OUT(( "Recd SEND_MIDWAY too late for WSG %d (marked invalid)",
  5599. pWSGroup->wsg));
  5600. DC_QUIT;
  5601. }
  5602. //
  5603. // We should be in the PENDING_SEND_MIDWAY state:
  5604. //
  5605. if (pWSGroup->state != PENDING_SEND_MIDWAY)
  5606. {
  5607. WARNING_OUT(( "Recd SEND_MIDWAY with WSG %d in state %hu",
  5608. pWSGroup->wsg, pWSGroup->state));
  5609. DC_QUIT;
  5610. }
  5611. //
  5612. // SFR 7124. Check the correlator of this SEND_MIDWAY against the
  5613. // correlator we generated locally when we sent the last SEND_REQUEST.
  5614. // If they dont match, this is part of an out of date catchup which we
  5615. // can ignore.
  5616. //
  5617. if (pSendMidwayPkt->correlator != pWSGroup->catchupCorrelator)
  5618. {
  5619. WARNING_OUT(("Ignoring SEND_MIDWAY with old correlator %hu (expecting %hu)",
  5620. pSendMidwayPkt->correlator, pWSGroup->catchupCorrelator));
  5621. DC_QUIT;
  5622. }
  5623. //
  5624. // We should get four of these messages, one at each priority (except
  5625. // in a backlevel call when we only get one). Check how many are
  5626. // outstanding:
  5627. //
  5628. pWSGroup->sendMidwCount--;
  5629. if (pWSGroup->sendMidwCount != 0)
  5630. {
  5631. TRACE_OUT(( "Still need %hu SEND_MIDWAY(s) for WSG %d",
  5632. pWSGroup->sendMidwCount, pWSGroup->wsg));
  5633. DC_QUIT;
  5634. }
  5635. TRACE_OUT(( "Last SEND_MIDWAY for WSG %d, ID %hu, from 0x%08x",
  5636. pWSGroup->wsg, pWSGroup->wsGroupID, sender));
  5637. //
  5638. // Set up pointers to the ObManControl workset which holds the reg
  5639. // objects for the workset group we've just registered with:
  5640. //
  5641. pOMCWorkset = GetOMCWorkset(pDomain, pWSGroup->wsGroupID);
  5642. //
  5643. // If we don't have an associated OMC workset, something's wrong...
  5644. //
  5645. if (pOMCWorkset == NULL)
  5646. {
  5647. //
  5648. // ...unless it's ObManControl itself that we're catching up with -
  5649. // since we can get its SEND_MIDWAY before we've got any of the
  5650. // WORKSET_CATCHUPs:
  5651. //
  5652. if (pWSGroup->wsGroupID != WSGROUPID_OMC)
  5653. {
  5654. ERROR_OUT(( "Got SEND_MIDWAY for unknown workset group %hu!",
  5655. pWSGroup->wsGroupID));
  5656. }
  5657. DC_QUIT;
  5658. }
  5659. //
  5660. // Convert the ID of our reg object (as sent by our helper who added it
  5661. // in the first place) to an object handle:
  5662. //
  5663. rc = ObjectIDToPtr(pOMCWorkset, pSendMidwayPkt->objectID, &pObjReg);
  5664. if (rc != 0)
  5665. {
  5666. DC_QUIT;
  5667. }
  5668. //
  5669. // If we haven't yet stored a reg object handle for this workset
  5670. // group...
  5671. //
  5672. if (pWSGroup->pObjReg == NULL)
  5673. {
  5674. //
  5675. // ...store it now...
  5676. //
  5677. pWSGroup->pObjReg = pObjReg;
  5678. }
  5679. //
  5680. // ...but if we have...
  5681. //
  5682. else // pWSGroup->pObjReg != NULL
  5683. {
  5684. //
  5685. // ...and if it's a different one, something's wrong:
  5686. //
  5687. if (pWSGroup->pObjReg != pObjReg)
  5688. {
  5689. WARNING_OUT(( "Recd SEND_MIDWAY from node 0x%08x claiming our reg object "
  5690. "for WSG %d is 0x%08x but we think it's 0x%08x",
  5691. sender, pWSGroup->wsg, pObjReg,pWSGroup->pObjReg));
  5692. }
  5693. }
  5694. //
  5695. // OK, if we've passed all the above tests then everything is normal,
  5696. // so proceed:
  5697. //
  5698. pWSGroup->state = PENDING_SEND_COMPLETE;
  5699. if (pSendMidwayPkt->maxObjIDSeqUsed > pomPrimary->objectIDsequence)
  5700. {
  5701. TRACE_OUT(( "We've already used ID sequence numbers up to %u for "
  5702. "this workset group - setting global sequence count to this value",
  5703. pSendMidwayPkt->objectID.sequence));
  5704. pomPrimary->objectIDsequence = pSendMidwayPkt->objectID.sequence;
  5705. }
  5706. //
  5707. // Our registration object (added by the remote node) should have
  5708. // arrived by now. We need to add the FE/person data to it (unless
  5709. // this is for ObManControl, in which case there won't be any):
  5710. //
  5711. if (fSetPersonData)
  5712. {
  5713. rc = SetPersonData(pomPrimary, pDomain, pWSGroup);
  5714. if (rc != 0)
  5715. {
  5716. DC_QUIT;
  5717. }
  5718. }
  5719. //
  5720. // Now post the successful REGISTER_CON event back to the Client, if we
  5721. // found a reg CB above:
  5722. //
  5723. if (pRegistrationCB != NULL)
  5724. {
  5725. WSGRegisterResult(pomPrimary, pRegistrationCB, 0);
  5726. }
  5727. DC_EXIT_POINT:
  5728. DebugExitVOID(ProcessSendMidway);
  5729. }
  5730. //
  5731. // ProcessSendComplete(...)
  5732. //
  5733. UINT ProcessSendComplete
  5734. (
  5735. POM_PRIMARY pomPrimary,
  5736. POM_DOMAIN pDomain,
  5737. POMNET_WSGROUP_SEND_PKT pSendCompletePkt
  5738. )
  5739. {
  5740. POM_WSGROUP pWSGroup;
  5741. NET_UID sender;
  5742. UINT rc = 0;
  5743. DebugEntry(ProcessSendComplete);
  5744. //
  5745. // We are now "fully-caught-up" and so are eligible to be helpers
  5746. // ourselves, i.e. if someone wants to ask us for the workset group,
  5747. // we will be able to send them a copy.
  5748. //
  5749. sender = pSendCompletePkt->header.sender;
  5750. //
  5751. // First, we find the workset group the message relates to:
  5752. //
  5753. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
  5754. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  5755. FIELD_OFFSET(OM_WSGROUP, wsGroupID),
  5756. (DWORD)pSendCompletePkt->wsGroupID,
  5757. FIELD_SIZE(OM_WSGROUP, wsGroupID));
  5758. if (pWSGroup == NULL)
  5759. {
  5760. //
  5761. // This will happen just after we have deregistered from a WSGroup
  5762. //
  5763. WARNING_OUT(( "Unexpected SEND_COMPLETE (ID %hu) from node 0x%08x",
  5764. pSendCompletePkt->wsGroupID, sender));
  5765. DC_QUIT;
  5766. }
  5767. if (!pWSGroup->valid)
  5768. {
  5769. //
  5770. // This will happen while we are in the process of deregistering
  5771. // from a workset group.
  5772. //
  5773. WARNING_OUT(( "Recd SEND_COMPLETE too late for WSG %d (marked invalid)",
  5774. pWSGroup->wsg));
  5775. DC_QUIT;
  5776. }
  5777. //
  5778. // Check it has come from the correct node and that we are in an
  5779. // appropriate state to receive it.
  5780. //
  5781. // The correct state is either PENDING_SEND_COMPLETE or
  5782. // PENDING_SEND_MIDWAY (we can receive SEND_COMPLETEs in
  5783. // PENDING_SEND_MIDWAY state because of MCS packet reordering).
  5784. //
  5785. if (pSendCompletePkt->header.sender != pWSGroup->helperNode)
  5786. {
  5787. //
  5788. // This will happen if we get a late SEND_COMPLETE after we have
  5789. // decided to catch up from someone else - don't think this should
  5790. // happen!
  5791. //
  5792. // lonchanc: this actually happened in bug #1554.
  5793. // Changed ERROR_OUT to WARNING_OUT
  5794. WARNING_OUT(( "Got SEND_COMPLETE from 0x%08x for WSG %d but helper is 0x%08x",
  5795. sender, pWSGroup->wsg, pWSGroup->helperNode));
  5796. DC_QUIT;
  5797. }
  5798. if ((pWSGroup->state != PENDING_SEND_MIDWAY)
  5799. &&
  5800. (pWSGroup->state != PENDING_SEND_COMPLETE))
  5801. {
  5802. WARNING_OUT(( "Got SEND_COMPLETE for WSG %d from 0x%08x in bad state %hu",
  5803. pWSGroup->wsg, sender, pWSGroup->state));
  5804. DC_QUIT;
  5805. }
  5806. //
  5807. // SFR 7124. Check the correlator of this SEND_COMPLETE against the
  5808. // correlator we generated locally when we sent the last SEND_REQUEST.
  5809. // If they dont match, this is part of an out of date catchup which we
  5810. // can ignore.
  5811. //
  5812. if (pSendCompletePkt->correlator != pWSGroup->catchupCorrelator)
  5813. {
  5814. WARNING_OUT((
  5815. "Ignoring SEND_COMPLETE with old correlator %hu (expecting %hu)",
  5816. pSendCompletePkt->correlator, pWSGroup->catchupCorrelator));
  5817. DC_QUIT;
  5818. }
  5819. //
  5820. // We should get four of these messages, one at each priority (except
  5821. // in a backlevel call when we only get one). Check how many are
  5822. // outstanding:
  5823. //
  5824. pWSGroup->sendCompCount--;
  5825. if (pWSGroup->sendCompCount != 0)
  5826. {
  5827. TRACE_OUT(( "Still need %hu SEND_COMPLETE(s) for WSG %d obj 0x%08x",
  5828. pWSGroup->sendCompCount, pWSGroup->wsg,
  5829. pWSGroup->pObjReg));
  5830. DC_QUIT;
  5831. }
  5832. //
  5833. // If so, we announce that we are registered:
  5834. //
  5835. TRACE_OUT(( "Last SEND_COMPLETE for WSG %d, ID %hu, from 0x%08x obj 0x%08x",
  5836. pWSGroup->wsg, pWSGroup->wsGroupID, sender,
  5837. pWSGroup->pObjReg));
  5838. rc = RegAnnounceComplete(pomPrimary, pDomain, pWSGroup);
  5839. if (rc != 0)
  5840. {
  5841. DC_QUIT;
  5842. }
  5843. //
  5844. // In addition to the above, if this send-completion message is for the
  5845. // ObManControl workset group we must also set the Domain state:
  5846. //
  5847. if (pSendCompletePkt->wsGroupID == WSGROUPID_OMC)
  5848. {
  5849. //
  5850. // If this message relates to the ObManControl workset group, its
  5851. // arrival signifies that we have completed the Domain attach
  5852. // process, and are now free to continue the processing of the
  5853. // workset group registration attempt which prompted the attach in
  5854. // the first place.
  5855. //
  5856. // The way we "continue" is to set the Domain state to
  5857. // DOMAIN_READY, so that next time the delayed-and-retried
  5858. // OMINT_EVENT_WSGROUP_REGISTER event arrives, it will actually be
  5859. // processed rather than bounced again.
  5860. //
  5861. TRACE_OUT(( "ObManControl fully arrived for Domain %u - inhibiting token",
  5862. pDomain->callID));
  5863. rc = MG_TokenInhibit(pomPrimary->pmgClient,
  5864. pDomain->tokenID);
  5865. if (rc != 0)
  5866. {
  5867. DC_QUIT;
  5868. }
  5869. pDomain->state = PENDING_TOKEN_INHIBIT;
  5870. }
  5871. DC_EXIT_POINT:
  5872. if (rc != 0)
  5873. {
  5874. ERROR_OUT(( "Error %d processing SEND_COMPLETE for WSG %u:%hu",
  5875. rc, pDomain->callID, pSendCompletePkt->wsGroupID));
  5876. }
  5877. DebugExitDWORD(ProcessSendComplete, rc);
  5878. return(rc);
  5879. }
  5880. //
  5881. // RegAnnounceBegin(...)
  5882. //
  5883. UINT RegAnnounceBegin
  5884. (
  5885. POM_PRIMARY pomPrimary,
  5886. POM_DOMAIN pDomain,
  5887. POM_WSGROUP pWSGroup,
  5888. NET_UID nodeID,
  5889. POM_OBJECT * ppObjReg
  5890. )
  5891. {
  5892. POM_WSGROUP pOMCWSGroup;
  5893. POM_WORKSET pOMCWorkset;
  5894. POM_WSGROUP_REG_REC pRegObject = NULL;
  5895. OM_OBJECT_ID regObjectID;
  5896. UINT updateSize;
  5897. UINT rc = 0;
  5898. DebugEntry(RegAnnounceBegin);
  5899. //
  5900. // Trace out who this reg object is for:
  5901. //
  5902. if (nodeID == pDomain->userID)
  5903. {
  5904. TRACE_OUT(("Announcing start of our reg with WSG %d in Domain %u",
  5905. pWSGroup->wsg, pDomain->callID));
  5906. }
  5907. else
  5908. {
  5909. TRACE_OUT(( "Announcing start of reg with WSG %d in Domain %u for node 0x%08x",
  5910. pWSGroup->wsg, pDomain->callID, nodeID));
  5911. }
  5912. //
  5913. // To announce the fact that a node has registered with a workset group,
  5914. // we add a registration object to the relevant workset in ObManControl.
  5915. //
  5916. //
  5917. // The "relevant" ObManControl workset is that whose ID is the same as
  5918. // the ID of the workset group. To add an object to this workset, we
  5919. // will need pointers to the workset itself and to the ObManControl
  5920. // workset group:
  5921. //
  5922. pOMCWSGroup = GetOMCWsgroup(pDomain);
  5923. pOMCWorkset = pOMCWSGroup->apWorksets[pWSGroup->wsGroupID];
  5924. //
  5925. // If the ObManControl workset group is not transferred correctly, this
  5926. // assertion may fail:
  5927. //
  5928. ASSERT((pOMCWorkset != NULL));
  5929. //
  5930. // Now, alloc some memory for the registration record object...
  5931. //
  5932. pRegObject = (POM_WSGROUP_REG_REC)UT_MallocRefCount(sizeof(OM_WSGROUP_REG_REC), TRUE);
  5933. if (!pRegObject)
  5934. {
  5935. rc = UT_RC_NO_MEM;
  5936. DC_QUIT;
  5937. }
  5938. //
  5939. // ...set its fields...
  5940. //
  5941. pRegObject->length = sizeof(OM_WSGROUP_REG_REC) -
  5942. sizeof(OM_MAX_OBJECT_SIZE); // == 4
  5943. pRegObject->idStamp = OM_WSGREGREC_ID_STAMP;
  5944. pRegObject->userID = nodeID;
  5945. pRegObject->status = CATCHING_UP;
  5946. //
  5947. // ...determine the update size, which is meant to be all fields in the
  5948. // REG_REC object except the CPI stuff. We also subtract the size of
  5949. // the <length> field because of the way object update sizes are
  5950. // defined.
  5951. //
  5952. updateSize = (sizeof(OM_WSGROUP_REG_REC) - sizeof(TSHR_PERSON_DATA)) -
  5953. sizeof(OM_MAX_OBJECT_SIZE);
  5954. //
  5955. // ...and add it to the workset:
  5956. //
  5957. rc = ObjectAdd(pomPrimary->putTask,
  5958. pomPrimary,
  5959. pOMCWSGroup,
  5960. pOMCWorkset,
  5961. (POM_OBJECTDATA) pRegObject,
  5962. updateSize,
  5963. FIRST,
  5964. &regObjectID,
  5965. ppObjReg);
  5966. if (rc != 0)
  5967. {
  5968. DC_QUIT;
  5969. }
  5970. //
  5971. // Done!
  5972. //
  5973. TRACE_OUT(( "Added reg object for WSG %d to workset %u in OMC "
  5974. "(handle: 0x%08x, ID: 0x%08x:0x%08x)",
  5975. pWSGroup->wsg, pOMCWorkset->worksetID,
  5976. *ppObjReg, regObjectID.creator, regObjectID.sequence));
  5977. DC_EXIT_POINT:
  5978. if (rc != 0)
  5979. {
  5980. ERROR_OUT(( "Error %d adding registration object for WSG %d to "
  5981. "workset %u in ObManControl",
  5982. rc, pWSGroup->wsg, pOMCWorkset->worksetID));
  5983. }
  5984. DebugExitDWORD(RegAnnounceBegin, rc);
  5985. return(rc);
  5986. }
  5987. //
  5988. // RegAnnounceComplete(...)
  5989. //
  5990. UINT RegAnnounceComplete
  5991. (
  5992. POM_PRIMARY pomPrimary,
  5993. POM_DOMAIN pDomain,
  5994. POM_WSGROUP pWSGroup
  5995. )
  5996. {
  5997. POM_WSGROUP pOMCWSGroup;
  5998. POM_WORKSET pOMCWorkset;
  5999. POM_OBJECT pObjReg;
  6000. POM_WSGROUP_REG_REC pRegObject;
  6001. POM_WSGROUP_REG_REC pNewRegObject;
  6002. UINT updateSize;
  6003. UINT rc = 0;
  6004. DebugEntry(RegAnnounceComplete);
  6005. TRACE_OUT(("Announcing completion of reg for WSG %d", pWSGroup->wsg));
  6006. //
  6007. // Set up pointers to the ObManControl workset group and the workset
  6008. // within it which holds the reg objects for the workset group we've
  6009. // just registered with:
  6010. //
  6011. pOMCWSGroup = GetOMCWsgroup(pDomain);
  6012. pOMCWorkset = pOMCWSGroup->apWorksets[pWSGroup->wsGroupID];
  6013. //
  6014. // Set up pointers to the object record and the object data itself:
  6015. //
  6016. pObjReg = pWSGroup->pObjReg;
  6017. ValidateObject(pObjReg);
  6018. if ((pObjReg->flags & DELETED) || !pObjReg->pData)
  6019. {
  6020. ERROR_OUT(("RegAnnounceComplete: object 0x%08x is deleted or has no data", pObjReg));
  6021. rc = OM_RC_OBJECT_DELETED;
  6022. DC_QUIT;
  6023. }
  6024. pRegObject = (POM_WSGROUP_REG_REC)pObjReg->pData;
  6025. ValidateObjectDataWSGREGREC(pRegObject);
  6026. ASSERT(pRegObject->status == CATCHING_UP);
  6027. //
  6028. // Allocate some memory for the new object with which we are about to
  6029. // replace the old one:
  6030. //
  6031. updateSize = sizeof(OM_WSGROUP_REG_REC) - sizeof(TSHR_PERSON_DATA);
  6032. pNewRegObject = (POM_WSGROUP_REG_REC)UT_MallocRefCount(updateSize, FALSE);
  6033. if (!pNewRegObject)
  6034. {
  6035. rc = UT_RC_NO_MEM;
  6036. DC_QUIT;
  6037. }
  6038. //
  6039. // Copy the start of the old object into the new one:
  6040. //
  6041. memcpy(pNewRegObject, pRegObject, updateSize);
  6042. //
  6043. // Update the status field and also set the length field to be the
  6044. // length of the object we just allocated (since this is the number of
  6045. // bytes we are updating):
  6046. //
  6047. pNewRegObject->length = updateSize - sizeof(OM_MAX_OBJECT_SIZE);
  6048. pNewRegObject->status = READY_TO_SEND;
  6049. //
  6050. // Issue the update:
  6051. //
  6052. rc = ObjectDRU(pomPrimary->putTask,
  6053. pOMCWSGroup,
  6054. pOMCWorkset,
  6055. pObjReg,
  6056. (POM_OBJECTDATA) pNewRegObject,
  6057. OMNET_OBJECT_UPDATE);
  6058. if (rc != 0)
  6059. {
  6060. DC_QUIT;
  6061. }
  6062. TRACE_OUT(( "Updated status in own reg object for WSG %d to READY_TO_SEND",
  6063. pWSGroup->wsg));
  6064. //
  6065. // Set the workset group state, to ensure that the reg/info objects get
  6066. // deleted when we deregister.
  6067. //
  6068. pWSGroup->state = WSGROUP_READY;
  6069. DC_EXIT_POINT:
  6070. if (rc != 0)
  6071. {
  6072. ERROR_OUT(( "Error %d updating own reg object for WSG %d",
  6073. rc, pWSGroup->wsg));
  6074. }
  6075. DebugExitDWORD(RegAnnounceComplete, rc);
  6076. return(rc);
  6077. }
//
// MaybeRetryCatchUp(...)
//
// Called on receipt of an MCS DETACH indication or a SEND_DENY message
// from another node.  If <userID> was acting as our catch-up helper for
// workset group <wsGroupID>, restart the registration/catch-up from the
// appropriate point; otherwise do nothing.
//
void MaybeRetryCatchUp
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    OM_WSGROUP_ID wsGroupID,
    NET_UID userID
)
{
    POM_WSGROUP pWSGroup;
    POM_WSGROUP_REG_CB pRegistrationCB;

    DebugEntry(MaybeRetryCatchUp);

    //
    // This function is called on receipt of a DETACH indication from MCS
    // or a SEND_DENY message from another node.  We check the workset
    // group identified and see if we were trying to catch up from the
    // departed node.
    //
    // If we do find a match (on the helperNode), then what we do depends
    // on the state of the workset group:
    //
    // - PENDING_SEND_MIDWAY   : Retry the registration from the top.
    //
    // - PENDING_SEND_COMPLETE : Just repeat the catchup.
    //

    //
    // Find the workset group record for <wsGroupID> in this domain:
    //
    COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
        (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
        FIELD_OFFSET(OM_WSGROUP, wsGroupID), (DWORD)wsGroupID,
        FIELD_SIZE(OM_WSGROUP, wsGroupID));
    if (pWSGroup == NULL)
    {
        TRACE_OUT(( "No record found for WSG ID %hu", wsGroupID));
        DC_QUIT;
    }

    //
    // Compare the helperNode stored in the workset group and the userID
    // of the node which has either detached or sent us a SEND_DENY
    // message.  If they do not match then we have nothing further to do.
    //
    if (pWSGroup->helperNode != userID)
    {
        DC_QUIT;
    }

    TRACE_OUT(( "Node 0x%08x was our helper node for WSG %d, in state %hu",
        userID, pWSGroup->wsg, pWSGroup->state));

    //
    // We need to retry the registration - the current state determines
    // how much has to be redone.  States other than the two below need
    // no action (there is deliberately no default case).
    //
    switch (pWSGroup->state)
    {
        case PENDING_SEND_MIDWAY:
        {
            //
            // First check if this is for ObManControl:
            //
            if (pWSGroup->wsGroupID == WSGROUPID_OMC)
            {
                //
                // It is, so we need to retry the domain attach process.
                // We do this by grabbing the ObMan token and resetting the
                // domain state; when the GRAB_CONFIRM event arrives, we
                // will rejoin the domain attach process at the correct
                // point.
                //
                if (MG_TokenGrab(pomPrimary->pmgClient,
                                 pDomain->tokenID) != 0)
                {
                    ERROR_OUT(( "Failed to grab token"));
                    DC_QUIT;
                }
                pDomain->state = PENDING_TOKEN_GRAB;
            }
            else
            {
                //
                // Not ObManControl, so there will be a registration CB -
                // find it...
                //
                COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->pendingRegs),
                    (void**)&pRegistrationCB, FIELD_OFFSET(OM_WSGROUP_REG_CB, chain),
                    FIELD_OFFSET(OM_WSGROUP_REG_CB, pWSGroup),
                    (DWORD_PTR)pWSGroup, FIELD_SIZE(OM_WSGROUP_REG_CB, pWSGroup));
                if (pRegistrationCB == NULL)
                {
                    ERROR_OUT(( "No reg CB found for WSG %d in state %hu!",
                        pWSGroup->wsg, PENDING_SEND_MIDWAY));
                    DC_QUIT;
                }

                //
                // ...and retry the registation:
                //
                WSGRegisterRetry(pomPrimary, pRegistrationCB);
            }
        }
        break;

        case PENDING_SEND_COMPLETE:
        {
            //
            // Retry the object catchup.  There is no point in trying to
            // find the registration CB as it will have been disposed of as
            // soon as we entered the PENDING_SEND_COMPLETE state.
            //
            if (WSGCatchUp(pomPrimary, pDomain, pWSGroup) != 0)
            //
            // If there are no nodes ready to provide us with the catchup
            // information then we are in a state where everyone either
            // does not have the workset group or is catching up the
            // workset group.
            //
            // MD 21/11/95
            //
            // For now pretend that all is well (it's not!) and go into the
            // READY_TO_SEND state - potentially causing ObMan to become
            // inconsistent.
            //
            {
                RegAnnounceComplete(pomPrimary, pDomain, pWSGroup);
            }
        }
        break;
    }

DC_EXIT_POINT:
    DebugExitVOID(MaybeRetryCatchUp);
}
  6207. //
  6208. //
  6209. //
  6210. // WSGRegisterRetry(...)
  6211. //
  6212. //
  6213. //
  6214. void WSGRegisterRetry(POM_PRIMARY pomPrimary,
  6215. POM_WSGROUP_REG_CB pRegistrationCB)
  6216. {
  6217. POM_DOMAIN pDomain;
  6218. POM_WSGROUP pWSGroup;
  6219. UINT rc = 0;
  6220. DebugEntry(WSGRegisterRetry);
  6221. //
  6222. // Set up pointers
  6223. //
  6224. pWSGroup = pRegistrationCB->pWSGroup;
  6225. pDomain = pRegistrationCB->pDomain;
  6226. //
  6227. // If we've got ObManControl locked for THIS registration, unlock it:
  6228. //
  6229. MaybeUnlockObManControl(pomPrimary, pRegistrationCB);
  6230. //
  6231. // If we have joined a channel (so the channelID is non-zero) then
  6232. // leave it.
  6233. //
  6234. if (pWSGroup->channelID != 0)
  6235. {
  6236. TRACE_OUT(( "Leaving channel %hu", pWSGroup->channelID));
  6237. MG_ChannelLeave(pomPrimary->pmgClient,
  6238. pWSGroup->channelID);
  6239. PurgeReceiveCBs(pRegistrationCB->pDomain,
  6240. pWSGroup->channelID);
  6241. //
  6242. // Set the channelID to zero now that we have left it.
  6243. //
  6244. pWSGroup->channelID = 0;
  6245. }
  6246. //
  6247. // Set the workset group state to INITIAL.
  6248. //
  6249. pWSGroup->state = INITIAL;
  6250. //
  6251. // We examine the retry count. If it's zero, we call WSGRegisterResult
  6252. // to indicate failure. Otherwise, we repost the event with a delay
  6253. // and a decremented retry value.
  6254. //
  6255. if (pRegistrationCB->retryCount == 0)
  6256. {
  6257. WARNING_OUT(( "Aborting registration for WSG %d",
  6258. pRegistrationCB->wsg));
  6259. WSGRegisterResult(pomPrimary, pRegistrationCB, OM_RC_TIMED_OUT);
  6260. }
  6261. else
  6262. {
  6263. //
  6264. // Since we're about to post a message referencing the Reg CB, bump
  6265. // the use count:
  6266. //
  6267. UT_BumpUpRefCount(pRegistrationCB);
  6268. TRACE_OUT(( "Retrying %d for WSG %d; retries left: %u",
  6269. pRegistrationCB->type,
  6270. pRegistrationCB->wsg,
  6271. pRegistrationCB->retryCount));
  6272. pRegistrationCB->retryCount--;
  6273. UT_PostEvent(pomPrimary->putTask,
  6274. pomPrimary->putTask,
  6275. OM_REGISTER_RETRY_DELAY_DFLT,
  6276. OMINT_EVENT_WSGROUP_REGISTER_CONT,
  6277. 0,
  6278. (UINT_PTR) pRegistrationCB);
  6279. }
  6280. DebugExitVOID(WSGRegisterRetry);
  6281. }
  6282. //
  6283. //
  6284. //
  6285. // WSGRegisterResult(...)
  6286. //
  6287. //
  6288. //
  6289. void WSGRegisterResult(POM_PRIMARY pomPrimary,
  6290. POM_WSGROUP_REG_CB pRegistrationCB,
  6291. UINT result)
  6292. {
  6293. POM_WSGROUP pWSGroup;
  6294. POM_DOMAIN pDomain;
  6295. POM_WORKSET pOMCWorkset;
  6296. OM_EVENT_DATA16 eventData16;
  6297. OM_EVENT_DATA32 eventData32;
  6298. UINT type;
  6299. UINT event = 0;
  6300. DebugEntry(WSGRegisterResult);
  6301. //
  6302. // Assert that this is a valid registration CB (which it DC_ABSolutely
  6303. // MUST be, since this function gets called synchronously by some other
  6304. // function which should have validated the CB):
  6305. //
  6306. ASSERT(pRegistrationCB->valid);
  6307. //
  6308. // If we've still got ObManControl locked for THIS registration, unlock
  6309. // it:
  6310. //
  6311. MaybeUnlockObManControl(pomPrimary, pRegistrationCB);
  6312. //
  6313. // Determine whether we're doing a REGISTER or a MOVE (we use the
  6314. // string values for tracing):
  6315. //
  6316. type = pRegistrationCB->type;
  6317. switch (type)
  6318. {
  6319. case WSGROUP_REGISTER:
  6320. event = OM_WSGROUP_REGISTER_CON;
  6321. break;
  6322. case WSGROUP_MOVE:
  6323. event = OM_WSGROUP_MOVE_CON;
  6324. break;
  6325. default:
  6326. ERROR_OUT(("Reached default case in switch statement (value: %hu)", event));
  6327. }
  6328. //
  6329. // Here, we set up pointer to workset group.
  6330. //
  6331. // NOTE: This field in the structure might be NULL, if we have had to
  6332. // abort the registration very early. Therefore, do not use
  6333. // pWSGroup without checking it first!!!
  6334. //
  6335. pWSGroup = pRegistrationCB->pWSGroup;
  6336. if (pWSGroup)
  6337. {
  6338. ValidateWSGroup(pWSGroup);
  6339. }
  6340. //
  6341. // Trace if this registration has failed:
  6342. //
  6343. if (result != 0)
  6344. {
  6345. //
  6346. // pWSGroup might be NULL if we aborted the registration before we
  6347. // got around to creating it in ProcessWSGRegister (pre-Stage1).
  6348. // So, do a quick check and use a -1 value for the state if it's
  6349. // NULL. In either case pick up the name from the reg CB:
  6350. //
  6351. WARNING_OUT(( "%d failed for WSG %d (reason: 0x%08x, WSG state: %u)",
  6352. type, pRegistrationCB->wsg, result,
  6353. pWSGroup == NULL ? -1 : (UINT)pWSGroup->state));
  6354. //
  6355. // If a MOVE fails, then the workset group continues to exist in
  6356. // the old domain - so set the state back to WSGROUP_READY:
  6357. //
  6358. if ((type == WSGROUP_MOVE) && (pWSGroup != NULL))
  6359. {
  6360. pWSGroup->state = WSGROUP_READY;
  6361. }
  6362. }
  6363. else
  6364. {
  6365. //
  6366. // If the registration succeeded, pWSGroup must be OK:
  6367. //
  6368. ASSERT((pWSGroup != NULL));
  6369. ASSERT(((pWSGroup->state == WSGROUP_READY) ||
  6370. (pWSGroup->state == PENDING_SEND_COMPLETE)));
  6371. TRACE_OUT(( "%d succeeded for WSG %d (now in state %hu)",
  6372. type, pRegistrationCB->wsg, pWSGroup->state));
  6373. }
  6374. //
  6375. // Fill in the event parameters and post the result to the Client:
  6376. //
  6377. eventData16.hWSGroup = pRegistrationCB->hWSGroup;
  6378. eventData16.worksetID = 0;
  6379. eventData32.correlator = pRegistrationCB->correlator;
  6380. eventData32.result = (WORD)result;
  6381. UT_PostEvent(pomPrimary->putTask,
  6382. pRegistrationCB->putTask,
  6383. 0,
  6384. event,
  6385. *(PUINT) &eventData16,
  6386. *(LPUINT) &eventData32);
  6387. //
  6388. // If the operation was successful, we also post some more events:
  6389. //
  6390. if (result == 0)
  6391. {
  6392. if (type == WSGROUP_REGISTER)
  6393. {
  6394. //
  6395. // If this is a REGISTER, we post WORKSET_NEW events to the
  6396. // Client for all existing worksets:
  6397. //
  6398. PostWorksetNewEvents(pomPrimary->putTask,
  6399. pRegistrationCB->putTask,
  6400. pWSGroup,
  6401. pRegistrationCB->hWSGroup);
  6402. //
  6403. // We also need to generate PERSON_JOINED events - these are
  6404. // generated automatically by the ObMan task on receipt of the
  6405. // respective OBJECT_ADD events, but only once the registration
  6406. // has completed. So, fake ADD events for any objects that may
  6407. // exist already:
  6408. //
  6409. pDomain = pWSGroup->pDomain;
  6410. pOMCWorkset = GetOMCWorkset(pDomain, pWSGroup->wsGroupID);
  6411. PostAddEvents(pomPrimary->putTask,
  6412. pOMCWorkset,
  6413. pDomain->omchWSGroup,
  6414. pomPrimary->putTask);
  6415. }
  6416. }
  6417. //
  6418. // If we mananged to bump up the use counts of the Domain record and
  6419. // workset group, free them now:
  6420. //
  6421. if (pRegistrationCB->flags & BUMPED_CBS)
  6422. {
  6423. ASSERT((pWSGroup != NULL));
  6424. UT_FreeRefCount((void**)&(pRegistrationCB->pWSGroup), FALSE);
  6425. UT_FreeRefCount((void**)&(pRegistrationCB->pDomain), FALSE);
  6426. }
  6427. //
  6428. // Dispose of the registration CB - it has served us well!
  6429. //
  6430. pRegistrationCB->valid = FALSE;
  6431. TRACE_OUT(( "Finished %d attempt for WSG %d: result = 0x%08x",
  6432. type, pRegistrationCB->wsg, result));
  6433. COM_BasedListRemove(&(pRegistrationCB->chain));
  6434. UT_FreeRefCount((void**)&pRegistrationCB, FALSE);
  6435. DebugExitVOID(WSGRegisterResult);
  6436. }
//
// WSGMove(...)
//
// Moves workset group <pWSGroup> into domain <pDestDomainRec>: rechains
// the record, assigns a new workset group ID, and re-announces the group
// (info object, reg object, person data) in the destination domain's
// ObManControl.  ObManControl itself is only rechained, never
// re-announced (see comment below).  Returns 0 or an error code.
//
UINT WSGMove
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDestDomainRec,
    POM_WSGROUP pWSGroup
)
{
    UINT rc = 0;

    DebugEntry(WSGMove);

    //
    // Now move the record into the new Domain record (this also removes
    // the workset group and its reg object from the old Domain)
    //
    WSGRecordMove(pomPrimary, pDestDomainRec, pWSGroup);

    //
    // There is a problem with the way we deal with moving workset groups
    // into the local Domain at call-end: if there is already a workset
    // group of the same name/FP in the local Domain, we get a name clash,
    // which the rest of the ObMan code does not expect.  This can cause
    // ObMan to get very confused when the workset group is eventually
    // discarded from the local Domain, since it tries to throw away the
    // wrong WSG_INFO object from workset #0 in ObManControl in the local
    // Domain.
    //
    // In R1.1, this name clash will only ever happen with the ObManControl
    // workset group itself, because of the way the apps use workset groups
    // (i.e. they never register with one in a call AND one in the local
    // Domain simultaneously).  Therefore, we make our lives easier by NOT
    // fully moving the ObManControl workset group into the local Domain at
    // call end.
    //
    // Note however that it is OK (required, in fact) to move the workset
    // group record into the list for the local Domain - the problem arises
    // when we try to set it up in the local ObManControl (which we need to
    // do for application workset groups so that they can continue to use
    // person data objects etc.)
    //
    // So, if the workset group name matches ObManControl, skip the rest of
    // this function:
    //
    if (pWSGroup->wsg == OMWSG_OM)
    {
        TRACE_OUT(("Not registering ObManControl in Domain %u (to avoid clash)",
            pDestDomainRec->callID));
        DC_QUIT;
    }

    //
    // Reset the channel ID to zero:
    //
    pWSGroup->channelID = 0;

    //
    // Assign a new ID for this workset group:
    //
    rc = WSGGetNewID(pomPrimary, pDestDomainRec, &(pWSGroup->wsGroupID));
    if (rc != 0)
    {
        DC_QUIT;
    }
    TRACE_OUT(( "Workset group ID for WSG %d in Domain %u is %hu",
        pWSGroup->wsg, pDestDomainRec->callID, pWSGroup->wsGroupID));

    //
    // Now call CreateAnnounce to add a WSG_INFO object to workset #0 in
    // ObManControl.  There may be a name clash, but we don't mind in this
    // case because we've been forced to do the move because of a call end:
    //
    rc = CreateAnnounce(pomPrimary, pDestDomainRec, pWSGroup);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Now add the reg object:
    //
    rc = RegAnnounceBegin(pomPrimary,
                          pDestDomainRec,
                          pWSGroup,
                          pDestDomainRec->userID,
                          &(pWSGroup->pObjReg));
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Add the FE data back in:
    //
    rc = SetPersonData(pomPrimary, pDestDomainRec, pWSGroup);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // And update the object, just as if we were registering with it:
    //
    rc = RegAnnounceComplete(pomPrimary, pDestDomainRec, pWSGroup);
    if (rc != 0)
    {
        DC_QUIT;
    }

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(( "Error %d moving WSG %d into Domain %u",
            rc, pWSGroup->wsg, pDestDomainRec->callID));
    }

    DebugExitDWORD(WSGMove, rc);
    return(rc);
}
  6547. //
  6548. // WSGRecordMove(...)
  6549. //
  6550. void WSGRecordMove
  6551. (
  6552. POM_PRIMARY pomPrimary,
  6553. POM_DOMAIN pDestDomainRec,
  6554. POM_WSGROUP pWSGroup
  6555. )
  6556. {
  6557. POM_DOMAIN pOldDomainRec;
  6558. DebugEntry(WSGRecordMove);
  6559. //
  6560. // Find the record for the Domain the workset group is currently in:
  6561. //
  6562. pOldDomainRec = pWSGroup->pDomain;
  6563. ASSERT(pOldDomainRec->valid);
  6564. DeregisterLocalClient(pomPrimary, &pOldDomainRec, pWSGroup, FALSE);
  6565. //
  6566. // Insert it into the destination Domain:
  6567. //
  6568. TRACE_OUT(("Inserting WSG %d' into list for Domain %u",
  6569. pWSGroup->wsg, pDestDomainRec->callID));
  6570. COM_BasedListInsertBefore(&(pDestDomainRec->wsGroups),
  6571. &(pWSGroup->chain));
  6572. //
  6573. // SFR : reset the pending data ack byte counts:
  6574. //
  6575. WSGResetBytesUnacked(pWSGroup);
  6576. //
  6577. // The workset group now belongs to this new Domain, so set it so.
  6578. //
  6579. pWSGroup->pDomain = pDestDomainRec;
  6580. //
  6581. // Finally, post the MOVE_IND event to all Clients registered with the
  6582. // workset group:
  6583. //
  6584. WSGroupEventPost(pomPrimary->putTask,
  6585. pWSGroup,
  6586. PRIMARY | SECONDARY,
  6587. OM_WSGROUP_MOVE_IND,
  6588. 0, // no workset
  6589. pDestDomainRec->callID);
  6590. DebugExitVOID(WSGRecordMove);
  6591. }
  6592. //
  6593. // WSGResetBytesUnacked(...)
  6594. //
  6595. void WSGResetBytesUnacked
  6596. (
  6597. POM_WSGROUP pWSGroup
  6598. )
  6599. {
  6600. OM_WORKSET_ID worksetID;
  6601. POM_WORKSET pWorkset;
  6602. DebugEntry(WSGResetBytesUnacked);
  6603. //
  6604. // Reset workset group's unacked byte count:
  6605. //
  6606. pWSGroup->bytesUnacked = 0;
  6607. //
  6608. // Now do it for each workset in the workset group:
  6609. //
  6610. for (worksetID = 0;
  6611. worksetID < OM_MAX_WORKSETS_PER_WSGROUP;
  6612. worksetID++)
  6613. {
  6614. pWorkset = pWSGroup->apWorksets[worksetID];
  6615. if (pWorkset != NULL)
  6616. {
  6617. pWorkset->bytesUnacked = 0;
  6618. }
  6619. }
  6620. DebugExitVOID(WSGResetBytesUnacked);
  6621. }
  6622. //
  6623. //
  6624. //
  6625. // ProcessWSGDiscard(...)
  6626. //
  6627. //
  6628. //
  6629. void ProcessWSGDiscard
  6630. (
  6631. POM_PRIMARY pomPrimary,
  6632. POM_WSGROUP pWSGroup
  6633. )
  6634. {
  6635. POM_DOMAIN pDomain;
  6636. DebugEntry(ProcessWSGDiscard);
  6637. ASSERT(!pWSGroup->valid);
  6638. //
  6639. // Now get pointer to Domain record:
  6640. //
  6641. pDomain = pWSGroup->pDomain;
  6642. //
  6643. // If the TO_BE_DISCARDED flag has been cleared since the DISCARD event
  6644. // was posted, we abort the discard process (this will happen when
  6645. // someone local has registered with the workset since it was marked
  6646. // TO_BE_DISCARDED).
  6647. //
  6648. if (!pWSGroup->toBeDiscarded)
  6649. {
  6650. WARNING_OUT(( "Throwing away DISCARD event since WSG %d no longer TO_BE_DISCARDED",
  6651. pWSGroup->wsg));
  6652. DC_QUIT;
  6653. }
  6654. //
  6655. // Otherwise, we can go ahead and discard it:
  6656. //
  6657. WSGDiscard(pomPrimary, pDomain, pWSGroup, FALSE);
  6658. DC_EXIT_POINT:
  6659. DebugExitVOID(ProcessWSGDiscard);
  6660. }
//
// WSGDiscard(...)
//
// Throws away a workset group nobody is registered with: deregisters us
// from the domain, discards every workset (including the checkpointing
// dummy), and frees the workset group record.  <fExit> indicates we are
// shutting down, and is passed through to the helpers.
//
void WSGDiscard
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    POM_WSGROUP pWSGroup,
    BOOL fExit
)
{
    POM_WORKSET pWorkset;
    OM_WORKSET_ID worksetID;

    DebugEntry(WSGDiscard);

    TRACE_OUT(( "Discarding WSG %d from Domain %u",
        pWSGroup->wsg, pDomain->callID));

    //
    // We only ever discard a workset group when nobody's registered with
    // it, so check:
    //
    ASSERT(COM_BasedListFirst(&(pWSGroup->clients), FIELD_OFFSET(OM_CLIENT_LIST, chain)) == NULL);

    //
    // "Discarding" a workset group involves
    //
    // - calling DeregisterLocalClient to remove our person object, leave
    //   the channel, remove the workset group from our domain list etc.
    //
    // - discarding each of the worksets in the workset group
    //
    // - freeing the workset group record (which will have been removed
    //   from the list hung off the Domain record by
    //   DeregisterLocalClient).
    //
    DeregisterLocalClient(pomPrimary, &pDomain, pWSGroup, fExit);

    //
    // Now discard each workset in use:
    //
    for (worksetID = 0;
        worksetID < OM_MAX_WORKSETS_PER_WSGROUP;
        worksetID++)
    {
        pWorkset = pWSGroup->apWorksets[worksetID];
        if (pWorkset != NULL)
        {
            WorksetDiscard(pWSGroup, &pWorkset, fExit);
        }
    }

    //
    // Discard the checkpointing dummy workset.  NOTE(review): this slot
    // is handled separately, so OM_CHECKPOINT_WORKSET is presumably an
    // index >= OM_MAX_WORKSETS_PER_WSGROUP (outside the loop above) -
    // confirm against the header that defines it.
    //
    pWorkset = pWSGroup->apWorksets[OM_CHECKPOINT_WORKSET];
    ASSERT((pWorkset != NULL));
    WorksetDiscard(pWSGroup, &pWorkset, fExit);

    //
    // Free the workset group record (it will have been removed from the
    // domain's list by DeregisterLocalClient, above):
    //
    UT_FreeRefCount((void**)&pWSGroup, FALSE);

    DebugExitVOID(WSGDiscard);
}
  6721. //
  6722. // DeregisterLocalClient(...)
  6723. //
  6724. void DeregisterLocalClient
  6725. (
  6726. POM_PRIMARY pomPrimary,
  6727. POM_DOMAIN* ppDomain,
  6728. POM_WSGROUP pWSGroup,
  6729. BOOL fExit
  6730. )
  6731. {
  6732. POM_DOMAIN pDomain;
  6733. UINT callID;
  6734. DebugEntry(DeregisterLocalClient);
  6735. pDomain = *ppDomain;
  6736. callID = pDomain->callID;
  6737. TRACE_OUT(("Removing WSG %d from Domain %u - state is currently %hu",
  6738. pWSGroup->wsg, callID, pWSGroup->state));
  6739. //
  6740. // Removing a workset group from a Domain involves
  6741. //
  6742. // - deleting the registration object from the relevant registration
  6743. // workset in ObManControl, if we put one there earlier
  6744. //
  6745. // - calling WSGDiscard if there is no one left in the Domain who
  6746. // is registered with the workset group
  6747. //
  6748. // - leaving the relevant channel
  6749. //
  6750. // - removing the workset group from the list hung off the Domain
  6751. // record
  6752. //
  6753. // We will skip some of these unwinding stages, depending on how far we
  6754. // got in the registration process. We use a switch statement with NO
  6755. // BREAKS to determine our "entry point" into the unwinding.
  6756. //
  6757. // When we've done all that, we check to see if we are now no longer
  6758. // registered with any workset groups in this Domain. If not, we
  6759. // detach from the Domain.
  6760. //
  6761. switch (pWSGroup->state)
  6762. {
  6763. case WSGROUP_READY:
  6764. case PENDING_SEND_COMPLETE:
  6765. case PENDING_SEND_MIDWAY:
  6766. {
  6767. //
  6768. // SFR 5913: Purge any outstanding lock requests for the
  6769. // workset group.
  6770. //
  6771. PurgeLockRequests(pDomain, pWSGroup);
  6772. //
  6773. // Search for and remove our person object, if we have one:
  6774. //
  6775. RemovePersonObject(pomPrimary,
  6776. pDomain,
  6777. pWSGroup->wsGroupID,
  6778. pDomain->userID);
  6779. pWSGroup->pObjReg = NULL;
  6780. //
  6781. // If we joined a channel for this workset group, leave it:
  6782. //
  6783. if (pWSGroup->channelID != 0)
  6784. {
  6785. TRACE_OUT(( "Leaving channel %hu", pWSGroup->channelID));
  6786. if (!fExit)
  6787. {
  6788. MG_ChannelLeave(pomPrimary->pmgClient, pWSGroup->channelID);
  6789. }
  6790. //
  6791. // Purge any outstanding receives on this channel:
  6792. //
  6793. PurgeReceiveCBs(pDomain, pWSGroup->channelID);
  6794. }
  6795. }
  6796. // NO BREAK - fall through to next case
  6797. case PENDING_JOIN:
  6798. case LOCKING_OMC:
  6799. case INITIAL:
  6800. {
  6801. //
  6802. // If we didn't get as far as PENDING_SEND_MIDWAY then there's
  6803. // very little unwinding to do. This bit removes the workset
  6804. // group from the Domain's list:
  6805. //
  6806. TRACE_OUT(( "Removing workset group record from list"));
  6807. COM_BasedListRemove(&(pWSGroup->chain));
  6808. //
  6809. // We set the channel ID to zero here because even if we never
  6810. // succeeded in joining the channel, the field will contain the
  6811. // channel CORRELATOR returned to us by MG_ChannelJoin
  6812. //
  6813. pWSGroup->channelID = 0;
  6814. //
  6815. // Since the workset group is no longer associated with any
  6816. // Domain, NULL it out.
  6817. //
  6818. pWSGroup->pDomain = NULL;
  6819. }
  6820. break;
  6821. default:
  6822. {
  6823. ERROR_OUT(( "Default case in switch (value: %hu)",
  6824. pWSGroup->state));
  6825. }
  6826. }
  6827. //
  6828. // If this was the last workset group in the domain...
  6829. //
  6830. if (COM_BasedListIsEmpty(&(pDomain->wsGroups)))
  6831. {
  6832. //
  6833. // ...we should detach:
  6834. //
  6835. // Note: this will only happen when the workset group we have just
  6836. // removed is the ObManControl workset group, so assert:
  6837. //
  6838. if (!fExit)
  6839. {
  6840. ASSERT(pWSGroup->wsg == OMWSG_OM);
  6841. }
  6842. //
  6843. // Since ObMan no longer needs this workset group, we remove it
  6844. // from the list of registered Clients:
  6845. //
  6846. RemoveClientFromWSGList(pomPrimary->putTask,
  6847. pomPrimary->putTask,
  6848. pWSGroup);
  6849. TRACE_OUT(( "No longer using any wsGroups in domain %u - detaching",
  6850. callID));
  6851. //
  6852. // This will NULL the caller's pointer:
  6853. //
  6854. DomainDetach(pomPrimary, ppDomain, fExit);
  6855. }
  6856. DebugExitVOID(DeregisterLocalClient);
  6857. }
  6858. //
  6859. // WorksetDiscard(...)
  6860. //
  6861. void WorksetDiscard
  6862. (
  6863. POM_WSGROUP pWSGroup,
  6864. POM_WORKSET * ppWorkset,
  6865. BOOL fExit
  6866. )
  6867. {
  6868. POM_OBJECT pObj;
  6869. POM_OBJECT pObjTemp;
  6870. POM_WORKSET pWorkset;
  6871. POM_CLIENT_LIST pClient;
  6872. DebugEntry(WorksetDiscard);
  6873. //
  6874. // Set up local pointer:
  6875. //
  6876. pWorkset = *ppWorkset;
  6877. //
  6878. // The code here is similar to that in WorksetDoClear, but in this case
  6879. // we discard ALL objects, irrespective of the sequence stamps.
  6880. //
  6881. // In addition, WorksetDoClear doesn't cause the object records to be
  6882. // freed - it only marks them as deleted - whereas we actually free them
  6883. // up.
  6884. //
  6885. TRACE_OUT(( "Discarding all objects in workset %u in WSG %d",
  6886. pWorkset->worksetID, pWSGroup->wsg));
  6887. CheckObjectCount(pWSGroup, pWorkset);
  6888. pObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  6889. while (pObj != NULL)
  6890. {
  6891. ValidateObject(pObj);
  6892. pObjTemp = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObj,
  6893. FIELD_OFFSET(OM_OBJECT, chain));
  6894. //
  6895. // If the object (data) hasn't yet been deleted, do it now:
  6896. //
  6897. if (!(pObj->flags & DELETED))
  6898. {
  6899. if (!pObj->pData)
  6900. {
  6901. ERROR_OUT(("WorksetDiscard: object 0x%08x has no data", pObj));
  6902. }
  6903. else
  6904. {
  6905. ValidateObjectData(pObj->pData);
  6906. UT_FreeRefCount((void**)&pObj->pData, FALSE);
  6907. }
  6908. pWorkset->numObjects--;
  6909. }
  6910. //
  6911. // Now remove the object record itself from the list and free it:
  6912. //
  6913. TRACE_OUT(( "Freeing pObj at 0x%08x", pObj));
  6914. // NULL this out to catch stale references
  6915. COM_BasedListRemove(&(pObj->chain));
  6916. UT_FreeRefCount((void**)&pObj, FALSE);
  6917. pObj = pObjTemp;
  6918. }
  6919. CheckObjectCount(pWSGroup, pWorkset);
  6920. ASSERT(pWorkset->numObjects == 0);
  6921. //
  6922. // Mark the slot in workset offset array (hung off the workset group
  6923. // record) as empty:
  6924. //
  6925. pWSGroup->apWorksets[pWorkset->worksetID] = NULL;
  6926. //
  6927. // Free the clients
  6928. //
  6929. while (pClient = (POM_CLIENT_LIST)COM_BasedListFirst(&(pWorkset->clients),
  6930. FIELD_OFFSET(OM_CLIENT_LIST, chain)))
  6931. {
  6932. TRACE_OUT(("WorksetDiscard: Freeing client 0x%08x workset 0x%08x",
  6933. pClient, pWorkset));
  6934. COM_BasedListRemove(&(pClient->chain));
  6935. UT_FreeRefCount((void**)&pClient, FALSE);
  6936. }
  6937. //
  6938. // Now discard the chunk holding the workset, setting the caller's
  6939. // pointer to NULL:
  6940. //
  6941. TRACE_OUT(( "Discarded workset %u in WSG %d",
  6942. pWorkset->worksetID, pWSGroup->wsg));
  6943. UT_FreeRefCount((void**)ppWorkset, FALSE);
  6944. DebugExitVOID(WorksetDiscard);
  6945. }
  6946. //
  6947. // ProcessOMCObjectEvents(...)
  6948. //
  6949. void ProcessOMCObjectEvents
  6950. (
  6951. POM_PRIMARY pomPrimary,
  6952. UINT event,
  6953. OM_WSGROUP_HANDLE hWSGroup,
  6954. OM_WORKSET_ID worksetID,
  6955. POM_OBJECT pObj
  6956. )
  6957. {
  6958. POM_DOMAIN pDomain;
  6959. POM_WSGROUP pOMCWSGroup;
  6960. POM_WORKSET pOMCWorkset;
  6961. POM_WSGROUP pWSGroup;
  6962. POM_OBJECT pObjOld;
  6963. POM_WSGROUP_REG_REC pPersonObject;
  6964. DebugEntry(ProcessOMCObjectEvents);
  6965. //
  6966. // In this function, we do the following:
  6967. //
  6968. // - find the domain and workset group this event belongs to
  6969. //
  6970. // - if we have a local client to whom we might be interested in
  6971. // posting a person data event, call GeneratePersonEvents
  6972. //
  6973. // - if this is an object add for a person data object which has our
  6974. // user ID in it, store the handle in the workset group record unless
  6975. // we're not expecting the person object, in which case delete it
  6976. //
  6977. // - if this is an object deleted indication for a person data object
  6978. // then we count the number of remaining person objects for the
  6979. // workset group. If it is zero then we remove the info object.
  6980. //
  6981. //
  6982. // To find the domain, we search the list of active domains, looking up
  6983. // the hWSGroup parameter against the omchWSGroup field:
  6984. //
  6985. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
  6986. (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
  6987. FIELD_OFFSET(OM_DOMAIN, omchWSGroup), (DWORD)hWSGroup,
  6988. FIELD_SIZE(OM_DOMAIN, omchWSGroup));
  6989. if (pDomain == NULL)
  6990. {
  6991. //
  6992. // This should only happen at call end time.
  6993. //
  6994. TRACE_OUT(( "No domain with omchWSGroup %u - has call just ended?", hWSGroup));
  6995. DC_QUIT;
  6996. }
  6997. //
  6998. // To find the workset group, we use the fact that the ID of the
  6999. // control workset (for which we have just received the event) is the
  7000. // same as the ID of the workset group to which it relates. So, do a
  7001. // lookup on this ID:
  7002. //
  7003. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
  7004. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  7005. FIELD_OFFSET(OM_WSGROUP, wsGroupID), (DWORD)worksetID,
  7006. FIELD_SIZE(OM_WSGROUP, wsGroupID));
  7007. //
  7008. // SFR 5593: Changed comparison to PENDING_SEND_MIDWAY from
  7009. // WSGROUP_READY to ensure that late joiners get the person add events.
  7010. //
  7011. if ((pWSGroup != NULL) && (pWSGroup->state > PENDING_SEND_MIDWAY))
  7012. {
  7013. //
  7014. // This means that a local client has fully registered with the
  7015. // workset group, so we're in a position maybe translate the event
  7016. // to a person event:
  7017. //
  7018. TRACE_OUT(( "Recd event 0x%08x for person object 0x%08x (for WSG %d in state %hu)",
  7019. event, pObj, pWSGroup->wsg, pWSGroup->state));
  7020. GeneratePersonEvents(pomPrimary, event, pWSGroup, pObj);
  7021. }
  7022. //
  7023. // Now, if this event is an ADD event for an object which
  7024. //
  7025. // - has not been deleted
  7026. // - is a person object (i.e. has an OM_WSGREGREC_ID_STAMP stamp)
  7027. // - contains our user ID (i.e. is _our_ person object)
  7028. //
  7029. // then we do one of the following:
  7030. //
  7031. // - if the workset group exists get a handle to the old person object
  7032. // and delete it. Then store the handle of the new person object in
  7033. // the workset group record.
  7034. // - if the workset group does not exist then delete the person object.
  7035. //
  7036. // This fixes SFRs 2745 and 2592 which are caused by person objects
  7037. // getting left hanging around in some start/stop race scenarios.
  7038. //
  7039. ValidateObject(pObj);
  7040. if ((event == OM_OBJECT_ADD_IND) && !(pObj->flags & DELETED))
  7041. {
  7042. pPersonObject = (POM_WSGROUP_REG_REC)pObj->pData;
  7043. if (!pPersonObject)
  7044. {
  7045. ERROR_OUT(("ProcessOMCObjectEvents: object 0x%08x has no data", pObj));
  7046. }
  7047. if (pPersonObject &&
  7048. (pPersonObject->idStamp == OM_WSGREGREC_ID_STAMP) &&
  7049. (pPersonObject->userID == pDomain->userID))
  7050. {
  7051. ValidateObjectData(pObj->pData);
  7052. pOMCWSGroup = GetOMCWsgroup(pDomain);
  7053. if (pOMCWSGroup == NULL)
  7054. {
  7055. // lonchanc: ingore left-over events due to race condition
  7056. DC_QUIT;
  7057. }
  7058. pOMCWorkset = pOMCWSGroup->apWorksets[worksetID];
  7059. if (pWSGroup != NULL)
  7060. {
  7061. if ((pWSGroup->pObjReg != NULL) &&
  7062. (pWSGroup->pObjReg != pObj))
  7063. {
  7064. //
  7065. // This object replaces an earlier one we had, so...
  7066. //
  7067. WARNING_OUT(( "Deleting old person object 0x%08x for WSG %d, "
  7068. "since person object 0x%08x has just arrived",
  7069. pWSGroup->pObjReg,
  7070. pWSGroup->wsg,
  7071. pObj));
  7072. //
  7073. // ...set up a pointer to the _old_ object record...
  7074. //
  7075. pObjOld = pWSGroup->pObjReg;
  7076. //
  7077. // ...and delete it:
  7078. //
  7079. ObjectDRU(pomPrimary->putTask,
  7080. pOMCWSGroup,
  7081. pOMCWorkset,
  7082. pObjOld,
  7083. NULL,
  7084. OMNET_OBJECT_DELETE);
  7085. }
  7086. pWSGroup->pObjReg = pObj;
  7087. }
  7088. else
  7089. {
  7090. //
  7091. // We've deregistered from the workset group - delete the
  7092. // object:
  7093. //
  7094. TRACE_OUT(( "Deleting reg object 0x%08x since WSG ID %hu not found",
  7095. pObj, worksetID));
  7096. ObjectDRU(pomPrimary->putTask,
  7097. pOMCWSGroup,
  7098. pOMCWorkset,
  7099. pObj,
  7100. NULL,
  7101. OMNET_OBJECT_DELETE);
  7102. }
  7103. }
  7104. else
  7105. {
  7106. //
  7107. // Not our person object - do nothing.
  7108. //
  7109. }
  7110. //
  7111. // Finished so quit out.
  7112. //
  7113. DC_QUIT;
  7114. }
  7115. //
  7116. // Now, if this event is a DELETED event then we check to see if anyone
  7117. // is still using the workset group. If not then we remove the info
  7118. // object.
  7119. //
  7120. if (event == OM_OBJECT_DELETED_IND)
  7121. {
  7122. //
  7123. // We need to check the number of person objects left in this
  7124. // ObMan control workset if it is not workset zero. If there are
  7125. // no person objects left then remove any orphaned INFO objects.
  7126. //
  7127. pOMCWSGroup = GetOMCWsgroup(pDomain);
  7128. if (pOMCWSGroup == NULL)
  7129. {
  7130. // lonchanc: ingore left-over events due to race condition
  7131. DC_QUIT;
  7132. }
  7133. pOMCWorkset = pOMCWSGroup->apWorksets[worksetID];
  7134. if (pOMCWorkset == NULL)
  7135. {
  7136. // lonchanc: ingore left-over events due to race condition
  7137. DC_QUIT;
  7138. }
  7139. if ((pOMCWorkset->numObjects == 0) &&
  7140. (worksetID != 0))
  7141. {
  7142. TRACE_OUT(( "Workset %hu has no person objects - deleting INFO object",
  7143. worksetID));
  7144. RemoveInfoObject(pomPrimary, pDomain, worksetID);
  7145. }
  7146. //
  7147. // A person object has been removed and as we are potentially in
  7148. // the middle of a workset group catchup from this person we may
  7149. // need to retry the catchup.
  7150. //
  7151. // We search through all the workset groups looking for WSGs that
  7152. // are in the PENDING_SEND_MIDWAY or PENDING_SEND_COMPLETE state
  7153. // (i.e. in catchup state). If they are we then search to ensure
  7154. // that the person object for them still exists. If it doesn't
  7155. // then we need to retry the catchup.
  7156. //
  7157. pOMCWSGroup = GetOMCWsgroup(pDomain);
  7158. if (pOMCWSGroup == NULL)
  7159. {
  7160. // lonchanc: ingore left-over events due to race condition
  7161. DC_QUIT;
  7162. }
  7163. pOMCWorkset = pOMCWSGroup->apWorksets[worksetID];
  7164. if (pOMCWorkset == NULL)
  7165. {
  7166. // lonchanc: ingore left-over events due to race condition
  7167. DC_QUIT;
  7168. }
  7169. pWSGroup = (POM_WSGROUP)COM_BasedListFirst(&(pDomain->wsGroups),
  7170. FIELD_OFFSET(OM_WSGROUP, chain));
  7171. while (pWSGroup != NULL)
  7172. {
  7173. //
  7174. // Check the WSG state to see if we are in the middle of a
  7175. // catchup.
  7176. //
  7177. if ((PENDING_SEND_MIDWAY == pWSGroup->state) ||
  7178. (PENDING_SEND_COMPLETE == pWSGroup->state))
  7179. {
  7180. //
  7181. // We are in the middle of a catchup so we need to check
  7182. // to see that the person object for the person that we
  7183. // are catching up from has not been deleted.
  7184. //
  7185. FindPersonObject(pOMCWorkset,
  7186. pWSGroup->helperNode,
  7187. FIND_THIS,
  7188. &pObj);
  7189. //
  7190. // Check the person handle.
  7191. //
  7192. if (NULL == pObj)
  7193. {
  7194. TRACE_OUT(("Person object removed for WSG %d - retrying"
  7195. " catchup",
  7196. pWSGroup->wsg));
  7197. //
  7198. // Force MaybeRetryCatchUp to retry the catchup by
  7199. // passing the helper node ID that is stored in the
  7200. // workset.
  7201. //
  7202. MaybeRetryCatchUp(pomPrimary,
  7203. pDomain,
  7204. pWSGroup->wsGroupID,
  7205. pWSGroup->helperNode);
  7206. }
  7207. }
  7208. //
  7209. // Get the next WSG.
  7210. //
  7211. pWSGroup = (POM_WSGROUP)COM_BasedListNext(&(pDomain->wsGroups), pWSGroup,
  7212. FIELD_OFFSET(OM_WSGROUP, chain));
  7213. }
  7214. }
  7215. DC_EXIT_POINT:
  7216. if (pObj)
  7217. {
  7218. UT_FreeRefCount((void**)&pObj, FALSE);
  7219. }
  7220. DebugExitVOID(ProcessOMCObjectEvents);
  7221. }
  7222. //
  7223. // GeneratePersonEvents(...)
  7224. //
  7225. void GeneratePersonEvents
  7226. (
  7227. POM_PRIMARY pomPrimary,
  7228. UINT event,
  7229. POM_WSGROUP pWSGroup,
  7230. POM_OBJECT pObj
  7231. )
  7232. {
  7233. POM_WSGROUP_REG_REC pPersonObject;
  7234. UINT newEvent = 0;
  7235. DebugEntry(GeneratePersonEvents);
  7236. //
  7237. // OK, to get here we must have determined that a local client has
  7238. // registered with the workset group. Now proceed to examine the event
  7239. // and generate an appropriate person event for the client:
  7240. //
  7241. switch (event)
  7242. {
  7243. case OM_OBJECT_ADD_IND:
  7244. case OM_OBJECT_UPDATED_IND:
  7245. {
  7246. ValidateObject(pObj);
  7247. if (pObj->flags & DELETED)
  7248. {
  7249. //
  7250. // The object has been deleted already! We can't check its
  7251. // state so just quit:
  7252. //
  7253. DC_QUIT;
  7254. }
  7255. if (!pObj->pData)
  7256. {
  7257. ERROR_OUT(("GeneratePersonEvents: object 0x%08x has no data", pObj));
  7258. DC_QUIT;
  7259. }
  7260. //
  7261. // We're only interested in person objects, so if it's anything
  7262. // else, quit:
  7263. //
  7264. ValidateObjectData(pObj->pData);
  7265. pPersonObject = (POM_WSGROUP_REG_REC)pObj->pData;
  7266. if (pPersonObject->idStamp != OM_WSGREGREC_ID_STAMP)
  7267. {
  7268. DC_QUIT;
  7269. }
  7270. //
  7271. // Translate to a PERSON_JOINED event, provided the person data
  7272. // has actually arrived. We determine this by reading the
  7273. // object and checking the <status> in it:
  7274. //
  7275. if (pPersonObject->status == READY_TO_SEND)
  7276. {
  7277. newEvent = OM_PERSON_JOINED_IND;
  7278. }
  7279. }
  7280. break;
  7281. case OM_OBJECT_DELETED_IND:
  7282. {
  7283. //
  7284. // This means that someone has left the call
  7285. //
  7286. newEvent = OM_PERSON_LEFT_IND;
  7287. }
  7288. break;
  7289. case OM_OBJECT_REPLACED_IND:
  7290. {
  7291. //
  7292. // This means someone has done a SetPersonData:
  7293. //
  7294. newEvent = OM_PERSON_DATA_CHANGED_IND;
  7295. }
  7296. break;
  7297. }
  7298. //
  7299. // If there is any translating to be done, newEvent will now be
  7300. // non-zero:
  7301. //
  7302. if (newEvent != 0)
  7303. {
  7304. WSGroupEventPost(pomPrimary->putTask,
  7305. pWSGroup,
  7306. PRIMARY,
  7307. newEvent,
  7308. 0,
  7309. (UINT_PTR)pObj);
  7310. }
  7311. DC_EXIT_POINT:
  7312. DebugExitVOID(GeneratePersonEvents);
  7313. }
  7314. //
  7315. // ProcessOMCWorksetNew(...)
  7316. //
  7317. void ProcessOMCWorksetNew
  7318. (
  7319. POM_PRIMARY pomPrimary,
  7320. OM_WSGROUP_HANDLE hWSGroup,
  7321. OM_WORKSET_ID worksetID
  7322. )
  7323. {
  7324. POM_DOMAIN pDomain;
  7325. POM_WORKSET pOMCWorkset;
  7326. POM_CLIENT_LIST pClientListEntry;
  7327. DebugEntry(ProcessOMCWorksetNew);
  7328. //
  7329. // The ObMan task generates person data events for its clients when the
  7330. // contents of the relevant control workset changes. We therefore add
  7331. // ObMan to this new control workset's list of "clients" and post it
  7332. // events for any objects already there:
  7333. //
  7334. // NOTE: We specify that ObMan should be considered a SECONDARY "client"
  7335. // of this workset so that it is not required to confirm delete
  7336. // events etc.
  7337. //
  7338. TRACE_OUT(( "Recd WORKSET_NEW for workset %u, WSG %u",
  7339. worksetID, hWSGroup));
  7340. //
  7341. // Look up the domain record based on the workset group handle:
  7342. //
  7343. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomPrimary->domains),
  7344. (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
  7345. FIELD_OFFSET(OM_DOMAIN, omchWSGroup), (DWORD)hWSGroup,
  7346. FIELD_SIZE(OM_DOMAIN, omchWSGroup));
  7347. if (pDomain == NULL)
  7348. {
  7349. WARNING_OUT(( "No domain record found with omchWSGroup %d",
  7350. hWSGroup));
  7351. DC_QUIT;
  7352. }
  7353. pOMCWorkset = GetOMCWorkset(pDomain, worksetID);
  7354. ASSERT((pOMCWorkset != NULL));
  7355. if (AddClientToWsetList(pomPrimary->putTask,
  7356. pOMCWorkset,
  7357. hWSGroup,
  7358. SECONDARY,
  7359. &pClientListEntry) != 0)
  7360. {
  7361. DC_QUIT;
  7362. }
  7363. TRACE_OUT(( "Added ObMan as secondary client for workset"));
  7364. PostAddEvents(pomPrimary->putTask, pOMCWorkset, hWSGroup, pomPrimary->putTask);
  7365. DC_EXIT_POINT:
  7366. DebugExitVOID(ProcessOMCWorksetNew);
  7367. }
  7368. //
  7369. // ProcessSendQueue()
  7370. //
  7371. void ProcessSendQueue
  7372. (
  7373. POM_PRIMARY pomPrimary,
  7374. POM_DOMAIN pDomain,
  7375. BOOL domainRecBumped
  7376. )
  7377. {
  7378. POM_SEND_INST pSendInst;
  7379. NET_PRIORITY priority;
  7380. DebugEntry(ProcessSendQueue);
  7381. //
  7382. // Check the Domain record is still valid:
  7383. //
  7384. if (!pDomain->valid)
  7385. {
  7386. TRACE_OUT(( "Got OMINT_EVENT_SEND_QUEUE too late for discarded Domain %u",
  7387. pDomain->callID));
  7388. DC_QUIT;
  7389. }
  7390. //
  7391. // Check that there is supposed to be a send event outstanding:
  7392. //
  7393. if (pDomain->sendEventOutstanding)
  7394. {
  7395. //
  7396. // Although there might still be a send event outstanding (e.g. a
  7397. // FEEDBACK event) we can't be sure (unless we count them as we
  7398. // generate them). It's vital that we never leave the send queue
  7399. // unprocessed, so to be safe we clear the flag so that QueueMessage
  7400. // will post an event next time it's called:
  7401. //
  7402. pDomain->sendEventOutstanding = FALSE;
  7403. }
  7404. else
  7405. {
  7406. //
  7407. // This will happen
  7408. //
  7409. // - when we get a FEEDBACK event after we've cleared the queue, OR
  7410. //
  7411. // - when we get a SEND_QUEUE event which was posted because there
  7412. // were none outstanding but a FEEDBACK event arrived in the
  7413. // meantime to clear the queue.
  7414. //
  7415. // NOTE: this flag means that there MIGHT not be a send EVENT
  7416. // outstanding (see above). It does not mean that there's
  7417. // nothing on the send queue, so we go ahead and check the
  7418. // queue.
  7419. //
  7420. }
  7421. //
  7422. // The strategy for processing the send queue is to process the highest
  7423. // priority operation first, whether or not a transfer is in progress
  7424. // at another priority.
  7425. //
  7426. // So, for each priority, we check if there's anything in the queue:
  7427. //
  7428. TRACE_OUT(("Searching send queues for Domain %u",pDomain->callID));
  7429. for (priority = NET_TOP_PRIORITY; priority <= NET_LOW_PRIORITY; priority++)
  7430. {
  7431. TRACE_OUT(("Processing queue at priority %u", priority));
  7432. while (pSendInst = (POM_SEND_INST)COM_BasedListFirst(&(pDomain->sendQueue[priority]), FIELD_OFFSET(OM_SEND_INST, chain)))
  7433. {
  7434. TRACE_OUT(("Found send instruction for priority %u", priority));
  7435. if (SendMessagePkt(pomPrimary, pDomain, pSendInst) != 0)
  7436. {
  7437. DC_QUIT;
  7438. }
  7439. }
  7440. }
  7441. DC_EXIT_POINT:
  7442. if (domainRecBumped)
  7443. {
  7444. //
  7445. // If our caller has told us that the use count of the Domain
  7446. // record has been bumped, free it now:
  7447. //
  7448. UT_FreeRefCount((void**)&pDomain, FALSE);
  7449. }
  7450. DebugExitVOID(ProcessSendQueue);
  7451. }
  7452. //
  7453. // SendMessagePkt(...)
  7454. //
  7455. UINT SendMessagePkt
  7456. (
  7457. POM_PRIMARY pomPrimary,
  7458. POM_DOMAIN pDomain,
  7459. POM_SEND_INST pSendInst
  7460. )
  7461. {
  7462. void * pNetBuffer = NULL;
  7463. void * pAnotherNetBuffer = NULL;
  7464. UINT transferSize;
  7465. UINT dataTransferSize;
  7466. BOOL compressed;
  7467. BOOL tryToCompress;
  7468. BOOL spoiled = FALSE;
  7469. BOOL allSent = FALSE;
  7470. NET_PRIORITY queuePriority;
  7471. BOOL fSendExtra;
  7472. POMNET_PKT_HEADER pMessage;
  7473. POM_WSGROUP pWSGroup;
  7474. POM_WORKSET pWorkset;
  7475. UINT rc = 0;
  7476. DebugEntry(SendMessagePkt);
  7477. //
  7478. // We check here if we can spoil this message:
  7479. //
  7480. rc = TryToSpoilOp(pSendInst);
  7481. //
  7482. // If so, quit:
  7483. //
  7484. if (rc == OM_RC_SPOILED)
  7485. {
  7486. spoiled = TRUE;
  7487. rc = 0;
  7488. DC_QUIT;
  7489. }
  7490. //
  7491. // Any other error is more serious:
  7492. //
  7493. if (rc != 0)
  7494. {
  7495. DC_QUIT;
  7496. }
  7497. //
  7498. // Now decide how many bytes we're going to ask the network layer for
  7499. // this time and how many data bytes we're going to transfer:
  7500. //
  7501. DecideTransferSize(pSendInst, &transferSize, &dataTransferSize);
  7502. ASSERT(dataTransferSize <= pSendInst->dataLeftToGo);
  7503. //
  7504. // Add 1 byte to the transfer size for the <compressionType> byte:
  7505. //
  7506. TRACE_OUT(("Asking MG_GetBuffer for 0x%08x bytes for operation type 0x%08x",
  7507. transferSize + 1, pSendInst->messageType));
  7508. rc = MG_GetBuffer(pomPrimary->pmgClient,
  7509. transferSize + 1,
  7510. pSendInst->priority,
  7511. pSendInst->channel,
  7512. &pNetBuffer);
  7513. if (rc != 0)
  7514. {
  7515. //
  7516. // Possible errors include
  7517. // - NET_NOT_CONNECTED, when a backlevel call ends
  7518. // - NET_INVALID_USER_HANDLE, when an MCS call ends
  7519. // - NET_TOO_MUCH_IN_USE, when we hit back pressure (flow control)
  7520. //
  7521. // In all cases, just quit.
  7522. //
  7523. TRACE_OUT(("MG_GetBuffer failed; not sending OM message"));
  7524. DC_QUIT;
  7525. }
  7526. //
  7527. // OK so far, so now copy the header of the message into the first part
  7528. // of the compress buffer:
  7529. //
  7530. pMessage = pSendInst->pMessage;
  7531. ASSERT(pMessage);
  7532. memcpy(pomPrimary->compressBuffer, pMessage, pSendInst->messageSize);
  7533. //
  7534. // ...and now copy the data into the rest of the buffer:
  7535. //
  7536. // This must be a HUGE copy because although the compress buffer is not
  7537. // HUGE, the data is and the bit to be copied may span segments.
  7538. //
  7539. if (dataTransferSize != 0)
  7540. {
  7541. memcpy((LPBYTE)pomPrimary->compressBuffer + pSendInst->messageSize,
  7542. pSendInst->pDataNext, dataTransferSize);
  7543. }
  7544. //
  7545. // Determine whether to compress:
  7546. //
  7547. compressed = FALSE;
  7548. tryToCompress = FALSE;
  7549. if ((pDomain->compressionCaps & OM_CAPS_PKW_COMPRESSION) &&
  7550. (pSendInst->compressOrNot) &&
  7551. (transferSize > DCS_MIN_COMPRESSABLE_PACKET) &&
  7552. (pomPrimary->pgdcWorkBuf != NULL))
  7553. {
  7554. tryToCompress = TRUE;
  7555. }
  7556. //
  7557. // If we passed those tests, compress the packet into the network
  7558. // buffer.
  7559. //
  7560. // This will not use the whole network buffer we have allocated, but it
  7561. // saves us having to have two buffers and doing a second data copy.
  7562. // The network layer can handle a partially used buffer
  7563. //
  7564. if (tryToCompress)
  7565. {
  7566. TRACE_OUT(("OM Compressing %04d bytes", transferSize));
  7567. compressed = GDC_Compress(NULL, GDCCO_MAXSPEED, pomPrimary->pgdcWorkBuf,
  7568. pomPrimary->compressBuffer, transferSize, (LPBYTE)pNetBuffer + 1,
  7569. &transferSize);
  7570. }
  7571. if (compressed)
  7572. {
  7573. TRACE_OUT(("OM Compressed to %04d bytes", transferSize));
  7574. *((LPBYTE)pNetBuffer) = OM_PROT_PKW_COMPRESSED;
  7575. }
  7576. else
  7577. {
  7578. TRACE_OUT(("OM Uncompressed %04d bytes", transferSize));
  7579. memcpy((LPBYTE)pNetBuffer + 1, pomPrimary->compressBuffer,
  7580. transferSize);
  7581. *((LPBYTE)pNetBuffer) = OM_PROT_NOT_COMPRESSED;
  7582. }
  7583. //
  7584. // If we're in a T.120 call and sending on all priorities, we need to
  7585. // do some work to ensure compatibility with NetMeeting 1.0.
  7586. //
  7587. fSendExtra = ((pSendInst->priority & NET_SEND_ALL_PRIORITIES) != 0);
  7588. if ( fSendExtra )
  7589. {
  7590. //
  7591. // T.120 reserves MCS Top Priority for use by GCC. Sending on all
  7592. // priorities used to include Top, but no longer does, to ensure
  7593. // compliance. However, ObMan expects to receive 4 responses when
  7594. // sending on all priorities whereas the MCS glue now uses only
  7595. // 3 priorities. To ensure backward compatibility, whenever ObMan
  7596. // sends on all priorities, it has to add an extra send by making
  7597. // an extra call to the network here.
  7598. // First allocate another net buffer and copy the data to it (we
  7599. // have to do before calling MG_SendData as the other buffer is
  7600. // invalid after this).
  7601. //
  7602. TRACE_OUT(( "SEND_ALL: get extra NET buffer"));
  7603. rc = MG_GetBuffer(pomPrimary->pmgClient,
  7604. transferSize + 1,
  7605. (NET_PRIORITY)(pSendInst->priority & ~NET_SEND_ALL_PRIORITIES),
  7606. pSendInst->channel,
  7607. &pAnotherNetBuffer);
  7608. if (rc != 0)
  7609. {
  7610. WARNING_OUT(("MG_GetBuffer failed; not sending OM packet"));
  7611. }
  7612. else
  7613. {
  7614. memcpy(pAnotherNetBuffer, pNetBuffer, transferSize + 1);
  7615. }
  7616. }
  7617. //
  7618. // Now send the packet, adding 1 byte to the length for the
  7619. // <compressionType> byte:
  7620. //
  7621. TRACE_OUT(( "Sending 0x%08x bytes on channel 0x%08x at priority %hu",
  7622. transferSize + 1, pSendInst->channel, pSendInst->priority));
  7623. if (rc == 0)
  7624. {
  7625. TRACE_OUT(("SendMessagePkt: sending packet size %d",
  7626. transferSize+1));
  7627. rc = MG_SendData(pomPrimary->pmgClient,
  7628. pSendInst->priority,
  7629. pSendInst->channel,
  7630. (transferSize + 1),
  7631. &pNetBuffer);
  7632. }
  7633. if ( fSendExtra && (rc == 0) )
  7634. {
  7635. TRACE_OUT(("SendMessagePkt: sending extra packet size %d",
  7636. transferSize+1));
  7637. rc = MG_SendData(pomPrimary->pmgClient,
  7638. (NET_PRIORITY)(pSendInst->priority & ~NET_SEND_ALL_PRIORITIES),
  7639. pSendInst->channel,
  7640. (transferSize + 1),
  7641. &pAnotherNetBuffer);
  7642. }
  7643. if (rc != 0)
  7644. {
  7645. //
  7646. // Network API says free the buffer on error:
  7647. //
  7648. MG_FreeBuffer(pomPrimary->pmgClient, &pNetBuffer);
  7649. if ( pAnotherNetBuffer != NULL )
  7650. {
  7651. MG_FreeBuffer(pomPrimary->pmgClient, &pAnotherNetBuffer);
  7652. }
  7653. switch (rc)
  7654. {
  7655. case NET_RC_MGC_NOT_CONNECTED:
  7656. case NET_RC_MGC_INVALID_USER_HANDLE:
  7657. //
  7658. // These are the errors the Network layer returns when we're in
  7659. // a singleton Domain or when an MCS domain has just
  7660. // terminated. We ignore them.
  7661. //
  7662. TRACE_OUT(("No data sent since call %u doesn't exist",
  7663. pDomain->callID));
  7664. rc = 0;
  7665. break;
  7666. default:
  7667. //
  7668. // Any other error is more serious, so quit and pass it back:
  7669. //
  7670. DC_QUIT;
  7671. }
  7672. }
  7673. else
  7674. {
  7675. //
  7676. // We've sent a message and will therefore get a FEEDBACK event
  7677. // sometime later. This qualifies as a SEND_EVENT since it will
  7678. // prompt us to examine our send queue, so we set the
  7679. // SEND_EVENT_OUTSTANDING flag:
  7680. //
  7681. TRACE_OUT(("Sent msg in Domain %u (type: 0x%08x) with %hu data bytes",
  7682. pDomain->callID, pSendInst->messageType, dataTransferSize));
  7683. pDomain->sendEventOutstanding = TRUE;
  7684. }
  7685. //
  7686. // Here, we decrement the <bytesUnacked> fields for the workset and
  7687. // workset group:
  7688. //
  7689. if (dataTransferSize != 0)
  7690. {
  7691. pWorkset = pSendInst->pWorkset;
  7692. pWorkset->bytesUnacked -= dataTransferSize;
  7693. pWSGroup = pSendInst->pWSGroup;
  7694. pWSGroup->bytesUnacked -= dataTransferSize;
  7695. }
  7696. //
  7697. // Now update the send instruction and decide whether we've sent all
  7698. // the data for this operation:
  7699. //
  7700. pSendInst->dataLeftToGo -= dataTransferSize;
  7701. pSendInst->pDataNext = (POM_OBJECTDATA)((LPBYTE)pSendInst->pDataNext + dataTransferSize);
  7702. if (pSendInst->dataLeftToGo == 0)
  7703. {
  7704. //
  7705. // If so, we
  7706. //
  7707. // - clear the transfer-in-progress flag for this queue -
  7708. // remember that the NET_SEND_ALL_PRIORITIES flag may be set so
  7709. // we need to clear it
  7710. //
  7711. // - free our copy of the message packet and the data, if any (we
  7712. // bumped up the use count of the data chunk when the message was
  7713. // put on the queue so we won't really be getting rid of it
  7714. // unless it's been freed elsewhere already, which is fine)
  7715. //
  7716. // - pop the instruction off the send queue and free it.
  7717. //
  7718. TRACE_OUT(( "Sent last packet for operation (type: 0x%08x)",
  7719. pSendInst->messageType));
  7720. queuePriority = pSendInst->priority;
  7721. queuePriority &= ~NET_SEND_ALL_PRIORITIES;
  7722. pDomain->sendInProgress[queuePriority] = FALSE;
  7723. allSent = TRUE;
  7724. }
  7725. else
  7726. {
  7727. //
  7728. // If not, we
  7729. //
  7730. // - set the transfer-in-progress flag for this queue -
  7731. // remember that the NET_SEND_ALL_PRIORITIES flag may be set so
  7732. // we need to clear it
  7733. //
  7734. // - set the <messageSize> field of the send instruction to the
  7735. // size of a MORE_DATA header, so that only that many bytes are
  7736. // picked out of the message next time
  7737. //
  7738. // - set the <messageType> field of the message to MORE_DATA
  7739. //
  7740. // - leave the operation on the queue.
  7741. //
  7742. TRACE_OUT(("Data left to transfer: %u bytes (starting at 0x%08x)",
  7743. pSendInst->dataLeftToGo, pSendInst->pDataNext));
  7744. queuePriority = pSendInst->priority;
  7745. queuePriority &= ~NET_SEND_ALL_PRIORITIES;
  7746. pDomain->sendInProgress[queuePriority] = TRUE;
  7747. pSendInst->messageSize = OMNET_MORE_DATA_SIZE;
  7748. pMessage->messageType = OMNET_MORE_DATA;
  7749. }
  7750. DC_EXIT_POINT:
  7751. //
  7752. // If we're finished with the message (either because we've sent it all
  7753. // or because it was spoiled) we free it (plus any associated data):
  7754. //
  7755. if (spoiled || allSent)
  7756. {
  7757. FreeSendInst(pSendInst);
  7758. }
  7759. DebugExitDWORD(SendMessagePkt, rc);
  7760. return(rc);
  7761. }
  7762. //
  7763. // TryToSpoilOp
  7764. //
  7765. UINT TryToSpoilOp
  7766. (
  7767. POM_SEND_INST pSendInst
  7768. )
  7769. {
  7770. POMNET_OPERATION_PKT pMessage;
  7771. POM_OBJECT pObj;
  7772. POM_WORKSET pWorkset;
  7773. POM_WSGROUP pWSGroup;
  7774. BOOL spoilable = FALSE;
  7775. UINT rc = 0;
  7776. DebugEntry(TryToSpoilOp);
  7777. pMessage = (POMNET_OPERATION_PKT)pSendInst->pMessage;
  7778. pObj = pSendInst->pObj;
  7779. pWorkset = pSendInst->pWorkset;
  7780. pWSGroup = pSendInst->pWSGroup;
  7781. //
  7782. // The rules for spoiling state that
  7783. //
  7784. // - any operation is spoiled by a later operation of the same type
  7785. //
  7786. // - in addition, an Update is spoiled by a later Replace.
  7787. //
  7788. // Since we never have two Adds or two Deletes for the same object,
  7789. // these rules reduce to the following:
  7790. //
  7791. // - a Clear is spoiled by a later Clear
  7792. //
  7793. // - a Move is spoiled by a later Move
  7794. //
  7795. // - a Replace is spoiled by a later Replace
  7796. //
  7797. // - an Update is spoiled by a later Update or a later Replace.
  7798. //
  7799. // So, switch according to the operation type:
  7800. //
  7801. switch (pSendInst->messageType)
  7802. {
  7803. case OMNET_WORKSET_CLEAR:
  7804. if (STAMP_IS_LOWER(pMessage->seqStamp, pWorkset->clearStamp))
  7805. {
  7806. spoilable = TRUE;
  7807. }
  7808. break;
  7809. case OMNET_OBJECT_UPDATE:
  7810. if ((STAMP_IS_LOWER(pMessage->seqStamp, pObj->replaceStamp))
  7811. || (STAMP_IS_LOWER(pMessage->seqStamp, pObj->updateStamp)))
  7812. {
  7813. spoilable = TRUE;
  7814. }
  7815. break;
  7816. case OMNET_OBJECT_REPLACE:
  7817. if (STAMP_IS_LOWER(pMessage->seqStamp, pObj->replaceStamp))
  7818. {
  7819. spoilable = TRUE;
  7820. }
  7821. break;
  7822. case OMNET_OBJECT_MOVE:
  7823. if (STAMP_IS_LOWER(pMessage->seqStamp, pObj->positionStamp))
  7824. {
  7825. spoilable = TRUE;
  7826. }
  7827. break;
  7828. case OMNET_HELLO:
  7829. case OMNET_WELCOME:
  7830. case OMNET_LOCK_REQ:
  7831. case OMNET_LOCK_GRANT:
  7832. case OMNET_LOCK_DENY:
  7833. case OMNET_LOCK_NOTIFY:
  7834. case OMNET_UNLOCK:
  7835. case OMNET_WSGROUP_SEND_REQ:
  7836. case OMNET_WSGROUP_SEND_MIDWAY:
  7837. case OMNET_WSGROUP_SEND_COMPLETE:
  7838. case OMNET_WSGROUP_SEND_DENY:
  7839. case OMNET_WORKSET_NEW:
  7840. case OMNET_WORKSET_CATCHUP:
  7841. case OMNET_OBJECT_ADD:
  7842. case OMNET_OBJECT_DELETE:
  7843. case OMNET_OBJECT_CATCHUP:
  7844. //
  7845. // Do nothing
  7846. //
  7847. break;
  7848. default:
  7849. ERROR_OUT(("Reached default case in switch statement (value: %hu)",
  7850. pSendInst->messageType));
  7851. break;
  7852. }
  7853. if (spoilable)
  7854. {
  7855. //
  7856. // To spoil the message, we remove it from the send queue and free
  7857. // the memory (also NULL the caller's pointer):
  7858. //
  7859. //
  7860. // However, if we spoil the message, the data (if any) will never be
  7861. // acknowledged, so we must decrement the relevant <bytesUnacked>
  7862. // fields now:
  7863. //
  7864. TRACE_OUT(( "Spoiling from send queue for workset %u",
  7865. pWorkset->worksetID));
  7866. if (pSendInst->dataLeftToGo != 0)
  7867. {
  7868. pWorkset->bytesUnacked -= pSendInst->dataLeftToGo;
  7869. pWSGroup->bytesUnacked -= pSendInst->dataLeftToGo;
  7870. }
  7871. rc = OM_RC_SPOILED;
  7872. }
  7873. DebugExitDWORD(TryToSpoilOp, rc);
  7874. return(rc);
  7875. }
  7876. //
  7877. // DecideTransferSize(...)
  7878. //
  7879. void DecideTransferSize
  7880. (
  7881. POM_SEND_INST pSendInst,
  7882. UINT * pTransferSize,
  7883. UINT * pDataTransferSize
  7884. )
  7885. {
  7886. UINT transferSize;
  7887. DebugEntry(DecideTransferSize);
  7888. //
  7889. // Ideally, we'd like to transfer everything in one go, where
  7890. // "everything" is the message header plus all the data to go with it
  7891. // (if any):
  7892. //
  7893. transferSize = pSendInst->messageSize + pSendInst->dataLeftToGo;
  7894. TRACE_OUT(("Desired transfer size for this portion: %u", transferSize));
  7895. //
  7896. // However, we never ask for more than half the send pool size, so take
  7897. // the minimum of the two:
  7898. //
  7899. // (we subtract 1 byte to allow for the <compressionType> byte at the
  7900. // start of the packet)
  7901. //
  7902. transferSize = min(transferSize, ((OM_NET_SEND_POOL_SIZE / 2) - 1));
  7903. TRACE_OUT(("Feasible transfer size for this portion: %u",
  7904. transferSize));
  7905. //
  7906. // The logic of the send queue processing requires that the message
  7907. // header is sent completely in the first packet, so assert:
  7908. //
  7909. ASSERT((transferSize >= pSendInst->messageSize));
  7910. //
  7911. // As a sanity check, we ensure we're not trying to transfer more than
  7912. // the biggest buffer allowed:
  7913. //
  7914. ASSERT(transferSize <= OM_NET_MAX_TRANSFER_SIZE);
  7915. //
  7916. // The amount of data to be sent is the transfer size less the size of
  7917. // the header we're sending:
  7918. //
  7919. *pDataTransferSize = ((UINT) transferSize) - pSendInst->messageSize;
  7920. *pTransferSize = (UINT) transferSize;
  7921. TRACE_OUT(("Total transfer size for this packet: %u - data transfer size: %u",
  7922. (UINT) *pTransferSize, (UINT) *pDataTransferSize));
  7923. DebugExitVOID(DecideTransferSize);
  7924. }
  7925. //
  7926. // ProcessNetData(...)
  7927. //
  7928. void ProcessNetData
  7929. (
  7930. POM_PRIMARY pomPrimary,
  7931. POM_DOMAIN pDomain,
  7932. PNET_SEND_IND_EVENT pNetSendInd
  7933. )
  7934. {
  7935. POMNET_PKT_HEADER pHeader;
  7936. UINT dataSize;
  7937. OMNET_MESSAGE_TYPE messageType = 0;
  7938. UINT rc = 0;
  7939. DebugEntry(ProcessNetData);
  7940. //
  7941. // Decompress the packet and set pHeader to point to the start of
  7942. // wherever the data ends up:
  7943. //
  7944. ASSERT((pNetSendInd->lengthOfData < 0xFFFF));
  7945. if (NULL != pNetSendInd->data_ptr) {
  7946. switch (*(pNetSendInd->data_ptr))
  7947. {
  7948. case OM_PROT_NOT_COMPRESSED:
  7949. {
  7950. TRACE_OUT(("Buffer not compressed - taking it as it stands"));
  7951. memcpy(pomPrimary->compressBuffer, pNetSendInd->data_ptr + 1,
  7952. pNetSendInd->lengthOfData--);
  7953. }
  7954. break;
  7955. case OM_PROT_PKW_COMPRESSED:
  7956. {
  7957. TRACE_OUT(("Buffer was PKW compressed - size 0x%08x bytes",
  7958. pNetSendInd->lengthOfData));
  7959. dataSize = sizeof(pomPrimary->compressBuffer);
  7960. ASSERT(pomPrimary->pgdcWorkBuf != NULL);
  7961. if (!GDC_Decompress(NULL, pomPrimary->pgdcWorkBuf,
  7962. pNetSendInd->data_ptr + 1,
  7963. (WORD) pNetSendInd->lengthOfData - 1,
  7964. pomPrimary->compressBuffer, &dataSize))
  7965. {
  7966. ERROR_OUT(("Failed to decompress OM data!"));
  7967. }
  7968. pNetSendInd->lengthOfData = dataSize;
  7969. TRACE_OUT(("Decompressed to 0x%08x bytes",
  7970. pNetSendInd->lengthOfData));
  7971. }
  7972. break;
  7973. default:
  7974. {
  7975. ERROR_OUT(( "Ignoring packet with unknown compression (0x%08x)",
  7976. *(pNetSendInd->data_ptr)));
  7977. DC_QUIT;
  7978. }
  7979. }
  7980. pHeader = (POMNET_PKT_HEADER) pomPrimary->compressBuffer;
  7981. //
  7982. // Now switch accorindg to the message type:
  7983. //
  7984. messageType = pHeader->messageType;
  7985. TRACE_OUT((" Packet contains OMNET message type 0x%08x", messageType));
  7986. switch (messageType)
  7987. {
  7988. case OMNET_HELLO:
  7989. {
  7990. rc = ProcessHello(pomPrimary,
  7991. pDomain,
  7992. (POMNET_JOINER_PKT) pHeader,
  7993. pNetSendInd->lengthOfData);
  7994. }
  7995. break;
  7996. case OMNET_WELCOME:
  7997. {
  7998. rc = ProcessWelcome(pomPrimary,
  7999. pDomain,
  8000. (POMNET_JOINER_PKT) pHeader,
  8001. pNetSendInd->lengthOfData);
  8002. }
  8003. break;
  8004. case OMNET_LOCK_DENY:
  8005. case OMNET_LOCK_GRANT:
  8006. {
  8007. ProcessLockReply(pomPrimary,
  8008. pDomain,
  8009. pHeader->sender,
  8010. ((POMNET_LOCK_PKT) pHeader)->data1,
  8011. pHeader->messageType);
  8012. }
  8013. break;
  8014. case OMNET_LOCK_REQ:
  8015. {
  8016. ProcessLockRequest(pomPrimary, pDomain,
  8017. (POMNET_LOCK_PKT) pHeader);
  8018. }
  8019. break;
  8020. case OMNET_WSGROUP_SEND_REQ:
  8021. {
  8022. ProcessSendReq(pomPrimary,
  8023. pDomain,
  8024. (POMNET_WSGROUP_SEND_PKT) pHeader);
  8025. }
  8026. break;
  8027. case OMNET_WSGROUP_SEND_MIDWAY:
  8028. {
  8029. ProcessSendMidway(pomPrimary,
  8030. pDomain,
  8031. (POMNET_WSGROUP_SEND_PKT) pHeader);
  8032. }
  8033. break;
  8034. case OMNET_WSGROUP_SEND_COMPLETE:
  8035. {
  8036. rc = ProcessSendComplete(pomPrimary,
  8037. pDomain,
  8038. (POMNET_WSGROUP_SEND_PKT) pHeader);
  8039. }
  8040. break;
  8041. case OMNET_WSGROUP_SEND_DENY:
  8042. {
  8043. MaybeRetryCatchUp(pomPrimary,
  8044. pDomain,
  8045. ((POMNET_WSGROUP_SEND_PKT) pHeader)->wsGroupID,
  8046. pHeader->sender);
  8047. }
  8048. break;
  8049. //
  8050. // We use the special ReceiveData function for any messages which
  8051. //
  8052. // - might need to be bounced, or
  8053. //
  8054. // - might fill more than one packet.
  8055. //
  8056. case OMNET_LOCK_NOTIFY:
  8057. case OMNET_UNLOCK:
  8058. case OMNET_WORKSET_NEW:
  8059. case OMNET_WORKSET_CLEAR:
  8060. case OMNET_WORKSET_CATCHUP:
  8061. case OMNET_OBJECT_ADD:
  8062. case OMNET_OBJECT_MOVE:
  8063. case OMNET_OBJECT_UPDATE:
  8064. case OMNET_OBJECT_REPLACE:
  8065. case OMNET_OBJECT_DELETE:
  8066. case OMNET_OBJECT_CATCHUP:
  8067. case OMNET_MORE_DATA:
  8068. {
  8069. rc = ReceiveData(pomPrimary,
  8070. pDomain,
  8071. pNetSendInd,
  8072. (POMNET_OPERATION_PKT) pHeader);
  8073. }
  8074. break;
  8075. default:
  8076. {
  8077. ERROR_OUT(( "Unexpected messageType 0x%08x", messageType));
  8078. }
  8079. }
  8080. DC_EXIT_POINT:
  8081. if (rc != 0)
  8082. {
  8083. ERROR_OUT(( "Error %d processing OMNET message 0x%08x",
  8084. rc, messageType));
  8085. }
  8086. }
  8087. DebugExitVOID(ProcessNetData);
  8088. }
//
// ReceiveData(...)
//
// Handles one network buffer belonging to a possibly multi-packet OMNET
// operation.  For the first packet of a transfer a receive control block
// (CB) is created; for OMNET_MORE_DATA continuation packets the existing
// CB is located instead.  Data bytes are accumulated into the CB's
// buffer, and once all expected bytes have arrived the completed message
// is handed to ProcessMessage.
//
// Returns 0 on success (including the bounced-message case, which is
// deliberately swallowed) or a nonzero OM error code.  On
// OM_RC_OUT_OF_RESOURCES the channel is left, simulating expulsion.
//
UINT ReceiveData
(
    POM_PRIMARY pomPrimary,
    POM_DOMAIN pDomain,
    PNET_SEND_IND_EVENT pNetSendInd,
    POMNET_OPERATION_PKT pNetMessage
)
{
    POM_RECEIVE_CB pReceiveCB = NULL;
    UINT thisHeaderSize;
    UINT thisDataSize;
    OMNET_MESSAGE_TYPE messageType;
    // Zero-initialized so that a packet carrying no data (thisDataSize
    // == 0) falls straight through to ProcessMessage below.
    long bytesStillExpected = 0;
    UINT rc = 0;

    DebugEntry(ReceiveData);

    //
    // Set up some local variables:
    //
    messageType = pNetMessage->header.messageType;

    //
    // The amount of data included in this message is the size of the
    // network buffer less the size of our message header at the front of
    // it:
    //
    // Note: <thisHeaderSize> is the size of the header IN THIS PACKET,
    // rather than the size of the header in the first packet of a
    // multi-packet send.
    //
    thisHeaderSize = GetMessageSize(pNetMessage->header.messageType);
    thisDataSize = pNetSendInd->lengthOfData - thisHeaderSize;

    //
    // If this is a MORE_DATA packet, then there should already be a
    // receive CB set up for the transfer. If not, we need to create one:
    //
    if (messageType == OMNET_MORE_DATA)
    {
        rc = FindReceiveCB(pDomain, pNetSendInd, pNetMessage, &pReceiveCB);

        //
        // If no receive CB, we swallow the return code and quit. This will
        // happen when we join a channel midway through a large data
        // transfer.
        //
        if (rc == OM_RC_RECEIVE_CB_NOT_FOUND)
        {
            WARNING_OUT(("Discarding unexpected packet from 0x%08x",
                pNetMessage->header.sender));
            rc = 0;
            DC_QUIT;
        }
    }
    else
    {
        // lonchanc: added the following block of code
        if (messageType == OMNET_OBJECT_REPLACE)
        {
            POM_RECEIVE_CB p;

            // lonchanc: This packet does not contain all the data.
            // More data will come in another packets; however,
            // in this case, bytesStillExpected will be greater than zero
            // after substracting from thisDataSize, as a result,
            // this receiveCB will be appended to the ReceiveList.
            // However, FindReceiveCB will find the first one matched.
            // As a result, the one we just appended to the ReceiveList will
            // not be found.
            // Even worse, if there is receiveCB (of same sender, priority, and
            // channel), the first-matched receiveCB will be totally confused
            // when more data come in. This is bug #578.
            //
            // NOTE(review): this loop therefore discards ALL stale
            // receive CBs matching this sender/priority/channel before
            // a new REPLACE transfer starts.
            TRACE_OUT(("Removing receiveCB {"));
            while (FindReceiveCB(pDomain, pNetSendInd, pNetMessage, &p) == 0)
            {
                //
                // Remove the message from the list it's in (either the pending
                // receives list if this message was never bounced or the bounce
                // list if it has been bounced):
                //
                COM_BasedListRemove(&(p->chain));

                //
                // Now free the message and the receive control block (NOT THE
                // DATA! If there was any, it's just been used for an object
                // add/update etc.)
                //
                UT_FreeRefCount((void**)&(p->pHeader), FALSE);
                UT_FreeRefCount((void**)&p, FALSE);
            }
        }

        rc = CreateReceiveCB(pDomain, pNetSendInd, pNetMessage, &pReceiveCB);
    }

    if (rc != 0)
    {
        ERROR_OUT(("%s failed, rc=0x0x%08x",
            (messageType == OMNET_MORE_DATA) ? "FindReceiveCB" : "CreateReceiveCB",
            rc));
        DC_QUIT;
    }

    TRACE_OUT(("%s ok, pRecvCB=0x0x%p",
        (messageType == OMNET_MORE_DATA) ? "FindReceiveCB" : "CreateReceiveCB",
        pReceiveCB));

    //
    // Now we copy the data, if any, from the network buffer into the chunk
    // we allocated when we called CreateReceiveCB.
    //
    if (thisDataSize != 0)
    {
        //
        // We copy the data across using memcpy.
        //
        // <totalSize> covers header plus data for the whole transfer;
        // <bytesRecd> starts at the header size, so the difference is
        // the data still outstanding.
        //
        bytesStillExpected = ((long) (pReceiveCB->pHeader->totalSize) -
            (long) (pReceiveCB->bytesRecd));

        TRACE_OUT(("thisDataSize=0x0x%08x, bytesStillExpected=0x0x%08x, totalSize=0x0x%08x, bytesRecd=0x0x%08x",
            (long) thisDataSize,
            (long) bytesStillExpected,
            (long) pReceiveCB->pHeader->totalSize,
            (long) pReceiveCB->bytesRecd));

        ASSERT((long) thisDataSize <= bytesStillExpected);

        memcpy(pReceiveCB->pCurrentPosition,
            ((LPBYTE) pNetMessage) + thisHeaderSize,
            thisDataSize);

        pReceiveCB->bytesRecd += thisDataSize;
        pReceiveCB->pCurrentPosition += thisDataSize;

        bytesStillExpected -= thisDataSize;

        TRACE_OUT((" Still expecting %u bytes", bytesStillExpected));
    }

    //
    // If we are expecting no more data for this transfer, process it:
    //
    if (bytesStillExpected <= 0)
    {
        rc = ProcessMessage(pomPrimary, pReceiveCB, OK_TO_RETRY_BOUNCE_LIST);

        if (rc == OM_RC_BOUNCED)
        {
            //
            // If ProcessMessage can't deal with the message immediately
            // (because e.g. it's an update for an object we don't yet
            // have), it will have added it to the bounce list so it will
            // be tried again later.
            //
            // We special case this return code as it's not a problem for
            // us here (it exists because other parts of the code need it):
            //
            WARNING_OUT(("Bounced message type 0x%08x", messageType));
            rc = 0;
        }

        if (rc != 0)
        {
            //
            // Any other non-zero return code is more serious:
            //
            DC_QUIT;
        }
    }

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(("Error %d from message type 0x%08x", rc, messageType));

        if (rc == OM_RC_OUT_OF_RESOURCES)
        {
            //
            // If we couldn't allocate memory for the data to be recd, we
            // act as if we've been kicked out of the channel:
            //
            ERROR_OUT(( "Leaving chann 0x%08x, simulating expulsion", pNetSendInd->channel));
            MG_ChannelLeave(pomPrimary->pmgClient, pNetSendInd->channel);
            ProcessNetLeaveChannel(pomPrimary, pDomain, pNetSendInd->channel);
        }
    }

    DebugExitDWORD(ReceiveData, rc);
    return(rc);
}
  8260. //
  8261. // CreateReceiveCB(...)
  8262. //
  8263. UINT CreateReceiveCB
  8264. (
  8265. POM_DOMAIN pDomain,
  8266. PNET_SEND_IND_EVENT pNetSendInd,
  8267. POMNET_OPERATION_PKT pNetMessage,
  8268. POM_RECEIVE_CB * ppReceiveCB
  8269. )
  8270. {
  8271. POM_RECEIVE_CB pReceiveCB = NULL;
  8272. POMNET_OPERATION_PKT pHeader = NULL;
  8273. UINT headerSize;
  8274. UINT totalDataSize;
  8275. UINT rc = 0;
  8276. DebugEntry(CreateReceiveCB);
  8277. //
  8278. // We get here when the first packet of a message arrives . What we
  8279. // need to do is to set up a "receive" structure and add it to the list
  8280. // of receives-in-progress for the Domain. Then, when the ensuing data
  8281. // packets (if any) arrive, they will be correlated and concatenated.
  8282. // When all the data has arrived, the receive CB will be passed to
  8283. // ProcessMessage.
  8284. //
  8285. //
  8286. // Allocate some memory for the receive CB:
  8287. //
  8288. pReceiveCB = (POM_RECEIVE_CB)UT_MallocRefCount(sizeof(OM_RECEIVE_CB), TRUE);
  8289. if (!pReceiveCB)
  8290. {
  8291. rc = UT_RC_NO_MEM;
  8292. DC_QUIT;
  8293. }
  8294. SET_STAMP(pReceiveCB, RCVCB);
  8295. pReceiveCB->pDomain = pDomain;
  8296. pReceiveCB->priority = pNetSendInd->priority;
  8297. pReceiveCB->channel = pNetSendInd->channel;
  8298. //
  8299. // Allocate some memory for the message header and copy the packet into
  8300. // it from the network buffer (note: we must copy the header since at
  8301. // the moment it is in a network buffer which we can't hang on to for
  8302. // the entire duration of the transfer):
  8303. //
  8304. headerSize = GetMessageSize(pNetMessage->header.messageType);
  8305. pHeader = (POMNET_OPERATION_PKT)UT_MallocRefCount(sizeof(OMNET_OPERATION_PKT), TRUE);
  8306. if (!pHeader)
  8307. {
  8308. rc = UT_RC_NO_MEM;
  8309. DC_QUIT;
  8310. }
  8311. memcpy(pHeader, pNetMessage, headerSize);
  8312. pReceiveCB->pHeader = pHeader;
  8313. //
  8314. // Not all messages sent over the network have a totalSize field, but
  8315. // our subsequent processing requires one. So, if the message we've
  8316. // just received didn't have one, we set the value (our local copy of
  8317. // the header has room since we alloacated enough memory for the
  8318. // largest type of header):
  8319. //
  8320. if (headerSize >= (offsetof(OMNET_OPERATION_PKT, totalSize) +
  8321. (sizeof(pNetMessage->totalSize))))
  8322. {
  8323. TRACE_OUT(("Header contains <totalSize> field (value: %u)",
  8324. pNetMessage->totalSize));
  8325. }
  8326. else
  8327. {
  8328. TRACE_OUT(("Header doesn't contain <totalSize> field"));
  8329. pReceiveCB->pHeader->totalSize = headerSize;
  8330. }
  8331. //
  8332. // Now determine the total number of data bytes involved in this
  8333. // operation:
  8334. //
  8335. totalDataSize = pReceiveCB->pHeader->totalSize - ((UINT) headerSize);
  8336. //
  8337. // If there is any data, allocate some memory to receive it and set the
  8338. // <pData> pointer to point to it (otherwise NULL it):
  8339. //
  8340. if (totalDataSize != 0)
  8341. {
  8342. TRACE_OUT(( "Allocating %u bytes for data for this transfer",
  8343. totalDataSize));
  8344. pReceiveCB->pData = UT_MallocRefCount(totalDataSize, FALSE);
  8345. if (!pReceiveCB->pData)
  8346. {
  8347. ERROR_OUT(( "Failed to allocate %u bytes for object to be recd "
  8348. "from node 0x%08x - will remove WSG from Domain",
  8349. totalDataSize, pNetMessage->header.sender));
  8350. rc = OM_RC_OUT_OF_RESOURCES;
  8351. DC_QUIT;
  8352. }
  8353. }
  8354. else
  8355. {
  8356. pReceiveCB->pData = NULL;
  8357. }
  8358. pReceiveCB->pCurrentPosition = (LPBYTE)pReceiveCB->pData;
  8359. //
  8360. // Set <bytesRecd> to the size of the header. We may have recd some
  8361. // data bytes as well, but they'll be added to the header size in
  8362. // ReceiveData.
  8363. //
  8364. pReceiveCB->bytesRecd = headerSize;
  8365. //
  8366. // Now insert in the list hung off the Domain record:
  8367. //
  8368. COM_BasedListInsertBefore(&(pDomain->receiveList),
  8369. &(pReceiveCB->chain));
  8370. //
  8371. // Set caller's pointer:
  8372. //
  8373. *ppReceiveCB = pReceiveCB;
  8374. DC_EXIT_POINT:
  8375. if (rc != 0)
  8376. {
  8377. ERROR_OUT(( "Error %d receiving first packet of message type %u from node 0x%08x",
  8378. rc, pHeader->header.messageType, pHeader->header.sender));
  8379. if (pReceiveCB != NULL)
  8380. {
  8381. if (pReceiveCB->pData != NULL)
  8382. {
  8383. UT_FreeRefCount((void**)&(pReceiveCB->pData), FALSE);
  8384. }
  8385. UT_FreeRefCount((void**)&pReceiveCB, FALSE);
  8386. }
  8387. if (pHeader != NULL)
  8388. {
  8389. UT_FreeRefCount((void**)&pHeader, FALSE);
  8390. }
  8391. }
  8392. DebugExitDWORD(CreateReceiveCB, rc);
  8393. return(rc);
  8394. }
  8395. //
  8396. //
  8397. //
  8398. // FindReceiveCB(...)
  8399. //
  8400. //
  8401. //
  8402. UINT FindReceiveCB(POM_DOMAIN pDomain,
  8403. PNET_SEND_IND_EVENT pNetSendInd,
  8404. POMNET_OPERATION_PKT pPacket,
  8405. POM_RECEIVE_CB * ppReceiveCB)
  8406. {
  8407. POM_RECEIVE_CB pReceiveCB;
  8408. NET_PRIORITY priority;
  8409. NET_CHANNEL_ID channel;
  8410. NET_UID sender;
  8411. POMNET_OPERATION_PKT pHeader;
  8412. UINT rc = 0;
  8413. DebugEntry(FindReceiveCB);
  8414. //
  8415. // First thing to do is to find the receive control block for the
  8416. // transfer. It should be in the list hung off the Domain record:
  8417. //
  8418. sender = pPacket->header.sender;
  8419. priority = pNetSendInd->priority;
  8420. channel = pNetSendInd->channel;
  8421. pReceiveCB = (POM_RECEIVE_CB)COM_BasedListFirst(&(pDomain->receiveList), FIELD_OFFSET(OM_RECEIVE_CB, chain));
  8422. while (pReceiveCB != NULL)
  8423. {
  8424. //
  8425. // We check for a match on sender's user ID, channel and priority.
  8426. //
  8427. // We assume that, for a given channel, MCS does not reorder packets
  8428. // sent by the same user at the same priority.
  8429. //
  8430. pHeader = pReceiveCB->pHeader;
  8431. if ((pHeader->header.sender == sender) &&
  8432. (pReceiveCB->priority == priority) &&
  8433. (pReceiveCB->channel == channel))
  8434. {
  8435. //
  8436. // Found!
  8437. //
  8438. TRACE_OUT(("Found receive CB for user %hu, chann 0x%08x, pri %hu, at pRecvCB=0x0x%p",
  8439. sender, channel, priority, pReceiveCB));
  8440. break;
  8441. }
  8442. pReceiveCB = (POM_RECEIVE_CB)COM_BasedListNext(&(pDomain->receiveList), pReceiveCB,
  8443. FIELD_OFFSET(OM_RECEIVE_CB, chain));
  8444. }
  8445. if (pReceiveCB == NULL)
  8446. {
  8447. rc = OM_RC_RECEIVE_CB_NOT_FOUND;
  8448. DC_QUIT;
  8449. }
  8450. else
  8451. {
  8452. *ppReceiveCB = pReceiveCB;
  8453. }
  8454. DC_EXIT_POINT:
  8455. DebugExitDWORD(FindReceiveCB, rc);
  8456. return(rc);
  8457. }
  8458. //
  8459. // PurgeReceiveCBs(...)
  8460. //
  8461. void PurgeReceiveCBs
  8462. (
  8463. POM_DOMAIN pDomain,
  8464. NET_CHANNEL_ID channel
  8465. )
  8466. {
  8467. POM_RECEIVE_CB pReceiveCB;
  8468. POM_RECEIVE_CB pNextReceiveCB;
  8469. DebugEntry(PurgeReceiveCBs);
  8470. pReceiveCB = (POM_RECEIVE_CB)COM_BasedListFirst(&(pDomain->receiveList), FIELD_OFFSET(OM_RECEIVE_CB, chain));
  8471. while (pReceiveCB != NULL)
  8472. {
  8473. //
  8474. // Need to chain here since we may remove pReceiveCB from the list:
  8475. //
  8476. pNextReceiveCB = (POM_RECEIVE_CB)COM_BasedListNext(&(pDomain->receiveList), pReceiveCB,
  8477. FIELD_OFFSET(OM_RECEIVE_CB, chain));
  8478. if (pReceiveCB->channel == channel)
  8479. {
  8480. //
  8481. // This receive CB is for the channel being purged - remove it
  8482. // from the list and free the memory.
  8483. //
  8484. WARNING_OUT(( "Purging receive CB from user %hu",
  8485. pReceiveCB->pHeader->header.sender));
  8486. COM_BasedListRemove(&(pReceiveCB->chain));
  8487. //
  8488. // Free the data memory.
  8489. //
  8490. if (pReceiveCB->pData != NULL)
  8491. {
  8492. UT_FreeRefCount(&pReceiveCB->pData, FALSE);
  8493. }
  8494. //
  8495. // Free the header memory.
  8496. //
  8497. if (pReceiveCB->pHeader != NULL)
  8498. {
  8499. UT_FreeRefCount((void**)&pReceiveCB->pHeader, FALSE);
  8500. }
  8501. //
  8502. // Finally free the control block.
  8503. //
  8504. UT_FreeRefCount((void**)&pReceiveCB, FALSE);
  8505. }
  8506. pReceiveCB = pNextReceiveCB;
  8507. }
  8508. DebugExitVOID(PurgeReceiveCBs);
  8509. }
  8510. //
  8511. // ProcessMessage(...)
  8512. //
  8513. UINT ProcessMessage
  8514. (
  8515. POM_PRIMARY pomPrimary,
  8516. POM_RECEIVE_CB pReceiveCB,
  8517. UINT whatNext
  8518. )
  8519. {
  8520. POM_DOMAIN pDomain;
  8521. POMNET_OPERATION_PKT pHeader;
  8522. void * pData;
  8523. NET_PRIORITY priority;
  8524. OMNET_MESSAGE_TYPE messageType;
  8525. POM_WSGROUP pWSGroup;
  8526. POM_WORKSET pWorkset;
  8527. POM_OBJECT pObj;
  8528. BOOL bounced = FALSE;
  8529. BOOL retryBounceList = FALSE;
  8530. BOOL freeMemory = FALSE;
  8531. UINT rc = 0;
  8532. DebugEntry(ProcessMessage);
  8533. //
  8534. // Set up local variables:
  8535. //
  8536. pDomain = pReceiveCB->pDomain;
  8537. pHeader = pReceiveCB->pHeader;
  8538. priority = pReceiveCB->priority;
  8539. pData = pReceiveCB->pData;
  8540. messageType = pHeader->header.messageType;
  8541. //
  8542. // Extract pointers to workset group, workset and object record from
  8543. // the packet:
  8544. //
  8545. rc = PreProcessMessage(pDomain,
  8546. pHeader->wsGroupID,
  8547. pHeader->worksetID,
  8548. &pHeader->objectID,
  8549. pHeader->header.messageType,
  8550. &pWSGroup,
  8551. &pWorkset,
  8552. &pObj);
  8553. //
  8554. // PreProcess will have told us if it didn't find the relevant workset
  8555. // group, workset or object. Whether or not this is an error depends
  8556. // on the operation in question. We use a series of IF statements to
  8557. // detect and handle the following conditions:
  8558. //
  8559. //
  8560. // 1. Unknown workset group Discard the operation
  8561. //
  8562. // 2. Existing workset, WORKSET_NEW/CATCHUP Discard the operation
  8563. // 3. Unknown workset, any other operation Bounce the operation
  8564. //
  8565. // 4. Deleted object, any operation Discard the operation
  8566. // 5. Existing object, OBJECT_ADD/CATCHUP Discard the operation
  8567. // 6. Unknown object, any other operation Bounce the operation
  8568. //
  8569. //
  8570. //
  8571. // Test 1.:
  8572. //
  8573. if (rc == OM_RC_WSGROUP_NOT_FOUND)
  8574. {
  8575. //
  8576. // If we didn't even find the workset group, we just quit:
  8577. //
  8578. WARNING_OUT(( "Message is for unknown WSG (ID: %hu) in Domain %u",
  8579. pHeader->wsGroupID, pDomain->callID));
  8580. rc = 0;
  8581. //
  8582. // Mark the data memory allocated for this object to be freed.
  8583. //
  8584. freeMemory = TRUE;
  8585. DC_QUIT;
  8586. }
  8587. //
  8588. // Test 2.:
  8589. //
  8590. if (rc != OM_RC_WORKSET_NOT_FOUND) // i.e. existing workset
  8591. {
  8592. if ((messageType == OMNET_WORKSET_NEW) ||
  8593. (messageType == OMNET_WORKSET_CATCHUP))
  8594. {
  8595. //
  8596. // We've got a WORKSET_NEW or WORKSET_CATCHUP message, but the
  8597. // workset already exists. This is not a problem - we throw the
  8598. // message away - but check the priority and persistence fields
  8599. // are set to the right values.
  8600. //
  8601. // (They might be wrong if we created the workset on receipt of
  8602. // a lock request for a workset we didn't already have).
  8603. //
  8604. TRACE_OUT((
  8605. "Recd WORKSET_NEW/CATCHUP for extant workset %u in WSG %d",
  8606. pWorkset->worksetID, pWSGroup->wsg));
  8607. pWorkset->priority = *((NET_PRIORITY *) &(pHeader->position));
  8608. pWorkset->fTemp = *((BOOL *) &(pHeader->objectID));
  8609. rc = 0;
  8610. DC_QUIT;
  8611. }
  8612. }
  8613. //
  8614. // Test 3.:
  8615. //
  8616. else // rc == OM_RC_WORKSET_NOT_FOUND
  8617. {
  8618. if ((messageType != OMNET_WORKSET_NEW) &&
  8619. (messageType != OMNET_WORKSET_CATCHUP))
  8620. {
  8621. //
  8622. // Packet is for unknown workset and it's not a
  8623. // WORKSET_NEW/CATCHUP, so bounce it:
  8624. //
  8625. TRACE_OUT(( "Bouncing message for unknown workset %d WSG %d",
  8626. pHeader->worksetID, pWSGroup->wsg));
  8627. BounceMessage(pDomain, pReceiveCB);
  8628. bounced = TRUE;
  8629. rc = 0;
  8630. DC_QUIT;
  8631. }
  8632. }
  8633. //
  8634. // Test 4:.
  8635. //
  8636. if ((rc == OM_RC_OBJECT_DELETED) || (rc == OM_RC_OBJECT_PENDING_DELETE))
  8637. {
  8638. //
  8639. // Packet is for object which has been deleted, so we just throw it
  8640. // away (done for us by our caller):
  8641. //
  8642. TRACE_OUT(("Message 0x%08x for deleted obj 0x%08x:0x%08x in WSG %d:%hu",
  8643. messageType,
  8644. pHeader->objectID.creator, pHeader->objectID.sequence,
  8645. pWSGroup->wsg, pWorkset->worksetID));
  8646. rc = 0;
  8647. //
  8648. // Mark the data memory allocated for this object to be freed.
  8649. //
  8650. freeMemory = TRUE;
  8651. DC_QUIT;
  8652. }
  8653. //
  8654. // Test 5.:
  8655. //
  8656. if (rc != OM_RC_BAD_OBJECT_ID) // i.e. existing object
  8657. {
  8658. if ((messageType == OMNET_OBJECT_ADD) ||
  8659. (messageType == OMNET_OBJECT_CATCHUP))
  8660. {
  8661. //
  8662. // In this case, we DO have an OBEJCT_ADD/CATCHUP, but the
  8663. // object was found anyway! This must be a duplicate Add, so
  8664. // we just throw it away:
  8665. //
  8666. TRACE_OUT(( "Add for existing object 0x%08x:0x%08x in WSG %d:%hu",
  8667. pHeader->objectID.creator, pHeader->objectID.sequence,
  8668. pWSGroup->wsg, pWorkset->worksetID));
  8669. rc = 0;
  8670. //
  8671. // Mark the data memory allocated for this object to be freed.
  8672. //
  8673. freeMemory = TRUE;
  8674. DC_QUIT;
  8675. }
  8676. }
  8677. //
  8678. // Test 6.:
  8679. //
  8680. else // rc == OM_RC_BAD_OBJECT_ID
  8681. {
  8682. if ((messageType != OMNET_OBJECT_ADD) &&
  8683. (messageType != OMNET_OBJECT_CATCHUP))
  8684. {
  8685. //
  8686. // Packet is for unknown object, but it's not an
  8687. // OBJECT_ADD/CATCHUP, so bounce it:
  8688. //
  8689. TRACE_OUT(( "Message 0x%08x for unknown obj 0x%08x:0x%08x in WSG %d:%hu",
  8690. messageType,
  8691. pHeader->objectID.creator, pHeader->objectID.sequence,
  8692. pWSGroup->wsg, pWorkset->worksetID));
  8693. BounceMessage(pDomain, pReceiveCB);
  8694. bounced = TRUE;
  8695. rc = 0;
  8696. DC_QUIT;
  8697. }
  8698. }
  8699. //
  8700. // OK, we've passed all the tests above, so we must be in a position to
  8701. // process the operation. Switch on the message type and invoke the
  8702. // appropriate function:
  8703. //
  8704. switch (messageType)
  8705. {
  8706. case OMNET_LOCK_NOTIFY:
  8707. {
  8708. ProcessLockNotify(pomPrimary,
  8709. pDomain,
  8710. pWSGroup,
  8711. pWorkset,
  8712. ((POMNET_LOCK_PKT)pHeader)->data1);
  8713. }
  8714. break;
  8715. case OMNET_UNLOCK:
  8716. {
  8717. ProcessUnlock(pomPrimary,
  8718. pWorkset,
  8719. pHeader->header.sender);
  8720. }
  8721. break;
  8722. case OMNET_WORKSET_CATCHUP:
  8723. case OMNET_WORKSET_NEW:
  8724. {
  8725. rc = ProcessWorksetNew(pomPrimary->putTask, pHeader, pWSGroup);
  8726. //
  8727. // We will want to see if any bouncing messages can be
  8728. // processed because of this new workset, so set the reprocess
  8729. // flag:
  8730. //
  8731. retryBounceList = TRUE;
  8732. }
  8733. break;
  8734. case OMNET_WORKSET_CLEAR:
  8735. {
  8736. rc = ProcessWorksetClear(pomPrimary->putTask,
  8737. pomPrimary,
  8738. pHeader,
  8739. pWSGroup,
  8740. pWorkset);
  8741. }
  8742. break;
  8743. case OMNET_OBJECT_CATCHUP:
  8744. case OMNET_OBJECT_ADD:
  8745. {
  8746. rc = ProcessObjectAdd(pomPrimary->putTask,
  8747. pHeader,
  8748. pWSGroup,
  8749. pWorkset,
  8750. (POM_OBJECTDATA) pData,
  8751. &pObj);
  8752. retryBounceList = TRUE;
  8753. }
  8754. break;
  8755. case OMNET_OBJECT_MOVE:
  8756. {
  8757. ProcessObjectMove(pomPrimary->putTask,
  8758. pHeader,
  8759. pWorkset,
  8760. pObj);
  8761. }
  8762. break;
  8763. case OMNET_OBJECT_DELETE:
  8764. case OMNET_OBJECT_REPLACE:
  8765. case OMNET_OBJECT_UPDATE:
  8766. {
  8767. rc = ProcessObjectDRU(pomPrimary->putTask,
  8768. pHeader,
  8769. pWSGroup,
  8770. pWorkset,
  8771. pObj,
  8772. (POM_OBJECTDATA) pData);
  8773. }
  8774. break;
  8775. default:
  8776. {
  8777. ERROR_OUT(( "Default case in switch (message type: 0x%08x)",
  8778. messageType));
  8779. }
  8780. }
  8781. if (rc != 0)
  8782. {
  8783. ERROR_OUT(( "Error %d processing operation (type: 0x%08x)",
  8784. rc, messageType));
  8785. DC_QUIT;
  8786. }
  8787. TRACE_OUT(("Processed message type 0x%08x", messageType));
  8788. DC_EXIT_POINT:
  8789. //
  8790. // Unless we bounced the message, do some cleanup:
  8791. //
  8792. // Note: This must be after DC_EXIT_POINT because we want to do it
  8793. // even if we didn't process the message (unless we bounced it).
  8794. //
  8795. // If we haven't bounced the message then we may be able to free
  8796. // the data depending on the results of the above tests.
  8797. //
  8798. if (bounced == FALSE)
  8799. {
  8800. //
  8801. // Remove the message from the list it's in (either the pending
  8802. // receives list if this message was never bounced or the bounce
  8803. // list if it has been bounced):
  8804. //
  8805. COM_BasedListRemove(&(pReceiveCB->chain));
  8806. //
  8807. // Now free the message and the receive control block (NOT THE
  8808. // DATA! If there was any, it's just been used for an object
  8809. // add/update etc.)
  8810. //
  8811. UT_FreeRefCount((void**)&pHeader, FALSE);
  8812. UT_FreeRefCount((void**)&pReceiveCB, FALSE);
  8813. //
  8814. // ...unless of course we indicated that we should free the data:
  8815. //
  8816. if (freeMemory)
  8817. {
  8818. if (pData != NULL)
  8819. {
  8820. TRACE_OUT(("Freeing object data at 0x%08x", pData));
  8821. UT_FreeRefCount(&pData, FALSE);
  8822. }
  8823. }
  8824. }
  8825. else
  8826. {
  8827. rc = OM_RC_BOUNCED;
  8828. }
  8829. //
  8830. // If we're not already processing bounced messages, and this message
  8831. // is an "enabling" message (i.e. a WORKSET_NEW or OBJECT_ADD), then
  8832. // retry the bounce list:
  8833. //
  8834. if ((whatNext == OK_TO_RETRY_BOUNCE_LIST) &&
  8835. (retryBounceList))
  8836. {
  8837. ProcessBouncedMessages(pomPrimary, pDomain);
  8838. }
  8839. DebugExitDWORD(ProcessMessage, rc);
  8840. return(rc);
  8841. }
  8842. //
  8843. // BounceMessage()
  8844. //
  8845. void BounceMessage
  8846. (
  8847. POM_DOMAIN pDomain,
  8848. POM_RECEIVE_CB pReceiveCB
  8849. )
  8850. {
  8851. UINT count;
  8852. DebugEntry(BounceMessage);
  8853. TRACE_OUT(( "Bouncing message type 0x%08x (CB at 0x%08x)",
  8854. pReceiveCB->pHeader->header.messageType, pReceiveCB));
  8855. //
  8856. // Remove this receive CB from whichever list its currently in (either
  8857. // the list of pending receives if this is the first time it's been
  8858. // bounced or the bounce list if not) and insert it at the START of the
  8859. // bounce list for the Domain:
  8860. //
  8861. // Note: the reason why we insert at the start is because
  8862. // ProcessBouncedMessages may be chaining through the list and
  8863. // we don't want to put this one back in the list at a later
  8864. // point or else we might go infinite.
  8865. //
  8866. COM_BasedListRemove(&(pReceiveCB->chain));
  8867. COM_BasedListInsertAfter(&(pDomain->bounceList), &(pReceiveCB->chain));
  8868. DebugExitVOID(BounceMessage);
  8869. }
  8870. //
  8871. //
  8872. //
  8873. // ProcessBouncedMessages(...)
  8874. //
  8875. //
  8876. //
  8877. void ProcessBouncedMessages(POM_PRIMARY pomPrimary,
  8878. POM_DOMAIN pDomain)
  8879. {
  8880. UINT count;
  8881. POM_RECEIVE_CB pReceiveCB;
  8882. POM_RECEIVE_CB pTempReceiveCB;
  8883. BOOL listGettingShorter;
  8884. UINT numPasses;
  8885. UINT rc;
  8886. DebugEntry(ProcessBouncedMessages);
  8887. TRACE_OUT(( "Processing bounced messages"));
  8888. //
  8889. // It is important that we process bounced messages as soon as we are
  8890. // able. Since processing one may enable others to be processed, we
  8891. // must go through the list several times, until we can't do any more
  8892. // work on it. So, we keep track of whether the list is getting shorter
  8893. // - if it is, we must have processed something so it's worth going
  8894. // through again.
  8895. //
  8896. // Note: an alternative would be do do exactly three passes through the
  8897. // list: one to do all the WORKSET_NEWs, then one to do all the
  8898. // OBJECT_ADDs and then one to do any remaining operations. This
  8899. // is slightly less generic code and is tied in to the current
  8900. // dependencies between operations so is not ideal but it may
  8901. // prove to be a good performance improvement if the average
  8902. // number of passes we do now exceeds three.
  8903. //
  8904. listGettingShorter = TRUE;
  8905. numPasses = 0;
  8906. pReceiveCB = (POM_RECEIVE_CB)COM_BasedListFirst(&(pDomain->bounceList), FIELD_OFFSET(OM_RECEIVE_CB, chain));
  8907. while (listGettingShorter)
  8908. {
  8909. numPasses++;
  8910. listGettingShorter = FALSE;
  8911. while (pReceiveCB != NULL)
  8912. {
  8913. //
  8914. // We want to chain through the list of bounced messages and try
  8915. // to process each one. However, trying to process a message
  8916. // could cause it to be removed from the list (if processed) or
  8917. // added back in at the start (if bounced again).
  8918. //
  8919. // So, we chain NOW to the next one in the list:
  8920. //
  8921. pTempReceiveCB = (POM_RECEIVE_CB)COM_BasedListNext(&(pDomain->bounceList), pReceiveCB,
  8922. FIELD_OFFSET(OM_RECEIVE_CB, chain));
  8923. TRACE_OUT(( "Retrying message type 0x%08x (CB at 0x%08x)",
  8924. pReceiveCB->pHeader->header.messageType, pReceiveCB));
  8925. rc = ProcessMessage(pomPrimary, pReceiveCB, DONT_RETRY_BOUNCE_LIST);
  8926. if (rc != OM_RC_BOUNCED)
  8927. {
  8928. //
  8929. // We processed a message, so set the flag for another run
  8930. // through the list:
  8931. //
  8932. TRACE_OUT(( "Successfully processed bounced message"));
  8933. listGettingShorter = TRUE;
  8934. }
  8935. //
  8936. // Now "chain" on to the next one, using the link we've already
  8937. // set up:
  8938. //
  8939. pReceiveCB = pTempReceiveCB;
  8940. }
  8941. }
  8942. TRACE_OUT(( "Processed as much of bounce list as possible in %hu passes",
  8943. numPasses));
  8944. DebugExitVOID(ProcessBouncedMessages);
  8945. }
  8946. //
  8947. // FreeSendInst(...)
  8948. //
  8949. void FreeSendInst
  8950. (
  8951. POM_SEND_INST pSendInst
  8952. )
  8953. {
  8954. DebugEntry(FreeSendInst);
  8955. if (pSendInst->pMessage != NULL)
  8956. {
  8957. UT_FreeRefCount((void**)&(pSendInst->pMessage), FALSE);
  8958. }
  8959. if (pSendInst->pWSGroup != NULL)
  8960. {
  8961. UT_FreeRefCount((void**)&(pSendInst->pWSGroup), FALSE);
  8962. }
  8963. if (pSendInst->pWorkset != NULL)
  8964. {
  8965. UT_FreeRefCount((void**)&(pSendInst->pWorkset), FALSE);
  8966. }
  8967. if (pSendInst->pObj != NULL)
  8968. {
  8969. UT_FreeRefCount((void**)&(pSendInst->pObj), FALSE);
  8970. }
  8971. if (pSendInst->pDataStart != NULL)
  8972. {
  8973. UT_FreeRefCount((void**)&(pSendInst->pDataStart), FALSE);
  8974. }
  8975. //
  8976. // Now free the send instruction itself:
  8977. //
  8978. COM_BasedListRemove(&(pSendInst->chain));
  8979. UT_FreeRefCount((void**)&pSendInst, FALSE);
  8980. DebugExitVOID(FreeSendInst);
  8981. }
  8982. //
  8983. // PreProcessMessage(...)
  8984. //
  8985. UINT PreProcessMessage
  8986. (
  8987. POM_DOMAIN pDomain,
  8988. OM_WSGROUP_ID wsGroupID,
  8989. OM_WORKSET_ID worksetID,
  8990. POM_OBJECT_ID pObjectID,
  8991. OMNET_MESSAGE_TYPE messageType,
  8992. POM_WSGROUP * ppWSGroup,
  8993. POM_WORKSET * ppWorkset,
  8994. POM_OBJECT * ppObj
  8995. )
  8996. {
  8997. POM_WSGROUP pWSGroup = NULL;
  8998. POM_WORKSET pWorkset = NULL;
  8999. POM_OBJECT pObj;
  9000. UINT rc = 0;
  9001. DebugEntry(PreProcessMessage);
  9002. //
  9003. // OK, we've got some sort of operation message: let's find the workset
  9004. // group it relates to:
  9005. //
  9006. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pDomain->wsGroups),
  9007. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  9008. FIELD_OFFSET(OM_WSGROUP, wsGroupID), (DWORD)wsGroupID,
  9009. FIELD_SIZE(OM_WSGROUP, wsGroupID));
  9010. if (pWSGroup == NULL)
  9011. {
  9012. //
  9013. // This is a message for a workset group which we are not/no longer
  9014. // registered with, so quit (our caller will throw it away):
  9015. //
  9016. rc = OM_RC_WSGROUP_NOT_FOUND;
  9017. DC_QUIT;
  9018. }
  9019. ValidateWSGroup(pWSGroup);
  9020. pWorkset = pWSGroup->apWorksets[worksetID];
  9021. //
  9022. // Check that this set up a valid workset pointer:
  9023. //
  9024. if (pWorkset == NULL)
  9025. {
  9026. rc = OM_RC_WORKSET_NOT_FOUND;
  9027. DC_QUIT;
  9028. }
  9029. ValidateWorkset(pWorkset);
  9030. //
  9031. // Search for the object ID, locking workset group mutex while we do
  9032. // so.
  9033. //
  9034. // Note: if the <pObjectID> parameter is NULL, it means that the caller
  9035. // doesn't want us to search for the object ID, so we skip this
  9036. // step
  9037. //
  9038. switch (messageType)
  9039. {
  9040. case OMNET_OBJECT_ADD:
  9041. case OMNET_OBJECT_CATCHUP:
  9042. case OMNET_OBJECT_REPLACE:
  9043. case OMNET_OBJECT_UPDATE:
  9044. case OMNET_OBJECT_DELETE:
  9045. case OMNET_OBJECT_MOVE:
  9046. {
  9047. rc = ObjectIDToPtr(pWorkset, *pObjectID, &pObj);
  9048. if (rc != 0)
  9049. {
  9050. //
  9051. // No object found with this ID (rc is BAD_ID, DELETED or
  9052. // PENDING_DELETE):
  9053. //
  9054. *ppObj = NULL;
  9055. }
  9056. else
  9057. {
  9058. ValidateObject(pObj);
  9059. *ppObj = pObj;
  9060. }
  9061. }
  9062. break;
  9063. default:
  9064. {
  9065. //
  9066. // Do nothing for other messages.
  9067. //
  9068. }
  9069. }
  9070. DC_EXIT_POINT:
  9071. *ppWorkset = pWorkset;
  9072. *ppWSGroup = pWSGroup;
  9073. TRACE_OUT(("Pre-processed message for Domain %u", pDomain->callID));
  9074. DebugExitDWORD(PreProcessMessage, rc);
  9075. return(rc);
  9076. }
  9077. //
  9078. // PurgeNonPersistent(...)
  9079. //
  9080. void PurgeNonPersistent
  9081. (
  9082. POM_PRIMARY pomPrimary,
  9083. POM_DOMAIN pDomain,
  9084. OM_WSGROUP_ID wsGroupID,
  9085. NET_UID userID
  9086. )
  9087. {
  9088. POM_WSGROUP pWSGroup;
  9089. POM_WORKSET pWorkset;
  9090. OM_WORKSET_ID worksetID;
  9091. POM_OBJECT pObj;
  9092. DebugEntry(PurgeNonPersistent);
  9093. //
  9094. // Find the workset group which has the specified ID:
  9095. //
  9096. COM_BasedListFind(LIST_FIND_FROM_FIRST, &pDomain->wsGroups,
  9097. (void**)&pWSGroup, FIELD_OFFSET(OM_WSGROUP, chain),
  9098. FIELD_OFFSET(OM_WSGROUP, wsGroupID), (DWORD)wsGroupID,
  9099. FIELD_SIZE(OM_WSGROUP, wsGroupID));
  9100. if (pWSGroup == NULL)
  9101. {
  9102. //
  9103. // SFR5794: Not an error if wsgroup not found - this just means
  9104. // someone has detached who was using a workset group which we were
  9105. // not using.
  9106. //
  9107. TRACE_OUT(("WSGroup %hu not found in domain %u",
  9108. wsGroupID, pDomain->callID));
  9109. DC_QUIT;
  9110. }
  9111. //
  9112. // Chain through each workset in the group - for those that are
  9113. // non-persistent, then chain through each object looking for a match
  9114. // on the user ID of the departed node:
  9115. //
  9116. for (worksetID = 0; worksetID < OM_MAX_WORKSETS_PER_WSGROUP; worksetID++)
  9117. {
  9118. pWorkset = pWSGroup->apWorksets[worksetID];
  9119. if (pWorkset == NULL)
  9120. {
  9121. //
  9122. // Workset with this ID doesn't exist - continue
  9123. //
  9124. continue;
  9125. }
  9126. if (!pWorkset->fTemp)
  9127. {
  9128. //
  9129. // A persistent workset - we don't need to purge it of objects
  9130. //
  9131. continue;
  9132. }
  9133. pObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  9134. while (pObj != NULL)
  9135. {
  9136. ValidateObject(pObj);
  9137. //
  9138. // SFR6353: Don't try to delete the object if it's already
  9139. // pending delete.
  9140. //
  9141. if (!(pObj->flags & DELETED) &&
  9142. !(pObj->flags & PENDING_DELETE))
  9143. {
  9144. //
  9145. // If this object was added by the departed node, OR if
  9146. // ALL_REMOTES have gone and it was not added by us...
  9147. //
  9148. if ((pObj->objectID.creator == userID) ||
  9149. ((userID == NET_ALL_REMOTES) &&
  9150. (pObj->objectID.creator != pDomain->userID)))
  9151. {
  9152. //
  9153. // ...delete it:
  9154. //
  9155. ObjectDRU(pomPrimary->putTask,
  9156. pWSGroup,
  9157. pWorkset,
  9158. pObj,
  9159. NULL,
  9160. OMNET_OBJECT_DELETE);
  9161. }
  9162. }
  9163. pObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObj,
  9164. FIELD_OFFSET(OM_OBJECT, chain));
  9165. }
  9166. }
  9167. DC_EXIT_POINT:
  9168. DebugExitVOID(PurgeNonPersistent);
  9169. }
  9170. //
  9171. // SetPersonData(...)
  9172. //
  9173. UINT SetPersonData
  9174. (
  9175. POM_PRIMARY pomPrimary,
  9176. POM_DOMAIN pDomain,
  9177. POM_WSGROUP pWSGroup
  9178. )
  9179. {
  9180. POM_WSGROUP pOMCWSGroup;
  9181. POM_WORKSET pOMCWorkset;
  9182. POM_OBJECT pObjReg;
  9183. POM_WSGROUP_REG_REC pRegObject;
  9184. POM_WSGROUP_REG_REC pNewRegObject;
  9185. UINT rc = 0;
  9186. DebugEntry(SetPersonData);
  9187. //
  9188. // Set up pointers to the ObManControl workset group and the workset
  9189. // which contains the object to be replaced:
  9190. //
  9191. pOMCWSGroup = GetOMCWsgroup(pDomain);
  9192. pOMCWorkset = pOMCWSGroup->apWorksets[pWSGroup->wsGroupID];
  9193. //
  9194. // Set up pointers to the object record and the object data itself:
  9195. //
  9196. pObjReg = pWSGroup->pObjReg;
  9197. ValidateObject(pObjReg);
  9198. pRegObject = (POM_WSGROUP_REG_REC)pObjReg->pData;
  9199. if (!pRegObject)
  9200. {
  9201. ERROR_OUT(("SetPersonData: object 0x%08x has no data", pObjReg));
  9202. rc = UT_RC_NO_MEM;
  9203. DC_QUIT;
  9204. }
  9205. ValidateObjectDataWSGREGREC(pRegObject);
  9206. //
  9207. // Allocate some memory for the new object with which we are about to
  9208. // replace the old one:
  9209. //
  9210. pNewRegObject = (POM_WSGROUP_REG_REC)UT_MallocRefCount(sizeof(OM_WSGROUP_REG_REC), TRUE);
  9211. if (!pNewRegObject)
  9212. {
  9213. rc = UT_RC_NO_MEM;
  9214. DC_QUIT;
  9215. }
  9216. //
  9217. // Set the fields in the new object to have the same data as the old:
  9218. //
  9219. pNewRegObject->length = pRegObject->length;
  9220. pNewRegObject->idStamp = pRegObject->idStamp;
  9221. pNewRegObject->userID = pRegObject->userID;
  9222. pNewRegObject->status = pRegObject->status;
  9223. //
  9224. // Fill in the person data fields and issue the replace:
  9225. //
  9226. COM_GetSiteName(pNewRegObject->personData.personName,
  9227. sizeof(pNewRegObject->personData.personName));
  9228. rc = ObjectDRU(pomPrimary->putTask,
  9229. pOMCWSGroup,
  9230. pOMCWorkset,
  9231. pObjReg,
  9232. (POM_OBJECTDATA) pNewRegObject,
  9233. OMNET_OBJECT_REPLACE);
  9234. if (rc != 0)
  9235. {
  9236. DC_QUIT;
  9237. }
  9238. TRACE_OUT((" Set person data for WSG %d", pWSGroup->wsg));
  9239. DC_EXIT_POINT:
  9240. if (rc != 0)
  9241. {
  9242. ERROR_OUT(("Error %d updating own reg object for WSG %d",
  9243. rc, pWSGroup->wsg));
  9244. }
  9245. DebugExitDWORD(SetPersonData, rc);
  9246. return(rc);
  9247. }
  9248. //
  9249. // RemoveInfoObject(...)
  9250. //
  9251. void RemoveInfoObject
  9252. (
  9253. POM_PRIMARY pomPrimary,
  9254. POM_DOMAIN pDomain,
  9255. OM_WSGROUP_ID wsGroupID
  9256. )
  9257. {
  9258. POM_WSGROUP pOMCWSGroup;
  9259. POM_WORKSET pOMCWorkset;
  9260. POM_OBJECT pObj;
  9261. DebugEntry(RemoveInfoObject);
  9262. //
  9263. // OK, we've got to delete the identification object in workset #0 in
  9264. // ObManControl which identified the workset group.
  9265. //
  9266. pOMCWSGroup = GetOMCWsgroup(pDomain);
  9267. pOMCWorkset = GetOMCWorkset(pDomain, 0);
  9268. //
  9269. // ...search for the WSGROUP_INFO object (by wsGroupID - we don't know
  9270. // the name or function profile so leave them blank):
  9271. //
  9272. FindInfoObject(pDomain, wsGroupID, OMWSG_MAX, OMFP_MAX, &pObj);
  9273. if (pObj == NULL)
  9274. {
  9275. //
  9276. // This should happen only for the local Domain:
  9277. //
  9278. // SFR 2208 : No: This will also happen in a regular call when
  9279. // the call ends almost as soon as it has begun. The
  9280. // sequence of events is as follows:
  9281. //
  9282. // - on callee, ObMan sends WSG_SEND_REQ to caller
  9283. // - caller sends REG_REC object, then WORKSET_CATCHUP
  9284. // then the INFO object we can't find
  9285. // - callee receives REG_REC then WORKSET_CATHCUP
  9286. // - call ends and callee enters WSGRemoveFromDomain
  9287. // which finds the REG_REC then calls us here
  9288. //
  9289. // Therefore the DC_ABSence of the INFO object is valid
  9290. // and we just trace an alert:
  9291. //
  9292. // NOTE: It will also happen when we receive a DELETE from
  9293. // someone else who is doing the same purge process
  9294. // as us.
  9295. //
  9296. WARNING_OUT(("No INFO object found for wsGroup %hu", wsGroupID));
  9297. DC_QUIT;
  9298. }
  9299. else
  9300. {
  9301. ValidateObject(pObj);
  9302. }
  9303. //
  9304. // We found an object, so delete it from the workset:
  9305. //
  9306. TRACE_OUT(("Deleting INFO object for wsGroup %hu from domain %u",
  9307. wsGroupID, pDomain->callID));
  9308. ObjectDRU(pomPrimary->putTask,
  9309. pOMCWSGroup,
  9310. pOMCWorkset,
  9311. pObj,
  9312. NULL,
  9313. OMNET_OBJECT_DELETE);
  9314. DC_EXIT_POINT:
  9315. DebugExitVOID(RemoveInfoObject);
  9316. }
  9317. //
  9318. // RemovePersonObject(...)
  9319. //
  9320. void RemovePersonObject
  9321. (
  9322. POM_PRIMARY pomPrimary,
  9323. POM_DOMAIN pDomain,
  9324. OM_WSGROUP_ID wsGroupID,
  9325. NET_UID detachedUserID
  9326. )
  9327. {
  9328. POM_WSGROUP pOMCWSGroup;
  9329. POM_WORKSET pOMCWorkset;
  9330. POM_OBJECT pObjReg;
  9331. NET_UID userIDRemoved;
  9332. POM_WSGROUP_REG_REC pRegObject;
  9333. DebugEntry(RemovePersonObject);
  9334. //
  9335. // Set up pointers to the ObManControl workset group and the relevant
  9336. // workset within it:
  9337. //
  9338. pOMCWSGroup = GetOMCWsgroup(pDomain);
  9339. pOMCWorkset = pOMCWSGroup->apWorksets[wsGroupID];
  9340. //
  9341. // If there is no such workset, it could be because the workset group
  9342. // has been moved into the local Domain on call end etc. In this case,
  9343. // just quit out.
  9344. //
  9345. if (pOMCWorkset == NULL)
  9346. {
  9347. TRACE_OUT(("OMC Workset not found - no person objects to remove"));
  9348. DC_QUIT;
  9349. }
  9350. //
  9351. // If detachedUserID is NET_ALL_REMOTES, we've a lot of work to do and
  9352. // we'll do this loop many times - otherwise we'll just do it for a
  9353. // single person object.
  9354. //
  9355. for (;;)
  9356. {
  9357. if (detachedUserID == NET_ALL_REMOTES)
  9358. {
  9359. //
  9360. // This will find ANY person object that's NOT OURS:
  9361. //
  9362. FindPersonObject(pOMCWorkset,
  9363. pDomain->userID,
  9364. FIND_OTHERS,
  9365. &pObjReg);
  9366. }
  9367. else
  9368. {
  9369. //
  9370. // This will find a specific node's person object:
  9371. //
  9372. FindPersonObject(pOMCWorkset,
  9373. detachedUserID,
  9374. FIND_THIS,
  9375. &pObjReg);
  9376. }
  9377. //
  9378. // If we don't find one, get out of the loop:
  9379. //
  9380. if (pObjReg == NULL)
  9381. {
  9382. break;
  9383. }
  9384. ValidateObject(pObjReg);
  9385. //
  9386. // If detachedUserID was NET_ALL_REMOTES, the user ID in the object
  9387. // we're deleting will obviously be different. So, find out the
  9388. // real user ID from the object we're deleting:
  9389. //
  9390. pRegObject = (POM_WSGROUP_REG_REC)pObjReg->pData;
  9391. if (!pRegObject)
  9392. {
  9393. ERROR_OUT(("RemovePersonObject: object 0x%08x has no data", pObjReg));
  9394. }
  9395. else
  9396. {
  9397. ValidateObjectDataWSGREGREC(pRegObject);
  9398. userIDRemoved = pRegObject->userID;
  9399. //
  9400. // Now delete the object. If the return code is bad, don't quit -
  9401. // we may still want to delete the info object.
  9402. //
  9403. TRACE_OUT(("Deleting person object for node 0x%08x, wsGroup %hu",
  9404. userIDRemoved, wsGroupID));
  9405. if (ObjectDRU(pomPrimary->putTask,
  9406. pOMCWSGroup,
  9407. pOMCWorkset,
  9408. pObjReg,
  9409. NULL,
  9410. OMNET_OBJECT_DELETE) != 0)
  9411. {
  9412. ERROR_OUT(("Error from ObjectDRU - leaving loop"));
  9413. break;
  9414. }
  9415. }
  9416. }
  9417. DC_EXIT_POINT:
  9418. DebugExitVOID(RemovePersonObject);
  9419. }
  9420. //
  9421. // WSGRecordFind(...)
  9422. //
  9423. void WSGRecordFind
  9424. (
  9425. POM_DOMAIN pDomain,
  9426. OMWSG wsg,
  9427. OMFP fpHandler,
  9428. POM_WSGROUP * ppWSGroup
  9429. )
  9430. {
  9431. POM_WSGROUP pWSGroup = NULL;
  9432. DebugEntry(WSGRecordFind);
  9433. //
  9434. // Search for workset group record:
  9435. //
  9436. TRACE_OUT(("Searching WSG list for Domain %u for match on WSG %d FP %d",
  9437. pDomain->callID, wsg, fpHandler));
  9438. pWSGroup = (POM_WSGROUP)COM_BasedListFirst(&(pDomain->wsGroups), FIELD_OFFSET(OM_WSGROUP, chain));
  9439. while (pWSGroup != NULL)
  9440. {
  9441. if ((pWSGroup->wsg == wsg) && (pWSGroup->fpHandler == fpHandler))
  9442. {
  9443. break;
  9444. }
  9445. pWSGroup = (POM_WSGROUP)COM_BasedListNext(&(pDomain->wsGroups), pWSGroup,
  9446. FIELD_OFFSET(OM_WSGROUP, chain));
  9447. }
  9448. //
  9449. // Set up caller's pointer:
  9450. //
  9451. *ppWSGroup = pWSGroup;
  9452. DebugExitVOID(WSGRecordFind);
  9453. }
  9454. //
  9455. // AddClientToWSGList(...)
  9456. //
  9457. UINT AddClientToWSGList
  9458. (
  9459. PUT_CLIENT putTask,
  9460. POM_WSGROUP pWSGroup,
  9461. OM_WSGROUP_HANDLE hWSGroup,
  9462. UINT mode
  9463. )
  9464. {
  9465. POM_CLIENT_LIST pClientListEntry;
  9466. UINT count;
  9467. UINT rc = 0;
  9468. DebugEntry(AddClientToWSGList);
  9469. //
  9470. // Count the number of local primaries registered with the workset
  9471. // group:
  9472. //
  9473. count = 0;
  9474. pClientListEntry = (POM_CLIENT_LIST)COM_BasedListFirst(&(pWSGroup->clients), FIELD_OFFSET(OM_CLIENT_LIST, chain));
  9475. while (pClientListEntry != NULL)
  9476. {
  9477. if (pClientListEntry->mode == PRIMARY)
  9478. {
  9479. count++;
  9480. }
  9481. pClientListEntry = (POM_CLIENT_LIST)COM_BasedListNext(&(pWSGroup->clients), pClientListEntry,
  9482. FIELD_OFFSET(OM_CLIENT_LIST, chain));
  9483. }
  9484. //
  9485. // What we do now depends on whether this is a primary or a secondary
  9486. // registration:
  9487. //
  9488. if (mode == PRIMARY)
  9489. {
  9490. //
  9491. // If a primary, check that no other primaries are present:
  9492. //
  9493. if (count > 0)
  9494. {
  9495. ERROR_OUT(("Can't register TASK 0x%08x with WSG %d as primary: "
  9496. "another primary is already registered",
  9497. putTask, pWSGroup->wsg));
  9498. rc = OM_RC_TOO_MANY_CLIENTS;
  9499. DC_QUIT;
  9500. }
  9501. else
  9502. {
  9503. TRACE_OUT(("%hu primary Clients already registered with WSG %d",
  9504. count, pWSGroup->wsg));
  9505. }
  9506. }
  9507. else // mode == SECONDARY
  9508. {
  9509. if (count == 0)
  9510. {
  9511. WARNING_OUT(("Can't register TASK 0x%08x with WSG %d as secondary: "
  9512. "no primary registered",
  9513. putTask, pWSGroup->wsg));
  9514. rc = OM_RC_NO_PRIMARY;
  9515. DC_QUIT;
  9516. }
  9517. }
  9518. //
  9519. // OK, allocate some memory for the Client's entry in the list:
  9520. //
  9521. pClientListEntry = (POM_CLIENT_LIST)UT_MallocRefCount(sizeof(OM_CLIENT_LIST), TRUE);
  9522. if (!pClientListEntry)
  9523. {
  9524. rc = UT_RC_NO_MEM;
  9525. DC_QUIT;
  9526. }
  9527. SET_STAMP(pClientListEntry, CLIENTLIST);
  9528. pClientListEntry->putTask = putTask;
  9529. pClientListEntry->hWSGroup = hWSGroup;
  9530. pClientListEntry->mode = (WORD)mode;
  9531. COM_BasedListInsertBefore(&(pWSGroup->clients), &(pClientListEntry->chain));
  9532. TRACE_OUT(("Added TASK 0x%08x to Client list for WSG %d as %s",
  9533. putTask, pWSGroup->wsg,
  9534. mode == PRIMARY ? "primary" : "secondary"));
  9535. DC_EXIT_POINT:
  9536. DebugExitDWORD(AddClientToWSGList, rc);
  9537. return(rc);
  9538. }
  9539. //
  9540. // FindPersonObject(...)
  9541. //
  9542. void FindPersonObject
  9543. (
  9544. POM_WORKSET pOMCWorkset,
  9545. NET_UID userID,
  9546. UINT searchType,
  9547. POM_OBJECT * ppObjReg
  9548. )
  9549. {
  9550. BOOL found = FALSE;
  9551. POM_OBJECT pObj;
  9552. POM_WSGROUP_REG_REC pRegObject;
  9553. UINT rc = 0;
  9554. DebugEntry(FindPersonObject);
  9555. TRACE_OUT(("Searching OMC workset %u for reg obj %sowned by node 0x%08x",
  9556. pOMCWorkset->worksetID, searchType == FIND_THIS ? "" : "not ", userID));
  9557. pObj = (POM_OBJECT)COM_BasedListFirst(&(pOMCWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  9558. while (pObj != NULL)
  9559. {
  9560. ValidateObject(pObj);
  9561. if (pObj->flags & DELETED)
  9562. {
  9563. // Do nothing
  9564. }
  9565. else if (!pObj->pData)
  9566. {
  9567. ERROR_OUT(("FindPersonObject: object 0x%08x has no data", pObj));
  9568. }
  9569. else
  9570. {
  9571. ValidateObjectData(pObj->pData);
  9572. pRegObject = (POM_WSGROUP_REG_REC)pObj->pData;
  9573. if (pRegObject->idStamp == OM_WSGREGREC_ID_STAMP)
  9574. {
  9575. if (((searchType == FIND_THIS) &&
  9576. (pRegObject->userID == userID)) ||
  9577. ((searchType == FIND_OTHERS) &&
  9578. (pRegObject->userID != userID)))
  9579. {
  9580. //
  9581. // Got it:
  9582. //
  9583. found = TRUE;
  9584. break;
  9585. }
  9586. }
  9587. }
  9588. pObj = (POM_OBJECT)COM_BasedListNext(&(pOMCWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  9589. }
  9590. if (found == TRUE)
  9591. {
  9592. *ppObjReg = pObj;
  9593. }
  9594. else
  9595. {
  9596. if (searchType == FIND_THIS)
  9597. {
  9598. TRACE_OUT(("No reg object found for node 0x%08x in workset %u",
  9599. userID, pOMCWorkset->worksetID));
  9600. }
  9601. *ppObjReg = NULL;
  9602. }
  9603. DebugExitVOID(FindPersonObject);
  9604. }
  9605. //
  9606. // PostWorksetNewEvents(...)
  9607. //
  9608. UINT PostWorksetNewEvents
  9609. (
  9610. PUT_CLIENT putFrom,
  9611. PUT_CLIENT putTo,
  9612. POM_WSGROUP pWSGroup,
  9613. OM_WSGROUP_HANDLE hWSGroup
  9614. )
  9615. {
  9616. OM_WORKSET_ID worksetID;
  9617. OM_EVENT_DATA16 eventData16;
  9618. POM_WORKSET pWorkset;
  9619. UINT count;
  9620. UINT rc = 0;
  9621. DebugEntry(PostWorksetNewEvents);
  9622. TRACE_OUT(("Posting WORKSET_NEW events to Client TASK 0x%08x for WSG %d",
  9623. putTo, pWSGroup->wsg));
  9624. count = 0;
  9625. for (worksetID = 0; worksetID < OM_MAX_WORKSETS_PER_WSGROUP; worksetID++)
  9626. {
  9627. pWorkset = pWSGroup->apWorksets[worksetID];
  9628. if (pWorkset != NULL)
  9629. {
  9630. eventData16.hWSGroup = hWSGroup;
  9631. eventData16.worksetID = worksetID;
  9632. UT_PostEvent(putFrom, putTo, 0,
  9633. OM_WORKSET_NEW_IND,
  9634. *(PUINT) &eventData16,
  9635. 0);
  9636. count++;
  9637. }
  9638. }
  9639. TRACE_OUT(("Posted %hu WORKSET_NEW events (hWSGroup: %hu)", count,
  9640. hWSGroup));
  9641. DebugExitDWORD(PostWorksetNewEvents, rc);
  9642. return(rc);
  9643. }
  9644. //
  9645. // OM_Register(...)
  9646. //
  9647. UINT OM_Register
  9648. (
  9649. PUT_CLIENT putTask,
  9650. OMCLI omType,
  9651. POM_CLIENT * ppomClient
  9652. )
  9653. {
  9654. POM_CLIENT pomClient = NULL;
  9655. UINT rc = 0;
  9656. DebugEntry(OM_Register);
  9657. UT_Lock(UTLOCK_OM);
  9658. if (!g_pomPrimary)
  9659. {
  9660. ERROR_OUT(("OM_Register failed; primary doesn't exist"));
  9661. DC_QUIT;
  9662. }
  9663. ValidateOMP(g_pomPrimary);
  9664. ASSERT(omType >= OMCLI_FIRST);
  9665. ASSERT(omType < OMCLI_MAX);
  9666. //
  9667. // Make sure this task isn't registered as an OM client
  9668. //
  9669. pomClient = &(g_pomPrimary->clients[omType]);
  9670. if (pomClient->putTask)
  9671. {
  9672. ERROR_OUT(("OM secondary %d already exists", omType));
  9673. pomClient = NULL;
  9674. rc = OM_RC_ALREADY_REGISTERED;
  9675. DC_QUIT;
  9676. }
  9677. // Bump up ref count on OM primary
  9678. UT_BumpUpRefCount(g_pomPrimary);
  9679. //
  9680. // Fill in the client info
  9681. //
  9682. ZeroMemory(pomClient, sizeof(*pomClient));
  9683. SET_STAMP(pomClient, OCLIENT);
  9684. pomClient->putTask = putTask;
  9685. COM_BasedListInit(&(pomClient->locks));
  9686. //
  9687. // Register an exit procedure for cleanup
  9688. //
  9689. UT_RegisterExit(putTask, OMSExitProc, pomClient);
  9690. pomClient->exitProcReg = TRUE;
  9691. //
  9692. // Register our hidden event handler for the Client (the parameter to be
  9693. // passed to the event handler is the pointer to the Client record):
  9694. //
  9695. UT_RegisterEvent(putTask, OMSEventHandler, pomClient, UT_PRIORITY_OBMAN);
  9696. pomClient->hiddenHandlerReg = TRUE;
  9697. DC_EXIT_POINT:
  9698. *ppomClient = pomClient;
  9699. UT_Unlock(UTLOCK_OM);
  9700. DebugExitDWORD(OM_Register, rc);
  9701. return(rc);
  9702. }
  9703. //
  9704. // OM_Deregister()
  9705. //
  9706. void OM_Deregister(POM_CLIENT * ppomClient)
  9707. {
  9708. DebugEntry(OM_Deregister);
  9709. ASSERT(ppomClient);
  9710. OMSExitProc(*ppomClient);
  9711. *ppomClient = NULL;
  9712. DebugExitVOID(OM_Deregister);
  9713. }
  9714. //
  9715. // OMSExitProc(...)
  9716. //
  9717. void CALLBACK OMSExitProc(LPVOID uData)
  9718. {
  9719. POM_CLIENT pomClient = (POM_CLIENT)uData;
  9720. OM_WSGROUP_HANDLE hWSGroup;
  9721. OM_WSGROUP_HANDLE hWSGroupTemp;
  9722. DebugEntry(OMSecExitProc);
  9723. UT_Lock(UTLOCK_OM);
  9724. ValidateOMS(pomClient);
  9725. // Deregister the event handler and exit procedure (we do this early and
  9726. // clear the flags since we want to avoid recursive abends):
  9727. //
  9728. if (pomClient->hiddenHandlerReg)
  9729. {
  9730. UT_DeregisterEvent(pomClient->putTask, OMSEventHandler, pomClient);
  9731. pomClient->hiddenHandlerReg = FALSE;
  9732. }
  9733. if (pomClient->exitProcReg)
  9734. {
  9735. UT_DeregisterExit(pomClient->putTask, OMSExitProc, pomClient);
  9736. pomClient->exitProcReg = FALSE;
  9737. }
  9738. //
  9739. // Deregister the Client from any workset groups with which it is still
  9740. // registered.
  9741. //
  9742. // The code works as follows:
  9743. //
  9744. // FOR each record in the apUsageRecs array
  9745. // IF there is a valid offset there it refers to a registered
  9746. // workset group so deregister it.
  9747. //
  9748. TRACE_OUT(("Checking Client record for active workset group handles"));
  9749. for (hWSGroup = 0; hWSGroup < OMWSG_MAXPERCLIENT; hWSGroup++)
  9750. {
  9751. if ((pomClient->apUsageRecs[hWSGroup] != NULL) &&
  9752. (pomClient->apUsageRecs[hWSGroup] != (POM_USAGE_REC)-1))
  9753. {
  9754. //
  9755. // Need to copy hWSGroup into a temporary variable, since
  9756. // OM_WSGroupDeregister will set it to zero and that would
  9757. // mess up our for-loop otherwise:
  9758. //
  9759. hWSGroupTemp = hWSGroup;
  9760. OM_WSGroupDeregister(pomClient, &hWSGroupTemp);
  9761. }
  9762. }
  9763. //
  9764. // NULL out the task; that's how the OM primary knows the task is
  9765. // present or not.
  9766. //
  9767. pomClient->putTask = NULL;
  9768. UT_FreeRefCount((void**)&g_pomPrimary, TRUE);
  9769. UT_Unlock(UTLOCK_OM);
  9770. DebugExitVOID(OMSExitProc);
  9771. }
  9772. //
  9773. // OMSEventHandler(...)
  9774. //
//
// OMSEventHandler(...)
//
// Hidden event handler registered on behalf of every OM client.  Its main
// job is filtering: ObMan guarantees not to deliver stale events (e.g. a
// workset-open confirm for a workset the client has since closed, or an
// object event for a workset group it has deregistered from), and this
// function enforces that guarantee by swallowing any event whose workset
// group handle / workset / object is no longer valid.
//
// Parameters:
//   uData       - the POM_CLIENT this handler was registered with
//   event       - the event code; non-ObMan events are passed through
//   eventParam1 - packed OM_EVENT_DATA16 (hWSGroup + worksetID)
//   eventParam2 - either packed OM_EVENT_DATA32 (correlator + result) or a
//                 POM_OBJECT pointer, depending on the event
//
// Returns TRUE if the event was swallowed (processed), FALSE to let it be
// delivered to the client.
//
BOOL CALLBACK OMSEventHandler
(
    LPVOID uData,
    UINT event,
    UINT_PTR eventParam1,
    UINT_PTR eventParam2
)
{
    POM_CLIENT pomClient = (POM_CLIENT)uData;
    OM_WSGROUP_HANDLE hWSGroup;
    OM_WORKSET_ID worksetID;
    POM_OBJECT pObj;
    UINT correlator;
    POM_PENDING_OP pPendingOp = NULL;
    POM_LOCK pLock;
    POM_WORKSET pWorkset;
    UINT result;
    POM_USAGE_REC pUsageRec;
    OM_OPERATION_TYPE type = NULL_OP;
    BOOL ObjectEvent = FALSE;
    BOOL processed = FALSE;

    DebugEntry(OMSEventHandler);

    UT_Lock(UTLOCK_OM);

    ValidateOMS(pomClient);

    //
    // First check if this is an ObMan event:
    //
    if ((event < OM_BASE_EVENT) || (event > OM_LAST_EVENT))
    {
        DC_QUIT;
    }

    TRACE_OUT(("Processing ObMan event %d (param1: 0x%08x, param2: 0x%08x)",
        event, eventParam1, eventParam2));

    //
    // Extract the fields from the event parameters (some or all of these
    // will be unused, depending on which event this is).  Note that
    // <correlator>/<result> and <pObj> are alternative views of
    // eventParam2 - only one of them is meaningful for a given event.
    //
    hWSGroup   = (*(POM_EVENT_DATA16)&eventParam1).hWSGroup;
    worksetID  = (*(POM_EVENT_DATA16)&eventParam1).worksetID;
    correlator = (*(POM_EVENT_DATA32)&eventParam2).correlator;
    result     = (*(POM_EVENT_DATA32)&eventParam2).result;
    pObj       = (POM_OBJECT) eventParam2;

    //
    // ObMan guarantees not to deliver out of date events to client e.g.
    // workset open events for a workset it has since closed, or object add
    // events for a workset group from which it has deregistered.
    //
    // Filtering these events is the main purpose of this hidden handler
    // function; we check each event and if the workset group handle or
    // object handle are invalid or if the workset is closed, we swallow the
    // event.
    //
    switch (event)
    {
        case OM_OUT_OF_RESOURCES_IND:
        {
            //
            // Do nothing - always delivered to the client.
            //
        }
        break;

        case OM_WSGROUP_REGISTER_CON:
        {
            //
            // Mark this workset group as valid for our client.
            //
            pomClient->wsgValid[hWSGroup] = TRUE;
            ASSERT(ValidWSGroupHandle(pomClient, hWSGroup));

            pUsageRec = pomClient->apUsageRecs[hWSGroup];

            TRACE_OUT(("REGISTER_CON arrived for wsg %d (result %u, hWSGroup %u)",
                pUsageRec->pWSGroup->wsg, result, hWSGroup));

            if (result != 0)
            {
                //
                // The registration has failed, so call WSGroupDeregister to
                // free up all the resources, then quit:
                //
                WARNING_OUT(("Registration failed for wsg %d, deregistering",
                    pUsageRec->pWSGroup->wsg));
                OM_WSGroupDeregister(pomClient, &hWSGroup);
                DC_QUIT;
            }
        }
        break;

        case OMINT_EVENT_WSGROUP_DEREGISTER:
        {
            //
            // This event is designed to flush the Client's message queue of
            // all events relating to a particular workset group handle.
            //
            // Because this event has arrived, we know there are no more
            // events containing this workset group handle in the queue, so
            // we can safely mark the handle for re-use:
            //
            // So, do a quick sanity check then reset the slot in the array
            // of usage record offsets:
            //
            ASSERT(!pomClient->wsgValid[hWSGroup]);

            TRACE_OUT(("Got WSGROUP_DEREGISTER back marker event for "
                "hWSGroup %u, marking handle as ready for re-use", hWSGroup));
            pomClient->apUsageRecs[hWSGroup] = NULL;

            //
            // ...and swallow the event:
            //
            processed = TRUE;
        }
        break;

        case OM_WSGROUP_MOVE_CON:
        case OM_WSGROUP_MOVE_IND:
        case OM_WORKSET_NEW_IND:
        {
            //
            // Swallow if the workset group handle is no longer valid.
            //
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;

        case OM_WORKSET_OPEN_CON:
        {
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }

            //
            // Else mark the workset as open:
            //
            pUsageRec = pomClient->apUsageRecs[hWSGroup];

            TRACE_OUT(("Marking workset %u in wsg %d open for Client 0x%08x",
                worksetID, pUsageRec->pWSGroup->wsg, pomClient));
            WORKSET_SET_OPEN(pUsageRec, worksetID);
        }
        break;

        case OM_WORKSET_UNLOCK_IND:
        {
            //
            // Swallow if the workset group handle is invalid or the workset
            // has since been closed.
            //
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }

            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            if (!WORKSET_IS_OPEN(pUsageRec, worksetID))
            {
                TRACE_OUT(("Workset %u in wsg %d no longer open; ignoring event %d",
                    worksetID, pUsageRec->pWSGroup->wsg, event));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;

        case OM_WORKSET_CLEAR_IND:
        {
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }

            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            if (!WORKSET_IS_OPEN(pUsageRec, worksetID))
            {
                TRACE_OUT(("Workset %u in wsg %d no longer open; ignoring event %d",
                    worksetID, pUsageRec->pWSGroup->wsg, event));
                processed = TRUE;
                DC_QUIT;
            }

            //
            // Check if Clear still pending; quit if not:
            //
            pWorkset = pUsageRec->pWSGroup->apWorksets[worksetID];
            ASSERT((pWorkset != NULL));

            FindPendingOp(pWorkset, pObj, WORKSET_CLEAR, &pPendingOp);

            if (pPendingOp == NULL)
            {
                TRACE_OUT(("Clear already confirmed for workset %hu", worksetID));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;

        case OM_WORKSET_LOCK_CON:
        {
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }

            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            if (!WORKSET_IS_OPEN(pUsageRec, worksetID))
            {
                TRACE_OUT(("Workset %u in wsg %d no longer open; ignoring event %d",
                    worksetID, pUsageRec->pWSGroup->wsg, event));
                processed = TRUE;
                DC_QUIT;
            }

            //
            // Search for the lock on the lock stack:
            //
            COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pomClient->locks),
                (void**)&pLock, FIELD_OFFSET(OM_LOCK, chain),
                FIELD_OFFSET(OM_LOCK, worksetID), (DWORD)worksetID,
                FIELD_SIZE(OM_LOCK, worksetID));

            //
            // If the lock is not present on the lock stack, then the Client
            // must have called Unlock since it called LockReq.  So, we
            // swallow the event:
            //
            if (pLock == NULL)
            {
                TRACE_OUT(("Lock already cancelled for workset %hu", worksetID));
                processed = TRUE;
                DC_QUIT;
            }

            //
            // When object locking supported, the first lock which matches
            // on worksetID might not be the workset lock, so more code will
            // be needed here then.  In the meantime, just assert:
            //
            ASSERT((OBJECT_ID_IS_NULL(pLock->objectID)));

            //
            // If lock request failed, remove the lock from the Client's
            // lock stack:
            //
            if (result != 0)
            {
                TRACE_OUT(("Lock failed; removing lock from Client's lock stack"));
                COM_BasedListRemove(&pLock->chain);
                UT_FreeRefCount((void**)&pLock, FALSE);
            }
        }
        break;

        case OM_OBJECT_ADD_IND:
        case OM_OBJECT_MOVE_IND:
        {
            // eventParam2 carries an object pointer for these events; the
            // exit path must drop the ref count that was bumped at post time.
            ObjectEvent = TRUE;

            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }

            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            if (!WORKSET_IS_OPEN(pUsageRec, worksetID))
            {
                TRACE_OUT(("Workset %u in wsg %d no longer open; ignoring event %d",
                    worksetID, pUsageRec->pWSGroup->wsg, event));
                processed = TRUE;
                DC_QUIT;
            }

            if (!ValidObject(pObj) || (pObj->flags & DELETED))
            {
                processed = TRUE;
                DC_QUIT;
            }

            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            pWorkset = pUsageRec->pWSGroup->apWorksets[worksetID];
            ASSERT((pWorkset != NULL));

            if (WorksetClearPending(pWorkset, pObj))
            {
                TRACE_OUT(("Event %hu for object 0x%08x will be swallowed since "
                    "object about to be cleared from the workset",
                    event, pObj));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;

        case OM_OBJECT_DELETE_IND:
        case OM_OBJECT_REPLACE_IND:
        case OM_OBJECT_UPDATE_IND:
        {
            ObjectEvent = TRUE;

            // Map the event code onto the pending-op type we must look for.
            switch (event)
            {
                case OM_OBJECT_DELETE_IND:
                    type = OBJECT_DELETE;
                    break;

                case OM_OBJECT_REPLACE_IND:
                    type = OBJECT_REPLACE;
                    break;

                case OM_OBJECT_UPDATE_IND:
                    type = OBJECT_UPDATE;
                    break;

                default:
                    ERROR_OUT(("Reached default case in switch"));
            }

            //
            // Check workset group handle is still valid, workset is still
            // open and object handle is still valid; if not, swallow event:
            //
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }

            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            if (!WORKSET_IS_OPEN(pUsageRec, worksetID))
            {
                TRACE_OUT(("Workset %u in wsg %d no longer open; ignoring event %d",
                    worksetID, pUsageRec->pWSGroup->wsg, event));
                processed = TRUE;
                DC_QUIT;
            }

            //
            // We also want to quit if the object is no longer valid or if
            // there is a clear pending (just as for ADD/MOVE) but if we do
            // so, we will also need to remove the pending op from the list.
            // So, find the op now; if we quit and swallow the event, the
            // function exit code will do the remove (this saves having to
            // break up the QUIT_IF... macros for this special case).
            //
            // So, check the pending op list:
            //
            pWorkset = pUsageRec->pWSGroup->apWorksets[worksetID];
            ASSERT((pWorkset != NULL));

            FindPendingOp(pWorkset, pObj, type, &pPendingOp);

            if (pPendingOp == NULL)
            {
                TRACE_OUT(("Operation type %hu already confirmed for object 0x%08x",
                    type, pObj));
                processed = TRUE;
                DC_QUIT;
            }

            if (!ValidObject(pObj) || (pObj->flags & DELETED))
            {
                processed = TRUE;
                DC_QUIT;
            }

            if (WorksetClearPending(pWorkset, pObj))
            {
                TRACE_OUT(("Event %hu for object 0x%08x will be swallowed since "
                    "object about to be cleared from the workset",
                    event, pObj));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;

        case OM_WORKSET_CLEARED_IND:
        case OM_OBJECT_DELETED_IND:
        case OM_OBJECT_UPDATED_IND:
        case OM_OBJECT_REPLACED_IND:
        {
            //
            // All of these except the CLEARED_IND are object events:
            //
            if (event != OM_WORKSET_CLEARED_IND)
            {
                ObjectEvent = TRUE;
            }

            //
            // These are secondary API events.  Swallow them if the workset
            // is closed, but DO NOT swallow if object handle invalid (since
            // we don't make guarantees about validity of handles passed in
            // these events):
            //
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }

            pUsageRec = pomClient->apUsageRecs[hWSGroup];
            if (!WORKSET_IS_OPEN(pUsageRec, worksetID))
            {
                TRACE_OUT(("Workset %u in WSG %d no longer open; ignoring event %d",
                    worksetID, pUsageRec->pWSGroup->wsg, event));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;

        case OM_PERSON_JOINED_IND:
        case OM_PERSON_LEFT_IND:
        case OM_PERSON_DATA_CHANGED_IND:
        {
            //
            // Person events are filtered only on workset group validity.
            //
            if (!ValidWSGroupHandle(pomClient, hWSGroup))
            {
                TRACE_OUT(("hWSGroup %d is not valid; ignoring event %d",
                    hWSGroup, event));
                processed = TRUE;
                DC_QUIT;
            }
        }
        break;

        default:
        {
            ERROR_OUT(("Unrecognised ObMan event 0x%08x", event));
        }
    }

DC_EXIT_POINT:

    //
    // Whenever an event containing an object handle is posted, the use
    // count of the object record is bumped, so we free it now:
    //
    if (ObjectEvent)
    {
        ValidateObject(pObj);
        UT_FreeRefCount((void**)&pObj, FALSE);
    }

    UT_Unlock(UTLOCK_OM);
    DebugExitBOOL(OMSEventHandler, processed);
    return(processed);
}
  10193. //
  10194. // OM_WSGroupRegisterS(...)
  10195. //
  10196. UINT OM_WSGroupRegisterS
  10197. (
  10198. POM_CLIENT pomClient,
  10199. UINT callID,
  10200. OMFP fpHandler,
  10201. OMWSG wsg,
  10202. OM_WSGROUP_HANDLE * phWSGroup
  10203. )
  10204. {
  10205. POM_DOMAIN pDomain;
  10206. POM_WSGROUP pWSGroup;
  10207. POM_USAGE_REC pUsageRec;
  10208. POM_CLIENT_LIST pClientListEntry;
  10209. BOOL setUpUsageRec = FALSE;
  10210. UINT rc = 0;
  10211. DebugEntry(OM_WSGroupRegisterS);
  10212. UT_Lock(UTLOCK_OM);
  10213. //
  10214. // Validate params:
  10215. //
  10216. ValidateOMS(pomClient);
  10217. //
  10218. // Search for this Domain and workset group:
  10219. //
  10220. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(g_pomPrimary->domains),
  10221. (void**)&pDomain, FIELD_OFFSET(OM_DOMAIN, chain),
  10222. FIELD_OFFSET(OM_DOMAIN, callID), (DWORD)callID,
  10223. FIELD_SIZE(OM_DOMAIN, callID));
  10224. if (pDomain == NULL)
  10225. {
  10226. //
  10227. // We don't have a record for this Domain so there can be no primary
  10228. // registered with the workset group:
  10229. //
  10230. TRACE_OUT(("Not attached to Domain %u", callID));
  10231. rc = OM_RC_NO_PRIMARY;
  10232. DC_QUIT;
  10233. }
  10234. WSGRecordFind(pDomain, wsg, fpHandler, &pWSGroup);
  10235. if (pWSGroup == NULL)
  10236. {
  10237. rc = OM_RC_NO_PRIMARY;
  10238. DC_QUIT;
  10239. }
  10240. //
  10241. // If we get here, then the workset group exists locally so see if the
  10242. // Client is already registered with it:
  10243. //
  10244. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pWSGroup->clients),
  10245. (void**)&pClientListEntry, FIELD_OFFSET(OM_CLIENT_LIST, chain),
  10246. FIELD_OFFSET(OM_CLIENT_LIST, putTask), (DWORD_PTR)pomClient->putTask,
  10247. FIELD_SIZE(OM_CLIENT_LIST, putTask));
  10248. if (pClientListEntry != NULL)
  10249. {
  10250. rc = OM_RC_ALREADY_REGISTERED;
  10251. ERROR_OUT(("Can't register Client 0x%08x with WSG %d - already registered",
  10252. pomClient, wsg));
  10253. DC_QUIT;
  10254. }
  10255. //
  10256. // OK, Client is not already registered so register it now:
  10257. //
  10258. rc = SetUpUsageRecord(pomClient, SECONDARY, &pUsageRec, phWSGroup);
  10259. if (rc != 0)
  10260. {
  10261. DC_QUIT;
  10262. }
  10263. //
  10264. // SetUpUsageRecord doesn't put the workset group pointer in the CB
  10265. // (since it's not known yet in the case of a PRIMARY registration), so
  10266. // we do this now ourselves:
  10267. //
  10268. pUsageRec->pWSGroup = pWSGroup;
  10269. setUpUsageRec = TRUE;
  10270. //
  10271. // add this Client to the workset group's Client list:
  10272. //
  10273. rc = AddClientToWSGList(pomClient->putTask,
  10274. pWSGroup,
  10275. *phWSGroup,
  10276. SECONDARY);
  10277. if (rc != 0)
  10278. {
  10279. DC_QUIT;
  10280. }
  10281. pUsageRec->flags |= ADDED_TO_WSGROUP_LIST;
  10282. pomClient->wsgValid[*phWSGroup] = TRUE;
  10283. //
  10284. // Post WORKSET_NEW events to the Client for the worksets in the group,
  10285. // if any:
  10286. //
  10287. PostWorksetNewEvents(pomClient->putTask, pomClient->putTask,
  10288. pWSGroup, *phWSGroup);
  10289. TRACE_OUT(("Registered 0x%08x as secondary Client for WSG %d (hWSGroup: %hu)",
  10290. pomClient, wsg, *phWSGroup));
  10291. DC_EXIT_POINT:
  10292. if (rc != 0)
  10293. {
  10294. if (rc == OM_RC_NO_PRIMARY)
  10295. {
  10296. //
  10297. // We do a regular trace here rather than an error because this
  10298. // happens normally:
  10299. //
  10300. TRACE_OUT(("No primary Client for WSG %d in Domain %u "
  10301. "- can't register secondary", wsg, callID));
  10302. }
  10303. else
  10304. {
  10305. ERROR_OUT(("Error %d registering Client 0x%08x as secondary"
  10306. "for WSG %d in Domain %u",
  10307. rc, pomClient, wsg, callID));
  10308. }
  10309. if (setUpUsageRec == TRUE)
  10310. {
  10311. pomClient->apUsageRecs[*phWSGroup] = NULL;
  10312. if (pUsageRec->flags & ADDED_TO_WSGROUP_LIST)
  10313. {
  10314. RemoveClientFromWSGList(pomClient->putTask, pomClient->putTask, pWSGroup);
  10315. }
  10316. UT_FreeRefCount((void**)&pUsageRec, FALSE);
  10317. }
  10318. pomClient->wsgValid[*phWSGroup] = FALSE;
  10319. }
  10320. UT_Unlock(UTLOCK_OM);
  10321. DebugExitDWORD(OM_WSGroupRegisterS, rc);
  10322. return(rc);
  10323. }
  10324. //
  10325. // OM_WorksetOpenS(...)
  10326. //
//
// OM_WorksetOpenS(...)
//
// Opens workset <worksetID> in the workset group <hWSGroup> for a
// SECONDARY client.  Unlike a primary open, a secondary open never
// creates the workset: if it does not already exist locally,
// OM_RC_WORKSET_DOESNT_EXIST is returned.
//
// On success the workset is marked open in the client's usage record, the
// client is added to the workset's client list, and OBJECT_ADD events are
// posted for the objects already in the workset.
//
// Returns: 0, OM_RC_WORKSET_ALREADY_OPEN (non-error),
// OM_RC_WORKSET_DOESNT_EXIST, or an error from
// AddClientToWsetList/PostAddEvents.
//
UINT OM_WorksetOpenS
(
    POM_CLIENT pomClient,
    OM_WSGROUP_HANDLE hWSGroup,
    OM_WORKSET_ID worksetID
)
{
    POM_WSGROUP pWSGroup;
    POM_WORKSET pWorkset;
    POM_USAGE_REC pUsageRec;
    POM_CLIENT_LIST pClientListEntry = NULL;
    UINT rc = 0;

    DebugEntry(OM_WorksetOpenS);

    UT_Lock(UTLOCK_OM);

    //
    // Validate params (also fills in pUsageRec and pWSGroup for this
    // client/handle pair, asserting SECONDARY mode):
    //
    ValidateParams2(pomClient, hWSGroup, SECONDARY, &pUsageRec, &pWSGroup);

    TRACE_OUT(("Secondary Client 0x%08x requesting to open workset %u in WSG %d",
        pomClient, worksetID, pWSGroup->wsg));

    //
    // If the Client already has this workset open then return a (non-error)
    // return code:
    //
    if (WORKSET_IS_OPEN(pUsageRec, worksetID) == TRUE)
    {
        TRACE_OUT(("Client 0x%08x already has workset %u in WSG %d open",
            pomClient, worksetID, pWSGroup->wsg));
        rc = OM_RC_WORKSET_ALREADY_OPEN;
        DC_QUIT;
    }

    //
    // Check workset group record to see if workset exists:
    //
    if (pWSGroup->apWorksets[worksetID] == NULL)
    {
        //
        // Workset doesn't exist so return bad rc:
        //
        WARNING_OUT(("Workset %hu doesn't exist in WSG %d",
            worksetID, pWSGroup->wsg));
        rc = OM_RC_WORKSET_DOESNT_EXIST;
        DC_QUIT;
    }
    else
    {
        //
        // Workset already exists, so we don't need to do anything.
        //
        TRACE_OUT((" Workset %hu in WSG %d already exists",
            worksetID, pWSGroup->wsg));
    }

    //
    // If the workset didn't already exist, queueing the send instruction
    // will have caused the workset to be created syncrhonously.  So, either
    // way the workset exists at this point.
    //

    //
    // Get a pointer to the workset:
    //
    pWorkset = pWSGroup->apWorksets[worksetID];
    ASSERT((pWorkset != NULL));

    //
    // Mark this workset as open in the Client's usage record:
    //
    WORKSET_SET_OPEN(pUsageRec, worksetID);

    //
    // Add this Client to the list kept in the workset record:
    //
    rc = AddClientToWsetList(pomClient->putTask,
                             pWorkset,
                             hWSGroup,
                             pUsageRec->mode,
                             &pClientListEntry);
    if (rc != 0)
    {
        DC_QUIT;
    }

    // Tell the client about the objects already present in the workset.
    rc = PostAddEvents(pomClient->putTask, pWorkset, hWSGroup, pomClient->putTask);
    if (rc != 0)
    {
        DC_QUIT;
    }

    TRACE_OUT(("Opened workset %u in WSG %d for secondary Client 0x%08x",
        worksetID, pWSGroup->wsg, pomClient));

DC_EXIT_POINT:
    if ((rc != 0) && (rc != OM_RC_WORKSET_ALREADY_OPEN))
    {
        //
        // Cleanup: undo the open mark and remove the client list entry if
        // it was added.
        //
        ERROR_OUT(("Error %d opening workset %u in WSG %d for Client 0x%08x",
            rc, worksetID, pWSGroup->wsg, pomClient));

        WORKSET_SET_CLOSED(pUsageRec, worksetID);

        if (pClientListEntry != NULL)
        {
            COM_BasedListRemove(&(pClientListEntry->chain));
            UT_FreeRefCount((void**)&pClientListEntry, FALSE);
        }
    }

    UT_Unlock(UTLOCK_OM);
    DebugExitDWORD(OM_WorksetOpenS, rc);
    return(rc);
}
  10431. //
  10432. // OM_WSGroupRegisterPReq(...)
  10433. //
  10434. UINT OM_WSGroupRegisterPReq
  10435. (
  10436. POM_CLIENT pomClient,
  10437. UINT callID,
  10438. OMFP fpHandler,
  10439. OMWSG wsg,
  10440. OM_CORRELATOR * pCorrelator
  10441. )
  10442. {
  10443. POM_WSGROUP_REG_CB pRegistrationCB = NULL;
  10444. POM_USAGE_REC pUsageRec;
  10445. OM_WSGROUP_HANDLE hWSGroup;
  10446. BOOL setUpUsageRec = FALSE;
  10447. UINT rc = 0;
  10448. DebugEntry(OM_WSGroupRegisterPReq);
  10449. UT_Lock(UTLOCK_OM);
  10450. ValidateOMS(pomClient);
  10451. //
  10452. // Set up a usage record and workset group handle for the Client:
  10453. //
  10454. rc = SetUpUsageRecord(pomClient, PRIMARY, &pUsageRec, &hWSGroup);
  10455. if (rc != 0)
  10456. {
  10457. DC_QUIT;
  10458. }
  10459. setUpUsageRec = TRUE;
  10460. //
  10461. // Create a new correlator for the Client and put it in the Client's
  10462. // variable:
  10463. //
  10464. *pCorrelator = NextCorrelator(g_pomPrimary);
  10465. //
  10466. // Sub alloc a chunk of memory for the registration control block, in
  10467. // which we will pass the registration request parameters to the ObMan
  10468. // task:
  10469. //
  10470. pRegistrationCB = (POM_WSGROUP_REG_CB)UT_MallocRefCount(sizeof(OM_WSGROUP_REG_CB), TRUE);
  10471. if (!pRegistrationCB)
  10472. {
  10473. rc = UT_RC_NO_MEM;
  10474. DC_QUIT;
  10475. }
  10476. SET_STAMP(pRegistrationCB, REGCB);
  10477. //
  10478. // Fill in the fields, but note that we don't yet know the Domain record
  10479. // or workset group, so we leave those ones blank:
  10480. //
  10481. pRegistrationCB->putTask = pomClient->putTask;
  10482. pRegistrationCB->callID = callID;
  10483. pRegistrationCB->correlator = *pCorrelator;
  10484. pRegistrationCB->hWSGroup = hWSGroup;
  10485. pRegistrationCB->wsg = wsg;
  10486. pRegistrationCB->fpHandler = fpHandler;
  10487. pRegistrationCB->retryCount = OM_REGISTER_RETRY_COUNT_DFLT;
  10488. pRegistrationCB->valid = TRUE;
  10489. pRegistrationCB->type = WSGROUP_REGISTER;
  10490. pRegistrationCB->mode = PRIMARY;
  10491. pRegistrationCB->pUsageRec = pUsageRec;
  10492. //
  10493. // Now put a pointer to the registration CB in the usage record, as
  10494. // described above, and set a flag so we know what we've done:
  10495. //
  10496. pUsageRec->pWSGroup = (POM_WSGROUP) pRegistrationCB;
  10497. pUsageRec->flags |= PWSGROUP_IS_PREGCB;
  10498. //
  10499. // Post an event to the ObMan task telling it to process this CB.
  10500. //
  10501. // The first parameter is the retry value for the event.
  10502. //
  10503. // The second parameter is the offset of the control block in the OMMISC
  10504. // memory block.
  10505. //
  10506. UT_PostEvent(pomClient->putTask, // Client's putTask
  10507. g_pomPrimary->putTask, // ObMan's putTask
  10508. 0,
  10509. OMINT_EVENT_WSGROUP_REGISTER,
  10510. 0,
  10511. (UINT_PTR)pRegistrationCB);
  10512. TRACE_OUT(("Requested to register Client 0x%08x with WSG %d",
  10513. pomClient, wsg));
  10514. DC_EXIT_POINT:
  10515. if (rc != 0)
  10516. {
  10517. ERROR_OUT(("Error 0x%08x registering Client 0x%08x with WSG %d",
  10518. rc, pomClient, wsg));
  10519. if (pRegistrationCB != NULL)
  10520. {
  10521. //
  10522. // We can free the reg CB safely since we know that if we hit an
  10523. // error, we never got around to inserting the item in the list or
  10524. // posting its offset to the ObMan task:
  10525. //
  10526. UT_FreeRefCount((void**)&pRegistrationCB, FALSE);
  10527. }
  10528. if (setUpUsageRec)
  10529. {
  10530. UT_FreeRefCount((void**)&pUsageRec, FALSE);
  10531. pomClient->apUsageRecs[hWSGroup] = NULL;
  10532. }
  10533. }
  10534. UT_Unlock(UTLOCK_OM);
  10535. DebugExitDWORD(OM_WSGroupRegisterPReq, rc);
  10536. return(rc);
  10537. }
  10538. //
  10539. // OM_WSGroupMoveReq(...)
  10540. //
  10541. UINT OM_WSGroupMoveReq
  10542. (
  10543. POM_CLIENT pomClient,
  10544. OM_WSGROUP_HANDLE hWSGroup,
  10545. UINT callID,
  10546. OM_CORRELATOR * pCorrelator
  10547. )
  10548. {
  10549. POM_USAGE_REC pUsageRec;
  10550. POM_WSGROUP pWSGroup;
  10551. POM_DOMAIN pDomain;
  10552. POM_WSGROUP_REG_CB pRegistrationCB = NULL;
  10553. UINT rc = 0;
  10554. DebugEntry(OM_WSGroupMoveReq);
  10555. UT_Lock(UTLOCK_OM);
  10556. ValidateParams2(pomClient, hWSGroup, PRIMARY, &pUsageRec, &pWSGroup);
  10557. TRACE_OUT(("Client 0x%08x requesting to move WSG %d into Domain %u",
  10558. pomClient, hWSGroup, callID));
  10559. //
  10560. // Check workset group is not already in a Call: (this may be relaxed)
  10561. //
  10562. pDomain = pWSGroup->pDomain;
  10563. if (pDomain->callID != OM_NO_CALL)
  10564. {
  10565. ERROR_OUT(("Client 0x%08x attempted to move WSG %d out of a call "
  10566. "(Domain %u)",
  10567. pomClient, hWSGroup, pDomain->callID));
  10568. rc = OM_RC_ALREADY_IN_CALL;
  10569. DC_QUIT;
  10570. }
  10571. //
  10572. // Create a correlator, to correlate the MOVE_CON event:
  10573. //
  10574. *pCorrelator = NextCorrelator(g_pomPrimary);
  10575. //
  10576. // Create a control block to pass the relevant info to ObMan:
  10577. //
  10578. pRegistrationCB = (POM_WSGROUP_REG_CB)UT_MallocRefCount(sizeof(OM_WSGROUP_REG_CB), TRUE);
  10579. if (!pRegistrationCB)
  10580. {
  10581. rc = UT_RC_NO_MEM;
  10582. DC_QUIT;
  10583. }
  10584. SET_STAMP(pRegistrationCB, REGCB);
  10585. //
  10586. // Fill in the fields:
  10587. //
  10588. pRegistrationCB->putTask = pomClient->putTask;
  10589. pRegistrationCB->callID = callID; // DESTINATION Domain!
  10590. pRegistrationCB->correlator = *pCorrelator;
  10591. pRegistrationCB->hWSGroup = hWSGroup;
  10592. pRegistrationCB->wsg = pWSGroup->wsg;
  10593. pRegistrationCB->fpHandler = pWSGroup->fpHandler;
  10594. pRegistrationCB->retryCount = OM_REGISTER_RETRY_COUNT_DFLT;
  10595. pRegistrationCB->valid = TRUE;
  10596. pRegistrationCB->type = WSGROUP_MOVE;
  10597. pRegistrationCB->mode = pUsageRec->mode;
  10598. pRegistrationCB->pWSGroup = pWSGroup;
  10599. //
  10600. // Post an event to ObMan requesting it to process the CB:
  10601. //
  10602. UT_PostEvent(pomClient->putTask,
  10603. g_pomPrimary->putTask,
  10604. 0, // no delay
  10605. OMINT_EVENT_WSGROUP_MOVE,
  10606. 0,
  10607. (UINT_PTR)pRegistrationCB);
  10608. TRACE_OUT(("Requested to move WSG %d into Domain %u for Client 0x%08x",
  10609. hWSGroup, callID, pomClient));
  10610. DC_EXIT_POINT:
  10611. if (rc != 0)
  10612. {
  10613. ERROR_OUT(("Error 0x%08x requesting to move WSG %d into Domain %u",
  10614. rc, hWSGroup, callID));
  10615. if (pRegistrationCB != NULL)
  10616. {
  10617. UT_FreeRefCount((void**)&pRegistrationCB, FALSE);
  10618. }
  10619. }
  10620. UT_Unlock(UTLOCK_OM);
  10621. DebugExitDWORD(OM_WSGroupMoveReq, rc);
  10622. return(rc);
  10623. }
  10624. //
  10625. // OM_WSGroupDeregister(...)
  10626. //
void OM_WSGroupDeregister
(
    POM_CLIENT          pomClient,
    OM_WSGROUP_HANDLE * phWSGroup
)
{
    //
    // Detaches <pomClient> from the workset group identified by
    // *phWSGroup: closes every workset the Client has open in the group,
    // removes the Client from the group's Client list, frees the usage
    // record, invalidates the Client's handle slot and posts a
    // WSGROUP_DEREGISTER event to flush the Client's event queue.
    // On exit *phWSGroup is zeroed.
    //
    POM_WSGROUP       pWSGroup;
    POM_USAGE_REC     pUsageRec;
    OM_WORKSET_ID     worksetID;
    OM_EVENT_DATA16   eventData16;
    OM_WSGROUP_HANDLE hWSGroup;

    DebugEntry(OM_WSGroupDeregister);

    UT_Lock(UTLOCK_OM);

    ValidateOMS(pomClient);

    hWSGroup = *phWSGroup;

    //
    // If this function has been called because of an abortive
    // WSGroupRegister, or from OM_Deregister, the wsg might not yet be
    // marked as VALID, so we check here and set it to VALID.
    //
    if (!pomClient->wsgValid[hWSGroup])
    {
        TRACE_OUT(("Deregistering Client before registration completed"));
        pomClient->wsgValid[hWSGroup] = TRUE;
    }

    // lonchanc: bug #1986, make sure we have a valid wsg.
    // pWSGroup can be invalid in a race condition that we hang up
    // before Whiteboard initializes.
    pUsageRec = NULL; // make sure this local is reset in case we bail out from here.
    if (!ValidWSGroupHandle(pomClient, hWSGroup) ||
        (pomClient->apUsageRecs[hWSGroup] == (POM_USAGE_REC)-1))
    {
        // NOTE(review): the "0x0x" in this message looks like a typo in the
        // original; left untouched here since this edit is comments-only.
        ERROR_OUT(("OM_WSGroupDeregister: Invalid wsg=0x0x%08x", hWSGroup));
        DC_QUIT;
    }

    //
    // Get a pointer to the associated usage record:
    //
    pUsageRec = pomClient->apUsageRecs[hWSGroup];

    //
    // Extract a Client pointer to the workset group from the usage record:
    //
    pWSGroup = pUsageRec->pWSGroup;

    //
    // Test the flag in the usage record to see whether the <pWSGroup> field
    // is actually pointing to the registration CB (which will be the case
    // if we are deregistering immediately after registering):
    //
    if (pUsageRec->flags & PWSGROUP_IS_PREGCB)
    {
        //
        // Mark the registration CB as invalid in order to abort the
        // registration (ObMan will test for this in ProcessWSGRegister):
        //
        // Note: the pWSGroup field of the usage record is actually a pointer
        // to a registration CB in this case
        //
        TRACE_OUT(("Client deregistering before registration even started - aborting"));
        ((POM_WSGROUP_REG_CB)pUsageRec->pWSGroup)->valid = FALSE;
        DC_QUIT;
    }

    //
    // Check the workset group record is valid:
    //
    ValidateWSGroup(pWSGroup);

    //
    // If it is valid, we continue with the deregistration process:
    //
    TRACE_OUT(("Deregistering Client 0x%08x from WSG %d", pomClient, hWSGroup));

    //
    // Close all the worksets in the group that the Client has open:
    //
    for (worksetID = 0; worksetID < OM_MAX_WORKSETS_PER_WSGROUP; worksetID++)
    {
        if (WORKSET_IS_OPEN(pUsageRec, worksetID))
        {
            OM_WorksetClose(pomClient, hWSGroup, worksetID);
        }
    }

    //
    // If we added this Client to the workset group's Client list, find it
    // again and remove it:
    //
    if (pUsageRec->flags & ADDED_TO_WSGROUP_LIST)
    {
        TRACE_OUT(("Removing Client from workset group list"));
        RemoveClientFromWSGList(pomClient->putTask, pomClient->putTask, pWSGroup);
        pUsageRec->flags &= ~ADDED_TO_WSGROUP_LIST;
    }
    else
    {
        TRACE_OUT(("Client not added to wsGroup list, not removing"));
    }

    TRACE_OUT(("Deregistered Client 0x%08x from WSG %d", pomClient, hWSGroup));

DC_EXIT_POINT:
    //
    // Free the usage record (we put this after the DC_QUIT since we want to
    // do this even if the workset group pointer was found to be invalid
    // above):
    //
    // NOTE(review): on the invalid-handle bail-out path pUsageRec is NULL
    // here - presumably UT_FreeRefCount tolerates a NULL pointer; confirm.
    //
    UT_FreeRefCount((void**)&pUsageRec, FALSE);

    //
    // Mark the workset group handle as invalid, so that any events which
    // the Client gets will be swallowed:
    //
    pomClient->wsgValid[hWSGroup] = FALSE;

    //
    // Note: we don't set the slot in the usage record offset array to zero,
    // since we don't want the workset group handle to be reused yet.
    // When the DEREGISTER events arrives (after flushing the Client's
    // event queue), we will set the offset to zero.
    //
    // However, if we leave the offset as it is, OM_Deregister might
    // call us again because it thinks we haven't yet deregistered
    // from the workset group.  So, we set it to -1, which ensures
    // that
    //
    // a) it is seen as in use by FindUnusedWSGHandle, since that
    //    function checks for 0
    //
    // b) it is seen as not in use by OM_Deregister, since that
    //    function checks for 0 or -1.
    //
    pomClient->apUsageRecs[hWSGroup] = (POM_USAGE_REC)-1;

    //
    // Send an OMINT_EVENT_WSGROUP_DEREGISTER event to the hidden handler (which
    // will swallow it) to flush the Client's message queue:
    //
    TRACE_OUT(("Posting WSGROUP_DEREGISTER event to Client's hidden handler"));
    eventData16.hWSGroup  = hWSGroup;
    eventData16.worksetID = 0;
    UT_PostEvent(pomClient->putTask,
                 pomClient->putTask,
                 0,
                 OMINT_EVENT_WSGROUP_DEREGISTER,
                 *(PUINT) &eventData16,
                 0);

    *phWSGroup = 0;

    UT_Unlock(UTLOCK_OM);
    DebugExitVOID(OM_WSGroupDeregister);
}
  10768. //
  10769. // OM_WorksetOpenPReq(...)
  10770. //
  10771. UINT OM_WorksetOpenPReq
  10772. (
  10773. POM_CLIENT pomClient,
  10774. OM_WSGROUP_HANDLE hWSGroup,
  10775. OM_WORKSET_ID worksetID,
  10776. NET_PRIORITY priority,
  10777. BOOL fTemp,
  10778. OM_CORRELATOR * pCorrelator
  10779. )
  10780. {
  10781. POM_WSGROUP pWSGroup;
  10782. POM_WORKSET pWorkset;
  10783. POM_USAGE_REC pUsageRec;
  10784. OM_EVENT_DATA16 eventData16;
  10785. OM_EVENT_DATA32 eventData32;
  10786. POM_CLIENT_LIST pClientListEntry = NULL;
  10787. UINT rc = 0;
  10788. DebugEntry(OM_WorksetOpenPReq);
  10789. UT_Lock(UTLOCK_OM);
  10790. //
  10791. // Validate params:
  10792. //
  10793. ValidateParams2(pomClient, hWSGroup, PRIMARY, &pUsageRec, &pWSGroup);
  10794. TRACE_OUT(("Client 0x%08x opening workset %u in WSG %d at priority 0x%08x",
  10795. pomClient, worksetID, hWSGroup, priority));
  10796. //
  10797. // If the Client already has this workset open then return a (non-error)
  10798. // return code:
  10799. //
  10800. if (WORKSET_IS_OPEN(pUsageRec, worksetID) == TRUE)
  10801. {
  10802. TRACE_OUT(("Client 0x%08x already has workset %hu in WSG %d open",
  10803. pomClient, worksetID, hWSGroup));
  10804. rc = OM_RC_WORKSET_ALREADY_OPEN;
  10805. DC_QUIT;
  10806. }
  10807. //
  10808. // Check the Client has supplied a valid value for <priority>:
  10809. //
  10810. if ((priority < NET_HIGH_PRIORITY) || (priority > NET_LOW_PRIORITY))
  10811. {
  10812. ASSERT((priority == OM_OBMAN_CHOOSES_PRIORITY));
  10813. }
  10814. //
  10815. // Check workset group record to see if workset exists:
  10816. //
  10817. // Note: this check looks to see if the offset to the workset is zero,
  10818. // since workset records never reside at the start of the OMWORKSETS
  10819. // block.
  10820. //
  10821. if (pWSGroup->apWorksets[worksetID] == NULL)
  10822. {
  10823. rc = WorksetCreate(pomClient->putTask, pWSGroup, worksetID, fTemp, priority);
  10824. if (rc != 0)
  10825. {
  10826. DC_QUIT;
  10827. }
  10828. }
  10829. else
  10830. {
  10831. //
  10832. // Workset already exists, so we don't need to do anything.
  10833. //
  10834. TRACE_OUT((" Workset %hu in WSG %d already exists",
  10835. worksetID, hWSGroup));
  10836. }
  10837. //
  10838. // If the workset didn't already exist, queueing the send instruction
  10839. // will have caused the workset to be created syncrhonously. So, either
  10840. // way the workset exists at this point.
  10841. //
  10842. //
  10843. // Get a pointer to the workset:
  10844. //
  10845. pWorkset = pWSGroup->apWorksets[worksetID];
  10846. ASSERT((pWorkset != NULL));
  10847. //
  10848. // Set the persistence field for the workset - we might not have done
  10849. // this as part of the WorksetCreate above if someone else had created
  10850. // the workset already. However, we set our local copy to have the
  10851. // appropriate persistence value.
  10852. //
  10853. pWorkset->fTemp = fTemp;
  10854. //
  10855. // We need to mark this workset as open in the Client's usage record.
  10856. // However, we don't do this yet - we do it in our hidden handler when
  10857. // the OPEN_CON event is received.
  10858. //
  10859. // The reason for this is that a Client shouldn't start using a workset
  10860. // until it has received the event, so we want the workset to remain
  10861. // closed until then.
  10862. //
  10863. // Note that whether we do it this way or mark the workset as open here
  10864. // and now doesn't make much difference from ObMan's point of view but
  10865. // it will help detect applications which are badly behaved.
  10866. //
  10867. //
  10868. // Add this Client to the list kept in the workset record:
  10869. //
  10870. rc = AddClientToWsetList(pomClient->putTask,
  10871. pWorkset,
  10872. hWSGroup,
  10873. pUsageRec->mode,
  10874. &pClientListEntry);
  10875. if (rc != 0)
  10876. {
  10877. pClientListEntry = NULL;
  10878. DC_QUIT;
  10879. }
  10880. //
  10881. // Create correlator:
  10882. //
  10883. *pCorrelator = NextCorrelator(g_pomPrimary);
  10884. //
  10885. // Post WORKSET_OPEN_CON event to Client:
  10886. //
  10887. eventData16.hWSGroup = hWSGroup;
  10888. eventData16.worksetID = worksetID;
  10889. eventData32.result = 0;
  10890. eventData32.correlator = *pCorrelator;
  10891. TRACE_OUT((" Posting WORKSET_OPEN_CON to Client 0x%08x (task 0x%08x)"));
  10892. UT_PostEvent(pomClient->putTask,
  10893. pomClient->putTask,
  10894. 0, // no delay
  10895. OM_WORKSET_OPEN_CON,
  10896. *(UINT *) &eventData16,
  10897. *(UINT *) &eventData32);
  10898. //
  10899. // Now post OBJECT_ADD_IND events for each of the objects in the
  10900. // workset:
  10901. //
  10902. rc = PostAddEvents(pomClient->putTask, pWorkset, hWSGroup, pomClient->putTask);
  10903. if (rc != 0)
  10904. {
  10905. DC_QUIT;
  10906. }
  10907. TRACE_OUT(("Opened workset %hu in WSG %d for Client 0x%08x",
  10908. worksetID, hWSGroup, pomClient));
  10909. DC_EXIT_POINT:
  10910. if (rc != 0)
  10911. {
  10912. ERROR_OUT(("Error 0x%08x opening workset %u in WSG %d for Client 0x%08x",
  10913. rc, worksetID, hWSGroup, pomClient));
  10914. if (pClientListEntry != NULL)
  10915. {
  10916. COM_BasedListRemove(&(pClientListEntry->chain));
  10917. UT_FreeRefCount((void**)&pClientListEntry, FALSE);
  10918. }
  10919. }
  10920. UT_Unlock(UTLOCK_OM);
  10921. DebugExitDWORD(OM_WorksetOpenPReq, rc);
  10922. return(rc);
  10923. }
  10924. //
  10925. // OM_WorksetClose(...)
  10926. //
void OM_WorksetClose
(
    POM_CLIENT        pomClient,
    OM_WSGROUP_HANDLE hWSGroup,
    OM_WORKSET_ID     worksetID
)
{
    //
    // Closes workset <worksetID> for <pomClient>: marks it closed in the
    // usage record, releases all locks/objects/pending confirms the Client
    // holds against it, and removes the Client from the workset's Client
    // list.
    //
    POM_WORKSET     pWorkset;
    POM_USAGE_REC   pUsageRec;
    POM_CLIENT_LIST pClientListEntry;

    DebugEntry(OM_WorksetClose);

    UT_Lock(UTLOCK_OM);

    ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY | SECONDARY,
                    &pUsageRec, &pWorkset);

    //
    // Mark the workset as closed in the Client's usage record:
    //
    TRACE_OUT(("Closing workset %u in WSG %d for Client 0x%08x",
        worksetID, hWSGroup, pomClient));
    WORKSET_SET_CLOSED(pUsageRec, worksetID);

    //
    // Now we release all the resources the Client is using which concern
    // this workset.  We
    //
    // - release all the locks the Client has for this workset
    //
    // - confirm any outstanding operations such as Deletes, etc.
    //
    // - release all the objects it is currently reading
    //
    // - discard any objects allocated but not yet used.
    //
    TRACE_OUT(("Releasing all resources in use by Client..."));
    ReleaseAllLocks(pomClient, pUsageRec, pWorkset);
    ReleaseAllObjects(pUsageRec, pWorkset);
    ConfirmAll(pomClient, pUsageRec, pWorkset);
    DiscardAllObjects(pUsageRec, pWorkset);

    //
    // Remove the Client from the list of Clients stored in the workset
    // record (matched on the Client's task pointer):
    //
    COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pWorkset->clients),
        (void**)&pClientListEntry, FIELD_OFFSET(OM_CLIENT_LIST, chain),
        FIELD_OFFSET(OM_CLIENT_LIST, putTask), (DWORD_PTR)pomClient->putTask,
        FIELD_SIZE(OM_CLIENT_LIST, putTask));

    //
    // If we've got this far, the Client has the workset open, so it must be
    // listed in the workset's list of Clients:
    //
    ASSERT((pClientListEntry != NULL));
    COM_BasedListRemove(&(pClientListEntry->chain));
    UT_FreeRefCount((void**)&pClientListEntry, FALSE);

    TRACE_OUT(("Closed workset %u in WSG %d for Client 0x%08x",
        worksetID, hWSGroup, pomClient));

    UT_Unlock(UTLOCK_OM);
    DebugExitVOID(OM_WorksetClose);
}
  10984. //
  10985. // OM_WorksetLockReq(...)
  10986. //
  10987. UINT OM_WorksetLockReq
  10988. (
  10989. POM_CLIENT pomClient,
  10990. OM_WSGROUP_HANDLE hWSGroup,
  10991. OM_WORKSET_ID worksetID,
  10992. OM_CORRELATOR * pCorrelator
  10993. )
  10994. {
  10995. POM_USAGE_REC pUsageRec;
  10996. POM_WSGROUP pWSGroup;
  10997. POM_WORKSET pWorkset;
  10998. POM_LOCK pLastLock;
  10999. POM_LOCK pThisLock = NULL;
  11000. BOOL inserted = FALSE;
  11001. UINT rc = 0;
  11002. DebugEntry(OM_WorksetLockReq);
  11003. UT_Lock(UTLOCK_OM);
  11004. //
  11005. // Validate params:
  11006. //
  11007. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
  11008. &pUsageRec, &pWorkset);
  11009. //
  11010. // Set up workset group pointer:
  11011. //
  11012. pWSGroup = pUsageRec->pWSGroup;
  11013. TRACE_OUT(("Client 0x%08x requesting to lock workset %u in WSG %d",
  11014. pomClient, worksetID, hWSGroup));
  11015. //
  11016. // Create a lock record which we will (eventually) put in the Client's
  11017. // lock stack:
  11018. //
  11019. pThisLock = (POM_LOCK)UT_MallocRefCount(sizeof(OM_LOCK), TRUE);
  11020. if (!pThisLock)
  11021. {
  11022. rc = UT_RC_NO_MEM;
  11023. DC_QUIT;
  11024. }
  11025. SET_STAMP(pThisLock, LOCK);
  11026. //
  11027. // Fill in the fields:
  11028. //
  11029. pThisLock->pWSGroup = pWSGroup;
  11030. pThisLock->worksetID = worksetID;
  11031. ZeroMemory(&(pThisLock->objectID), sizeof(OM_OBJECT_ID));
  11032. //
  11033. // Check that granting this lock won't result in a lock order violation:
  11034. // (it will if this lock is earlier than or equal to the last lock
  11035. // acquired).
  11036. //
  11037. TRACE_OUT(("Checking for lock order violation..."));
  11038. pLastLock = (POM_LOCK)COM_BasedListFirst(&(pomClient->locks), FIELD_OFFSET(OM_LOCK, chain));
  11039. if (pLastLock != NULL)
  11040. {
  11041. ASSERT(CompareLocks(pLastLock, pThisLock) < 0);
  11042. TRACE_OUT(("Last lock acquired by Client 0x%08x was workset %u in WSG %d",
  11043. pomClient, pLastLock->worksetID, pLastLock->pWSGroup->wsg));
  11044. }
  11045. else
  11046. {
  11047. //
  11048. // If there aren't any locks on the lock stack then there can't be
  11049. // any lock violation, so do nothing.
  11050. //
  11051. TRACE_OUT(("No locks on Client's lock stack"));
  11052. }
  11053. //
  11054. // Put a record of this lock in the Client's lock stack (we don't need
  11055. // to surround this with a mutex since a Client's lock stack is only
  11056. // accessed from that Client's task):
  11057. //
  11058. // Note: since this is a stack, we insert the item at the head of the
  11059. // list.
  11060. //
  11061. COM_BasedListInsertAfter(&(pomClient->locks), &(pThisLock->chain));
  11062. //
  11063. // Now start the process of requesting the lock from the ObMan task:
  11064. //
  11065. WorksetLockReq(pomClient->putTask, g_pomPrimary,
  11066. pWSGroup, pWorkset, hWSGroup, pCorrelator);
  11067. TRACE_OUT(("Requested lock for workset %u in WSG %d for Client 0x%08x",
  11068. worksetID, pWSGroup->wsg, pomClient));
  11069. DC_EXIT_POINT:
  11070. UT_Unlock(UTLOCK_OM);
  11071. DebugExitDWORD(OM_WorksetLockReq, rc);
  11072. return(rc);
  11073. }
  11074. //
  11075. // OM_WorksetUnlock(...)
  11076. //
  11077. void OM_WorksetUnlock
  11078. (
  11079. POM_CLIENT pomClient,
  11080. OM_WSGROUP_HANDLE hWSGroup,
  11081. OM_WORKSET_ID worksetID
  11082. )
  11083. {
  11084. POM_USAGE_REC pUsageRec;
  11085. POM_WSGROUP pWSGroup;
  11086. POM_WORKSET pWorkset;
  11087. POM_LOCK pLastLock;
  11088. OM_LOCK thisLock;
  11089. UINT rc = 0;
  11090. DebugEntry(OM_WorksetUnlock);
  11091. UT_Lock(UTLOCK_OM);
  11092. //
  11093. // Validate params:
  11094. //
  11095. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
  11096. &pUsageRec, &pWorkset);
  11097. pWSGroup = pUsageRec->pWSGroup;
  11098. TRACE_OUT(("Client 0x%08x requesting to unlock workset %u in WSG %d",
  11099. pomClient, worksetID, hWSGroup));
  11100. //
  11101. // Find the lock uppermost on the Client's lock stack:
  11102. //
  11103. pLastLock = (POM_LOCK)COM_BasedListFirst(&(pomClient->locks), FIELD_OFFSET(OM_LOCK, chain));
  11104. ASSERT((pLastLock != NULL));
  11105. //
  11106. // Assert that the lock uppermost on the lock stack is the one the
  11107. // Client is trying to release (i.e. that the workset IDs are the same
  11108. // and that the object ID of the lock on the stack is NULL):
  11109. //
  11110. thisLock.pWSGroup = pWSGroup;
  11111. thisLock.worksetID = worksetID;
  11112. ZeroMemory(&(thisLock.objectID), sizeof(OM_OBJECT_ID));
  11113. ASSERT(CompareLocks(pLastLock, &thisLock) == 0);
  11114. //
  11115. // Now call the common function to do the unlock:
  11116. //
  11117. WorksetUnlock(pomClient->putTask, pWSGroup, pWorkset);
  11118. //
  11119. // Remove the lock from the lock stack and free the memory:
  11120. //
  11121. COM_BasedListRemove(&(pLastLock->chain));
  11122. UT_FreeRefCount((void**)&pLastLock, FALSE);
  11123. TRACE_OUT(("Unlocked workset %u in WSG %d for Client 0x%08x",
  11124. worksetID, hWSGroup, pomClient));
  11125. UT_Unlock(UTLOCK_OM);
  11126. DebugExitVOID(OM_WorksetUnlock);
  11127. }
  11128. //
  11129. // OM_WorksetCountObjects(...)
  11130. //
  11131. void OM_WorksetCountObjects
  11132. (
  11133. POM_CLIENT pomClient,
  11134. OM_WSGROUP_HANDLE hWSGroup,
  11135. OM_WORKSET_ID worksetID,
  11136. UINT * pCount
  11137. )
  11138. {
  11139. POM_USAGE_REC pUsageRec;
  11140. POM_WORKSET pWorkset;
  11141. DebugEntry(OM_WorksetCountObjects);
  11142. UT_Lock(UTLOCK_OM);
  11143. //
  11144. // Validate params:
  11145. //
  11146. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY | SECONDARY,
  11147. &pUsageRec, &pWorkset);
  11148. //
  11149. // Extract <numObjects> field and put in *pCount:
  11150. //
  11151. *pCount = pWorkset->numObjects;
  11152. //
  11153. // Debug-only check:
  11154. //
  11155. CheckObjectCount(pUsageRec->pWSGroup, pWorkset);
  11156. TRACE_OUT(("Number of objects in workset %u in WSG %d = %u",
  11157. worksetID, hWSGroup, *pCount));
  11158. UT_Unlock(UTLOCK_OM);
  11159. DebugExitVOID(OM_WorksetCountObjects);
  11160. }
  11161. //
  11162. // OM_WorksetClear(...)
  11163. //
UINT OM_WorksetClear
(
    POM_CLIENT        pomClient,
    OM_WSGROUP_HANDLE hWSGroup,
    OM_WORKSET_ID     worksetID
)
{
    //
    // Issues a Clear for workset <worksetID>: generates an
    // OMNET_WORKSET_CLEAR operation, processes it locally (creating a
    // pending op which local Clients must later confirm via
    // OM_WorksetClearConfirm) and queues it for the network.
    //
    // Returns 0 on success, else an OM error code.
    //
    POM_USAGE_REC        pUsageRec;
    POM_WSGROUP          pWSGroup;
    POM_WORKSET          pWorkset;
    POMNET_OPERATION_PKT pPacket;
    UINT                 rc = 0;

    DebugEntry(OM_WorksetClear);

    UT_Lock(UTLOCK_OM);

    ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
                    &pUsageRec, &pWorkset);

    pWSGroup = pUsageRec->pWSGroup;

    TRACE_OUT(("Client 0x%08x requesting to clear workset %u in WSG %d",
        pomClient, worksetID, hWSGroup));

    //
    // Check workset isn't locked by somebody else (OK if locked by us):
    //
    CHECK_WORKSET_NOT_LOCKED(pWorkset);

    //
    // Check workset is not exhausted:
    //
    CHECK_WORKSET_NOT_EXHAUSTED(pWorkset);

    //
    // Generate, process and queue the WORKSET_CLEAR message:
    //
    rc = GenerateOpMessage(pWSGroup,
                           worksetID,
                           NULL,              // no object ID
                           NULL,              // no object data
                           OMNET_WORKSET_CLEAR,
                           &pPacket);
    if (rc != 0)
    {
        DC_QUIT;
    }

    rc = ProcessWorksetClear(pomClient->putTask, g_pomPrimary,
        pPacket, pWSGroup, pWorkset);
    if (rc != 0)
    {
        DC_QUIT;
    }

    rc = QueueMessage(pomClient->putTask,
                      pWSGroup->pDomain,
                      pWSGroup->channelID,
                      NET_HIGH_PRIORITY,
                      pWSGroup,
                      pWorkset,
                      NULL,                          // no object record
                      (POMNET_PKT_HEADER) pPacket,
                      NULL,                          // no object data
                      TRUE);
    if (rc != 0)
    {
        DC_QUIT;
    }

    TRACE_OUT(("Issued WorksetClear for workset %u in WSG %d for Client 0x%08x",
        worksetID, hWSGroup, pomClient));

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(("Error 0x%08x clearing workset %u in WSG %d for Client 0x%08x",
            rc, worksetID, hWSGroup, pomClient));
    }

    UT_Unlock(UTLOCK_OM);
    DebugExitDWORD(OM_WorksetClear, rc);
    return(rc);
}
  11236. //
  11237. // OM_WorksetClearConfirm(...)
  11238. //
  11239. void OM_WorksetClearConfirm
  11240. (
  11241. POM_CLIENT pomClient,
  11242. OM_WSGROUP_HANDLE hWSGroup,
  11243. OM_WORKSET_ID worksetID
  11244. )
  11245. {
  11246. POM_USAGE_REC pUsageRec;
  11247. POM_PENDING_OP pPendingOp;
  11248. POM_WORKSET pWorkset;
  11249. UINT rc = 0;
  11250. DebugEntry(OM_WorksetClearConfirm);
  11251. UT_Lock(UTLOCK_OM);
  11252. ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
  11253. &pUsageRec, &pWorkset);
  11254. TRACE_OUT(("Client 0x%08x confirming WorksetClear for workest %u in WSG %d",
  11255. pomClient, worksetID, hWSGroup));
  11256. //
  11257. // Find the pending clear that we've been asked to confirm (assume it is
  11258. // first clear we find in the pending operation queue):
  11259. //
  11260. FindPendingOp(pWorkset, 0, WORKSET_CLEAR, &pPendingOp);
  11261. //
  11262. // We assert that a relevant pending op was found:
  11263. //
  11264. ASSERT(pPendingOp != NULL);
  11265. //
  11266. // In versions which support object locking, we will need to unlock any
  11267. // objects that are both
  11268. //
  11269. // - locked, and
  11270. //
  11271. // - deleted by this Clear (remember that a Clear doesn't delete ALL
  11272. // objects but only those that were added before the Clear was
  11273. // issued).
  11274. //
  11275. //
  11276. // We also need to release any objects
  11277. //
  11278. // - that the Client was using and
  11279. //
  11280. // - which are to be deleted.
  11281. //
  11282. // Since it's rather a lot of effort to ensure both conditions, we just
  11283. // release all the objects the Client was using i.e. invoking
  11284. // ClearConfirm invalidates ALL object pointers obtained via ObjectRead,
  11285. // as specified in the API:
  11286. //
  11287. ReleaseAllObjects(pUsageRec, pWorkset);
  11288. //
  11289. // If an object which is to be deleted because of the clear has an
  11290. // operation pending on it, the IND event will be swallowed by the
  11291. // HiddenHandler.
  11292. //
  11293. // Note that we cannot call ConfirmAll (to confirm any pending
  11294. // operations on objects in the workset) at this point for the following
  11295. // reasons:
  11296. //
  11297. // - this Clear might not affect the objects on which we were confirming
  11298. // operations
  11299. //
  11300. // - the Client might have received the IND events and try to call a
  11301. // Confirm function in the future, which would cause an assertion
  11302. // failure
  11303. //
  11304. // - if the Client hasn't yet got the IND events it will never get them
  11305. // because the hidden handler will swallow them if this DoClear causes
  11306. // them to be deleted.
  11307. //
  11308. //
  11309. // Here we actually perform the clear:
  11310. //
  11311. // (with multiple local access to workset groups as we may have in R2.0,
  11312. // we can't necessarily clear a workset when just one Client has
  11313. // confirmed; exactly what we will do depends on the design on R2.0).
  11314. //
  11315. WorksetDoClear(pomClient->putTask, pUsageRec->pWSGroup, pWorkset, pPendingOp);
  11316. TRACE_OUT(("Confirmed Clear for workset %u in WSG %d for Client 0x%08x",
  11317. worksetID, hWSGroup, pomClient));
  11318. UT_Unlock(UTLOCK_OM);
  11319. DebugExitVOID(OM_WorksetClearConfirm);
  11320. }
  11321. //
  11322. // OM_ObjectAdd()
  11323. //
UINT OM_ObjectAdd
(
    POM_CLIENT        pomClient,
    OM_WSGROUP_HANDLE hWSGroup,
    OM_WORKSET_ID     worksetID,
    POM_OBJECTDATA *  ppData,
    UINT              updateSize,
    POM_OBJECT *      ppObj,
    OM_POSITION       position
)
{
    //
    // Adds the object data *ppData to workset <worksetID> at <position>.
    // On success, ownership of the data transfers to ObMan: *ppData is
    // NULLed so the Client cannot touch it again (except via ObjectRead),
    // and *ppObj receives the new object record.
    //
    // Returns 0 on success, else an OM error code.
    //
    POM_USAGE_REC   pUsageRec;
    POM_WSGROUP     pWSGroup;
    POM_WORKSET     pWorkset;
    POM_OBJECTDATA  pData;
    OM_OBJECT_ID    newObjectID;
    UINT            rc = 0;

    DebugEntry(OM_ObjectAdd);

    UT_Lock(UTLOCK_OM);

    ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
                    &pUsageRec, &pWorkset);

    pData = *ppData;
    ValidateObjectData(pData);

    TRACE_OUT(("Client 0x%08x adding object to workset %u in WSG %d",
        pomClient, worksetID, hWSGroup));
    TRACE_OUT(("    object data is at 0x%08x - size: %u",
        pData, pData->length));
    ASSERT((updateSize < OM_MAX_UPDATE_SIZE));

    //
    // Set up workset group pointer:
    //
    pWSGroup = pUsageRec->pWSGroup;

    //
    // Check workset isn't locked by somebody else (OK if locked by us):
    //
    CHECK_WORKSET_NOT_LOCKED(pWorkset);

    //
    // Check workset is not exhausted:
    //
    CHECK_WORKSET_NOT_EXHAUSTED(pWorkset);

    //
    // Call the internal function to add the object:
    //
    rc = ObjectAdd(pomClient->putTask, g_pomPrimary,
        pWSGroup, pWorkset, pData, updateSize,
        position, &newObjectID, ppObj);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Remove the object from the unused objects list (it was put there by
    // the allocation the Client made earlier):
    //
    RemoveFromUnusedList(pUsageRec, pData);

    //
    // If all has gone well, we NULL the Client's pointer to the object
    // data, since we now own the object and the Client is not supposed to
    // refer to it again (unless, of course, it does an OM_ObjectRead).
    //
    *ppData = NULL;

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(("ERROR %d adding object to workset %u in WSG %d for Client 0x%08x",
            rc, pWorkset->worksetID, hWSGroup, pomClient));
    }

    UT_Unlock(UTLOCK_OM);
    DebugExitDWORD(OM_ObjectAdd, rc);
    return(rc);
}
  11394. //
  11395. // OM_ObjectMove()
  11396. //
UINT OM_ObjectMove
(
    POM_CLIENT        pomClient,
    OM_WSGROUP_HANDLE hWSGroup,
    OM_WORKSET_ID     worksetID,
    POM_OBJECT        pObj,
    OM_POSITION       position
)
{
    //
    // Moves object <pObj> to <position> (FIRST or LAST) within workset
    // <worksetID>: generates an OBJECT_MOVE operation, queues it for the
    // network and applies it locally.
    //
    // Returns 0 on success, else an OM error code.
    //
    POM_USAGE_REC        pUsageRec;
    POM_WSGROUP          pWSGroup;
    POM_WORKSET          pWorkset;
    POMNET_OPERATION_PKT pPacket = NULL;
    UINT                 rc      = 0;

    DebugEntry(OM_ObjectMove);

    UT_Lock(UTLOCK_OM);

    ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
                    &pUsageRec, &pWorkset);

    TRACE_OUT(("Client 0x%08x moving object 0x%08x in workset %u in WSG %d (position: %s)...",
        pomClient, pObj, worksetID, hWSGroup,
        position == LAST ? "LAST" : "FIRST"));

    //
    // Set up workset group pointer:
    //
    pWSGroup = pUsageRec->pWSGroup;

    //
    // Check workset isn't locked by somebody else (OK if locked by us):
    //
    CHECK_WORKSET_NOT_LOCKED(pWorkset);

    //
    // Check workset is not exhausted:
    //
    CHECK_WORKSET_NOT_EXHAUSTED(pWorkset);

    //
    // Here we generate, process and queue an OBJECT_MOVE message:
    //
    rc = GenerateOpMessage(pWSGroup,
                           pWorkset->worksetID,
                           &(pObj->objectID),
                           NULL,              // no object data
                           OMNET_OBJECT_MOVE,
                           &pPacket);
    if (rc != 0)
    {
        pPacket = NULL;
        DC_QUIT;
    }

    //
    // Generate message doesn't put the position in the <misc1> field, so we
    // do it here:
    //
    pPacket->position = position;

    //
    // QueueMessage may free the packet (if we're not in a call) but we need
    // to process it in a minute so bump the use count:
    //
    UT_BumpUpRefCount(pPacket);

    rc = QueueMessage(pomClient->putTask,
                      pWSGroup->pDomain,
                      pWSGroup->channelID,
                      NET_HIGH_PRIORITY,
                      pWSGroup,
                      pWorkset,
                      pObj,
                      (POMNET_PKT_HEADER) pPacket,
                      NULL,                          // no object data for a MOVE
                      TRUE);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Apply the move locally:
    //
    ProcessObjectMove(pomClient->putTask, pPacket, pWorkset, pObj);

DC_EXIT_POINT:
    if (pPacket != NULL)
    {
        //
        // Do this on success OR error since we bumped up the ref count above.
        //
        UT_FreeRefCount((void**)&pPacket, FALSE);
    }

    if (rc != 0)
    {
        ERROR_OUT(("ERROR %d moving object 0x%08x in workset %u in WSG %d",
            rc, pObj, worksetID, hWSGroup));
    }

    UT_Unlock(UTLOCK_OM);
    DebugExitDWORD(OM_ObjectMove, rc);
    return(rc);
}
  11486. //
  11487. // OM_ObjectDelete(...)
  11488. //
UINT OM_ObjectDelete
(
    POM_CLIENT        pomClient,
    OM_WSGROUP_HANDLE hWSGroup,
    OM_WORKSET_ID     worksetID,
    POM_OBJECT        pObj
)
{
    //
    // Issues a Delete for object <pObj>.  The delete does not take effect
    // until the local Client(s) have invoked OM_ObjectDeleteConfirm.
    //
    // Returns 0 on success, OM_RC_OBJECT_DELETED if a delete is already
    // pending for the object, or another OM error code.
    //
    POM_USAGE_REC pUsageRec;
    POM_WORKSET   pWorkset;
    UINT          rc = 0;

    DebugEntry(OM_ObjectDelete);

    UT_Lock(UTLOCK_OM);

    ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
                    &pUsageRec, &pWorkset);

    TRACE_OUT(("Client 0x%08x requesting to delete object 0x%08x from workset %u in WSG %d",
        pomClient, pObj, worksetID, hWSGroup));

    //
    // Check workset isn't locked by somebody else (OK if locked by us):
    //
    CHECK_WORKSET_NOT_LOCKED(pWorkset);

    //
    // Check workset is not exhausted:
    //
    CHECK_WORKSET_NOT_EXHAUSTED(pWorkset);

    //
    // If there is already a Delete pending for the object, we return an
    // error and do not post the delete indication event.
    //
    // If we returned success, we would then have to post another event,
    // since the Client may wait for it.  If we post the event, the Client
    // will probably invoke DeleteConfirm a second time when it is
    // unexpected, thereby causing an assertion failure.
    //
    // Note that we cannot rely on the hidden handler to get us out of this
    // one, since the Client might receive the second event before
    // processing the first one, so the handler would have no way of knowing
    // to trap the event.
    //
    //
    // So, to find out if there's a delete pending, check the flag in the
    // object record:
    //
    if (pObj->flags & PENDING_DELETE)
    {
        TRACE_OUT(("Client tried to delete object already being deleted (0x%08x)",
            pObj));
        rc = OM_RC_OBJECT_DELETED;
        DC_QUIT;
    }

    //
    // Here we call the ObjectDelete function to generate, process and queue
    // an OBJECT_DELETE message:
    //
    rc = ObjectDRU(pomClient->putTask,
                   pUsageRec->pWSGroup,
                   pWorkset,
                   pObj,
                   NULL,
                   OMNET_OBJECT_DELETE);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Remember, the delete doesn't actually happen until the local
    // Client(s) have invoked DeleteConfirm().
    //

DC_EXIT_POINT:
    //
    // SFR5843: Don't trace an error if the object has been deleted - this
    // is just safe race condition.
    //
    if ((rc != 0) && (rc != OM_RC_OBJECT_DELETED))
    {
        ERROR_OUT(("ERROR %d issuing delete for object 0x%08x in WSG %d:%hu",
            rc, pObj, hWSGroup, worksetID));
    }

    UT_Unlock(UTLOCK_OM);
    DebugExitDWORD(OM_ObjectDelete, rc);
    return(rc);
}
//
// OM_ObjectDeleteConfirm
//
// Confirms a pending delete for <pObj> (previously signalled to the local
// Client).  Any pending replace/update for the object is carried out
// first, the Client's read-hold on the object (if any) is released, and
// then the delete itself is performed.
//
void OM_ObjectDeleteConfirm
(
    POM_CLIENT          pomClient,      // calling Client
    OM_WSGROUP_HANDLE   hWSGroup,       // workset group holding the object
    OM_WORKSET_ID       worksetID,      // workset holding the object
    POM_OBJECT          pObj            // object whose delete is confirmed
)
{
    POM_WORKSET         pWorkset;
    POM_USAGE_REC       pUsageRec;
    POM_PENDING_OP      pPendingOp;
    POM_PENDING_OP      pOtherPendingOp;
    UINT                rc = 0;

    DebugEntry(OM_ObjectDeleteConfirm);

    UT_Lock(UTLOCK_OM);

    ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
        &pUsageRec, &pWorkset);

    //
    // To check that there is indeed a Delete pending for the object, we
    // look in the workset's pending operation list.
    //
    FindPendingOp(pWorkset, pObj, OBJECT_DELETE, &pPendingOp);

    //
    // We assert that a relevant pending op was found:
    //
    ASSERT((pPendingOp != NULL));

    //
    // Call ObjectRelease, to release the object (will be a no-op and return
    // NOT_FOUND if the Client hasn't done a Read on it):
    //
    rc = ObjectRelease(pUsageRec, worksetID, pObj);
    ASSERT(((rc == 0) || (rc == OM_RC_OBJECT_NOT_FOUND)));

    //
    // If we are going to confirm the delete, then we must ensure that any
    // pending update or replace is carried out too.  There can be only one
    // of each, so check as follows (the order we do them in is not
    // relevant):
    //
    FindPendingOp(pWorkset, pObj, OBJECT_REPLACE, &pOtherPendingOp);
    if (pOtherPendingOp != NULL)
    {
        ObjectDoReplace(pomClient->putTask,
            pUsageRec->pWSGroup, pWorkset, pObj, pOtherPendingOp);
    }

    FindPendingOp(pWorkset, pObj, OBJECT_UPDATE, &pOtherPendingOp);
    if (pOtherPendingOp != NULL)
    {
        ObjectDoUpdate(pomClient->putTask,
            pUsageRec->pWSGroup, pWorkset, pObj, pOtherPendingOp);
    }

    //
    // Perform the Delete:
    //
    ObjectDoDelete(pomClient->putTask, pUsageRec->pWSGroup, pWorkset, pObj, pPendingOp);

    UT_Unlock(UTLOCK_OM);
    DebugExitVOID(OM_ObjectDeleteConfirm);
}
//
// OM_ObjectReplace(...)
//
// Queues a replace of the whole data of <pObj> with the data in *ppData.
// On success, ownership of *ppData passes to ObjMan and the Client's
// pointer is NULLed; the replace does not take effect until confirmed
// (via OM_ObjectReplaceConfirm).
//
// Returns: 0, or OM_RC_OBJECT_DELETED if a delete is pending for the
// object (the CHECK_* macros below may also bail out with other codes).
//
UINT OM_ObjectReplace
(
    POM_CLIENT          pomClient,      // calling Client
    OM_WSGROUP_HANDLE   hWSGroup,       // workset group holding the object
    OM_WORKSET_ID       worksetID,      // workset holding the object
    POM_OBJECT          pObj,           // object to replace
    POM_OBJECTDATA *    ppData          // in: new data; out: NULLed on success
)
{
    POM_USAGE_REC       pUsageRec;
    POM_WORKSET         pWorkset;
    POM_OBJECTDATA      pData;
    UINT                rc = 0;

    DebugEntry(OM_ObjectReplace);

    UT_Lock(UTLOCK_OM);

    ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
        &pUsageRec, &pWorkset);

    pData = *ppData;
    ValidateObjectData(pData);

    //
    // Check that the Client is not attempting to replace the object with
    // one smaller that the object's update size (which is the minimum size
    // for a replace):
    //
    ASSERT((pData->length >= pObj->updateSize));

    //
    // Check workset isn't locked by somebody else (OK if locked by us):
    //
    CHECK_WORKSET_NOT_LOCKED(pWorkset);

    //
    // Check workset is not exhausted:
    //
    CHECK_WORKSET_NOT_EXHAUSTED(pWorkset);

    //
    // If the object is in the process of being deleted, we prevent the
    // Replace.  This is because if we don't, the Client will get a
    // REPLACE_IND event after it has got (and processed) a DELETE event for
    // the object.
    //
    if (pObj->flags & PENDING_DELETE)
    {
        TRACE_OUT(("Client 0x%08x tried to replace object being deleted (0x%08x)",
            pomClient, pObj));
        rc = OM_RC_OBJECT_DELETED;
        DC_QUIT;
    }

    //
    // When object locking supported, need to prevent object replace when
    // object is locked.
    //

    //
    // Generate, process and queue an OBJECT_REPLACE message:
    //
    rc = ObjectDRU(pomClient->putTask,
        pUsageRec->pWSGroup,
        pWorkset,
        pObj,
        pData,
        OMNET_OBJECT_REPLACE);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Remove the object from the unused objects list:
    //
    RemoveFromUnusedList(pUsageRec, pData);

    //
    // NULL the Client's pointer to the object:
    //
    *ppData = NULL;

    TRACE_OUT(("Queued replace for object 0x%08x in workset %u for Client 0x%08x",
        pObj, worksetID, pomClient));

DC_EXIT_POINT:
    //
    // SFR5843: Don't trace an error if the object has been deleted - this
    // is just safe race condition.
    //
    if ((rc != 0) && (rc != OM_RC_OBJECT_DELETED))
    {
        ERROR_OUT(("ERROR %d issuing replace for object 0x%08x in WSG %d:%hu",
            rc, pObj, hWSGroup, worksetID));
    }

    UT_Unlock(UTLOCK_OM);
    DebugExitDWORD(OM_ObjectReplace, rc);
    return(rc);
}
//
// OM_ObjectUpdate
//
// Queues an update of the first <updateSize> bytes of <pObj> with the data
// in *ppData (the update size is fixed when the object is added; see the
// length assertion below).  On success, ownership of *ppData passes to
// ObjMan and the Client's pointer is NULLed; the update does not take
// effect until confirmed (via OM_ObjectUpdateConfirm).
//
// Returns: 0, or OM_RC_OBJECT_DELETED if a delete is pending for the
// object (the CHECK_* macros below may also bail out with other codes).
//
UINT OM_ObjectUpdate
(
    POM_CLIENT          pomClient,      // calling Client
    OM_WSGROUP_HANDLE   hWSGroup,       // workset group holding the object
    OM_WORKSET_ID       worksetID,      // workset holding the object
    POM_OBJECT          pObj,           // object to update
    POM_OBJECTDATA *    ppData          // in: update data; out: NULLed on success
)
{
    POM_USAGE_REC       pUsageRec;
    POM_WORKSET         pWorkset;
    POM_OBJECTDATA      pData;
    UINT                rc = 0;

    DebugEntry(OM_ObjectUpdate);

    UT_Lock(UTLOCK_OM);

    ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
        &pUsageRec, &pWorkset);

    pData = *ppData;
    ValidateObjectData(pData);

    //
    // Check size of update equals the update size for the object:
    //
    ASSERT((pData->length == pObj->updateSize));

    TRACE_OUT(("Update request is for first 0x%08x bytes, starting at 0x%08x",
        pData->length, pData->data));

    //
    // Check workset isn't locked by somebody else (OK if locked by us):
    //
    CHECK_WORKSET_NOT_LOCKED(pWorkset);

    //
    // Check workset is not exhausted:
    //
    CHECK_WORKSET_NOT_EXHAUSTED(pWorkset);

    //
    // If the object is in the process of being deleted, we prevent the
    // Update.  This is because if we don't, the Client will get a
    // UPDATE_IND event after it has got (and processed) a DELETE event for
    // the object.
    //
    if (pObj->flags & PENDING_DELETE)
    {
        TRACE_OUT(("Client 0x%08x tried to update object being deleted (0x%08x)",
            pomClient, pObj));
        rc = OM_RC_OBJECT_DELETED;
        DC_QUIT;
    }

    //
    // When object locking supported, need to prevent object update/replace
    // when object is locked.
    //

    //
    // Generate, process and queue an OBJECT_UPDATE message:
    //
    rc = ObjectDRU(pomClient->putTask,
        pUsageRec->pWSGroup,
        pWorkset,
        pObj,
        pData,
        OMNET_OBJECT_UPDATE);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Remove the object from the unused objects list:
    //
    RemoveFromUnusedList(pUsageRec, pData);

    //
    // NULL the Client's pointer to the object:
    //
    *ppData = NULL;

    TRACE_OUT(("Queued update for object 0x%08x in workset %u for Client 0x%08x",
        pObj, worksetID, pomClient));

DC_EXIT_POINT:
    //
    // SFR5843: Don't trace an error if the object has been deleted - this
    // is just safe race condition.
    //
    if ((rc != 0) && (rc != OM_RC_OBJECT_DELETED))
    {
        ERROR_OUT(("ERROR %d issuing update for object 0x%08x in WSG %d:%hu",
            rc, pObj, hWSGroup, worksetID));
    }

    UT_Unlock(UTLOCK_OM);
    DebugExitDWORD(OM_ObjectUpdate, rc);
    return(rc);
}
//
// OM_ObjectReplaceConfirm(...)
//
// Confirms a pending replace for <pObj>: releases the Client's read-hold
// on the object (if any) and then performs the actual replace queued by
// OM_ObjectReplace.
//
void OM_ObjectReplaceConfirm
(
    POM_CLIENT          pomClient,      // calling Client
    OM_WSGROUP_HANDLE   hWSGroup,       // workset group holding the object
    OM_WORKSET_ID       worksetID,      // workset holding the object
    POM_OBJECT          pObj            // object whose replace is confirmed
)
{
    POM_WORKSET         pWorkset;
    POM_USAGE_REC       pUsageRec;
    POM_PENDING_OP      pPendingOp;
    UINT                rc = 0;

    DebugEntry(OM_ObjectReplaceConfirm);

    UT_Lock(UTLOCK_OM);

    //
    // Here, we do our usual parameter validation, but we don't want to
    // assert if the object has been delete-confirmed already, so we modify
    // the code from ValidateParams4 a bit:
    //
    ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
        &pUsageRec, &pWorkset);

    //
    // Retrieve the Replace operation from the object's pending op queue (we
    // want the first REPLACE operation on the queue, so we start from the
    // head):
    //
    FindPendingOp(pWorkset, pObj, OBJECT_REPLACE, &pPendingOp);
    ASSERT((pPendingOp != NULL));

    //
    // Call ObjectRelease, to release the object (will be a no-op if the
    // Client hasn't done a Read on it):
    //
    rc = ObjectRelease(pUsageRec, worksetID, pObj);
    ASSERT(((rc == 0) || (rc == OM_RC_OBJECT_NOT_FOUND)));

    //
    // Call the internal function to perform the actual Replace:
    //
    ObjectDoReplace(pomClient->putTask, pUsageRec->pWSGroup, pWorkset, pObj, pPendingOp);

    TRACE_OUT(("Confirmed Replace for object 0x%08x in workset %u for Client 0x%08x",
        pObj, worksetID, pomClient));

    UT_Unlock(UTLOCK_OM);
    DebugExitVOID(OM_ObjectReplaceConfirm);
}
//
// OM_ObjectUpdateConfirm(...)
//
// Confirms a pending update for <pObj>: releases the Client's read-hold
// on the object (if any) and then performs the actual update queued by
// OM_ObjectUpdate.
//
void OM_ObjectUpdateConfirm
(
    POM_CLIENT          pomClient,      // calling Client
    OM_WSGROUP_HANDLE   hWSGroup,       // workset group holding the object
    OM_WORKSET_ID       worksetID,      // workset holding the object
    POM_OBJECT          pObj            // object whose update is confirmed
)
{
    POM_USAGE_REC       pUsageRec;
    POM_WORKSET         pWorkset;
    POM_PENDING_OP      pPendingOp;
    UINT                rc = 0;

    DebugEntry(OM_ObjectUpdateConfirm);

    UT_Lock(UTLOCK_OM);

    ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY,
        &pUsageRec, &pWorkset);

    //
    // Retrieve the Update operation from the object's pending op queue (we
    // want the first UPDATE operation on the queue, so we start from the
    // head):
    //
    FindPendingOp(pWorkset, pObj, OBJECT_UPDATE, &pPendingOp);
    ASSERT((pPendingOp != NULL));

    //
    // Call ObjectRelease, to release the object (will be a no-op if the
    // Client hasn't done a Read on it):
    //
    rc = ObjectRelease(pUsageRec, worksetID, pObj);
    ASSERT(((rc == 0) || (rc == OM_RC_OBJECT_NOT_FOUND)));

    //
    // Call the internal function to perform the actual Update:
    //
    ObjectDoUpdate(pomClient->putTask, pUsageRec->pWSGroup, pWorkset, pObj, pPendingOp);

    TRACE_OUT(("Confirmed Update for object 0x%08x in workset %u for Client 0x%08x",
        pObj, worksetID, pomClient));

    UT_Unlock(UTLOCK_OM);
    DebugExitVOID(OM_ObjectUpdateConfirm);
}
//
// OM_ObjectH()
// Gets a ptr to the first/next/previous/last object
//
// Walks the workset's object chain relative to <pObjOther> (or from an
// end of the chain for FIRST/LAST, in which case pObjOther must be NULL),
// skipping objects already marked DELETED.  On success *ppObj receives
// the object pointer; returns OM_RC_NO_SUCH_OBJECT if the walk runs off
// the end of the chain.
//
UINT OM_ObjectH
(
    POM_CLIENT          pomClient,      // calling Client
    OM_WSGROUP_HANDLE   hWSGroup,       // workset group to search
    OM_WORKSET_ID       worksetID,      // workset to search
    POM_OBJECT          pObjOther,      // reference object (NULL for FIRST/LAST)
    POM_OBJECT *        ppObj,          // out: object found
    OM_POSITION         omPos           // FIRST/LAST/BEFORE/AFTER
)
{
    POM_USAGE_REC   pUsageRec;
    POM_WORKSET     pWorkset;
    UINT            rc = 0;

    DebugEntry(OM_ObjectH);

    UT_Lock(UTLOCK_OM);

    //
    // Validate params.  If no hOtherObject (like in first/last), don't validate hOtherObject
    //
    if ((omPos == FIRST) || (omPos == LAST))
    {
        ASSERT(pObjOther == NULL);
        ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY | SECONDARY,
            &pUsageRec, &pWorkset);

        //
        // Normalise FIRST/LAST to a direction so the traversal below only
        // has to distinguish AFTER (forwards) from BEFORE (backwards):
        //
        if (omPos == FIRST)
            omPos = AFTER;
        else
            omPos = BEFORE;
    }
    else
    {
        ValidateParams4(pomClient, hWSGroup, worksetID, pObjOther,
            PRIMARY | SECONDARY, &pUsageRec, &pWorkset);
    }

    //
    // Get the object pointer
    //

    //
    // Here we derive a pointer to what is "probably" the object record
    // we're looking for:
    //
    if (pObjOther == NULL)
    {
        //
        // Remember, if *ppObj == 0, then we're looking for the first or
        // last object in the workset:
        //
        if (omPos == AFTER)
        {
            TRACE_OUT(("Getting first object in workset %u", worksetID));
            *ppObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
        }
        else
        {
            TRACE_OUT(("Getting last object in workset %u", worksetID));
            *ppObj = (POM_OBJECT)COM_BasedListLast(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
        }
    }
    else
    {
        *ppObj = pObjOther;

        if (omPos == AFTER)
        {
            TRACE_OUT(("Getting object after 0x%08x in workset %u",
                pObjOther, worksetID));
            *ppObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObjOther, FIELD_OFFSET(OM_OBJECT, chain));
        }
        else
        {
            TRACE_OUT(("Getting object before 0x%08x in workset %u",
                pObjOther, worksetID));
            *ppObj = (POM_OBJECT)COM_BasedListPrev(&(pWorkset->objects), pObjOther, FIELD_OFFSET(OM_OBJECT, chain));
        }
    }

    //
    // ppObj now has "probably" a pointer to the object we're looking for,
    // but now we need to skip deleted objects.
    //
    while ((*ppObj != NULL) && ((*ppObj)->flags & DELETED))
    {
        ValidateObject(*ppObj);

        if (omPos == AFTER)
        {
            *ppObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), *ppObj, FIELD_OFFSET(OM_OBJECT, chain));
        }
        else
        {
            *ppObj = (POM_OBJECT)COM_BasedListPrev(&(pWorkset->objects), *ppObj, FIELD_OFFSET(OM_OBJECT, chain));
        }
    }

    // Ran off the end of the chain without finding a live object.
    if (*ppObj == NULL)
    {
        rc = OM_RC_NO_SUCH_OBJECT;
    }

    UT_Unlock(UTLOCK_OM);
    DebugExitDWORD(OM_ObjectH, rc);
    return(rc);
}
//
// OM_ObjectIDToPtr(...)
//
// Converts a network-wide object ID into a local object pointer for the
// given workset.  Internal OBJECT_DELETED results are mapped to
// OM_RC_BAD_OBJECT_ID and PENDING_DELETE results to success, since those
// distinctions are not externalised to Clients.
//
UINT OM_ObjectIDToPtr
(
    POM_CLIENT          pomClient,      // calling Client
    OM_WSGROUP_HANDLE   hWSGroup,       // workset group to search
    OM_WORKSET_ID       worksetID,      // workset to search
    OM_OBJECT_ID        objectID,       // ID to look up
    POM_OBJECT *        ppObj           // out: matching object pointer
)
{
    POM_USAGE_REC   pUsageRec;
    POM_WORKSET     pWorkset;
    UINT            rc = 0;

    DebugEntry(OM_ObjectIDToPtr);

    UT_Lock(UTLOCK_OM);

    ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY | SECONDARY,
        &pUsageRec, &pWorkset);

    //
    // Now call the internal function to do the search for the ID:
    //
    rc = ObjectIDToPtr(pWorkset, objectID, ppObj);

    if (rc == OM_RC_OBJECT_DELETED)
    {
        //
        // This internal function returns OBJECT_DELETED if the object record
        // was found but is marked as deleted.  We map this to BAD_OBJECT_ID
        // since that's all we externalise to Clients:
        //
        rc = OM_RC_BAD_OBJECT_ID;
    }
    else if (rc == OM_RC_OBJECT_PENDING_DELETE)
    {
        //
        // If we get back PENDING_DELETE, then we map this to OK, since as
        // far as the Client is concerned, the object still exists:
        //
        rc = 0;
    }

    if (rc == OM_RC_BAD_OBJECT_ID)
    {
        WARNING_OUT(("No object found in workset with ID 0x%08x:0x%08x",
            objectID.creator, objectID.sequence));
    }
    else if (rc != 0)
    {
        ERROR_OUT(("ERROR %d converting object ID (0x%08x:0x%08x) to handle",
            rc, objectID.creator, objectID.sequence));
    }
    else
    {
        TRACE_OUT(("Converted object ID (0x%08x:0x%08x) to handle (0x%08x)",
            objectID.creator, objectID.sequence, *ppObj));
    }

    UT_Unlock(UTLOCK_OM);
    DebugExitDWORD(OM_ObjectIDToPtr, rc);
    return(rc);
}
  12058. //
  12059. // OM_ObjectPtrToID(...)
  12060. //
  12061. void OM_ObjectPtrToID
  12062. (
  12063. POM_CLIENT pomClient,
  12064. OM_WSGROUP_HANDLE hWSGroup,
  12065. OM_WORKSET_ID worksetID,
  12066. POM_OBJECT pObj,
  12067. POM_OBJECT_ID pObjectID
  12068. )
  12069. {
  12070. POM_USAGE_REC pUsageRec;
  12071. POM_WORKSET pWorkset;
  12072. UINT rc = 0;
  12073. DebugEntry(OM_ObjectPtrToID);
  12074. UT_Lock(UTLOCK_OM);
  12075. ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY | SECONDARY,
  12076. &pUsageRec, &pWorkset);
  12077. //
  12078. // Extract ID from object record:
  12079. //
  12080. memcpy(pObjectID, &pObj->objectID, sizeof(OM_OBJECT_ID));
  12081. TRACE_OUT(("Retrieved object ID 0x%08x:0x%08x for object 0x%08x in workset %u",
  12082. pObjectID->creator, pObjectID->sequence, pObj, worksetID));
  12083. UT_Unlock(UTLOCK_OM);
  12084. DebugExitVOID(OM_ObjectHandleToID);
  12085. }
//
// OM_ObjectRead(...)
//
// Gives the Client read access to <pObj>'s data: returns a pointer to the
// data in *ppData, bumps the data chunk's ref count so it survives until
// OM_ObjectRelease, and records the object in the Client's objects-in-use
// list.  A Client must not read the same object twice without releasing.
//
UINT OM_ObjectRead
(
    POM_CLIENT          pomClient,      // calling Client
    OM_WSGROUP_HANDLE   hWSGroup,       // workset group holding the object
    OM_WORKSET_ID       worksetID,      // workset holding the object
    POM_OBJECT          pObj,           // object to read
    POM_OBJECTDATA *    ppData          // out: pointer to object data
)
{
    POM_USAGE_REC       pUsageRec;
    POM_WORKSET         pWorkset;
    POM_OBJECT_LIST     pListEntry;
    UINT                rc = 0;

    DebugEntry(OM_ObjectRead);

    UT_Lock(UTLOCK_OM);

    ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY | SECONDARY,
        &pUsageRec, &pWorkset);

    //
    // Check the Client hasn't already read this object without releasing
    // it:
    //
    COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pUsageRec->objectsInUse),
        (void**)&pListEntry, FIELD_OFFSET(OM_OBJECT_LIST, chain),
        FIELD_OFFSET(OM_OBJECT_LIST, pObj), (DWORD_PTR)pObj,
        FIELD_SIZE(OM_OBJECT_LIST, pObj));
    // NOTE(review): debug-only check; in retail, if a duplicate read ever
    // reaches the error path below, the cleanup would decrement the ref
    // count of this live in-use entry — confirm this cannot happen.
    ASSERT(pListEntry == NULL);

    //
    // Convert object handle to a pointer to the object data:
    //
    *ppData = pObj->pData;
    if (!*ppData)
    {
        ERROR_OUT(("OM_ObjectRead: Object 0x%08x has no data", pObj));
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }

    //
    // Bump up the use count of the chunk so it won't be freed until the
    // Client calls OM_ObjectRelease (explicitly or implicitly via e.g
    // DeleteConfirm)
    //
    UT_BumpUpRefCount(*ppData);

    //
    // We need to add this object's handle to the Client's list of
    // objects-in-use, so allocate some memory for the object...
    //
    pListEntry = (POM_OBJECT_LIST)UT_MallocRefCount(sizeof(OM_OBJECT_LIST), TRUE);
    if (!pListEntry)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    SET_STAMP(pListEntry, OLIST);

    //
    // ...fill in the fields...
    //
    pListEntry->pObj = pObj;
    pListEntry->worksetID = worksetID;

    //
    // ...and insert into the list:
    //
    COM_BasedListInsertBefore(&(pUsageRec->objectsInUse),
        &(pListEntry->chain));

    TRACE_OUT(("Read object at 0x%08x (handle: 0x%08x) for Client 0x%08x",
        *ppData, pObj, pomClient));

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // Cleanup: undo the ref-count bump and free the list entry (if
        // either was done before the failure).
        //
        ERROR_OUT(("ERROR %d reading object 0x%08x in workset %u in WSG %d",
            rc, pObj, worksetID, hWSGroup));

        if (pListEntry != NULL)
        {
            UT_FreeRefCount((void**)&pListEntry, FALSE);
        }

        if (*ppData)
            UT_FreeRefCount((void**)ppData, FALSE);
    }

    UT_Unlock(UTLOCK_OM);
    DebugExitDWORD(OM_ObjectRead, rc);
    return(rc);
}
//
// OM_ObjectRelease()
//
// Releases the Client's read-hold on <pObj> obtained via OM_ObjectRead:
// drops the data chunk's ref count, removes the object from the Client's
// objects-in-use list, and NULLs the Client's data pointer.
//
void OM_ObjectRelease
(
    POM_CLIENT          pomClient,      // calling Client
    OM_WSGROUP_HANDLE   hWSGroup,       // workset group holding the object
    OM_WORKSET_ID       worksetID,      // workset holding the object
    POM_OBJECT          pObj,           // object being released
    POM_OBJECTDATA *    ppData          // in: data ptr from Read; out: NULLed
)
{
    POM_USAGE_REC   pUsageRec;
    POM_WORKSET     pWorkset;
    UINT            rc = 0;

    DebugEntry(OM_ObjectRelease);

    UT_Lock(UTLOCK_OM);

    ValidateParams4(pomClient, hWSGroup, worksetID, pObj, PRIMARY | SECONDARY,
        &pUsageRec, &pWorkset);

    //
    // Check that the object pointer and object handle match:
    //
    ASSERT(pObj->pData == *ppData);

    //
    // Now try to release the object from the objects-in-use list:
    //
    rc = ObjectRelease(pUsageRec, worksetID, pObj);

    //
    // ObjectRelease will return an error if the object handle wasn't found
    // in the objects-in-use list.  As far as we're concerned, this is an
    // assert-level error:
    //
    ASSERT((rc == 0));

    //
    // NULL the Client's pointer:
    //
    *ppData = NULL;

    TRACE_OUT(("Released Client 0x%08x's hold on object 0x%08x in workset %u in WSG %d",
        pomClient, pObj, worksetID, hWSGroup));

    UT_Unlock(UTLOCK_OM);
    DebugExitVOID(OM_ObjectRelease);
}
//
// OM_ObjectAlloc(...)
//
// Allocates a data chunk of <size> bytes (plus room for the <length>
// header field) for a new object, and records it in the Client's
// unused-objects list.  The entry is removed from that list later by
// Add, Replace, Update or Discard.
//
UINT OM_ObjectAlloc
(
    POM_CLIENT          pomClient,      // calling Client
    OM_WSGROUP_HANDLE   hWSGroup,       // workset group the object is destined for
    OM_WORKSET_ID       worksetID,      // workset the object is destined for
    UINT                size,           // requested data size, excluding <length> field
    POM_OBJECTDATA *    ppData          // out: pointer to the allocated chunk
)
{
    POM_USAGE_REC       pUsageRec;
    POM_WORKSET         pWorkset;
    POM_OBJECTDATA_LIST pListEntry = NULL;
    UINT                rc = 0;

    DebugEntry(OM_ObjectAlloc);

    UT_Lock(UTLOCK_OM);

    ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
        &pUsageRec, &pWorkset);

    TRACE_OUT(("Client 0x%08x requesting to allocate 0x%08x bytes "
        "for object for workset %u in WSG %d",
        pomClient, size, worksetID, hWSGroup));

    //
    // Check request not too big:
    //
    // NOTE(review): sizeof(OM_MAX_OBJECT_SIZE) is the size of the constant's
    // type (presumably 4, the size of the object's <length> field), not the
    // constant's value — matching the "+ 4 bytes" allocation below; confirm
    // this is the intent.
    //
    ASSERT((size < OM_MAX_OBJECT_SIZE - sizeof(OM_MAX_OBJECT_SIZE)));

    //
    // Check request not too small:
    //
    ASSERT((size > 0));

    //
    // Allocate a chunk of memory for the object (note that we add 4 bytes
    // to the size the Client asked for (i.e. the <size> parameter) since
    // the API stipulates that this does not include the <size> field which
    // is at the start of the object.
    //
    *ppData = (POM_OBJECTDATA)UT_MallocRefCount(size + sizeof(OM_MAX_OBJECT_SIZE), FALSE);
    if (! *ppData)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    // Only the first OM_ZERO_OBJECT_SIZE bytes are zeroed, for speed.
    ZeroMemory(*ppData, min(size, OM_ZERO_OBJECT_SIZE));

    //
    // Now insert a reference to this chunk in the Client's unused-objects
    // list (will be removed by Add, Replace, Update or Discard functions).
    //
    pListEntry = (POM_OBJECTDATA_LIST)UT_MallocRefCount(sizeof(OM_OBJECTDATA_LIST), TRUE);
    if (!pListEntry)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    SET_STAMP(pListEntry, ODLIST);

    pListEntry->pData = *ppData;
    pListEntry->size = size;
    pListEntry->worksetID = worksetID;

    COM_BasedListInsertBefore(&(pUsageRec->unusedObjects),
        &(pListEntry->chain));

    TRACE_OUT(("Allocated object starting at 0x%08x", *ppData));

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // Cleanup: free whichever of the chunk / list entry was allocated
        // before the failure.
        //
        ERROR_OUT(("ERROR %d allocating object (size: 0x%08x) for Client 0x%08x",
            rc, size + sizeof(OM_MAX_OBJECT_SIZE), pomClient));

        if (pListEntry != NULL)
        {
            UT_FreeRefCount((void**)&pListEntry, FALSE);
        }

        if (*ppData != NULL)
        {
            UT_FreeRefCount((void**)ppData, FALSE);
        }
    }

    UT_Unlock(UTLOCK_OM);
    DebugExitDWORD(OM_ObjectAlloc, rc);
    return(rc);
}
//
// OM_ObjectDiscard(...)
//
// Discards a data chunk previously obtained with OM_ObjectAlloc but not
// yet handed to ObjMan via Add/Replace/Update: removes it from the
// Client's unused-objects list and frees it, NULLing the Client's pointer.
//
void OM_ObjectDiscard
(
    POM_CLIENT          pomClient,      // calling Client
    OM_WSGROUP_HANDLE   hWSGroup,       // workset group the chunk was allocated for
    OM_WORKSET_ID       worksetID,      // workset the chunk was allocated for
    POM_OBJECTDATA *    ppData          // in: chunk to discard; out: NULLed
)
{
    POM_USAGE_REC   pUsageRec;
    POM_WORKSET     pWorkset;
    POM_OBJECTDATA  pData;
    // NOTE(review): rc appears unused here but is retained in case the
    // Validate* macros reference it implicitly — confirm before removing.
    UINT            rc = 0;

    DebugEntry(OM_ObjectDiscard);

    UT_Lock(UTLOCK_OM);

    ValidateParams3(pomClient, hWSGroup, worksetID, PRIMARY,
        &pUsageRec, &pWorkset);

    // Keep a copy of the pointer for tracing after *ppData is NULLed.
    pData = *ppData;

    //
    // Remove the object from the unused objects list:
    //
    RemoveFromUnusedList(pUsageRec, pData);

    //
    // Free the chunk containing the object, NULLing the caller's pointer at
    // the same time:
    //
    UT_FreeRefCount((void**)ppData, FALSE);

    TRACE_OUT(("Discarded object at 0x%08x in workset %u in WSG %d for Client 0x%08x",
        pData, worksetID, hWSGroup, pomClient));

    UT_Unlock(UTLOCK_OM);
    DebugExitVOID(OM_ObjectDiscard);
}
  12331. //
  12332. // OM_GetNetworkUserID
  12333. //
  12334. UINT OM_GetNetworkUserID
  12335. (
  12336. POM_CLIENT pomClient,
  12337. OM_WSGROUP_HANDLE hWSGroup,
  12338. NET_UID * pNetUserID
  12339. )
  12340. {
  12341. POM_DOMAIN pDomain;
  12342. POM_USAGE_REC pUsageRec;
  12343. POM_WSGROUP pWSGroup;
  12344. UINT rc = 0;
  12345. DebugEntry(OM_GetNetworkUserID);
  12346. UT_Lock(UTLOCK_OM);
  12347. ValidateParams2(pomClient, hWSGroup, PRIMARY | SECONDARY,
  12348. &pUsageRec, &pWSGroup);
  12349. //
  12350. // Get a pointer to the relevant Domain:
  12351. //
  12352. pDomain = pWSGroup->pDomain;
  12353. if (pDomain->callID == OM_NO_CALL)
  12354. {
  12355. rc = OM_RC_LOCAL_WSGROUP;
  12356. DC_QUIT;
  12357. }
  12358. //
  12359. // Otherwise, everything's OK, so we fill in the caller's pointer and
  12360. // return:
  12361. //
  12362. if (pDomain->userID == 0)
  12363. {
  12364. WARNING_OUT(("Client requesting userID for Domain %u before we've attached",
  12365. pDomain->callID));
  12366. rc = OM_RC_NOT_ATTACHED;
  12367. DC_QUIT;
  12368. }
  12369. *pNetUserID = pDomain->userID;
  12370. TRACE_OUT(("Returned Network user ID (0x%08x) to Client 0x%08x for '%s'",
  12371. *pNetUserID, pomClient, hWSGroup));
  12372. DC_EXIT_POINT:
  12373. UT_Unlock(UTLOCK_OM);
  12374. DebugExitDWORD(OM_GetNetworkUserID, rc);
  12375. return(rc);
  12376. }
//
// SetUpUsageRecord(...)
//
// Allocates and initialises a usage record for a Client's use of a
// workset group: finds a free workset group handle, allocates the record,
// and stores it in the Client's handle-indexed array.
//
// Returns: 0, OM_RC_NO_MORE_HANDLES, or UT_RC_NO_MEM.
//
UINT SetUpUsageRecord
(
    POM_CLIENT          pomClient,      // calling Client
    UINT                mode,           // PRIMARY or SECONDARY usage mode
    POM_USAGE_REC *     ppUsageRec,     // out: new usage record
    OM_WSGROUP_HANDLE * phWSGroup       // out: handle assigned to it
)
{
    UINT rc = 0;

    DebugEntry(SetUpUsageRecord);

    ValidateOMS(pomClient);

    //
    // Find an unused workset group handle for the Client:
    //
    rc = FindUnusedWSGHandle(pomClient, phWSGroup);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Client has a spare handle so create a new usage record for this
    // Client's use of the workset group:
    //
    *ppUsageRec = (POM_USAGE_REC)UT_MallocRefCount(sizeof(OM_USAGE_REC), TRUE);
    if (! *ppUsageRec)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    SET_STAMP((*ppUsageRec), USAGEREC);

    //
    // Next, fill in the fields, but note that:
    //
    // - until the registration gets to pre-Stage1, the only way to abort it
    //   from the Client context is to mark the registration CB as invalid.
    //   To do this (e.g. in WSGroupDeregister) we need access to the
    //   registration CB, so we will put a pointer to it in the usage record
    //   below.
    //
    // - the <worksetOpenFlags> field is zero initially (it will be changed
    //   when the Client does a WorksetOpen), so we do nothing
    //
    // - the <wsGroupMutex> field also needs to be zero initially (the
    //   correct value is inserted by the hidden handler), so we leave this
    //   blank too.
    //
    // (The record was zero-initialised by UT_MallocRefCount above.)
    //
    (*ppUsageRec)->mode = (BYTE)mode;

    COM_BasedListInit(&((*ppUsageRec)->unusedObjects));
    COM_BasedListInit(&((*ppUsageRec)->objectsInUse));

    //
    // Put the offset to the usage record in the array of offsets:
    //
    pomClient->apUsageRecs[*phWSGroup] = *ppUsageRec;

    TRACE_OUT(("Set up usage record for Client 0x%08x at 0x%08x (hWSGroup: %hu)",
        pomClient, *ppUsageRec, *phWSGroup));

DC_EXIT_POINT:
    DebugExitDWORD(SetUpUsageRecord, rc);
    return(rc);
}
  12439. //
  12440. // FindUnusedWSGHandle(...)
  12441. //
  12442. UINT FindUnusedWSGHandle
  12443. (
  12444. POM_CLIENT pomClient,
  12445. OM_WSGROUP_HANDLE * phWSGroup
  12446. )
  12447. {
  12448. BOOL found;
  12449. OM_WSGROUP_HANDLE hWSGroup;
  12450. UINT rc = 0;
  12451. DebugEntry(FindUnusedWSGHandle);
  12452. ValidateOMS(pomClient);
  12453. //
  12454. // Workset group handles are indexes into an array of offsets to usage
  12455. // records. When one of these offsets is 0, the slot is available for
  12456. // use.
  12457. //
  12458. // We start our loop at 1 because 0 is never used as a workset group
  12459. // handle. Because we start at 1, we end at MAX + 1 to ensure that we
  12460. // use MAX handles.
  12461. //
  12462. found = FALSE;
  12463. for (hWSGroup = 1; hWSGroup < OMWSG_MAXPERCLIENT; hWSGroup++)
  12464. {
  12465. if (pomClient->apUsageRecs[hWSGroup] == NULL)
  12466. {
  12467. found = TRUE;
  12468. TRACE_OUT(("Found unused workset group handle %hu for Client 0x%08x",
  12469. hWSGroup, pomClient));
  12470. ASSERT(!pomClient->wsgValid[hWSGroup]);
  12471. break;
  12472. }
  12473. }
  12474. //
  12475. // If there aren't any, quit with an error:
  12476. //
  12477. if (!found)
  12478. {
  12479. WARNING_OUT(("Client 0x%08x has no more workset group handles", pomClient));
  12480. rc = OM_RC_NO_MORE_HANDLES;
  12481. DC_QUIT;
  12482. }
  12483. else
  12484. {
  12485. *phWSGroup = hWSGroup;
  12486. }
  12487. DC_EXIT_POINT:
  12488. DebugExitDWORD(FindUnusedWSGHandle, rc);
  12489. return(rc);
  12490. }
  12491. //
  12492. // RemoveFromUnusedList()
  12493. //
  12494. void RemoveFromUnusedList
  12495. (
  12496. POM_USAGE_REC pUsageRec,
  12497. POM_OBJECTDATA pData
  12498. )
  12499. {
  12500. POM_OBJECTDATA_LIST pListEntry;
  12501. DebugEntry(RemoveFromUnusedList);
  12502. //
  12503. // Search in the unused-objects list hung off the usage record for an
  12504. // entry whose field is the same as the offset of this object:
  12505. //
  12506. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pUsageRec->unusedObjects),
  12507. (void**)&pListEntry, FIELD_OFFSET(OM_OBJECTDATA_LIST, chain),
  12508. FIELD_OFFSET(OM_OBJECTDATA_LIST, pData), (DWORD_PTR)pData,
  12509. FIELD_SIZE(OM_OBJECTDATA_LIST, pData));
  12510. //
  12511. // This object must have been previously allocated, so it must be in the
  12512. // list. Assert failure if not:
  12513. //
  12514. ASSERT((pListEntry != NULL));
  12515. //
  12516. // Also, we check to make sure the Client hasn't set the <size> field to
  12517. // more memory than we originally allocated for the object:
  12518. //
  12519. if (pData->length != pListEntry->size)
  12520. {
  12521. ASSERT((pData->length < pListEntry->size));
  12522. TRACE_OUT(("Client has shrunk object from %u to %u bytes",
  12523. pListEntry->size, pData->length));
  12524. }
  12525. COM_BasedListRemove(&(pListEntry->chain));
  12526. UT_FreeRefCount((void**)&pListEntry, FALSE);
  12527. DebugExitVOID(RemoveFromUnusedList);
  12528. }
  12529. //
  12530. // ReleaseAllObjects(...)
  12531. //
  12532. void ReleaseAllObjects
  12533. (
  12534. POM_USAGE_REC pUsageRec,
  12535. POM_WORKSET pWorkset
  12536. )
  12537. {
  12538. DebugEntry(ReleaseAllObjects);
  12539. while (ObjectRelease(pUsageRec, pWorkset->worksetID, 0) == 0)
  12540. {
  12541. //
  12542. // Calling ObjectRelease with pObj set to NULL will cause the
  12543. // first object in the objects-in-use list which is in this workset
  12544. // to be released. When there are no more, rc will be set to
  12545. // OM_RC_OBJECT_NOT_FOUND and we will break out of our loop:
  12546. //
  12547. }
  12548. DebugExitVOID(ReleaseAllObjects);
  12549. }
  12550. //
  12551. // ReleaseAllLocks(...)
  12552. //
  12553. void ReleaseAllLocks
  12554. (
  12555. POM_CLIENT pomClient,
  12556. POM_USAGE_REC pUsageRec,
  12557. POM_WORKSET pWorkset
  12558. )
  12559. {
  12560. POM_LOCK pThisLock;
  12561. POM_LOCK pTempLock;
  12562. DebugEntry(ReleaseAllLocks);
  12563. ValidateOMS(pomClient);
  12564. //
  12565. // Here we chain through the Client's lock stack and unlock any locks
  12566. // that relate to this workset.
  12567. //
  12568. // Note that, since object locking is not currently supported, the if
  12569. // statement in the loop will succeed at most once (i.e. if the workset
  12570. // itself is locked). The code is nonetheless implemented as a loop for
  12571. // forward compatibility. If this is deemed to be performance critical,
  12572. // we could put a break statement in.
  12573. //
  12574. pThisLock = (POM_LOCK)COM_BasedListFirst(&(pomClient->locks), FIELD_OFFSET(OM_LOCK, chain));
  12575. while (pThisLock != NULL)
  12576. {
  12577. //
  12578. // Since we will remove and free the entry in the lock stack if we
  12579. // find a match, we must chain to the next item beforehand:
  12580. //
  12581. pTempLock = (POM_LOCK)COM_BasedListNext(&(pomClient->locks), pThisLock, FIELD_OFFSET(OM_LOCK, chain));
  12582. if ((pThisLock->pWSGroup == pUsageRec->pWSGroup) &&
  12583. (pThisLock->worksetID == pWorkset->worksetID))
  12584. {
  12585. if (OBJECT_ID_IS_NULL(pThisLock->objectID)) // always TRUE in R1.1
  12586. {
  12587. //
  12588. // ...we're dealing with a workset lock:
  12589. //
  12590. WorksetUnlock(pomClient->putTask, pUsageRec->pWSGroup, pWorkset);
  12591. }
  12592. else
  12593. {
  12594. //
  12595. // ...this is an object lock, so call ObjectUnlock (when it's
  12596. // supported!). In the meantime, assert:
  12597. //
  12598. ERROR_OUT(("Object locking not supported in R1.1!!"));
  12599. }
  12600. COM_BasedListRemove(&(pThisLock->chain));
  12601. UT_FreeRefCount((void**)&pThisLock, FALSE);
  12602. //
  12603. // Could put the break in here for performance improvement.
  12604. //
  12605. }
  12606. pThisLock = pTempLock;
  12607. }
  12608. DebugExitVOID(ReleaseAllLocks);
  12609. }
  12610. //
  12611. // ConfirmAll(...)
  12612. //
  12613. void ConfirmAll
  12614. (
  12615. POM_CLIENT pomClient,
  12616. POM_USAGE_REC pUsageRec,
  12617. POM_WORKSET pWorkset
  12618. )
  12619. {
  12620. POM_PENDING_OP pThisPendingOp;
  12621. POM_OBJECT pObj;
  12622. UINT rc = 0;
  12623. DebugEntry(ConfirmAll);
  12624. ValidateOMS(pomClient);
  12625. //
  12626. // To confirm all outstanding operations for this workset, we search
  12627. // the list of pending ops stored off the workset record:
  12628. //
  12629. //
  12630. // Chain through the workset's list of pending operations and confirm
  12631. // them one by one:
  12632. //
  12633. pThisPendingOp = (POM_PENDING_OP)COM_BasedListFirst(&(pWorkset->pendingOps), FIELD_OFFSET(OM_PENDING_OP, chain));
  12634. while (pThisPendingOp != NULL)
  12635. {
  12636. pObj = pThisPendingOp->pObj;
  12637. switch (pThisPendingOp->type)
  12638. {
  12639. case WORKSET_CLEAR:
  12640. {
  12641. WorksetDoClear(pomClient->putTask,
  12642. pUsageRec->pWSGroup, pWorkset, pThisPendingOp);
  12643. break;
  12644. }
  12645. case OBJECT_DELETE:
  12646. {
  12647. ObjectDoDelete(pomClient->putTask,
  12648. pUsageRec->pWSGroup, pWorkset, pObj, pThisPendingOp);
  12649. break;
  12650. }
  12651. case OBJECT_UPDATE:
  12652. {
  12653. ObjectDoUpdate(pomClient->putTask,
  12654. pUsageRec->pWSGroup, pWorkset, pObj, pThisPendingOp);
  12655. break;
  12656. }
  12657. case OBJECT_REPLACE:
  12658. {
  12659. ObjectDoReplace(pomClient->putTask,
  12660. pUsageRec->pWSGroup, pWorkset, pObj, pThisPendingOp);
  12661. break;
  12662. }
  12663. default:
  12664. {
  12665. ERROR_OUT(("Reached default case in switch statement (value: %hu)",
  12666. pThisPendingOp->type));
  12667. break;
  12668. }
  12669. }
  12670. //
  12671. // The above functions all remove the pending op from the list, so get
  12672. // the new first item
  12673. //
  12674. pThisPendingOp = (POM_PENDING_OP)COM_BasedListFirst(&(pWorkset->pendingOps), FIELD_OFFSET(OM_PENDING_OP, chain));
  12675. }
  12676. DebugExitVOID(ConfirmAll);
  12677. }
  12678. //
  12679. // DiscardAllObjects()
  12680. //
  12681. void DiscardAllObjects
  12682. (
  12683. POM_USAGE_REC pUsageRec,
  12684. POM_WORKSET pWorkset
  12685. )
  12686. {
  12687. POM_OBJECTDATA_LIST pThisEntry;
  12688. POM_OBJECTDATA_LIST pTempEntry;
  12689. POM_OBJECTDATA pData;
  12690. DebugEntry(DiscardAllObjects);
  12691. //
  12692. // Chain through the Client's list of unused objects for this workset
  12693. // group, free any unused objects which were allocated for this workset
  12694. // and remove the entry from the list:
  12695. //
  12696. pThisEntry = (POM_OBJECTDATA_LIST)COM_BasedListFirst(&(pUsageRec->unusedObjects), FIELD_OFFSET(OM_OBJECTDATA_LIST, chain));
  12697. while (pThisEntry != NULL)
  12698. {
  12699. //
  12700. // Since we may be removing and freeing items from the list, we must
  12701. // set up a pointer to the next link in the chain before proceeding:
  12702. //
  12703. pTempEntry = (POM_OBJECTDATA_LIST)COM_BasedListNext(&(pUsageRec->unusedObjects), pThisEntry, FIELD_OFFSET(OM_OBJECTDATA_LIST, chain));
  12704. if (pThisEntry->worksetID == pWorkset->worksetID)
  12705. {
  12706. //
  12707. // OK, this entry in the list is for an object allocated for this
  12708. // workset, so find the object...
  12709. //
  12710. pData = pThisEntry->pData;
  12711. if (!pData)
  12712. {
  12713. ERROR_OUT(("DiscardAllObjects: object 0x%08x has no data", pThisEntry));
  12714. }
  12715. else
  12716. {
  12717. ValidateObjectData(pData);
  12718. //
  12719. // ...free it...
  12720. //
  12721. TRACE_OUT(("Discarding object at 0x%08x", pData));
  12722. UT_FreeRefCount((void**)&pData, FALSE);
  12723. }
  12724. //
  12725. // ...and remove the entry from the list:
  12726. //
  12727. COM_BasedListRemove(&(pThisEntry->chain));
  12728. UT_FreeRefCount((void**)&pThisEntry, FALSE);
  12729. }
  12730. pThisEntry = pTempEntry;
  12731. }
  12732. DebugExitVOID(DiscardAllObjects);
  12733. }
  12734. //
  12735. // ObjectRelease(...)
  12736. //
  12737. UINT ObjectRelease
  12738. (
  12739. POM_USAGE_REC pUsageRec,
  12740. OM_WORKSET_ID worksetID,
  12741. POM_OBJECT pObj
  12742. )
  12743. {
  12744. POM_OBJECT_LIST pListEntry;
  12745. POM_OBJECTDATA pData;
  12746. UINT rc = 0;
  12747. DebugEntry(ObjectRelease);
  12748. if (pObj == NULL)
  12749. {
  12750. //
  12751. // If <pObj> is NULL, our caller wants us to release the first
  12752. // object in the objects-in-use list which is in the specified
  12753. // workset:
  12754. //
  12755. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pUsageRec->objectsInUse),
  12756. (void**)&pListEntry, FIELD_OFFSET(OM_OBJECT_LIST, chain),
  12757. FIELD_OFFSET(OM_OBJECT_LIST, worksetID), (DWORD)worksetID,
  12758. FIELD_SIZE(OM_OBJECT_LIST, worksetID));
  12759. }
  12760. else
  12761. {
  12762. //
  12763. // Otherwise, we do the lookup based on the object handle passed in:
  12764. //
  12765. // Note: since object handles are unique across worksets, we can just
  12766. // do a match on the handle. If the implementation of object handles
  12767. // changes and they become specific to a workset and not globally
  12768. // valid within a machine, we will need to do a double match here.
  12769. //
  12770. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pUsageRec->objectsInUse),
  12771. (void**)&pListEntry, FIELD_OFFSET(OM_OBJECT_LIST, chain),
  12772. FIELD_OFFSET(OM_OBJECT_LIST, pObj), (DWORD_PTR)pObj,
  12773. FIELD_SIZE(OM_OBJECT_LIST, pObj));
  12774. }
  12775. //
  12776. // If we didn't find a relevant list entry, set rc and quit:
  12777. //
  12778. if (pListEntry == NULL)
  12779. {
  12780. rc = OM_RC_OBJECT_NOT_FOUND;
  12781. DC_QUIT;
  12782. }
  12783. //
  12784. // Now set pObj (will be a no-op if it wasn't originally NULL):
  12785. //
  12786. ASSERT((pListEntry->worksetID == worksetID));
  12787. pObj = pListEntry->pObj;
  12788. ValidateObject(pObj);
  12789. pData = pObj->pData;
  12790. if (!pData)
  12791. {
  12792. ERROR_OUT(("ObjectRelease: object 0x%08x has no data", pObj));
  12793. }
  12794. else
  12795. {
  12796. ValidateObjectData(pData);
  12797. //
  12798. // Decrement use count of memory chunk holding object:
  12799. //
  12800. UT_FreeRefCount((void**)&pData, FALSE);
  12801. }
  12802. //
  12803. // Remove the entry for this object from the objects-in-use list:
  12804. //
  12805. COM_BasedListRemove(&(pListEntry->chain));
  12806. UT_FreeRefCount((void**)&pListEntry, FALSE);
  12807. DC_EXIT_POINT:
  12808. DebugExitDWORD(ObjectRelease, rc);
  12809. return(rc);
  12810. }
  12811. //
  12812. // WorksetClearPending(...)
  12813. //
  12814. BOOL WorksetClearPending
  12815. (
  12816. POM_WORKSET pWorkset,
  12817. POM_OBJECT pObj
  12818. )
  12819. {
  12820. POM_PENDING_OP pPendingOp;
  12821. BOOL rc = FALSE;
  12822. DebugEntry(WorksetClearPending);
  12823. //
  12824. // Try to find a pending workset clear for the given workset.
  12825. //
  12826. // N.B. We can't use FindPendingOp because we may want to check more
  12827. // than just the first pending workset clear.
  12828. //
  12829. pPendingOp = (POM_PENDING_OP)COM_BasedListFirst(&(pWorkset->pendingOps), FIELD_OFFSET(OM_PENDING_OP, chain));
  12830. while (pPendingOp != NULL)
  12831. {
  12832. if (pPendingOp->type == WORKSET_CLEAR)
  12833. {
  12834. ValidateObject(pObj);
  12835. //
  12836. // Check that this clear affects the given object
  12837. //
  12838. if (STAMP_IS_LOWER(pObj->addStamp, pPendingOp->seqStamp))
  12839. {
  12840. TRACE_OUT(("Clear pending which affects object 0x%08x", pObj));
  12841. rc = TRUE;
  12842. DC_QUIT;
  12843. }
  12844. else
  12845. {
  12846. TRACE_OUT(("Clear pending but doesn't affect object 0x%08x", pObj));
  12847. }
  12848. }
  12849. //
  12850. // On to the next pending op...
  12851. //
  12852. pPendingOp = (POM_PENDING_OP)COM_BasedListNext(&(pWorkset->pendingOps), pPendingOp, FIELD_OFFSET(OM_PENDING_OP, chain));
  12853. }
  12854. DC_EXIT_POINT:
  12855. DebugExitDWORD(WorksetClearPending, rc);
  12856. return(rc);
  12857. }
  12858. //
  12859. // ProcessWorksetNew(...)
  12860. //
  12861. UINT ProcessWorksetNew
  12862. (
  12863. PUT_CLIENT putClient,
  12864. POMNET_OPERATION_PKT pPacket,
  12865. POM_WSGROUP pWSGroup
  12866. )
  12867. {
  12868. POM_DOMAIN pDomain;
  12869. POM_WORKSET pWorkset;
  12870. OM_WORKSET_ID worksetID;
  12871. UINT rc = 0;
  12872. DebugEntry(ProcessWorksetNew);
  12873. worksetID = pPacket->worksetID;
  12874. TRACE_OUT(("Creating workset %u in WSG %d", worksetID, pWSGroup->wsg));
  12875. //
  12876. // Allocate some memory for the workset record:
  12877. //
  12878. pWorkset = (POM_WORKSET)UT_MallocRefCount(sizeof(OM_WORKSET), TRUE);
  12879. if (!pWorkset)
  12880. {
  12881. rc = UT_RC_NO_MEM;
  12882. DC_QUIT;
  12883. }
  12884. //
  12885. // Fill in the fields (this chunk is taken from a huge block so we have
  12886. // to set it to zero explicitly):
  12887. //
  12888. // Note: the <position> and <flags> fields of the packet hold a
  12889. // two-byte quantity representing the network priority for the workset.
  12890. //
  12891. SET_STAMP(pWorkset, WORKSET);
  12892. pWorkset->priority = *((NET_PRIORITY *) &(pPacket->position));
  12893. pWorkset->fTemp = *((BOOL *) &(pPacket->objectID));
  12894. pWorkset->worksetID = worksetID;
  12895. pWorkset->lockState = UNLOCKED;
  12896. pWorkset->lockedBy = 0;
  12897. pWorkset->lockCount = 0;
  12898. COM_BasedListInit(&(pWorkset->objects));
  12899. COM_BasedListInit(&(pWorkset->clients));
  12900. COM_BasedListInit(&(pWorkset->pendingOps));
  12901. if (pPacket->header.messageType == OMNET_WORKSET_CATCHUP)
  12902. {
  12903. //
  12904. // For a WORKSET_CATCHUP message, the <userID> field of the
  12905. // <seqStamp> field in the message holds the user ID of the node
  12906. // which holds the workset lock, if it is locked.
  12907. //
  12908. if (pPacket->seqStamp.userID != 0)
  12909. {
  12910. //
  12911. // If the <userID> field is the same as our user ID, then the
  12912. // remote node must think that we've got the workset locked -
  12913. // but we're just catching up, so something is wrong:
  12914. //
  12915. pDomain = pWSGroup->pDomain;
  12916. ASSERT((pPacket->seqStamp.userID != pDomain->userID));
  12917. pWorkset->lockState = LOCK_GRANTED;
  12918. pWorkset->lockedBy = pPacket->seqStamp.userID;
  12919. pWorkset->lockCount = 0;
  12920. TRACE_OUT(("Catching up with workset %u in WSG %d while locked by %hu",
  12921. worksetID, pWSGroup->wsg, pWorkset->lockedBy));
  12922. }
  12923. //
  12924. // In addition, the current generation number for the workset is
  12925. // held in the <genNumber> field of the <seqStamp> field in the
  12926. // message:
  12927. //
  12928. pWorkset->genNumber = pPacket->seqStamp.genNumber;
  12929. }
  12930. //
  12931. // Find the offset within OMWORKSETS of the workset record and put it
  12932. // in the array of offsets in the workset group record:
  12933. //
  12934. pWSGroup->apWorksets[worksetID] = pWorkset;
  12935. //
  12936. // Post a WORKSET_NEW event to all Clients registered with the workset
  12937. // group:
  12938. //
  12939. WSGroupEventPost(putClient,
  12940. pWSGroup,
  12941. PRIMARY | SECONDARY,
  12942. OM_WORKSET_NEW_IND,
  12943. worksetID,
  12944. 0);
  12945. TRACE_OUT(("Processed WORKSET_NEW for workset ID %hu in WSG %d",
  12946. worksetID, pWSGroup->wsg));
  12947. DC_EXIT_POINT:
  12948. if (rc != 0)
  12949. {
  12950. ERROR_OUT(("ERROR %d creating workset %u in workset group '%s'",
  12951. rc, worksetID, pWSGroup->wsg));
  12952. if (pWorkset != NULL)
  12953. {
  12954. UT_FreeRefCount((void**)&pWorkset, FALSE);
  12955. }
  12956. pWSGroup->apWorksets[worksetID] = NULL;
  12957. }
  12958. DebugExitDWORD(ProcessWorksetNew, rc);
  12959. return(rc);
  12960. }
  12961. //
  12962. // ProcessWorksetClear(...)
  12963. //
//
// Processes a WORKSET_CLEAR operation for <pWorkset>: records it as a
// pending op and posts OM_WORKSET_CLEAR_IND to local primary Clients; if
// no primary Client has the workset open (so no ClearConfirm will ever
// arrive), performs the clear immediately.  Returns 0 or UT_RC_NO_MEM.
//
// NOTE(review): <pomPrimary> is not referenced in this body - presumably
// kept for signature consistency with the other ProcessXxx handlers;
// confirm against callers before removing.
//
UINT ProcessWorksetClear
(
    PUT_CLIENT          putClient,
    POM_PRIMARY         pomPrimary,
    POMNET_OPERATION_PKT pPacket,
    POM_WSGROUP         pWSGroup,
    POM_WORKSET         pWorkset
)
{
    POM_PENDING_OP  pPendingOp = NULL;
    UINT            numPosts;
    UINT            rc = 0;

    DebugEntry(ProcessWorksetClear);

    //
    // Update the workset generation number:
    //
    UpdateWorksetGeneration(pWorkset, pPacket);

    //
    // See if this Clear operation can be spoiled (it will be spoiled if
    // another Clear operation with a later sequence stamp has already been
    // issued):
    //
    if (STAMP_IS_LOWER(pPacket->seqStamp, pWorkset->clearStamp))
    {
        TRACE_OUT(("Spoiling Clear with stamp 0x%08x:0x%08x ('previous': 0x%08x:0x%08x)",
            pPacket->seqStamp.userID, pPacket->seqStamp.genNumber,
            pWorkset->clearStamp.userID, pWorkset->clearStamp.genNumber));
        DC_QUIT;
    }

    //
    // Update the workset clear stamp:
    //
    COPY_SEQ_STAMP(pWorkset->clearStamp, pPacket->seqStamp);

    //
    // Now create a pending op CB to add to the list:
    //
    // Note: even if there is another Clear outstanding for the workset,
    // we go ahead and put this one in the list and post another event
    // to the Client.  If we didn't, then we would expose ourselves
    // to the following situation:
    //
    // 1.  Clear issued
    // 1a. Clear indication recd
    // 2.  Object added
    // 3.  Delete issued
    // 3a. Delete indication recd - not filtered because unaffected
    //     by pending clear
    // 4.  Clear issued again - "takes over" previous Clear
    // 5.  Clear confirmed - causes object added in 2 to be deleted
    // 6.  Delete confirmed - assert because the delete WAS affected
    //     by the second clear which "took over" earlier one.
    //
    // A Client can still cause an assert by juggling the events and
    // confirms, but we don't care because youo're not supposed to
    // reorder ObMan events in any case.
    //
    pPendingOp = (POM_PENDING_OP)UT_MallocRefCount(sizeof(OM_PENDING_OP), FALSE);
    if (!pPendingOp)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }

    // A clear applies to the whole workset, so there's no associated
    // object or object data.
    SET_STAMP(pPendingOp, PENDINGOP);
    pPendingOp->pObj = 0;
    pPendingOp->pData = NULL;
    pPendingOp->type = WORKSET_CLEAR;
    COPY_SEQ_STAMP(pPendingOp->seqStamp, pPacket->seqStamp);

    COM_BasedListInsertBefore(&(pWorkset->pendingOps), &(pPendingOp->chain));

    //
    // Post a workset clear indication event to the Client:
    //
    numPosts = WorksetEventPost(putClient,
        pWorkset,
        PRIMARY,
        OM_WORKSET_CLEAR_IND,
        0);

    //
    // If there are no primaries present, then we won't be getting any
    // ClearConfirms, so we do it now:
    //
    if (numPosts == 0)
    {
        TRACE_OUT(("No local primary Client has workset %u in WSG %d open - clearing",
            pWorkset->worksetID, pWSGroup->wsg));
        // WorksetDoClear consumes the pending op (removes it from the
        // list), matching what a Client's ClearConfirm would do.
        WorksetDoClear(putClient, pWSGroup, pWorkset, pPendingOp);
    }

    TRACE_OUT(("Processed WORKSET_CLEAR for workset %u in WSG %d",
        pWorkset->worksetID, pWSGroup->wsg));

DC_EXIT_POINT:
    if (rc != 0)
    {
        // Failure path: the only way here with a non-NULL pPendingOp is a
        // future error added after the insert, so free it defensively.
        ERROR_OUT(("ERROR %d processing clear for workset %u in WSG %d",
            rc, pWorkset->worksetID, pWSGroup->wsg));
        if (pPendingOp != NULL)
        {
            UT_FreeRefCount((void**)&pPendingOp, FALSE);
        }
    }

    DebugExitDWORD(ProcessWorksetClear, rc);
    return(rc);
}
  13065. //
  13066. // ProcessObjectAdd(...)
  13067. //
//
// Processes an object Add (OMNET_OBJECT_ADD or OMNET_OBJECT_CATCHUP):
// allocates an object record, stamps it from the packet, inserts it into
// <pWorkset>, and posts OM_OBJECT_ADD_IND to local Clients.  The new
// record is returned via <ppObj> (valid even on the spoiled/deleted
// paths).  Returns 0 or UT_RC_NO_MEM.
//
// NOTE(review): <pData> may be NULL only for a CATCHUP of a deleted
// object, in which case the DELETED flag from the packet makes us quit
// before the trace below dereferences pData - confirm senders always set
// DELETED when pData is absent.
//
UINT ProcessObjectAdd
(
    PUT_CLIENT              putTask,
    POMNET_OPERATION_PKT    pPacket,
    POM_WSGROUP             pWSGroup,
    POM_WORKSET             pWorkset,
    POM_OBJECTDATA          pData,
    POM_OBJECT *            ppObj
)
{
    POM_OBJECT      pObj;
    UINT            rc = 0;

    DebugEntry(ProcessObjectAdd);

    //
    // Update the workset generation number:
    //
    UpdateWorksetGeneration(pWorkset, pPacket);

    //
    // Create a new record for the object:
    //
    //
    // Allocate memory for the object record:
    //
    *ppObj = (POM_OBJECT)UT_MallocRefCount(sizeof(OM_OBJECT), FALSE);
    if (! *ppObj)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    pObj = *ppObj;

    //
    // Fill in the fields (remember, pData will be NULL if this is a
    // catchup for a deleted object):
    //
    SET_STAMP(pObj, OBJECT);
    pObj->updateSize = pPacket->updateSize;
    pObj->pData = pData;
    memcpy(&(pObj->objectID), &(pPacket->objectID), sizeof(OM_OBJECT_ID));

    //
    // How to set to the <flags> field and the sequence stamps depends on
    // whether this is a CATCHUP:
    //
    if (pPacket->header.messageType == OMNET_OBJECT_CATCHUP)
    {
        // Catchup: the helper node supplies each stamp individually, plus
        // the object's current flags (possibly including DELETED).
        COPY_SEQ_STAMP(pObj->addStamp, pPacket->seqStamp);
        COPY_SEQ_STAMP(pObj->positionStamp, pPacket->positionStamp);
        COPY_SEQ_STAMP(pObj->updateStamp, pPacket->updateStamp);
        COPY_SEQ_STAMP(pObj->replaceStamp, pPacket->replaceStamp);
        pObj->flags = pPacket->flags;
    }
    else
    {
        // Fresh add: every stamp starts at the add operation's stamp.
        COPY_SEQ_STAMP(pObj->addStamp, pPacket->seqStamp);
        COPY_SEQ_STAMP(pObj->positionStamp, pPacket->seqStamp);
        COPY_SEQ_STAMP(pObj->updateStamp, pPacket->seqStamp);
        COPY_SEQ_STAMP(pObj->replaceStamp, pPacket->seqStamp);
        pObj->flags = 0;
    }

    //
    // The following fields are not filled in since they are handled
    // by ObjectInsert, when the object is actually inserted into the
    // workset:
    //
    // - chain
    // - position
    //
    //
    // Insert the object into the workset:
    //
    ObjectInsert(pWorkset, pObj, pPacket->position);

    //
    // If the object has been deleted (which will only happen for a Catchup
    // of a deleted object), we don't need to do anything else, so just
    // quit:
    //
    if (pObj->flags & DELETED)
    {
        ASSERT((pPacket->header.messageType == OMNET_OBJECT_CATCHUP));
        TRACE_OUT(("Processing Catchup for deleted object (ID: 0x%08x:0x%08x)",
            pObj->objectID.creator, pObj->objectID.sequence));
        DC_QUIT;
    }

    //
    // Otherwise, we continue...
    //
    // Increment the numObjects field:
    //
    // (we don't do this inside ObjectInsert since that's called when moving
    // objects also)
    //
    pWorkset->numObjects++;

    TRACE_OUT(("Number of objects in workset %u in WSG %d is now %u",
        pWorkset->worksetID, pWSGroup->wsg, pWorkset->numObjects));

    //
    // See if this Add can be spoiled (it is spoilable if the workset has
    // been cleared since the Add was issued):
    //
    // Note: even if the Add is to be spoiled, we must create a record for
    // it and insert it in the workset, for the same reason that we keep
    // records of deleted objects in the workset (i.e. to differentiate
    // between operations which are for deleted objects and those which are
    // for objects not yet arrived).
    //
    if (STAMP_IS_LOWER(pPacket->seqStamp, pWorkset->clearStamp))
    {
        TRACE_OUT(("Spoiling Add with stamp 0x%08x:0x%08x (workset cleared at 0x%08x:0x%08x)",
            pPacket->seqStamp.userID, pPacket->seqStamp.genNumber,
            pWorkset->clearStamp.userID, pWorkset->clearStamp.genNumber));

        //
        // We "spoil" an Add by simply deleting it:
        //
        ObjectDoDelete(putTask, pWSGroup, pWorkset, pObj, NULL);
        DC_QUIT;
    }

    //
    // Post an add indication to all local Clients with the workset open:
    //
    WorksetEventPost(putTask,
        pWorkset,
        PRIMARY | SECONDARY,
        OM_OBJECT_ADD_IND,
        pObj);

    TRACE_OUT(("Added object to workset %u in WSG %d (handle: 0x%08x - ID: 0x%08x:0x%08x)",
        pWorkset->worksetID, pWSGroup->wsg, pObj,
        pObj->objectID.creator, pObj->objectID.sequence));
    TRACE_OUT((" position: %s - data at 0x%08x - size: %u - update size: %u",
        pPacket->position == LAST ? "LAST" : "FIRST", pData,
        pData->length, pPacket->updateSize));

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(("Error 0x%08x processing Add message", rc));
    }

    DebugExitDWORD(ProcessObjectAdd, rc);
    return(rc);
}
  13203. //
  13204. // ProcessObjectMove(...)
  13205. //
  13206. void ProcessObjectMove
  13207. (
  13208. PUT_CLIENT putTask,
  13209. POMNET_OPERATION_PKT pPacket,
  13210. POM_WORKSET pWorkset,
  13211. POM_OBJECT pObj
  13212. )
  13213. {
  13214. DebugEntry(ProcessObjectMove);
  13215. //
  13216. // Update the workset generation number:
  13217. //
  13218. UpdateWorksetGeneration(pWorkset, pPacket);
  13219. //
  13220. // See if we can spoil this move:
  13221. //
  13222. if (STAMP_IS_LOWER(pPacket->seqStamp, pObj->positionStamp))
  13223. {
  13224. TRACE_OUT(("Spoiling Move with stamp 0x%08x:0x%08x ('previous': 0x%08x:0x%08x)",
  13225. pPacket->seqStamp.userID,
  13226. pPacket->seqStamp.genNumber,
  13227. pObj->positionStamp.userID,
  13228. pObj->positionStamp.genNumber));
  13229. DC_QUIT;
  13230. }
  13231. //
  13232. // Moving an object in a workset involves
  13233. //
  13234. // 1. removing the object from its current position in the workset,
  13235. //
  13236. // 2. setting its position stamp to the new value
  13237. //
  13238. // 3. inserting it at its new position.
  13239. //
  13240. COM_BasedListRemove(&(pObj->chain));
  13241. COPY_SEQ_STAMP(pObj->positionStamp, pPacket->seqStamp);
  13242. ObjectInsert(pWorkset, pObj, pPacket->position);
  13243. //
  13244. // Post an indication to all local Clients with the workset open:
  13245. //
  13246. WorksetEventPost(putTask,
  13247. pWorkset,
  13248. PRIMARY | SECONDARY,
  13249. OM_OBJECT_MOVE_IND,
  13250. pObj);
  13251. DC_EXIT_POINT:
  13252. TRACE_OUT(("Moved object 0x%08x to %s of workset %u",
  13253. pObj, (pPacket->position == LAST ? "end" : "start"),
  13254. pWorkset->worksetID));
  13255. DebugExitVOID(ProcessObjectMove);
  13256. }
  13257. //
  13258. // ProcessObjectDRU(...)
  13259. //
  13260. UINT ProcessObjectDRU
  13261. (
  13262. PUT_CLIENT putTask,
  13263. POMNET_OPERATION_PKT pPacket,
  13264. POM_WSGROUP pWSGroup,
  13265. POM_WORKSET pWorkset,
  13266. POM_OBJECT pObj,
  13267. POM_OBJECTDATA pData
  13268. )
  13269. {
  13270. UINT numPosts;
  13271. POM_PENDING_OP pPendingOp = NULL;
  13272. POM_OBJECTDATA pPrevData;
  13273. UINT event = 0; // event to post to Client
  13274. OM_OPERATION_TYPE type = 0; // type for pendingOp struct
  13275. POM_SEQUENCE_STAMP pSeqStamp = NULL; // sequence stamp to update
  13276. void (* fnObjectDoAction)(PUT_CLIENT, POM_WSGROUP, POM_WORKSET,
  13277. POM_OBJECT,
  13278. POM_PENDING_OP) = NULL;
  13279. UINT rc = 0;
  13280. DebugEntry(ProcessObjectDRU);
  13281. //
  13282. // Set up the type variables:
  13283. //
  13284. switch (pPacket->header.messageType)
  13285. {
  13286. case OMNET_OBJECT_DELETE:
  13287. event = OM_OBJECT_DELETE_IND;
  13288. type = OBJECT_DELETE;
  13289. pSeqStamp = NULL;
  13290. fnObjectDoAction = ObjectDoDelete;
  13291. break;
  13292. case OMNET_OBJECT_REPLACE:
  13293. event = OM_OBJECT_REPLACE_IND;
  13294. type = OBJECT_REPLACE;
  13295. pSeqStamp = &(pObj->replaceStamp);
  13296. fnObjectDoAction = ObjectDoReplace;
  13297. break;
  13298. case OMNET_OBJECT_UPDATE:
  13299. event = OM_OBJECT_UPDATE_IND;
  13300. type = OBJECT_UPDATE;
  13301. pSeqStamp = &(pObj->updateStamp);
  13302. fnObjectDoAction = ObjectDoUpdate;
  13303. break;
  13304. default:
  13305. ERROR_OUT(("Reached default case in switch statement (value: %hu)",
  13306. pPacket->header.messageType));
  13307. break;
  13308. }
  13309. //
  13310. // Update the workset generation number:
  13311. //
  13312. UpdateWorksetGeneration(pWorkset, pPacket);
  13313. //
  13314. // Now do some spoiling checks, unless the object is a Delete (Deletes
  13315. // can't be spoiled):
  13316. //
  13317. if (type != OBJECT_DELETE)
  13318. {
  13319. ASSERT(((pSeqStamp != NULL) && (pData != NULL)));
  13320. //
  13321. // The first check is to see if this operation can be spoiled. It
  13322. // will be spoilable if the object has been updated/replaced since
  13323. // the operation took place. Since this function is called
  13324. // synchronously for a local Update/Replace, this will only event
  13325. // happen when a remote Update/Replace arrives "too late".
  13326. //
  13327. // The way we check is to compare the current stamp for the object
  13328. // with the stamp for the operation:
  13329. //
  13330. if (STAMP_IS_LOWER(pPacket->seqStamp, *pSeqStamp))
  13331. {
  13332. TRACE_OUT(("Spoiling with stamp 0x%08x:0x%08x ('previous': 0x%08x:0x%08x)",
  13333. pPacket->seqStamp.userID, pPacket->seqStamp.genNumber,
  13334. (*pSeqStamp).userID, (*pSeqStamp).genNumber));
  13335. UT_FreeRefCount((void**)&pData, FALSE);
  13336. DC_QUIT;
  13337. }
  13338. //
  13339. // Update whichever of the object's stamps is involved by copying
  13340. // in the stamp from the packet:
  13341. //
  13342. COPY_SEQ_STAMP(*pSeqStamp, pPacket->seqStamp);
  13343. //
  13344. // The second check is to see if this operation spoils a previous
  13345. // one. This will happen when a Client does two updates or two
  13346. // replaces in quick succession i.e. does the second
  13347. // update/replace before confirming the first.
  13348. //
  13349. // In this case, we "spoil" the previous operation by removing the
  13350. // previous pending op from the pending op list and inserting this
  13351. // one instead. Note that we do NOT post another event, as to do
  13352. // so without adding net a new pending op would cause the Client to
  13353. // assert on its second call to Confirm().
  13354. //
  13355. // Note: although in general a Replace will spoil a previous
  13356. // Update, it cannot do so in this case because if there is
  13357. // an Update outstanding, the Client will call UpdateConfirm
  13358. // so we must leave the Update pending and post a Replace
  13359. // event also.
  13360. //
  13361. FindPendingOp(pWorkset, pObj, type, &pPendingOp);
  13362. if (pPendingOp != NULL)
  13363. {
  13364. //
  13365. // OK, there is an operation of this type already outstanding
  13366. // for this object. So, we change the entry in the pending op
  13367. // list to refer to this operation instead. Before doing so,
  13368. // however, we must free up the chunk holding the previous
  13369. // (superceded) update/replace:
  13370. //
  13371. pPrevData = pPendingOp->pData;
  13372. if (pPrevData != NULL)
  13373. {
  13374. UT_FreeRefCount((void**)&pPrevData, FALSE);
  13375. }
  13376. //
  13377. // Now put the reference to the new update/replace in the
  13378. // pending op:
  13379. //
  13380. pPendingOp->pData = pData;
  13381. COPY_SEQ_STAMP(pPendingOp->seqStamp, pPacket->seqStamp);
  13382. //
  13383. // The rest of this function inserts the pending op in the
  13384. // list, posts an event to local Client and performs the op if
  13385. // there are none. We know that
  13386. //
  13387. // - the op is in the list
  13388. //
  13389. // - there is an event outstanding because we found a pending
  13390. // op in the list
  13391. //
  13392. // - there are local Clients, for the same reason.
  13393. //
  13394. // Therefore, just quit:
  13395. //
  13396. DC_QUIT;
  13397. }
  13398. else
  13399. {
  13400. //
  13401. // No outstanding operation of this type for this object, so do
  13402. // nothing here and fall through to the standard processing:
  13403. //
  13404. }
  13405. }
  13406. else
  13407. {
  13408. //
  13409. // Sanity check:
  13410. //
  13411. ASSERT((pData == NULL));
  13412. pObj->flags |= PENDING_DELETE;
  13413. }
  13414. //
  13415. // Add this operation to the workset's pending operation list:
  13416. //
  13417. pPendingOp = (POM_PENDING_OP)UT_MallocRefCount(sizeof(OM_PENDING_OP), FALSE);
  13418. if (!pPendingOp)
  13419. {
  13420. rc = UT_RC_NO_MEM;
  13421. DC_QUIT;
  13422. }
  13423. SET_STAMP(pPendingOp, PENDINGOP);
  13424. pPendingOp->type = type;
  13425. pPendingOp->pData = pData;
  13426. pPendingOp->pObj = pObj;
  13427. COPY_SEQ_STAMP(pPendingOp->seqStamp, pPacket->seqStamp);
  13428. TRACE_OUT(("Inserting %d in pending op list for workset %u", type,
  13429. pWorkset->worksetID));
  13430. COM_BasedListInsertBefore(&(pWorkset->pendingOps), &(pPendingOp->chain));
  13431. //
  13432. // Post an indication to all local Clients with the workset open:
  13433. //
  13434. numPosts = WorksetEventPost(putTask,
  13435. pWorkset,
  13436. PRIMARY,
  13437. event,
  13438. pObj);
  13439. //
  13440. // If no one has the workset open, we won't be getting any
  13441. // DeleteConfirms, so we'd better do the delete straight away:
  13442. //
  13443. if (numPosts == 0)
  13444. {
  13445. TRACE_OUT(("Workset %hu in WSG %d not open: performing %d immediately",
  13446. pWorkset->worksetID, pWSGroup->wsg, type));
  13447. fnObjectDoAction(putTask, pWSGroup, pWorkset, pObj, pPendingOp);
  13448. }
  13449. TRACE_OUT(("Processed %d message for object 0x%08x in workset %u in WSG %d",
  13450. type, pObj, pWorkset->worksetID, pWSGroup->wsg));
  13451. DC_EXIT_POINT:
  13452. if (rc != 0)
  13453. {
  13454. //
  13455. // Cleanup:
  13456. //
  13457. ERROR_OUT(("ERROR %d processing WSG %d message", rc, pWSGroup->wsg));
  13458. if (pPendingOp != NULL)
  13459. {
  13460. UT_FreeRefCount((void**)&pPendingOp, FALSE);
  13461. }
  13462. }
  13463. DebugExitDWORD(ProcessObjectDRU, rc);
  13464. return(rc);
  13465. }
  13466. //
  13467. // ObjectInsert(...)
  13468. //
  13469. void ObjectInsert
  13470. (
  13471. POM_WORKSET pWorkset,
  13472. POM_OBJECT pObj,
  13473. OM_POSITION position
  13474. )
  13475. {
  13476. POM_OBJECT pObjTemp;
  13477. PBASEDLIST pChain;
  13478. DebugEntry(ObjectInsert);
  13479. //
  13480. // The algorithm for inserting an object at the start (end) of a workset
  13481. // is as follows:
  13482. //
  13483. // - search forward (back) from the first (last) object until one of the
  13484. // following happens:
  13485. //
  13486. // - we find an object which does not have FIRST (LAST) as a position
  13487. // stamp
  13488. //
  13489. // - we find an object which has a lower (lower) position stamp.
  13490. //
  13491. // - we reach the root of the list of objects in the workset
  13492. //
  13493. // - insert the new object before (after) this object.
  13494. //
  13495. switch (position)
  13496. {
  13497. case FIRST:
  13498. {
  13499. pObjTemp = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  13500. while (pObjTemp != NULL)
  13501. {
  13502. ValidateObject(pObjTemp);
  13503. if ((pObjTemp->position != position) ||
  13504. (STAMP_IS_LOWER(pObjTemp->positionStamp,
  13505. pObj->positionStamp)))
  13506. {
  13507. break;
  13508. }
  13509. pObjTemp = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObjTemp, FIELD_OFFSET(OM_OBJECT, chain));
  13510. }
  13511. break;
  13512. }
  13513. case LAST:
  13514. {
  13515. pObjTemp = (POM_OBJECT)COM_BasedListLast(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  13516. while (pObjTemp != NULL)
  13517. {
  13518. ValidateObject(pObjTemp);
  13519. if ((pObjTemp->position != position) ||
  13520. (STAMP_IS_LOWER(pObjTemp->positionStamp,
  13521. pObj->positionStamp)))
  13522. {
  13523. break;
  13524. }
  13525. pObjTemp = (POM_OBJECT)COM_BasedListPrev(&(pWorkset->objects), pObjTemp, FIELD_OFFSET(OM_OBJECT, chain));
  13526. }
  13527. break;
  13528. }
  13529. default:
  13530. {
  13531. ERROR_OUT(("Reached default case in switch (position: %hu)", position));
  13532. break;
  13533. }
  13534. }
  13535. //
  13536. // OK, we've found the correct position for the object. If we reached
  13537. // the end (start) of the workset, then we want to insert the object
  13538. // before (after) the root, so we set up pChain accordingly:
  13539. //
  13540. if (pObjTemp == NULL)
  13541. {
  13542. pChain = &(pWorkset->objects);
  13543. TRACE_OUT(("Inserting object into workset %u as the %s object",
  13544. pWorkset->worksetID, position == LAST ? "last" : "first"));
  13545. }
  13546. else
  13547. {
  13548. pChain = &(pObjTemp->chain);
  13549. TRACE_OUT(("Inserting object into workset %u %s object "
  13550. "with record at 0x%08x (position stamp: 0x%08x:0x%08x)",
  13551. pWorkset->worksetID,
  13552. (position == LAST ? "after" : "before"),
  13553. pObjTemp, pObjTemp->objectID.creator,
  13554. pObjTemp->objectID.sequence));
  13555. }
  13556. //
  13557. // Now insert the object, either before or after the position we
  13558. // determined above:
  13559. //
  13560. if (position == FIRST)
  13561. {
  13562. COM_BasedListInsertBefore(pChain, &(pObj->chain));
  13563. }
  13564. else
  13565. {
  13566. COM_BasedListInsertAfter(pChain, &(pObj->chain));
  13567. }
  13568. pObj->position = position;
  13569. //
  13570. // Now do a debug-only check to ensure correct order of objects:
  13571. //
  13572. CheckObjectOrder(pWorkset);
  13573. DebugExitVOID(ObjectInsert);
  13574. }
  13575. //
  13576. // ObjectDoDelete(...)
  13577. //
//
// ObjectDoDelete
//
// Carries out the deletion of <pObj> from <pWorkset>: frees the object's
// data chunk, marks the record DELETED, removes/frees the pending op (if
// supplied) and posts OM_OBJECT_DELETED_IND to secondary Clients.  In the
// local Domain the object record itself is also freed.  Finally the
// workset's object count is decremented.
//
// pPendingOp may be NULL (e.g. when called from WorksetDoClear, whose
// deletes were never "pending"); in that case no pending-op cleanup and
// no secondary notification is done.
//
void ObjectDoDelete
(
    PUT_CLIENT          putTask,        // task on whose behalf events are posted
    POM_WSGROUP         pWSGroup,       // workset group owning the workset
    POM_WORKSET         pWorkset,       // workset containing the object
    POM_OBJECT          pObj,           // object record to delete
    POM_PENDING_OP      pPendingOp      // pending DELETE op, or NULL
)
{
    POM_DOMAIN          pDomain;

    DebugEntry(ObjectDoDelete);

    //
    // We should never be called for an object that's already been deleted:
    //
    ValidateObject(pObj);
    ASSERT(!(pObj->flags & DELETED));

    //
    // Derive a pointer to the object itself and then free it:
    //
    if (!pObj->pData)
    {
        ERROR_OUT(("ObjectDoDelete: object 0x%08x has no data", pObj));
    }
    else
    {
        ValidateObjectData(pObj->pData);
        UT_FreeRefCount((void**)&pObj->pData, FALSE);
    }

    //
    // Set the deleted flag in the object record:
    //
    // (note that we don't delete the object record entirely as we need to
    // keep track of deleted objects so that when we get operations from the
    // network for objects not in the workset, we can differentiate between
    // operations on objects
    //
    // - that haven't yet been added at this node (we keep these operations
    // and perform them later) and
    //
    // - that have been deleted (we throw these operations away).
    //
    // A slight space optimisation would be to store the IDs of deleted
    // objects in a separate list, since we don't need any of the other
    // fields in the record.
    //
    pObj->flags |= DELETED;
    pObj->flags &= ~PENDING_DELETE;

    //
    // Remove the pending op from the list, if the pointer passed in is
    // valid (it won't be if we're called from WorksetDoClear, since those
    // deletes have not been "pending").
    //
    // In addition, if pPendingOp is not NULL, we post the DELETED event to
    // registered secondaries:
    //
    if (pPendingOp != NULL)
    {
        COM_BasedListRemove(&(pPendingOp->chain));
        UT_FreeRefCount((void**)&pPendingOp, FALSE);

        WorksetEventPost(putTask,
            pWorkset,
            SECONDARY,
            OM_OBJECT_DELETED_IND,
            pObj);
    }

    //
    // If we are in the local domain, we can safely delete the object rec:
    //
    // (no remote node can still refer to it by ID, so the tombstone kept
    // in the networked case is not needed here)
    //
    pDomain = pWSGroup->pDomain;
    if (pDomain->callID == OM_NO_CALL)
    {
        TRACE_OUT(("Freeing pObj at 0x%08x", pObj));
        ValidateObject(pObj);
        COM_BasedListRemove(&(pObj->chain));
        UT_FreeRefCount((void**)&pObj, FALSE);
    }

    //
    // Decrement the number of objects in the workset:
    //
    // (pObj may have just been freed above; only pWorkset is touched here)
    //
    ASSERT(pWorkset->numObjects > 0);
    pWorkset->numObjects--;

    DebugExitVOID(ObjectDoDelete);
}
  13661. //
  13662. // ObjectDoReplace(...)
  13663. //
  13664. void ObjectDoReplace
  13665. (
  13666. PUT_CLIENT putTask,
  13667. POM_WSGROUP pWSGroup,
  13668. POM_WORKSET pWorkset,
  13669. POM_OBJECT pObj,
  13670. POM_PENDING_OP pPendingOp
  13671. )
  13672. {
  13673. POM_OBJECTDATA pDataNew;
  13674. POM_OBJECTDATA pDataOld;
  13675. UINT rc = 0;
  13676. DebugEntry(ObjectDoReplace);
  13677. ValidateObject(pObj);
  13678. //
  13679. // If the object has already been deleted for whatever reason, quit:
  13680. //
  13681. if (pObj->flags & DELETED)
  13682. {
  13683. WARNING_OUT(("Asked to do replace for deleted object 0x%08x!", pObj));
  13684. DC_QUIT;
  13685. }
  13686. //
  13687. // Set up some local variables:
  13688. //
  13689. pDataOld = pObj->pData;
  13690. pDataNew = pPendingOp->pData;
  13691. ValidateObjectData(pDataNew);
  13692. pObj->pData = pDataNew;
  13693. //
  13694. // If this object has been updated since this replace was issued, we
  13695. // must ensure that the replace doesn't overwrite the "later" update:
  13696. //
  13697. // Initial object at t=1 AAAAAA
  13698. // Object updated (two bytes) at t=3;
  13699. // Object becomes: CCAAAA
  13700. //
  13701. // Object replaced at t=2: BBBB
  13702. // Must now re-enact the update: CCBB
  13703. //
  13704. // Therefore, if the update stamp for the object is later than the stamp
  13705. // of the replace instruction, we copy the first N bytes back over the
  13706. // new object, where N is the size of the last update:
  13707. //
  13708. if (STAMP_IS_LOWER(pPendingOp->seqStamp, pObj->updateStamp))
  13709. {
  13710. ASSERT((pDataNew->length >= pObj->updateSize));
  13711. memcpy(&(pDataNew->data), &(pDataOld->data), pObj->updateSize);
  13712. }
  13713. TRACE_OUT(("Replacing object 0x%08x with data at 0x%08x (old data at 0x%08x)",
  13714. pObj, pDataNew, pDataOld));
  13715. //
  13716. // We also need to free up the chunk holding the old object:
  13717. //
  13718. if (!pDataOld)
  13719. {
  13720. ERROR_OUT(("ObjectDoReplace: object 0x%08x has no data", pObj));
  13721. }
  13722. else
  13723. {
  13724. UT_FreeRefCount((void**)&pDataOld, FALSE);
  13725. }
  13726. //
  13727. // Now that we've replaced the object, post a REPLACED event to all
  13728. // secondaries:
  13729. //
  13730. WorksetEventPost(putTask,
  13731. pWorkset,
  13732. SECONDARY,
  13733. OM_OBJECT_REPLACED_IND,
  13734. pObj);
  13735. DC_EXIT_POINT:
  13736. //
  13737. // We've either done the replace or abandoned it because the object has
  13738. // been deleted; either way, free up the entry in the pending op list:
  13739. //
  13740. COM_BasedListRemove(&(pPendingOp->chain));
  13741. UT_FreeRefCount((void**)&pPendingOp, FALSE);
  13742. DebugExitVOID(ObjectDoReplace);
  13743. }
  13744. //
  13745. // ObjectDoUpdate(...)
  13746. //
//
// ObjectDoUpdate
//
// Carries out a (previously pending) Update on <pObj>: copies the update
// bytes over the start of the object's existing data, frees the update
// data chunk and posts OM_OBJECT_UPDATED_IND to secondary Clients.  The
// pending op entry is removed from the workset's list and freed on all
// paths.
//
void ObjectDoUpdate
(
    PUT_CLIENT          putTask,        // task on whose behalf events are posted
    POM_WSGROUP         pWSGroup,       // workset group owning the workset
    POM_WORKSET         pWorkset,       // workset containing the object
    POM_OBJECT          pObj,           // object record being updated
    POM_PENDING_OP      pPendingOp      // pending UPDATE op (freed on exit)
)
{
    POM_OBJECTDATA      pDataNew;
    UINT                rc = 0;

    DebugEntry(ObjectDoUpdate);

    ValidateObject(pObj);

    //
    // If the object has already been deleted for whatever reason, quit:
    //
    if (pObj->flags & DELETED)
    {
        WARNING_OUT(("Asked to do update for deleted object 0x%08x!", pObj));
        DC_QUIT;
    }

    pDataNew = pPendingOp->pData;

    if (!pObj->pData)
    {
        ERROR_OUT(("ObjectDoUpdate: object 0x%08x has no data", pObj));
    }
    else
    {
        ValidateObjectData(pObj->pData);

        //
        // Updating an object involves copying <length> bytes from the <data>
        // field of the update over the start of the <data> field of the
        // existing object:
        //
        // NOTE(review): assumes pDataNew->length does not exceed the
        // allocated length of pObj->pData->data - no bounds check here;
        // presumably guaranteed by the update protocol.  TODO confirm.
        //
        memcpy(&(pObj->pData->data), &(pDataNew->data), pDataNew->length);
    }

    // Free the update data chunk; the object keeps its own (patched) data.
    UT_FreeRefCount((void**)&pDataNew, FALSE);

    //
    // Now that we've updated the object, post an UPDATED event to all
    // secondaries:
    //
    WorksetEventPost(putTask,
        pWorkset,
        SECONDARY,
        OM_OBJECT_UPDATED_IND,
        pObj);

DC_EXIT_POINT:
    //
    // We've done the update, so free up the entry in the pending op list:
    //
    COM_BasedListRemove(&(pPendingOp->chain));
    UT_FreeRefCount((void**)&pPendingOp, FALSE);

    DebugExitVOID(ObjectDoUpdate);
}
  13801. //
  13802. // ObjectIDToPtr(...)
  13803. //
  13804. UINT ObjectIDToPtr
  13805. (
  13806. POM_WORKSET pWorkset,
  13807. OM_OBJECT_ID objectID,
  13808. POM_OBJECT * ppObj
  13809. )
  13810. {
  13811. POM_OBJECT pObj;
  13812. UINT rc = 0;
  13813. DebugEntry(ObjectIDToPtr);
  13814. //
  13815. // To find the handle, we chain through each of the object records in
  13816. // the workset and compare the id of each one with the required ID:
  13817. //
  13818. TRACE_OUT(("About to search object records looking for ID 0x%08x:0x%08x",
  13819. objectID.creator, objectID.sequence));
  13820. ValidateWorkset(pWorkset);
  13821. pObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  13822. while (pObj != NULL)
  13823. {
  13824. ValidateObject(pObj);
  13825. TRACE_OUT(("Comparing against object at 0x%08x (ID: 0x%08x:0x%08x)",
  13826. pObj,
  13827. pObj->objectID.creator,
  13828. pObj->objectID.sequence));
  13829. if (OBJECT_IDS_ARE_EQUAL(pObj->objectID, objectID))
  13830. {
  13831. break;
  13832. }
  13833. pObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  13834. }
  13835. //
  13836. // If object record not found, warn:
  13837. //
  13838. if (pObj == NULL)
  13839. {
  13840. TRACE_OUT(("Object with ID 0x%08x:0x%08x not found",
  13841. objectID.creator, objectID.sequence));
  13842. rc = OM_RC_BAD_OBJECT_ID;
  13843. DC_QUIT;
  13844. }
  13845. *ppObj = pObj;
  13846. //
  13847. // If object record found but object deleted or pending delete, warn:
  13848. //
  13849. if (pObj->flags & DELETED)
  13850. {
  13851. TRACE_OUT(("Object record found (handle: 0x%08x) for ID 0x%08x:0x%08x "
  13852. "but object deleted",
  13853. *ppObj, objectID.creator, objectID.sequence));
  13854. rc = OM_RC_OBJECT_DELETED;
  13855. DC_QUIT;
  13856. }
  13857. if (pObj->flags & PENDING_DELETE)
  13858. {
  13859. TRACE_OUT(("Object record found (handle: 0x%08x) for ID 0x%08x:0x%08x "
  13860. "but object pending delete",
  13861. *ppObj, objectID.creator, objectID.sequence));
  13862. rc = OM_RC_OBJECT_PENDING_DELETE;
  13863. DC_QUIT;
  13864. }
  13865. DC_EXIT_POINT:
  13866. DebugExitDWORD(ObjectIDToPtr, rc);
  13867. return(rc);
  13868. }
  13869. //
  13870. // GenerateOpMessage(...)
  13871. //
//
// GenerateOpMessage
//
// Allocates and fills in an OMNET_OPERATION_PKT for <messageType>,
// returning it via *ppPacket.  Fields common to all operation messages are
// set here: sender, message type, total size (header plus any data bytes),
// workset group/workset IDs, sequence stamp and object ID.  Returns 0 on
// success or UT_RC_NO_MEM.
//
// The packet is allocated with UT_MallocRefCount; ownership passes to the
// caller.  <position>, <flags> and <updateSize> are deliberately NOT set
// here (see comment near the end) - callers that use them must fill them
// in themselves.
//
UINT GenerateOpMessage
(
    POM_WSGROUP             pWSGroup,       // workset group the op relates to
    OM_WORKSET_ID           worksetID,      // target workset within the group
    POM_OBJECT_ID           pObjectID,      // object ID, or NULL for workset ops
    POM_OBJECTDATA          pData,          // associated object data, or NULL
    OMNET_MESSAGE_TYPE      messageType,    // OMNET_* message type
    POMNET_OPERATION_PKT *  ppPacket        // OUT: newly built packet
)
{
    POMNET_OPERATION_PKT    pPacket;
    POM_DOMAIN              pDomain;
    POM_WORKSET             pWorkset = NULL;
    UINT                    rc = 0;

    DebugEntry(GenerateOpMessage);

    //
    // Set up Domain record pointer:
    //
    pDomain = pWSGroup->pDomain;

    TRACE_OUT(("Generating message for operation type 0x%08x", messageType));

    //
    // Allocate some memory for the packet:
    //
    pPacket = (POMNET_OPERATION_PKT)UT_MallocRefCount(sizeof(OMNET_OPERATION_PKT), TRUE);
    if (!pPacket)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }

    //
    // Here, we fill in the fields common to all types of messages:
    //
    pPacket->header.sender = pDomain->userID;
    pPacket->header.messageType = messageType;

    //
    // The <totalSize> field is the number of bytes in the header packet
    // plus the number of associated data bytes, if any. For the moment, we
    // set it to the size of the header only; we'll add the size of the data
    // later:
    //
    pPacket->totalSize = GetMessageSize(messageType);
    pPacket->wsGroupID = pWSGroup->wsGroupID;
    pPacket->worksetID = worksetID;

    //
    // If this is a WorksetNew operation, there will be no workset yet and
    // thus no valid sequence stamp, so we use a null sequence stamp.
    // Otherwise, we take the value from the workset.
    //
    if (messageType == OMNET_WORKSET_NEW)
    {
        SET_NULL_SEQ_STAMP(pPacket->seqStamp);
    }
    else
    {
        pWorkset = pWSGroup->apWorksets[worksetID];
        ASSERT((pWorkset != NULL));
        GET_CURR_SEQ_STAMP(pPacket->seqStamp, pDomain, pWorkset);
    }

    //
    // If this is a workset operation, <pObjectID> will be NULL, so we set
    // the object ID in the packet to NULL also:
    //
    if (pObjectID == NULL)
    {
        ZeroMemory(&(pPacket->objectID), sizeof(OM_OBJECT_ID));
    }
    else
    {
        memcpy(&(pPacket->objectID), pObjectID, sizeof(OM_OBJECT_ID));
    }

    //
    // If this message is associated with object data, we must add the size
    // of this data (including the size of the <length> field itself). The
    // test for this is if the <pData> parameter is not NULL:
    //
    if (pData != NULL)
    {
        pPacket->totalSize += pData->length + sizeof(pData->length);
    }

    //
    // For a WORKSET_CATCHUP message, we need to let the other node know if
    // the workset is locked and if so, by whom:
    //
    // NOTE: CATCHUP overloads several packet fields (see the casts below);
    // the receiver must unpack them the same way.
    //
    if (messageType == OMNET_WORKSET_CATCHUP)
    {
        //
        // pWorkset should have been set up above:
        //
        ASSERT((pWorkset != NULL));

        //
        // Put the ID of the node which owns the workset lock in the <userID>
        // field of the <seqStamp> field of the packet:
        //
        pPacket->seqStamp.userID = pWorkset->lockedBy;
        TRACE_OUT(("Set <lockedBy> field in WORKSET_CATCHUP to %hu",
            pWorkset->lockedBy));

        //
        // Now we put the current generation number for the workset in the
        // <genNumber> field of the <seqStamp> field of the packet:
        //
        pPacket->seqStamp.genNumber = pWorkset->genNumber;
        TRACE_OUT(("Set generation number field in WORKSET_CATCHUP to %u",
            pPacket->seqStamp.genNumber));

        //
        // Fill in the priority value for the workset, which goes in the two
        // bytes occupied by the <position> and <flags> fields:
        //
        *((NET_PRIORITY *) &(pPacket->position)) = pWorkset->priority;

        // The (otherwise unused) objectID bytes carry the fTemp flag:
        *((BOOL *) &(pPacket->objectID)) = pWorkset->fTemp;
    }

    //
    // We do not fill in the following fields:
    //
    // position
    // flags
    // updateSize
    //
    // This is because these are used only in a minority of messages and to
    // add the extra parameters to the GenerateOpMessage function seemed
    // undesirable. Messages where these fields are used should be filled
    // in by the calling function as appropriate.
    //

    //
    // Set the caller's pointer:
    //
    *ppPacket = pPacket;

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(("ERROR %d generating message of type 0x%08x",
            rc, messageType));
    }

    DebugExitDWORD(GenerateOpMessage, rc);
    return(rc);
}
  14007. //
  14008. // QueueMessage(...)
  14009. //
//
// QueueMessage
//
// Builds an OM_SEND_INST for <pMessage> (plus optional object data) and
// appends it to the Domain's send queue for the chosen priority, kicking
// the ObMan task with an OMINT_EVENT_SEND_QUEUE event if one isn't already
// outstanding.  In the local Domain the message is simply freed (nothing
// to transmit).  Returns 0 on success or UT_RC_NO_MEM.
//
// Ownership/ref-counting: on success the send instruction holds the only
// reference to <pMessage> (its count is NOT bumped - see SFR 5488 below),
// and bumped references to pWSGroup/pWorkset/pObj/pData as supplied.  On
// failure everything bumped is released again and <pMessage> is freed.
//
UINT QueueMessage
(
    PUT_CLIENT          putTask,        // task posting the SEND_QUEUE event
    POM_DOMAIN          pDomain,        // Domain whose send queue is used
    NET_CHANNEL_ID      channelID,      // channel to send on
    NET_PRIORITY        priority,       // priority, or OM_OBMAN_CHOOSES_PRIORITY
    POM_WSGROUP         pWSGroup,       // optional: workset group (ref bumped)
    POM_WORKSET         pWorkset,       // optional: workset (ref bumped)
    POM_OBJECT          pObj,           // optional: object record (ref bumped)
    POMNET_PKT_HEADER   pMessage,       // message header (ownership taken)
    POM_OBJECTDATA      pData,          // optional: object data (ref bumped)
    BOOL                compressOrNot   // whether the send may be compressed
)
{
    POM_SEND_INST       pSendInst;
    NET_PRIORITY        queuePriority;
    BOOL                locked = FALSE;     // NOTE(review): never used below
    BOOL                bumped = FALSE;
    UINT                rc = 0;

    DebugEntry(QueueMessage);

    //
    // If this is the local Domain, we don't put the op on the send queue;
    // just free the packet and quit:
    //
    if (pDomain->callID == NET_INVALID_DOMAIN_ID)
    {
        TRACE_OUT(("Not queueing message (it's for the local Domain)"));
        UT_FreeRefCount((void**)&pMessage, FALSE);
        DC_QUIT;
    }

    //
    // Allocate some memory in OMGLOBAL for the send instruction:
    //
    pSendInst = (POM_SEND_INST)UT_MallocRefCount(sizeof(OM_SEND_INST), TRUE);
    if (!pSendInst)
    {
        rc = UT_RC_NO_MEM;
        DC_QUIT;
    }
    SET_STAMP(pSendInst, SENDINST);

    //
    // Fill in the fields of the send instruction:
    //
    pSendInst->messageSize = (WORD)GetMessageSize(pMessage->messageType);
    DeterminePriority(&priority, pData);
    pSendInst->priority = priority;
    pSendInst->callID = pDomain->callID;
    pSendInst->channel = channelID;
    pSendInst->messageType = pMessage->messageType;
    pSendInst->compressOrNot = compressOrNot;

    //
    // Now calculate the relevant offsets so we can add them to the ObMan
    // base pointers:
    //
    // SFR 2560 { : bump use counts of all non-zero pointers, not just pData
    //
    if (pMessage != NULL)
    {
        pSendInst->pMessage = pMessage;
        //
        // SFR 5488 { : No! Don't bump use count of pMessage - we're the
        // only people using it now so we don't need to. }
        //
    }

    if (pWSGroup != NULL)
    {
        UT_BumpUpRefCount(pWSGroup);
        pSendInst->pWSGroup = pWSGroup;
    }

    if (pWorkset != NULL)
    {
        UT_BumpUpRefCount(pWorkset);
        pSendInst->pWorkset = pWorkset;
    }

    if (pObj != NULL)
    {
        UT_BumpUpRefCount(pObj);
        pSendInst->pObj = pObj;
    }

    if (pData != NULL)
    {
        UT_BumpUpRefCount(pData);
        pSendInst->pDataStart = pData;
        pSendInst->pDataNext = pData;

        //
        // In addition, we set up some send instruction fields which are
        // specific to operations which involve object data:
        //
        pSendInst->dataLeftToGo = pData->length + sizeof(pData->length);

        //
        // Increment the <bytesUnacked> fields in the workset and workset
        // group:
        //
        pWorkset->bytesUnacked += pSendInst->dataLeftToGo;
        pWSGroup->bytesUnacked += pSendInst->dataLeftToGo;

        TRACE_OUT(("Bytes unacked for workset %u in WSG %d now %u "
            "(for wsGroup: %u)", pWorkset->worksetID, pWSGroup->wsg,
            pWorkset->bytesUnacked, pWSGroup->bytesUnacked));
    }

    //
    // Set a flag so we can clean up a bit better on error:
    //
    // (there are no failure exits after this point, so the cleanup code
    // below can safely release everything bumped above when bumped==TRUE)
    //
    bumped = TRUE;

    //
    // Unless there's a send event outstanding, post an event to the ObMan
    // task prompting it to examine the send queue. Providing we have
    // received a Net Attach indication.
    //
    if ( !pDomain->sendEventOutstanding &&
        (pDomain->state > PENDING_ATTACH) )
    {
        TRACE_OUT(("No send event outstanding - posting SEND_QUEUE event"));

        //
        // Bump up the use count of the Domain record (since we're passing it
        // in an event):
        //
        UT_BumpUpRefCount(pDomain);

        //
        // NFC - we used to pass the pDomain pointer as param2 in this
        // event, but the event may get processed in a different process
        // where the pointer is no longer valid, so pass the offset instead.
        //
        ValidateOMP(g_pomPrimary);
        UT_PostEvent(putTask,
            g_pomPrimary->putTask,
            0, // no delay
            OMINT_EVENT_SEND_QUEUE,
            0,
            (UINT_PTR)pDomain);

        pDomain->sendEventOutstanding = TRUE;
    }
    else
    {
        TRACE_OUT(("Send event outstanding/state %u: not posting SEND_QUEUE event",
            pDomain->state));
    }

    //
    // Place the event at the end of the relevant send queue. This depends
    // on priority - but remember, the priority value passed in might have
    // the NET_SEND_ALL_PRIORITIES flag set - so clear it when determining
    // the queue.
    //
    // NB: Do this after any possible DC-QUIT so we're not left with a
    // NULL entry in the list.
    //
    queuePriority = priority;
    queuePriority &= ~NET_SEND_ALL_PRIORITIES;
    COM_BasedListInsertBefore(&(pDomain->sendQueue[queuePriority]),
        &(pSendInst->chain));

    TRACE_OUT((" Queued instruction (type: 0x%08x) at priority %hu "
        "on channel 0x%08x in Domain %u",
        pMessage->messageType, priority, channelID, pDomain->callID));

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // Cleanup:
        //
        ERROR_OUT(("ERROR %d queueing send instruction (message type: %hu)",
            rc, pMessage->messageType));

        if (pSendInst != NULL)
        {
            UT_FreeRefCount((void**)&pSendInst, FALSE);
        }

        if (bumped == TRUE)
        {
            // SFR 2560 { : Free all non-zero pointers not just pData
            if (pMessage != NULL)
            {
                UT_FreeRefCount((void**)&pMessage, FALSE);
            }

            if (pWSGroup != NULL)
            {
                UT_FreeRefCount((void**)&pWSGroup, FALSE);
            }

            if (pWorkset != NULL)
            {
                UT_FreeRefCount((void**)&pWorkset, FALSE);
            }

            if (pObj != NULL)
            {
                UT_FreeRefCount((void**)&pObj, FALSE);
            }

            if (pData != NULL)
            {
                UT_FreeRefCount((void**)&pData, FALSE);
            }
        }
    }

    DebugExitDWORD(QueueMessage, rc);
    return(rc);
}
  14202. //
  14203. // DeterminePriority(...)
  14204. //
  14205. void DeterminePriority
  14206. (
  14207. NET_PRIORITY * pPriority,
  14208. POM_OBJECTDATA pData
  14209. )
  14210. {
  14211. DebugEntry(DeterminePriority);
  14212. if (OM_OBMAN_CHOOSES_PRIORITY == *pPriority)
  14213. {
  14214. if (pData != NULL)
  14215. {
  14216. if (pData->length < OM_NET_HIGH_PRI_THRESHOLD)
  14217. {
  14218. *pPriority = NET_HIGH_PRIORITY;
  14219. }
  14220. else if (pData->length < OM_NET_MED_PRI_THRESHOLD)
  14221. {
  14222. *pPriority = NET_MEDIUM_PRIORITY;
  14223. }
  14224. else
  14225. {
  14226. *pPriority = NET_LOW_PRIORITY;
  14227. }
  14228. TRACE_OUT(("Priority chosen: %hu (data size: %u)",
  14229. *pPriority, pData->length));
  14230. }
  14231. else
  14232. {
  14233. *pPriority = NET_HIGH_PRIORITY;
  14234. }
  14235. }
  14236. else
  14237. {
  14238. TRACE_OUT(("Priority specified is %hu - not changing", *pPriority));
  14239. }
  14240. DebugExitVOID(DeterminePriority);
  14241. }
  14242. //
  14243. // GetMessageSize(...)
  14244. //
  14245. UINT GetMessageSize
  14246. (
  14247. OMNET_MESSAGE_TYPE messageType
  14248. )
  14249. {
  14250. UINT size;
  14251. DebugEntry(GetMessageSize);
  14252. switch (messageType)
  14253. {
  14254. case OMNET_HELLO:
  14255. case OMNET_WELCOME:
  14256. size = sizeof(OMNET_JOINER_PKT);
  14257. break;
  14258. case OMNET_LOCK_REQ:
  14259. case OMNET_LOCK_GRANT:
  14260. case OMNET_LOCK_DENY:
  14261. case OMNET_LOCK_NOTIFY:
  14262. case OMNET_UNLOCK:
  14263. size = sizeof(OMNET_LOCK_PKT);
  14264. break;
  14265. case OMNET_WSGROUP_SEND_REQ:
  14266. case OMNET_WSGROUP_SEND_MIDWAY:
  14267. case OMNET_WSGROUP_SEND_COMPLETE:
  14268. case OMNET_WSGROUP_SEND_DENY:
  14269. size = sizeof(OMNET_WSGROUP_SEND_PKT);
  14270. break;
  14271. //
  14272. // The remaining messages all use OMNET_OPERATION_PKT packets, but
  14273. // each uses different amounts of the generic packet. Therefore, we
  14274. // can't use sizeof so we've got some defined constants instead:
  14275. //
  14276. case OMNET_WORKSET_NEW:
  14277. size = OMNET_WORKSET_NEW_SIZE;
  14278. break;
  14279. case OMNET_WORKSET_CATCHUP:
  14280. size = OMNET_WORKSET_CATCHUP_SIZE;
  14281. break;
  14282. case OMNET_WORKSET_CLEAR:
  14283. size = OMNET_WORKSET_CLEAR_SIZE;
  14284. break;
  14285. case OMNET_OBJECT_MOVE:
  14286. size = OMNET_OBJECT_MOVE_SIZE;
  14287. break;
  14288. case OMNET_OBJECT_DELETE:
  14289. size = OMNET_OBJECT_DELETE_SIZE;
  14290. break;
  14291. case OMNET_OBJECT_REPLACE:
  14292. size = OMNET_OBJECT_REPLACE_SIZE;
  14293. break;
  14294. case OMNET_OBJECT_UPDATE:
  14295. size = OMNET_OBJECT_UPDATE_SIZE;
  14296. break;
  14297. case OMNET_OBJECT_ADD:
  14298. size = OMNET_OBJECT_ADD_SIZE;
  14299. break;
  14300. case OMNET_OBJECT_CATCHUP:
  14301. size = OMNET_OBJECT_CATCHUP_SIZE;
  14302. break;
  14303. case OMNET_MORE_DATA:
  14304. size = OMNET_MORE_DATA_SIZE;
  14305. break;
  14306. default:
  14307. ERROR_OUT(("Reached default case in switch statement (type: %hu)",
  14308. messageType));
  14309. size = 0;
  14310. break;
  14311. }
  14312. DebugExitDWORD(GetMessageSize, size);
  14313. return(size);
  14314. }
  14315. //
  14316. // WorksetEventPost()
  14317. //
  14318. UINT WorksetEventPost
  14319. (
  14320. PUT_CLIENT putTask,
  14321. POM_WORKSET pWorkset,
  14322. BYTE target,
  14323. UINT event,
  14324. POM_OBJECT pObj
  14325. )
  14326. {
  14327. POM_CLIENT_LIST pClientListEntry;
  14328. OM_EVENT_DATA16 eventData16;
  14329. UINT numPosts;
  14330. DebugEntry(WorksetEventPost);
  14331. //
  14332. // Need to post the event to each Client which has the workset open, so
  14333. // we chain through the list of Clients stored in the workset record:
  14334. //
  14335. numPosts = 0;
  14336. pClientListEntry = (POM_CLIENT_LIST)COM_BasedListFirst(&(pWorkset->clients), FIELD_OFFSET(OM_CLIENT_LIST, chain));
  14337. while (pClientListEntry != NULL)
  14338. {
  14339. //
  14340. // <target> specifies which type of Client we are to post events to
  14341. // and is PRIMARY and/or SECONDARY (ORed together if both). Check
  14342. // against this Client's registration mode:
  14343. //
  14344. if (target & pClientListEntry->mode)
  14345. {
  14346. //
  14347. // If the pObj was not NULL, bump the use count for the object
  14348. // record. If this fails, give up:
  14349. //
  14350. if (pObj != NULL)
  14351. {
  14352. ValidateObject(pObj);
  14353. UT_BumpUpRefCount(pObj);
  14354. }
  14355. //
  14356. // Fill in the fields of the event parameter, using the workset
  14357. // group handle as found in the Client list and the workset ID as
  14358. // found in the workset record:
  14359. //
  14360. eventData16.hWSGroup = pClientListEntry->hWSGroup;
  14361. eventData16.worksetID = pWorkset->worksetID;
  14362. UT_PostEvent(putTask,
  14363. pClientListEntry->putTask,
  14364. 0,
  14365. event,
  14366. *(PUINT) &eventData16,
  14367. (UINT_PTR)pObj);
  14368. numPosts++;
  14369. }
  14370. pClientListEntry = (POM_CLIENT_LIST)COM_BasedListNext(&(pWorkset->clients), pClientListEntry,
  14371. FIELD_OFFSET(OM_CLIENT_LIST, chain));
  14372. }
  14373. TRACE_OUT(("Posted event 0x%08x to %hu Clients (those with workset %u open)",
  14374. event, numPosts, pWorkset->worksetID));
  14375. DebugExitDWORD(WorksetEventPost, numPosts);
  14376. return(numPosts);
  14377. }
  14378. //
  14379. // WSGroupEventPost(...)
  14380. //
  14381. UINT WSGroupEventPost
  14382. (
  14383. PUT_CLIENT putFrom,
  14384. POM_WSGROUP pWSGroup,
  14385. BYTE target,
  14386. UINT event,
  14387. OM_WORKSET_ID worksetID,
  14388. UINT_PTR param2
  14389. )
  14390. {
  14391. POM_CLIENT_LIST pClientListEntry;
  14392. OM_EVENT_DATA16 eventData16;
  14393. UINT numPosts;
  14394. UINT rc = 0;
  14395. DebugEntry(WSGroupEventPost);
  14396. //
  14397. // Need to post the event to each Client which is registered with the
  14398. // workset group, so we chain through the list of Clients stored in the
  14399. // workset group record:
  14400. //
  14401. numPosts = 0;
  14402. pClientListEntry = (POM_CLIENT_LIST)COM_BasedListFirst(&(pWSGroup->clients), FIELD_OFFSET(OM_CLIENT_LIST, chain));
  14403. while (pClientListEntry != NULL)
  14404. {
  14405. //
  14406. // <target> specifies which type of Client we are to post events to
  14407. // and is PRIMARY and/or SECONDARY (ORed together if both). Check
  14408. // against this Client's registration mode:
  14409. //
  14410. if (target & pClientListEntry->mode)
  14411. {
  14412. //
  14413. // Fill in the fields of the event parameter, using the workset
  14414. // group handle as found in the Client list and the workset ID
  14415. // passed in:
  14416. //
  14417. eventData16.hWSGroup = pClientListEntry->hWSGroup;
  14418. eventData16.worksetID = worksetID;
  14419. TRACE_OUT(("Posting event 0x%08x to 0x%08x (hWSGroup: %hu - worksetID: %hu)",
  14420. event, pClientListEntry->putTask, eventData16.hWSGroup,
  14421. eventData16.worksetID));
  14422. UT_PostEvent(putFrom,
  14423. pClientListEntry->putTask,
  14424. 0,
  14425. event,
  14426. *(PUINT) &eventData16,
  14427. param2);
  14428. numPosts++;
  14429. }
  14430. pClientListEntry = (POM_CLIENT_LIST)COM_BasedListNext(&(pWSGroup->clients), pClientListEntry, FIELD_OFFSET(OM_CLIENT_LIST, chain));
  14431. }
  14432. TRACE_OUT(("Posted event 0x%08x to %hu Clients (all registered with '%s')",
  14433. event, numPosts, pWSGroup->wsg));
  14434. DebugExitDWORD(WSGroupEventPost, numPosts);
  14435. return(numPosts);
  14436. }
  14437. //
  14438. // WorksetDoClear(...)
  14439. //
  14440. void WorksetDoClear
  14441. (
  14442. PUT_CLIENT putTask,
  14443. POM_WSGROUP pWSGroup,
  14444. POM_WORKSET pWorkset,
  14445. POM_PENDING_OP pPendingOp
  14446. )
  14447. {
  14448. POM_OBJECT pObj;
  14449. POM_OBJECT pObj2;
  14450. BOOL locked = FALSE;
  14451. DebugEntry(WorksetDoClear);
  14452. //
  14453. // To clear a workset, we chain through each object in the workset and
  14454. // compare its addition stamp to the stamp of the clear operation we're
  14455. // performing. If the object was added before the workset clear was
  14456. // issued, we delete the object. Otherwise, we ignore it.
  14457. //
  14458. TRACE_OUT(("Clearing workset %u...", pWorkset->worksetID));
  14459. pObj = (POM_OBJECT)COM_BasedListLast(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  14460. while (pObj != NULL)
  14461. {
  14462. ValidateObject(pObj);
  14463. pObj2 = (POM_OBJECT)COM_BasedListPrev(&(pWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  14464. if (pObj->flags & DELETED)
  14465. {
  14466. //
  14467. // Do nothing
  14468. //
  14469. }
  14470. else
  14471. {
  14472. if (STAMP_IS_LOWER(pObj->addStamp, pPendingOp->seqStamp))
  14473. {
  14474. TRACE_OUT(("Object 0x%08x added before workset cleared, deleting",
  14475. pObj));
  14476. PurgePendingOps(pWorkset, pObj);
  14477. ObjectDoDelete(putTask, pWSGroup, pWorkset, pObj, NULL);
  14478. }
  14479. }
  14480. // restore the previous one
  14481. pObj = pObj2;
  14482. }
  14483. //
  14484. // This operation isn't pending anymore, so we remove it from the
  14485. // pending operation list and free the memory:
  14486. //
  14487. COM_BasedListRemove(&(pPendingOp->chain));
  14488. UT_FreeRefCount((void**)&pPendingOp, FALSE);
  14489. //
  14490. // Now that we've cleared the workset, post a CLEARED event to all
  14491. // secondaries:
  14492. //
  14493. WorksetEventPost(putTask,
  14494. pWorkset,
  14495. SECONDARY,
  14496. OM_WORKSET_CLEARED_IND,
  14497. 0);
  14498. TRACE_OUT(("Cleared workset %u", pWorkset->worksetID));
  14499. DebugExitVOID(WorksetDoClear);
  14500. }
//
// WorksetCreate(...)
//
// Creates workset <worksetID> in <pWSGroup> on behalf of <putTask>: builds
// an OMNET_WORKSET_NEW operation packet, applies it locally as if it had
// just arrived from the network, then queues it for broadcast (except for
// the checkpointing dummy workset, which every node creates locally).
//
// <fTemp> and <priority> are packed into spare packet fields (see below).
// Returns 0 on success or an OM/UT error code.
//
UINT WorksetCreate
(
    PUT_CLIENT          putTask,
    POM_WSGROUP         pWSGroup,
    OM_WORKSET_ID       worksetID,
    BOOL                fTemp,
    NET_PRIORITY        priority
)
{
    POMNET_OPERATION_PKT pPacket;
    UINT                rc = 0;

    DebugEntry(WorksetCreate);

    //
    // Here we create the new workset by generating the message to be
    // broadcast, processing it as if it had just arrived, and then
    // queueing it to be sent:
    //
    rc = GenerateOpMessage(pWSGroup,
                           worksetID,
                           NULL,            // no object ID
                           NULL,            // no object
                           OMNET_WORKSET_NEW,
                           &pPacket);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // Fill in the priority value for the workset, which goes in the two
    // bytes occupied by the <position> and <flags> fields:
    //
    // NOTE(review): these writes deliberately type-pun spare packet
    // fields - <position>+<flags> carry the priority, and the <objectID>
    // field carries the fTemp flag - presumably matching what
    // ProcessWorksetNew unpacks on receipt; confirm against that function.
    //
    *((NET_PRIORITY *) &(pPacket->position)) = priority;
    *((BOOL *) &(pPacket->objectID)) = fTemp;

    // Apply the operation locally first:
    rc = ProcessWorksetNew(putTask, pPacket, pWSGroup);
    if (rc != 0)
    {
        DC_QUIT;
    }

    //
    // NEW FOR R2.0
    //
    // In R2.0, the checkpointing mechanism used by a helper to get up to
    // date before sending a workset group to a late joiner relies on
    // locking a "dummy" workset (#255) in the workset group in question.
    // So, if the workset ID is 255, this is the dummy workset.  We do not
    // broadcast the WORKSET_NEW for this dummy workset, for two reasons:
    //
    // - it will confuse R1.1 systems
    //
    // - all other R2.0 systems will create it locally just as we have, so
    //   there isn't any need.
    //
    // So, do a check and free up the send packet if necessary; otherwise
    // queue the message as normal:
    //
    if (worksetID == OM_CHECKPOINT_WORKSET)
    {
        TRACE_OUT(("WORKSET_NEW for checkpointing dummy workset - not queueing"));
        UT_FreeRefCount((void**)&pPacket, FALSE);
    }
    else
    {
        rc = QueueMessage(putTask,
                          pWSGroup->pDomain,
                          pWSGroup->channelID,
                          priority,
                          pWSGroup,
                          NULL,             // no workset
                          NULL,             // no object
                          (POMNET_PKT_HEADER) pPacket,
                          NULL,             // no object data
                          TRUE);
        if (rc != 0)
        {
            DC_QUIT;
        }
    }

    TRACE_OUT(("Created workset ID %hu in WSG %d for TASK 0x%08x",
        worksetID, pWSGroup->wsg, putTask));

DC_EXIT_POINT:
    if (rc != 0)
    {
        //
        // Cleanup:
        //
        ERROR_OUT(("Error 0x%08x creating workset ID %hu in WSG %d for TASK 0x%08x",
            rc, worksetID, pWSGroup->wsg, putTask));
    }

    DebugExitDWORD(WorksetCreate, rc);
    return(rc);
}
//
// ObjectAdd(...)
//
// Adds object data <pData> to <pWorkset> at <position>: allocates a new
// object ID (returned via <pObjectID>), builds an OMNET_OBJECT_ADD packet,
// applies it locally (creating the object record, returned via <ppObj>),
// then queues the packet for broadcast.  On a queueing failure the local
// add is manually unwound (see the long comment below for why
// ObjectDoDelete is NOT used).
//
// Returns 0 on success or an OM/UT error code.  On failure, the caller's
// <pData> is still valid.
//
UINT ObjectAdd
(
    PUT_CLIENT          putTask,
    POM_PRIMARY         pomPrimary,
    POM_WSGROUP         pWSGroup,
    POM_WORKSET         pWorkset,
    POM_OBJECTDATA      pData,
    UINT                updateSize,
    OM_POSITION         position,
    OM_OBJECT_ID *      pObjectID,
    POM_OBJECT *        ppObj
)
{
    POM_OBJECT          pObj;
    POMNET_OPERATION_PKT pPacket;
    POM_DOMAIN          pDomain;
    UINT                rc = 0;

    DebugEntry(ObjectAdd);

    TRACE_OUT(("Adding object to workset %u in WSG %d",
        pWorkset->worksetID, pWSGroup->wsg));

    //
    // Allocate a new ID for this object:
    //
    pDomain = pWSGroup->pDomain;
    GET_NEXT_OBJECT_ID(*pObjectID, pDomain, pomPrimary);

    //
    // Generate the OMNET_OBJECT_ADD message:
    //
    rc = GenerateOpMessage(pWSGroup,
                           pWorkset->worksetID,
                           pObjectID,
                           pData,
                           OMNET_OBJECT_ADD,
                           &pPacket);
    if (rc != 0)
    {
        // NULL the packet pointer so the cleanup path below doesn't free
        // garbage:
        pPacket = NULL;
        DC_QUIT;
    }

    //
    // Generate message doesn't fill in the <updateSize> or <position>
    // fields (as they are specific to ObjectAdd) so we do them here:
    //
    pPacket->updateSize = updateSize;
    pPacket->position   = position;

    //
    // This processes the message, as if it has just been received from the
    // network (i.e. allocates the record, sets up the object handle,
    // inserts the object in the workset, etc.)
    //
    rc = ProcessObjectAdd(putTask, pPacket, pWSGroup,
            pWorkset, pData, ppObj);
    if (rc != 0)
    {
        DC_QUIT;
    }

    pObj = *ppObj;

    //
    // This queues the OMNET_OBJECT_ADD message on the send queue for this
    // Domain and priority:
    //
    rc = QueueMessage(putTask,
                      pWSGroup->pDomain,
                      pWSGroup->channelID,
                      pWorkset->priority,
                      pWSGroup,
                      pWorkset,
                      pObj,
                      (POMNET_PKT_HEADER) pPacket,
                      pData,
                      TRUE);
    if (rc != 0)
    {
        ValidateObject(pObj);

        //
        // If we failed to queue the message, we must unwind by deleting the
        // object and its record from the workset (since otherwise it would
        // be present on this node and no another, which we want to avoid):
        //
        // We don't want to call ObjectDoDelete since that frees the object
        // data (which our caller will expect still to be valid if the
        // function fails).  We could, of course, bump the use count and then
        // call ObjectDoDelete but if we fail on the bump, what next?
        //
        // Instead, we
        //
        // - set the DELETED flag so the hidden handler will swallow the
        //   Add event
        //
        // - decrement the numObjects field in the workset
        //
        // - free the object record after removing it from the workset.
        //
        pObj->flags |= DELETED;
        pWorkset->numObjects--;
        TRACE_OUT(("Freeing object record at 0x%08x)", pObj));
        COM_BasedListRemove(&(pObj->chain));
        UT_FreeRefCount((void**)&pObj, FALSE);
        DC_QUIT;
    }

DC_EXIT_POINT:
    if (rc != 0)
    {
        ERROR_OUT(("ERROR %d adding object to workset %u in WSG %d for TASK 0x%08x",
            rc, pWorkset->worksetID, pWSGroup->wsg, putTask));

        // Free the op packet if it was successfully generated:
        if (pPacket != NULL)
        {
            UT_FreeRefCount((void**)&pPacket, FALSE);
        }
    }

    DebugExitDWORD(ObjectAdd, rc);
    return(rc);
}
//
// ObjectDRU(...)
//
// Issues a Delete/Replace/Update operation (<type> is the OMNET message
// type) against object <pObj> in <pWorkset>: generates the op packet,
// queues it for broadcast, then applies it locally via ProcessObjectDRU.
//
// Note the ordering: unlike ObjectAdd, the packet is queued BEFORE local
// processing, so the packet's use count is bumped to keep it alive across
// QueueMessage (which may free it when not in a call).
//
// Returns 0 on success or an OM/UT error code.
//
UINT ObjectDRU
(
    PUT_CLIENT          putTask,
    POM_WSGROUP         pWSGroup,
    POM_WORKSET         pWorkset,
    POM_OBJECT          pObj,
    POM_OBJECTDATA      pData,
    OMNET_MESSAGE_TYPE  type
)
{
    POMNET_OPERATION_PKT pPacket;
    UINT                rc = 0;

    DebugEntry(ObjectDRU);

    TRACE_OUT(("Issuing operation type 0x%08x for object 0x%08x in workset %u in WSG %d",
        type, pData, pWorkset->worksetID, pWSGroup->wsg));

    rc = GenerateOpMessage(pWSGroup,
                           pWorkset->worksetID,
                           &(pObj->objectID),
                           pData,
                           type,
                           &pPacket);
    if (rc != 0)
    {
        // NULL the pointer so the cleanup code below doesn't free garbage:
        pPacket = NULL;
        DC_QUIT;
    }

    //
    // QueueMessage may free the packet (if we're not in a call) but we need
    // to process it in a minute so bump the use count:
    //
    UT_BumpUpRefCount(pPacket);

    rc = QueueMessage(putTask,
                      pWSGroup->pDomain,
                      pWSGroup->channelID,
                      pWorkset->priority,
                      pWSGroup,
                      pWorkset,
                      pObj,
                      (POMNET_PKT_HEADER) pPacket,
                      pData,
                      TRUE);
    if (rc != 0)
    {
        DC_QUIT;
    }

    // Apply the operation locally:
    rc = ProcessObjectDRU(putTask,
                          pPacket,
                          pWSGroup,
                          pWorkset,
                          pObj,
                          pData);

DC_EXIT_POINT:
    //
    // Now free the packet since we bumped its use count above:
    //
    if (pPacket != NULL)
    {
        UT_FreeRefCount((void**)&pPacket, FALSE);
    }

    if (rc != 0)
    {
        ERROR_OUT(("ERROR %d issuing D/R/U (type %hu) for object 0x%08x "
            "in workset %u in WSG %d",
            rc, type, pData, pWorkset->worksetID, pWSGroup->wsg));
    }

    DebugExitDWORD(ObjectDRU, rc);
    return(rc);
}
  14782. //
  14783. // FindPendingOp(...)
  14784. //
  14785. void FindPendingOp
  14786. (
  14787. POM_WORKSET pWorkset,
  14788. POM_OBJECT pObj,
  14789. OM_OPERATION_TYPE type,
  14790. POM_PENDING_OP * ppPendingOp
  14791. )
  14792. {
  14793. POM_PENDING_OP pPendingOp;
  14794. DebugEntry(FindPendingOp);
  14795. pPendingOp = (POM_PENDING_OP)COM_BasedListFirst(&(pWorkset->pendingOps), FIELD_OFFSET(OM_PENDING_OP, chain));
  14796. while (pPendingOp != NULL)
  14797. {
  14798. if ((pPendingOp->type == type) && (pPendingOp->pObj == pObj))
  14799. {
  14800. break;
  14801. }
  14802. pPendingOp = (POM_PENDING_OP)COM_BasedListNext(&(pWorkset->pendingOps), pPendingOp, FIELD_OFFSET(OM_PENDING_OP, chain));
  14803. }
  14804. if (pPendingOp == NULL)
  14805. {
  14806. TRACE_OUT(("No pending op of type %hu found for object 0x%08x",
  14807. type, pObj));
  14808. }
  14809. *ppPendingOp = pPendingOp;
  14810. DebugExitVOID(FindPendingOp);
  14811. }
  14812. //
  14813. // AddClientToWsetList(...)
  14814. //
  14815. UINT AddClientToWsetList
  14816. (
  14817. PUT_CLIENT putTask,
  14818. POM_WORKSET pWorkset,
  14819. OM_WSGROUP_HANDLE hWSGroup,
  14820. UINT mode,
  14821. POM_CLIENT_LIST * ppClientListEntry
  14822. )
  14823. {
  14824. UINT rc = 0;
  14825. DebugEntry(AddClientToWsetList);
  14826. //
  14827. // Adding a task to a workset's client list means that that task will
  14828. // get events relating to that workset.
  14829. //
  14830. TRACE_OUT((" Adding TASK 0x%08x to workset's client list"));
  14831. *ppClientListEntry = (POM_CLIENT_LIST)UT_MallocRefCount(sizeof(OM_CLIENT_LIST), FALSE);
  14832. if (! *ppClientListEntry)
  14833. {
  14834. rc = UT_RC_NO_MEM;
  14835. DC_QUIT;
  14836. }
  14837. SET_STAMP((*ppClientListEntry), CLIENTLIST);
  14838. (*ppClientListEntry)->putTask = putTask;
  14839. (*ppClientListEntry)->hWSGroup = hWSGroup;
  14840. (*ppClientListEntry)->mode = (WORD)mode;
  14841. //
  14842. // Now insert the entry into the list:
  14843. //
  14844. COM_BasedListInsertBefore(&(pWorkset->clients),
  14845. &((*ppClientListEntry)->chain));
  14846. TRACE_OUT((" Inserted Client list item into workset's Client list"));
  14847. DC_EXIT_POINT:
  14848. DebugExitDWORD(AddClientToWsetList, rc);
  14849. return(rc);
  14850. }
  14851. //
  14852. // RemoveClientFromWSGList(...)
  14853. //
  14854. void RemoveClientFromWSGList
  14855. (
  14856. PUT_CLIENT putUs,
  14857. PUT_CLIENT putTask,
  14858. POM_WSGROUP pWSGroup
  14859. )
  14860. {
  14861. POM_CLIENT_LIST pClientListEntry;
  14862. BOOL locked = FALSE;
  14863. DebugEntry(RemoveClientFromWSGList);
  14864. TRACE_OUT(("Searching for Client TASK 0x%08x in WSG %d",
  14865. putTask, pWSGroup->wsg));
  14866. COM_BasedListFind(LIST_FIND_FROM_FIRST, &(pWSGroup->clients),
  14867. (void**)&pClientListEntry, FIELD_OFFSET(OM_CLIENT_LIST, chain),
  14868. FIELD_OFFSET(OM_CLIENT_LIST, putTask), (DWORD_PTR)putTask,
  14869. FIELD_SIZE(OM_CLIENT_LIST, putTask));
  14870. //
  14871. // If it's not there, the Client may have already deregistered itself:
  14872. //
  14873. if (pClientListEntry == NULL)
  14874. {
  14875. WARNING_OUT(("Client TASK 0x%08x not found in list for WSG %d",
  14876. putTask, pWSGroup->wsg));
  14877. DC_QUIT;
  14878. }
  14879. //
  14880. // Remove the Client from the list and free the memory:
  14881. //
  14882. COM_BasedListRemove(&(pClientListEntry->chain));
  14883. UT_FreeRefCount((void**)&pClientListEntry, FALSE);
  14884. //
  14885. // If there are now no local Clients registered with the workset group,
  14886. // post an event to ObMan so it can discard the workset group (unless
  14887. // the workset group is marked non-discardable e.g the ObManControl
  14888. // workset group)
  14889. //
  14890. // The event parameter is the offset of the workset group record.
  14891. //
  14892. // Note: this discard is done asynchronously since it may involve
  14893. // allocating resources (broadcasting to other nodes that
  14894. // we've deregistered), and we want this function to always
  14895. // succeed.
  14896. //
  14897. // However, we clear the <valid> flag synchronously so that
  14898. // ObMan will not try to process events etc. which arrive
  14899. // for it.
  14900. //
  14901. if (COM_BasedListIsEmpty(&(pWSGroup->clients)))
  14902. {
  14903. pWSGroup->toBeDiscarded = TRUE;
  14904. pWSGroup->valid = FALSE;
  14905. TRACE_OUT(("Last local Client deregistered from WSG %d, "
  14906. "marking invalid and posting DISCARD event", pWSGroup->wsg));
  14907. ValidateOMP(g_pomPrimary);
  14908. UT_PostEvent(putUs,
  14909. g_pomPrimary->putTask,
  14910. 0, // no delay
  14911. OMINT_EVENT_WSGROUP_DISCARD,
  14912. 0,
  14913. (UINT_PTR)pWSGroup);
  14914. }
  14915. else
  14916. {
  14917. TRACE_OUT(("Clients still registered with WSG %d", pWSGroup->wsg));
  14918. }
  14919. DC_EXIT_POINT:
  14920. DebugExitVOID(RemoveClientFromWSGList);
  14921. }
//
// FindInfoObject(...)
//
// Searches workset #0 (the INFO workset) of the ObManControl workset group
// in <pDomain> for a workset group INFO object matching the caller's
// criteria, and returns it via *ppObjInfo (NULL if no match).
//
// Matching rules (see the inline comments):
//   - if <fpHandler> is OMFP_MAX, match on <wsGroupID> alone;
//   - otherwise match on function profile name, and additionally on
//     workset group name unless <wsg> is OMWSG_MAX.
//
void FindInfoObject
(
    POM_DOMAIN          pDomain,
    OM_WSGROUP_ID       wsGroupID,
    OMWSG               wsg,
    OMFP                fpHandler,
    POM_OBJECT *        ppObjInfo
)
{
    POM_WORKSET         pOMCWorkset;
    POM_OBJECT          pObj;
    POM_WSGROUP_INFO    pInfoObject;

    DebugEntry(FindInfoObject);

    TRACE_OUT(("FindInfoObject: FP %d WSG %d ID %d, domain %u",
        fpHandler, wsg, wsGroupID, pDomain->callID));

    //
    // In this function, we search workset #0 in ObManControl for a
    // Function Profile/workset group name combination which matches the
    // ones specified
    //
    // So, we need to start with a pointer to this workset:
    //
    pOMCWorkset = GetOMCWorkset(pDomain, OM_INFO_WORKSET);

    //
    // Now chain through each of the objects in the workset to look for a
    // match.
    //
    // Note: the iteration runs BACKWARDS (Last/Prev) through the workset.
    //
    pObj = (POM_OBJECT)COM_BasedListLast(&(pOMCWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
    while (pObj != NULL)
    {
        ValidateObject(pObj);

        //
        // If the object has not been deleted...
        //
        if (pObj->flags & DELETED)
        {
            // Deleted object - skip it.
        }
        else if (!pObj->pData)
        {
            ERROR_OUT(("FindInfoObject: object 0x%08x has no data", pObj));
        }
        else
        {
            ValidateObjectData(pObj->pData);
            pInfoObject = (POM_WSGROUP_INFO)pObj->pData;

            //
            // ...and if it is an INFO object...
            //
            if (pInfoObject->idStamp == OM_WSGINFO_ID_STAMP)
            {
                // If no FP provided, check the group IDs match
                if (fpHandler == OMFP_MAX)
                {
                    //
                    // ...and the ID matches, we've got what we want:
                    //
                    if (wsGroupID == pInfoObject->wsGroupID)
                    {
                        break;
                    }
                }
                //
                // ...but otherwise, try match on functionProfile...
                //
                else
                {
                    if (!lstrcmp(pInfoObject->functionProfile,
                        OMMapFPToName(fpHandler)))
                    {
                        //
                        // ...and also on WSG unless it is not provided
                        //
                        if ((wsg == OMWSG_MAX) ||
                            (!lstrcmp(pInfoObject->wsGroupName,
                            OMMapWSGToName(wsg))))
                        {
                            break;
                        }
                    }
                }
            }
        }

        pObj = (POM_OBJECT)COM_BasedListPrev(&(pOMCWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
    }

    TRACE_OUT(("%s info object in Domain %u",
        pObj == NULL ? "Didn't find" : "Found", pDomain->callID));

    //
    // Set up the caller's pointer:
    //
    *ppObjInfo = pObj;

    DebugExitVOID(FindInfoObject);
}
  15017. //
  15018. // PostAddEvents(...)
  15019. //
  15020. UINT PostAddEvents
  15021. (
  15022. PUT_CLIENT putFrom,
  15023. POM_WORKSET pWorkset,
  15024. OM_WSGROUP_HANDLE hWSGroup,
  15025. PUT_CLIENT putTo
  15026. )
  15027. {
  15028. OM_EVENT_DATA16 eventData16;
  15029. POM_OBJECT pObj;
  15030. UINT rc = 0;
  15031. DebugEntry(PostAddEvents);
  15032. eventData16.hWSGroup = hWSGroup;
  15033. eventData16.worksetID = pWorkset->worksetID;
  15034. if (pWorkset->numObjects != 0)
  15035. {
  15036. TRACE_OUT(("Workset has %u objects - posting OBJECT_ADD events",
  15037. pWorkset->numObjects));
  15038. pObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  15039. while (pObj != NULL)
  15040. {
  15041. ValidateObject(pObj);
  15042. //
  15043. // Don't post events for DELETED objects:
  15044. //
  15045. if (!(pObj->flags & DELETED))
  15046. {
  15047. //
  15048. // We're posting an event with an pObj in it, so bump the
  15049. // use count of the object record it refers to:
  15050. //
  15051. UT_BumpUpRefCount(pObj);
  15052. UT_PostEvent(putFrom, putTo,
  15053. 0, // no delay
  15054. OM_OBJECT_ADD_IND,
  15055. *(PUINT) &eventData16,
  15056. (UINT_PTR)pObj);
  15057. }
  15058. pObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  15059. }
  15060. }
  15061. else
  15062. {
  15063. TRACE_OUT(("No objects in workset"));
  15064. }
  15065. DebugExitDWORD(PostAddEvents, rc);
  15066. return(rc);
  15067. }
  15068. //
  15069. // PurgePendingOps(...)
  15070. //
  15071. void PurgePendingOps
  15072. (
  15073. POM_WORKSET pWorkset,
  15074. POM_OBJECT pObj
  15075. )
  15076. {
  15077. POM_PENDING_OP pPendingOp;
  15078. POM_PENDING_OP pTempPendingOp;
  15079. DebugEntry(PurgePendingOps);
  15080. //
  15081. // Chain through the workset's list of pending operations and confirm
  15082. // them one by one:
  15083. //
  15084. pPendingOp = (POM_PENDING_OP)COM_BasedListFirst(&(pWorkset->pendingOps), FIELD_OFFSET(OM_PENDING_OP, chain));
  15085. while (pPendingOp != NULL)
  15086. {
  15087. pTempPendingOp = (POM_PENDING_OP)COM_BasedListNext(&(pWorkset->pendingOps), pPendingOp, FIELD_OFFSET(OM_PENDING_OP, chain));
  15088. if (pPendingOp->pObj == pObj)
  15089. {
  15090. TRACE_OUT(("Purging operation type %hd", pPendingOp->type));
  15091. COM_BasedListRemove(&(pPendingOp->chain));
  15092. UT_FreeRefCount((void**)&pPendingOp, FALSE);
  15093. }
  15094. pPendingOp = pTempPendingOp;
  15095. }
  15096. DebugExitVOID(PurgePendingOps);
  15097. }
//
// WorksetLockReq(...)
//
// Requests the lock on <pWorkset> for <putTask>.  Never returns an error
// directly: the outcome (success or failure) is delivered asynchronously
// via WorksetLockResult, correlated by the value returned in *pCorrelator.
//
// Behaviour depends on the workset's current lock state:
//   LOCKING/LOCKED - piggy-back as a secondary request (bump lock count);
//   LOCK_GRANTED   - another node holds the lock, fail immediately;
//   UNLOCKED       - become the primary requester: build the list of
//                    remote nodes that must reply, broadcast an
//                    OMNET_LOCK_REQ, and start a retry timeout.
//
// The checkpointing dummy workset (#255) always has its lock state reset
// on exit so each checkpoint lock produces a fresh end-to-end ping.
//
void WorksetLockReq
(
    PUT_CLIENT          putTask,
    POM_PRIMARY         pomPrimary,
    POM_WSGROUP         pWSGroup,
    POM_WORKSET         pWorkset,
    OM_WSGROUP_HANDLE   hWSGroup,
    OM_CORRELATOR *     pCorrelator
)
{
    POM_DOMAIN          pDomain;
    POM_LOCK_REQ        pLockReq = NULL;
    POMNET_LOCK_PKT     pLockReqPkt = NULL;
    UINT                rc = 0;

    DebugEntry(WorksetLockReq);

    TRACE_OUT(("TASK 0x%08x requesting to lock workset %u in WSG %d",
        putTask, pWorkset->worksetID, hWSGroup));

    //
    // The caller will need a correlator value to correlate the eventual
    // lock success/failure event:
    //
    *pCorrelator = NextCorrelator(pomPrimary);

    //
    // Set up a pointer to the Domain record:
    //
    pDomain = pWSGroup->pDomain;

    //
    // Allocate some memory for the lock request control block:
    //
    pLockReq = (POM_LOCK_REQ)UT_MallocRefCount(sizeof(OM_LOCK_REQ), TRUE);
    if (!pLockReq)
    {
        rc = OM_RC_OUT_OF_RESOURCES;
        DC_QUIT;
    }
    SET_STAMP(pLockReq, LREQ);

    //
    // Set up the fields:
    //
    pLockReq->putTask     = putTask;
    pLockReq->correlator  = *pCorrelator;
    pLockReq->wsGroupID   = pWSGroup->wsGroupID;
    pLockReq->worksetID   = pWorkset->worksetID;
    pLockReq->hWSGroup    = hWSGroup;
    pLockReq->type        = LOCK_PRIMARY;
    pLockReq->retriesToGo = OM_LOCK_RETRY_COUNT_DFLT;
    pLockReq->pWSGroup    = pWSGroup;

    COM_BasedListInit(&(pLockReq->nodes));

    //
    // Insert this lock request in the Domain's list of pending lock
    // requests:
    //
    COM_BasedListInsertBefore(&(pDomain->pendingLocks), &(pLockReq->chain));

    //
    // Now examine the workset lock state to see if we can grant the lock
    // immediately:
    //
    TRACE_OUT(("Lock state for workset %u in WSG %d is %hu",
        pWorkset->worksetID, hWSGroup, pWorkset->lockState));

    switch (pWorkset->lockState)
    {
        case LOCKING:
        case LOCKED:
        {
            //
            // A lock request from this node is already in progress (or
            // has succeeded), so this one becomes a secondary request:
            //
            TRACE_OUT((
                "Workset %hu in WSG %d already locked/locking - bumping count",
                pWorkset->worksetID, hWSGroup));

            pLockReq->type = LOCK_SECONDARY;
            pWorkset->lockCount++;

            if (pWorkset->lockState == LOCKED)
            {
                //
                // If we've already got the lock, post success immediately:
                //
                WorksetLockResult(putTask, &pLockReq, 0);
            }
            else
            {
                //
                // Otherwise, this request will be handled when the primary
                // request completes, so do nothing for now.
                //
            }
        }
        break;

        case LOCK_GRANTED:
        {
            //
            // We've already granted the lock to another node so we fail
            // our local client's request for it:
            //
            WorksetLockResult(putTask, &pLockReq, OM_RC_WORKSET_LOCK_GRANTED);
        }
        break;

        case UNLOCKED:
        {
            //
            // Build up a list of other nodes using the workset group:
            //
            rc = BuildNodeList(pDomain, pLockReq);
            if (rc != 0)
            {
                DC_QUIT;
            }

            pWorkset->lockState = LOCKING;
            pWorkset->lockCount++;
            pWorkset->lockedBy  = pDomain->userID;

            //
            // If the list is empty, we have got the lock:
            //
            if (COM_BasedListIsEmpty(&pLockReq->nodes))
            {
                TRACE_OUT(("No remote nodes, granting lock immediately"));
                pWorkset->lockState = LOCKED;
                WorksetLockResult(putTask, &pLockReq, 0);
            }
            //
            // Otherwise, we need to broadcast a lock request CB:
            //
            else
            {
                pLockReqPkt = (POMNET_LOCK_PKT)UT_MallocRefCount(sizeof(OMNET_LOCK_PKT), TRUE);
                if (!pLockReqPkt)
                {
                    rc = UT_RC_NO_MEM;
                    DC_QUIT;
                }

                pLockReqPkt->header.messageType = OMNET_LOCK_REQ;
                pLockReqPkt->header.sender      = pDomain->userID;
                // data1 carries the correlator the replies will echo back:
                pLockReqPkt->data1              = pLockReq->correlator;
                pLockReqPkt->wsGroupID          = pLockReq->wsGroupID;
                pLockReqPkt->worksetID          = pLockReq->worksetID;

                //
                // Lock messages go at the priority of the workset
                // involved.  If this is OBMAN_CHOOSES_PRIORITY, then
                // all bets are off and we send them TOP_PRIORITY.
                //
                rc = QueueMessage(putTask,
                                  pDomain,
                                  pWSGroup->channelID,
                                  (NET_PRIORITY)((pWorkset->priority == OM_OBMAN_CHOOSES_PRIORITY) ?
                                      NET_TOP_PRIORITY : pWorkset->priority),
                                  NULL,
                                  NULL,
                                  NULL,
                                  (POMNET_PKT_HEADER) pLockReqPkt,
                                  NULL,
                                  TRUE);
                if (rc != 0)
                {
                    DC_QUIT;
                }

                //
                // Post a timeout event to the ObMan task so that we don't hang around
                // forever waiting for the lock replies:
                //
                UT_PostEvent(putTask,
                             pomPrimary->putTask,       // ObMan's utH
                             OM_LOCK_RETRY_DELAY_DFLT,
                             OMINT_EVENT_LOCK_TIMEOUT,
                             pLockReq->correlator,
                             pDomain->callID);
            }
        }
        break;
    }

DC_EXIT_POINT:
    //
    // For the checkpointing dummy workset, we always "forget" our lock
    // state so that subsequent requests to lock it will result in the
    // required end-to-end ping:
    //
    if (pWorkset->worksetID == OM_CHECKPOINT_WORKSET)
    {
        TRACE_OUT(("Resetting lock state of checkpoint workset in WSG %d",
            hWSGroup));
        pWorkset->lockState = UNLOCKED;
        pWorkset->lockCount = 0;
        pWorkset->lockedBy  = 0;
    }

    if (rc != 0)
    {
        if (pLockReqPkt != NULL)
        {
            UT_FreeRefCount((void**)&pLockReqPkt, FALSE);
        }

        //
        // This function never returns an error to its caller directly;
        // instead, we call WorksetLockResult which will post a failure
        // event to the calling task (this means the caller doesn't have to
        // have two error processing paths)
        //
        if (pLockReq != NULL)
        {
            WorksetLockResult(putTask, &pLockReq, rc);
        }
        else
        {
            WARNING_OUT(("ERROR %d requesting lock for workset %u in WSG %d ",
                rc, pWorkset->worksetID, hWSGroup));
        }
    }

    DebugExitVOID(WorksetLockReq);
}
  15305. //
  15306. // BuildNodeList(...)
  15307. //
  15308. UINT BuildNodeList
  15309. (
  15310. POM_DOMAIN pDomain,
  15311. POM_LOCK_REQ pLockReq
  15312. )
  15313. {
  15314. NET_PRIORITY priority;
  15315. POM_WORKSET pOMCWorkset;
  15316. POM_OBJECT pObj;
  15317. POM_WSGROUP_REG_REC pPersonObject;
  15318. POM_NODE_LIST pNodeEntry;
  15319. NET_UID ownUserID;
  15320. BOOL foundOurRegObject;
  15321. UINT rc = 0;
  15322. DebugEntry(BuildNodeList);
  15323. //
  15324. // OK, we're about to broadcast a lock request throughout this Domain
  15325. // on this workset group's channel. Before we do so, however, we build
  15326. // up a list of the nodes we expect to respond to the request. As the
  15327. // replies come in we tick them off against this list; when all of them
  15328. // have been received, the lock is granted.
  15329. //
  15330. // SFR 6117: Since the lock replies will come back on all priorities
  15331. // (to correctly flush the channel), we add 4 items for each remote
  15332. // node - one for each priority.
  15333. //
  15334. // So, we examine the control workset for this workset group, adding
  15335. // items to our list for each person object we find in it (except our
  15336. // own, of course).
  15337. //
  15338. //
  15339. // First, get a pointer to the relevant control workset:
  15340. //
  15341. pOMCWorkset = GetOMCWorkset(pDomain, pLockReq->wsGroupID);
  15342. ASSERT((pOMCWorkset != NULL));
  15343. //
  15344. // We want to ignore our own registration object, so make a note of our
  15345. // user ID:
  15346. //
  15347. ownUserID = pDomain->userID;
  15348. //
  15349. // Now chain through the workset:
  15350. //
  15351. foundOurRegObject = FALSE;
  15352. pObj = (POM_OBJECT)COM_BasedListFirst(&(pOMCWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  15353. while (pObj != NULL)
  15354. {
  15355. ValidateObject(pObj);
  15356. if (pObj->flags & DELETED)
  15357. {
  15358. //
  15359. // Do nothing
  15360. //
  15361. }
  15362. else if (!pObj->pData)
  15363. {
  15364. ERROR_OUT(("BuildNodeList: object 0x%08x has no data", pObj));
  15365. }
  15366. else
  15367. {
  15368. ValidateObjectData(pObj->pData);
  15369. pPersonObject = (POM_WSGROUP_REG_REC)pObj->pData;
  15370. if (pPersonObject->idStamp != OM_WSGREGREC_ID_STAMP)
  15371. {
  15372. TRACE_OUT(("Not a person object, skipping"));
  15373. }
  15374. else
  15375. {
  15376. if (pPersonObject->userID == ownUserID)
  15377. {
  15378. if (foundOurRegObject)
  15379. {
  15380. ERROR_OUT(("Duplicate person object in workset %u",
  15381. pOMCWorkset->worksetID));
  15382. }
  15383. else
  15384. {
  15385. TRACE_OUT(("Found own person object, skipping"));
  15386. foundOurRegObject = TRUE;
  15387. }
  15388. }
  15389. else
  15390. {
  15391. //
  15392. // Add an item to our expected respondents list (this
  15393. // memory is freed in each case when the remote node
  15394. // replies, or the timer expires and we notice that the
  15395. // node has disappeared).
  15396. //
  15397. // SFR 6117: We add one item for each priority value, since
  15398. // the lock replies will come back on all priorities.
  15399. //
  15400. for (priority = NET_TOP_PRIORITY;
  15401. priority <= NET_LOW_PRIORITY;
  15402. priority++)
  15403. {
  15404. TRACE_OUT(("Adding node 0x%08x to node list at priority %hu",
  15405. pPersonObject->userID, priority));
  15406. pNodeEntry = (POM_NODE_LIST)UT_MallocRefCount(sizeof(OM_NODE_LIST), TRUE);
  15407. if (!pNodeEntry)
  15408. {
  15409. rc = UT_RC_NO_MEM;
  15410. DC_QUIT;
  15411. }
  15412. SET_STAMP(pNodeEntry, NODELIST);
  15413. pNodeEntry->userID = pPersonObject->userID;
  15414. COM_BasedListInsertAfter(&(pLockReq->nodes),
  15415. &(pNodeEntry->chain));
  15416. //
  15417. // BUT! We only do this for R20 and later (i.e.
  15418. // anything over real MCS). For R11 calls, just put
  15419. // one entry on the list.
  15420. //
  15421. // ALSO! For ObManControl worksets, we only expect one
  15422. // lock reply (at TOP_PRIORITY) - this is to speed up
  15423. // processing of registration attempts. So, if this is
  15424. // for ObManControl, don't go around this loop again -
  15425. // just get out.
  15426. //
  15427. if (pLockReq->wsGroupID == WSGROUPID_OMC)
  15428. {
  15429. break;
  15430. }
  15431. }
  15432. }
  15433. }
  15434. }
  15435. pObj = (POM_OBJECT)COM_BasedListNext(&(pOMCWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  15436. }
  15437. DC_EXIT_POINT:
  15438. if (rc != 0)
  15439. {
  15440. ERROR_OUT(("ERROR %d trying to build node list", rc));
  15441. }
  15442. DebugExitDWORD(BuildNodeList, rc);
  15443. return(rc);
  15444. }
  15445. //
  15446. // WorksetLockResult(...)
  15447. //
  15448. void WorksetLockResult
  15449. (
  15450. PUT_CLIENT putTask,
  15451. POM_LOCK_REQ * ppLockReq,
  15452. UINT result
  15453. )
  15454. {
  15455. POM_LOCK_REQ pLockReq;
  15456. POM_WSGROUP pWSGroup;
  15457. POM_WORKSET pWorkset;
  15458. OM_EVENT_DATA16 eventData16;
  15459. OM_EVENT_DATA32 eventData32;
  15460. POM_NODE_LIST pNodeEntry;
  15461. DebugEntry(WorksetLockResult);
  15462. //
  15463. // First some sanity checks:
  15464. //
  15465. ASSERT((ppLockReq != NULL));
  15466. ASSERT((*ppLockReq != NULL));
  15467. pLockReq = *ppLockReq;
  15468. //
  15469. // Set up a local pointer to the workset:
  15470. //
  15471. pWSGroup = pLockReq->pWSGroup;
  15472. pWorkset = pWSGroup->apWorksets[pLockReq->worksetID];
  15473. ASSERT((pWorkset != NULL));
  15474. TRACE_OUT(("Lock %s: lock state: %hu - locked by: 0x%08x - lock count: %hu",
  15475. (result == 0) ? "succeded" : "failed",
  15476. pWorkset->lockState, pWorkset->lockedBy, pWorkset->lockCount));
  15477. //
  15478. // We merge the LOCKED and LOCK_GRANTED return codes at the API level:
  15479. //
  15480. if (result == OM_RC_WORKSET_LOCK_GRANTED)
  15481. {
  15482. result = OM_RC_WORKSET_LOCKED;
  15483. }
  15484. //
  15485. // Fill in fields of event parameter and post the result:
  15486. //
  15487. eventData16.hWSGroup = pLockReq->hWSGroup;
  15488. eventData16.worksetID = pLockReq->worksetID;
  15489. eventData32.correlator = pLockReq->correlator;
  15490. eventData32.result = (WORD)result;
  15491. UT_PostEvent(putTask,
  15492. pLockReq->putTask, // task that wants the lock
  15493. 0, // i.e. ObMan or Client
  15494. OM_WORKSET_LOCK_CON,
  15495. *((PUINT) &eventData16),
  15496. *((LPUINT) &eventData32));
  15497. //
  15498. // Remove any node entries left hanging off the lockReqCB:
  15499. //
  15500. pNodeEntry = (POM_NODE_LIST)COM_BasedListFirst(&(pLockReq->nodes), FIELD_OFFSET(OM_NODE_LIST, chain));
  15501. while (pNodeEntry != NULL)
  15502. {
  15503. COM_BasedListRemove(&pNodeEntry->chain);
  15504. UT_FreeRefCount((void**)&pNodeEntry, FALSE);
  15505. pNodeEntry = (POM_NODE_LIST)COM_BasedListFirst(&(pLockReq->nodes), FIELD_OFFSET(OM_NODE_LIST, chain));
  15506. }
  15507. //
  15508. // Remove the lock request itself from the list and free the memory:
  15509. //
  15510. COM_BasedListRemove(&pLockReq->chain);
  15511. UT_FreeRefCount((void**)&pLockReq, FALSE);
  15512. *ppLockReq = NULL;
  15513. DebugExitVOID(WorksetLockResult);
  15514. }
  15515. //
  15516. // WorksetUnlock(...)
  15517. //
  15518. void WorksetUnlock
  15519. (
  15520. PUT_CLIENT putTask,
  15521. POM_WSGROUP pWSGroup,
  15522. POM_WORKSET pWorkset
  15523. )
  15524. {
  15525. DebugEntry(WorksetUnlock);
  15526. TRACE_OUT(("Unlocking workset %u in WSG %d for TASK 0x%08x",
  15527. pWorkset->worksetID, pWSGroup->wsg, putTask));
  15528. TRACE_OUT((" lock state: %hu - locked by: 0x%08x - lock count: %hu",
  15529. pWorkset->lockState, pWorkset->lockedBy, pWorkset->lockCount));
  15530. //
  15531. // Check the workset lock state
  15532. //
  15533. if ((pWorkset->lockState != LOCKED) &&
  15534. (pWorkset->lockState != LOCKING))
  15535. {
  15536. ERROR_OUT(("Unlock error for workset %u in WSG %d - not locked",
  15537. pWorkset->worksetID, pWSGroup->wsg));
  15538. DC_QUIT;
  15539. }
  15540. //
  15541. // If this workset is "multiply locked" (i.e. locked more than one
  15542. // time by the same task), then all we want to do is decrement the lock
  15543. // count. Otherwise, we want to release the lock.
  15544. //
  15545. pWorkset->lockCount--;
  15546. if (pWorkset->lockCount == 0)
  15547. {
  15548. TRACE_OUT(("Lock count now 0 - really unlocking"));
  15549. WorksetUnlockLocal(putTask, pWorkset);
  15550. QueueUnlock(putTask, pWSGroup->pDomain,
  15551. pWSGroup->wsGroupID,
  15552. pWorkset->worksetID,
  15553. pWSGroup->channelID,
  15554. pWorkset->priority);
  15555. }
  15556. DC_EXIT_POINT:
  15557. DebugExitVOID(WorksetUnlock);
  15558. }
  15559. //
  15560. // WorksetUnlockLocal(...)
  15561. //
  15562. void WorksetUnlockLocal
  15563. (
  15564. PUT_CLIENT putTask,
  15565. POM_WORKSET pWorkset
  15566. )
  15567. {
  15568. DebugEntry(WorksetUnlockLocal);
  15569. //
  15570. // To unlock a workset, we
  15571. //
  15572. // - check that it's not already unlocked
  15573. //
  15574. // - check that the lock count is zero, so we can now unlock it
  15575. //
  15576. // - set the lock fields in the workset record
  15577. //
  15578. // - post an OM_WORKSET_UNLOCK_IND to all Clients with the workset
  15579. // open.
  15580. //
  15581. if (pWorkset->lockState == UNLOCKED)
  15582. {
  15583. WARNING_OUT(("Workset %hu is already UNLOCKED!", pWorkset->worksetID));
  15584. DC_QUIT;
  15585. }
  15586. ASSERT((pWorkset->lockCount == 0));
  15587. pWorkset->lockedBy = 0;
  15588. pWorkset->lockState = UNLOCKED;
  15589. WorksetEventPost(putTask,
  15590. pWorkset,
  15591. PRIMARY | SECONDARY,
  15592. OM_WORKSET_UNLOCK_IND,
  15593. 0);
  15594. DC_EXIT_POINT:
  15595. DebugExitVOID(WorksetUnlockLocal);
  15596. }
  15597. //
  15598. // QueueUnlock(...)
  15599. //
  15600. UINT QueueUnlock
  15601. (
  15602. PUT_CLIENT putTask,
  15603. POM_DOMAIN pDomain,
  15604. OM_WSGROUP_ID wsGroupID,
  15605. OM_WORKSET_ID worksetID,
  15606. NET_UID destination,
  15607. NET_PRIORITY priority
  15608. )
  15609. {
  15610. POMNET_LOCK_PKT pUnlockPkt;
  15611. UINT rc = 0;
  15612. DebugEntry(QueueUnlock);
  15613. //
  15614. // Allocate memory for the message, fill in the fields and queue it:
  15615. //
  15616. pUnlockPkt = (POMNET_LOCK_PKT)UT_MallocRefCount(sizeof(OMNET_LOCK_PKT), TRUE);
  15617. if (!pUnlockPkt)
  15618. {
  15619. rc = UT_RC_NO_MEM;
  15620. DC_QUIT;
  15621. }
  15622. pUnlockPkt->header.messageType = OMNET_UNLOCK;
  15623. pUnlockPkt->header.sender = pDomain->userID;
  15624. pUnlockPkt->wsGroupID = wsGroupID;
  15625. pUnlockPkt->worksetID = worksetID;
  15626. //
  15627. // Unlock messages go at the priority of the workset involved. If this
  15628. // is OBMAN_CHOOSES_PRIORITY, then all bets are off and we send them
  15629. // TOP_PRIORITY.
  15630. //
  15631. if (priority == OM_OBMAN_CHOOSES_PRIORITY)
  15632. {
  15633. priority = NET_TOP_PRIORITY;
  15634. }
  15635. rc = QueueMessage(putTask,
  15636. pDomain,
  15637. destination,
  15638. priority,
  15639. NULL,
  15640. NULL,
  15641. NULL, // no object
  15642. (POMNET_PKT_HEADER) pUnlockPkt,
  15643. NULL, // no object data
  15644. TRUE);
  15645. if (rc != 0)
  15646. {
  15647. DC_QUIT;
  15648. }
  15649. DC_EXIT_POINT:
  15650. if (rc != 0)
  15651. {
  15652. ERROR_OUT(("ERROR %d in FindInfoObject"));
  15653. if (pUnlockPkt != NULL)
  15654. {
  15655. UT_FreeRefCount((void**)&pUnlockPkt, FALSE);
  15656. }
  15657. }
  15658. DebugExitDWORD(QueueUnlock, rc);
  15659. return(rc);
  15660. }
  15661. //
  15662. //
  15663. // DEBUG ONLY FUNCTIONS
  15664. //
  15665. // These functions are debug code only - for normal compilations, they are
  15666. // #defined to nothing.
  15667. //
  15668. #ifdef _DEBUG
  15669. //
  15670. // CheckObjectCount(...)
  15671. //
  15672. void CheckObjectCount
  15673. (
  15674. POM_WSGROUP pWSGroup,
  15675. POM_WORKSET pWorkset
  15676. )
  15677. {
  15678. POM_OBJECT pObj;
  15679. UINT count;
  15680. DebugEntry(CheckObjectCount);
  15681. count = 0;
  15682. pObj = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  15683. while (pObj != NULL)
  15684. {
  15685. ValidateObject(pObj);
  15686. if (!(pObj->flags & DELETED))
  15687. {
  15688. count++;
  15689. }
  15690. pObj = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObj, FIELD_OFFSET(OM_OBJECT, chain));
  15691. }
  15692. ASSERT((count == pWorkset->numObjects));
  15693. TRACE_OUT(("Counted %u items in workset %u in WSG %d, agrees with numObjects",
  15694. count, pWorkset->worksetID, pWSGroup->wsg));
  15695. DebugExitVOID(CheckObjectCount);
  15696. }
  15697. //
  15698. // CheckObjectOrder(...)
  15699. //
  15700. void CheckObjectOrder
  15701. (
  15702. POM_WORKSET pWorkset
  15703. )
  15704. {
  15705. POM_OBJECT pObjThis;
  15706. POM_OBJECT pObjNext;
  15707. BOOL orderIsGood = TRUE;
  15708. DebugEntry(CheckObjectOrder);
  15709. //
  15710. // This function checks that objects in the specified workset have been
  15711. // correctly positioned. The correct order of objects is one where
  15712. //
  15713. // - all FIRST objects are before all LAST objects
  15714. //
  15715. // - the position stamps of the FIRST objects decrease monotonically
  15716. // from the start of the workset onwards
  15717. //
  15718. // - the position stamps of the LAST objects decrease monotonically
  15719. // from the end of the workset backwards.
  15720. //
  15721. //
  15722. //
  15723. // This can be represented grahpically as follows:
  15724. //
  15725. // * *
  15726. // * * * *
  15727. // * * * * * *
  15728. // * * * * * * * *
  15729. // * * * * * * * * * *
  15730. // * * * * * * * * * * * *
  15731. //
  15732. // F F F F F F L L L L L L
  15733. //
  15734. // ...where taller columns indicate later sequence stamps and 'F' and
  15735. // 'L' indicate the FIRST or LAST objects.
  15736. //
  15737. //
  15738. //
  15739. // The way we test for correct order is to compare each adjacent pair of
  15740. // objects. If the overall order is correct, the for each pair of
  15741. // objects where A immediately precedes B, one of the following is true:
  15742. //
  15743. // - both are FIRST and B has a lower sequence stamp than A
  15744. //
  15745. // - A is FIRST and B is LAST
  15746. //
  15747. // - both are LAST and A has a lower sequence stamp than B.
  15748. //
  15749. pObjThis = (POM_OBJECT)COM_BasedListFirst(&(pWorkset->objects), FIELD_OFFSET(OM_OBJECT, chain));
  15750. if (!pObjThis)
  15751. {
  15752. //
  15753. // Hitting the end of the workset at any stage means order is
  15754. // correct, so quit:
  15755. //
  15756. DC_QUIT;
  15757. }
  15758. pObjNext = pObjThis;
  15759. orderIsGood = TRUE;
  15760. while (orderIsGood)
  15761. {
  15762. pObjNext = (POM_OBJECT)COM_BasedListNext(&(pWorkset->objects), pObjNext, FIELD_OFFSET(OM_OBJECT, chain));
  15763. if (!pObjNext)
  15764. {
  15765. DC_QUIT;
  15766. }
  15767. switch (pObjThis->position)
  15768. {
  15769. case FIRST: // condition 3 has failed
  15770. if (pObjNext->position == FIRST) // condition 2 has failed
  15771. {
  15772. if (!STAMP_IS_LOWER(pObjNext->positionStamp,
  15773. pObjThis->positionStamp))
  15774. {
  15775. ERROR_OUT(("Object order check failed (1)"));
  15776. orderIsGood = FALSE; // final condition (1) has failed
  15777. DC_QUIT;
  15778. }
  15779. }
  15780. break;
  15781. case LAST: // conditions 1 and 2 have failed
  15782. if ((pObjNext->position != LAST) ||
  15783. (!STAMP_IS_LOWER(pObjThis->positionStamp,
  15784. pObjNext->positionStamp)))
  15785. {
  15786. ERROR_OUT(("Object order check failed (2)"));
  15787. orderIsGood = FALSE; // final condition (3) has failed
  15788. DC_QUIT;
  15789. }
  15790. break;
  15791. default:
  15792. ERROR_OUT(("Reached default case in switch statement (value: %hu)",
  15793. pObjThis->position));
  15794. break;
  15795. }
  15796. pObjThis = pObjNext;
  15797. }
  15798. DC_EXIT_POINT:
  15799. if (!orderIsGood)
  15800. {
  15801. ERROR_OUT(("This object (handle: 0x%08x - ID: 0x%08x:0x%08x) "
  15802. "has position stamp 0x%08x:0x%08x (position %s)",
  15803. pObjThis,
  15804. pObjThis->objectID.creator, pObjThis->objectID.sequence,
  15805. pObjThis->positionStamp.userID,
  15806. pObjThis->positionStamp.genNumber,
  15807. (pObjThis->position == LAST) ? "LAST" : "FIRST"));
  15808. ERROR_OUT(("This object (handle: 0x%08x - ID: 0x%08x:0x%08x) "
  15809. "has position stamp 0x%08x:0x%08x (position %s)",
  15810. pObjNext,
  15811. pObjNext->objectID.creator, pObjNext->objectID.sequence,
  15812. pObjNext->positionStamp.userID,
  15813. pObjNext->positionStamp.genNumber,
  15814. (pObjNext->position == LAST) ? "LAST" : "FIRST"));
  15815. ERROR_OUT(("Object order check failed for workset %u. "
  15816. "See trace for more details",
  15817. pWorkset->worksetID));
  15818. }
  15819. TRACE_OUT(("Object order in workset %u is correct",
  15820. pWorkset->worksetID));
  15821. DebugExitVOID(CheckObjectOrder);
  15822. }
  15823. #endif // _DEBUG
  15824. //
  15825. // OMMapNameToFP()
  15826. //
  15827. OMFP OMMapNameToFP(LPCSTR szFunctionProfile)
  15828. {
  15829. int fp;
  15830. DebugEntry(OMMapNameToFP);
  15831. for (fp = OMFP_FIRST; fp < OMFP_MAX; fp++)
  15832. {
  15833. if (!lstrcmp(szFunctionProfile, c_aFpMap[fp].szName))
  15834. {
  15835. // Found it
  15836. break;
  15837. }
  15838. }
  15839. //
  15840. // Note that OMFP_MAX means "not found"
  15841. //
  15842. DebugExitDWORD(OMMapNameToFP, fp);
  15843. return((OMFP)fp);
  15844. }
  15845. //
  15846. // OMMapFPToName()
  15847. //
  15848. // This returns a data pointer of the FP name to the caller. The caller
  15849. // can only copy it or compare it; it may not write into or otherwise
  15850. // modify/hang on to the pointer.
  15851. //
  15852. LPCSTR OMMapFPToName(OMFP fp)
  15853. {
  15854. LPCSTR szFunctionProfile;
  15855. DebugEntry(OMMapFPToName);
  15856. ASSERT(fp >= OMFP_FIRST);
  15857. ASSERT(fp < OMFP_MAX);
  15858. szFunctionProfile = c_aFpMap[fp].szName;
  15859. DebugExitPVOID(OMMapFPToName, (PVOID)szFunctionProfile);
  15860. return(szFunctionProfile);
  15861. }
  15862. //
  15863. // OMMapNameToWSG()
  15864. //
  15865. OMWSG OMMapNameToWSG(LPCSTR szWSGName)
  15866. {
  15867. int wsg;
  15868. DebugEntry(OMMapNameToWSG);
  15869. for (wsg = OMWSG_FIRST; wsg < OMWSG_MAX; wsg++)
  15870. {
  15871. if (!lstrcmp(szWSGName, c_aWsgMap[wsg].szName))
  15872. {
  15873. // Found it
  15874. break;
  15875. }
  15876. }
  15877. //
  15878. // Note that OMWSG_MAX means "not found"
  15879. //
  15880. DebugExitDWORD(OMMapNameToWSG, wsg);
  15881. return((OMWSG)wsg);
  15882. }
  15883. //
  15884. // OMMapWSGToName()
  15885. //
  15886. LPCSTR OMMapWSGToName(OMWSG wsg)
  15887. {
  15888. LPCSTR szWSGName;
  15889. DebugEntry(OMMapWSGToName);
  15890. ASSERT(wsg >= OMWSG_FIRST);
  15891. ASSERT(wsg < OMWSG_MAX);
  15892. szWSGName = c_aWsgMap[wsg].szName;
  15893. DebugExitPVOID(OMMapWSGToName, (PVOID)szWSGName);
  15894. return(szWSGName);
  15895. }
  15896.