Leaked source code of Windows Server 2003

  1. /*++
  2. Copyright (c) 1996 Microsoft Corporation
  3. Module Name:
  4. receive.c
  5. Abstract:
  6. Routines for registering for global updates and dispensing
  7. received global updates to the routines that have registered
  8. for them.
  9. Author:
  10. John Vert (jvert) 17-Apr-1996
  11. Revision History:
  12. --*/
  13. #include "gump.h"
  14. VOID
  15. GumReceiveUpdates(
  16. IN BOOL IsJoining,
  17. IN GUM_UPDATE_TYPE UpdateType,
  18. IN PGUM_UPDATE_ROUTINE UpdateRoutine,
  19. IN PGUM_LOG_ROUTINE LogRoutine,
  20. IN DWORD DispatchCount,
  21. IN OPTIONAL PGUM_DISPATCH_ENTRY DispatchTable,
  22. IN OPTIONAL PGUM_VOTE_ROUTINE VoteRoutine
  23. )
  24. /*++
  25. Routine Description:
  26. Registers a handler for a particular global update type.
  27. Arguments:
  28. IsJoining - TRUE if the current node is joining. If this is true,
  29. updates will not be delivered until GumEndJoinUpdate has
  30. completed successfully. If this is FALSE, updates will be
  31. delivered immediately.
  32. UpdateType - Supplies the update type to register for.
  33. UpdateRoutine - Supplies the routine to be called when a global update
  34. of the specified type occurs.
  35. LogRoutine - If supplied, specifies the logging routine that must be called to
  36. log transactions to the quorum logs.
  37. DispatchCount - Supplies the number of entries in the dispatch table.
  38. This can be zero.
  39. DispatchTable - Supplies a pointer to the dispatch table. If this is
  40. NULL, no updates of this type will be automatically dispatched.
  41. VoteRoutine - If supplied, this specifies the routine to be called when
  42. a vote for this update type is requested.
  43. Return Value:
  44. None.
  45. --*/
  46. {
  47. PGUM_RECEIVER Receiver;
  48. CL_ASSERT(UpdateType < GumUpdateMaximum);
  49. Receiver = LocalAlloc(LMEM_FIXED, sizeof(GUM_RECEIVER));
  50. if (Receiver == NULL) {
  51. CL_LOGFAILURE(ERROR_NOT_ENOUGH_MEMORY);
  52. return;
  53. }
  54. Receiver->UpdateRoutine = UpdateRoutine;
  55. Receiver->LogRoutine = LogRoutine;
  56. Receiver->DispatchCount = DispatchCount;
  57. Receiver->DispatchTable = DispatchTable;
  58. Receiver->VoteRoutine = VoteRoutine;
  59. //
  60. // John Vert (jvert) 8/2/1996
  61. // Remove the debug print below if we ever want to support
  62. // multiple GUM handlers.
  63. //
  64. if (GumTable[UpdateType].Receivers != NULL) {
  65. ClRtlLogPrint(LOG_CRITICAL,
  66. "[GUM] Multiple GUM handlers registered for UpdateType %1!d!\n",
  67. UpdateType);
  68. }
  69. EnterCriticalSection(&GumpLock);
  70. Receiver->Next = GumTable[UpdateType].Receivers;
  71. GumTable[UpdateType].Receivers = Receiver;
  72. if (IsJoining) {
  73. GumTable[UpdateType].Joined = FALSE;
  74. } else {
  75. GumTable[UpdateType].Joined = TRUE;
  76. }
  77. LeaveCriticalSection(&GumpLock);
  78. }
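/*
 * A minimal usage sketch (not taken from this module) of registering a
 * handler with GumReceiveUpdates at component initialization time. The
 * wrapper and the handler name FmpUpdateHandler are hypothetical; real
 * callers pass routines matching PGUM_UPDATE_ROUTINE / PGUM_LOG_ROUTINE
 * from gump.h.
 */
#if 0
VOID
FmpRegisterForGumUpdates(
    IN BOOL IsJoining
    )
{
    //
    // No dispatch table and no vote routine: every update of this type
    // is delivered straight to FmpUpdateHandler.
    //
    GumReceiveUpdates(IsJoining,
                      GumUpdateFailoverManager,
                      FmpUpdateHandler,
                      NULL,
                      0,
                      NULL,
                      NULL);
}
#endif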
  79. VOID
  80. GumIgnoreUpdates(
  81. IN GUM_UPDATE_TYPE UpdateType,
  82. IN PGUM_UPDATE_ROUTINE UpdateRoutine
  83. )
  84. /*++
  85. Routine Description:
  86. Removes an update handler from the GUM table. This is the opposite
  87. of GumReceiveUpdates
  88. Arguments:
  89. UpdateType - Supplies the update type to deregister from.
  90. UpdateRoutine - Supplies the previously registered update routine
  91. that should now be removed.
  92. Return Value:
  93. None
  94. --*/
  95. {
  96. PGUM_RECEIVER Receiver;
  97. PGUM_RECEIVER *Last;
  98. //
  99. // We cannot safely de-register from GUM... ASSERT if anyone calls this
  100. // function.
  101. //
  102. CL_ASSERT(FALSE);
  103. //
  104. // Walk the list of receivers until we find the specified UpdateRoutine
  105. //
  106. Last = &GumTable[UpdateType].Receivers;
  107. EnterCriticalSection(&GumpLock);
  108. while ((Receiver = *Last) != NULL) {
  109. if (Receiver->UpdateRoutine == UpdateRoutine) {
  110. *Last = Receiver->Next;
  111. break;
  112. }
  113. Last = &Receiver->Next;
  114. }
  115. LeaveCriticalSection(&GumpLock);
  116. if (Receiver != NULL) {
  117. LocalFree(Receiver);
  118. }
  119. }
  120. DWORD
  121. WINAPI
  122. GumpDispatchUpdate(
  123. IN GUM_UPDATE_TYPE Type,
  124. IN DWORD Context,
  125. IN BOOL IsLocker,
  126. IN BOOL SourceNode,
  127. IN DWORD BufferLength,
  128. IN PUCHAR Buffer
  129. )
  130. /*++
  131. Routine Description:
  132. Dispatches a GUM update to all the registered handlers on this node
  133. Arguments:
  134. Type - Supplies the GUM_UPDATE_TYPE for the update. (There is no
  135. Sequence argument; the update is applied at the current GumpSequence.)
  136. Context - Supplies a DWORD of context to be passed to the
  137. GUM update handlers
  138. IsLocker - Specifies if this is a locker node.
  139. SourceNode - Specifies whether the update originated on this node or not.
  140. BufferLength - Supplies the length of the update data
  141. Buffer - Supplies a pointer to the update data
  142. Return Value:
  143. ERROR_SUCCESS if successful
  144. Win32 error otherwise.
  145. --*/
  146. {
  147. PGUM_INFO GumInfo;
  148. PGUM_RECEIVER Receiver;
  149. DWORD Status = ERROR_SUCCESS;
  150. PGUM_DISPATCH_ENTRY Dispatch;
  151. GumInfo = &GumTable[Type];
  152. if (GumInfo->Joined) {
  153. Receiver = GumInfo->Receivers;
  154. while (Receiver != NULL) {
  155. if (Receiver->LogRoutine) {
  156. Status = (*(Receiver->LogRoutine))(PRE_GUM_DISPATCH, GumpSequence,
  157. Context, Buffer, BufferLength);
  158. if (Status != ERROR_SUCCESS)
  159. {
  160. return(Status);
  161. }
  162. }
  163. try {
  164. if ((Receiver->DispatchTable == NULL) ||
  165. (Receiver->DispatchCount < Context) ||
  166. (Receiver->DispatchTable[Context].Dispatch1 == NULL)) {
  167. Status = (Receiver->UpdateRoutine)(Context,
  168. SourceNode,
  169. BufferLength,
  170. Buffer);
  171. } else {
  172. Dispatch = &Receiver->DispatchTable[Context];
  173. //
  174. // This update should be unmarshalled and dispatched to the
  175. // appropriate dispatch routine. The format generated by
  176. // GumpMarshallArgs is an array of offsets into the buffer,
  177. // followed by the actual args. The dispatch table is
  178. // responsible for recording the number of arguments.
  179. //
  180. CL_ASSERT(Dispatch->ArgCount <= GUM_MAX_DISPATCH_ARGS);
  181. CL_ASSERT(Dispatch->ArgCount != 0);
  182. switch (Dispatch->ArgCount) {
  183. case 1:
  184. Status = (Dispatch->Dispatch1)(SourceNode,
  185. GET_ARG(Buffer,0));
  186. break;
  187. case 2:
  188. Status = (Dispatch->Dispatch2)(SourceNode,
  189. GET_ARG(Buffer,0),
  190. GET_ARG(Buffer,1));
  191. break;
  192. case 3:
  193. Status = (Dispatch->Dispatch3)(SourceNode,
  194. GET_ARG(Buffer,0),
  195. GET_ARG(Buffer,1),
  196. GET_ARG(Buffer,2));
  197. break;
  198. case 4:
  199. Status = (Dispatch->Dispatch4)(SourceNode,
  200. GET_ARG(Buffer,0),
  201. GET_ARG(Buffer,1),
  202. GET_ARG(Buffer,2),
  203. GET_ARG(Buffer,3));
  204. break;
  205. case 5:
  206. Status = (Dispatch->Dispatch5)(SourceNode,
  207. GET_ARG(Buffer,0),
  208. GET_ARG(Buffer,1),
  209. GET_ARG(Buffer,2),
  210. GET_ARG(Buffer,3),
  211. GET_ARG(Buffer,4));
  212. break;
  213. case 6:
  214. Status = (Dispatch->Dispatch6)(SourceNode,
  215. GET_ARG(Buffer,0),
  216. GET_ARG(Buffer,1),
  217. GET_ARG(Buffer,2),
  218. GET_ARG(Buffer,3),
  219. GET_ARG(Buffer,4),
  220. GET_ARG(Buffer,5));
  221. break;
  222. case 7:
  223. Status = (Dispatch->Dispatch7)(SourceNode,
  224. GET_ARG(Buffer,0),
  225. GET_ARG(Buffer,1),
  226. GET_ARG(Buffer,2),
  227. GET_ARG(Buffer,3),
  228. GET_ARG(Buffer,4),
  229. GET_ARG(Buffer,5),
  230. GET_ARG(Buffer,6));
  231. break;
  232. case 8:
  233. Status = (Dispatch->Dispatch8)(SourceNode,
  234. GET_ARG(Buffer,0),
  235. GET_ARG(Buffer,1),
  236. GET_ARG(Buffer,2),
  237. GET_ARG(Buffer,3),
  238. GET_ARG(Buffer,4),
  239. GET_ARG(Buffer,5),
  240. GET_ARG(Buffer,6),
  241. GET_ARG(Buffer,7));
  242. break;
  243. default:
  244. CL_ASSERT(FALSE);
  245. }
  246. }
  247. } except (CL_UNEXPECTED_ERROR(GetExceptionCode()),
  248. EXCEPTION_EXECUTE_HANDLER
  249. )
  250. {
  251. Status = GetExceptionCode();
  252. }
  253. if (Status != ERROR_SUCCESS) {
  254. ClRtlLogPrint(LOG_CRITICAL,
  255. "[GUM] Update routine of type %1!u!, context %2!u! failed with status %3!d!\n",
  256. Type,
  257. Context,
  258. Status);
  259. break;
  260. }
  261. if (Receiver->LogRoutine) {
  262. if (IsLocker && (Status == ERROR_SUCCESS))
  263. (*(Receiver->LogRoutine))(POST_GUM_DISPATCH, GumpSequence,
  264. Context, Buffer, BufferLength);
  265. if (!IsLocker)
  266. (*(Receiver->LogRoutine))(POST_GUM_DISPATCH, GumpSequence,
  267. Context, Buffer, BufferLength);
  268. }
  269. Receiver = Receiver->Next;
  270. }
  271. }
  272. if (Status == ERROR_SUCCESS) {
  273. GumpSequence += 1;
  274. // Check if we've received a DM or FM update (other than join) that we care about:
  275. if (( Type == GumUpdateRegistry && Context != DmUpdateJoin )
  276. || ( Type == GumUpdateFailoverManager && Context != FmUpdateJoin )) {
  277. CsDmOrFmHasChanged = TRUE;
  278. }
  279. }
  280. return(Status);
  281. }
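/*
 * A sketch of the marshalled buffer layout that the dispatch code above
 * consumes, following the comment in GumpDispatchUpdate (an array of
 * offsets, then the argument data). The GET_ARG definition shown here is
 * an assumption for illustration, not the one from gump.h:
 *
 *   Buffer -> +------------------------------+
 *             | Offset[0] ... Offset[N-1]    |   offsets from Buffer start
 *             +------------------------------+
 *             | arg 0 data                   |   at Buffer + Offset[0]
 *             | arg 1 data                   |   at Buffer + Offset[1]
 *             | ...                          |
 *             +------------------------------+
 *
 *   #define GET_ARG(Buffer, i) \
 *       ((PUCHAR)(Buffer) + ((DWORD UNALIGNED *)(Buffer))[i])
 */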
  282. //Rod wants to call this a mandatory update instead of H...word
  283. //Sometimes reupdates get delivered in different views on different
  284. //nodes, causing a problem.
  285. //For instance, a locker node might see an update and complete it
  286. //successfully in one view, but when it replays it in another view,
  287. //other nodes may not be able to complete it successfully and may be
  288. //banished.
  289. //In one particular case, the locker node approved of a node join
  290. //because it had finished the node-down processing for that node.
  291. //Subsequently another node, and hence the joiner, went down.
  292. //The locker node tried to replay the approval update and banished
  293. //other nodes that were seeing this update after the joiner
  294. //went down for the second time.
  295. //The correct solution would involve GUM delivering the node-down
  296. //message as a GUM update and delivering it in the same order with
  297. //respect to other messages on all nodes.
  298. //However, this would require some restructuring of the code, which
  299. //can't be done in this time frame (for DTC), hence we are using
  300. //this workaround.
  301. //This workaround is safe for GUM updates initiated by the joiner node
  302. //during the join process.
  303. void GumpIgnoreSomeUpdatesOnReupdate(
  304. IN DWORD Type,
  305. IN DWORD Context)
  306. {
  307. if ((Type == GumUpdateFailoverManager) &&
  308. (Context == FmUpdateApproveJoin))
  309. GumpLastBufferValid = FALSE;
  310. }
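/*
 * Clearing GumpLastBufferValid means the reupdate path will not replay
 * the saved GumpLast* update; that is the suppression the comment above
 * calls for (assumed from how the flag is consumed elsewhere in GUM).
 */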
  311. VOID
  312. GumpCompleteAsyncRpcCall(
  313. IN PRPC_ASYNC_STATE AsyncState,
  314. IN DWORD Status
  315. )
  316. {
  317. DWORD RpcStatus;
  318. RpcStatus = RpcAsyncCompleteCall(AsyncState, &Status);
  319. if ( (RpcStatus != RPC_S_OK) &&
  320. (RpcStatus != RPC_S_CALL_CANCELLED)
  321. )
  322. {
  323. CL_ASSERT(RpcStatus != RPC_S_ASYNC_CALL_PENDING);
  324. ClRtlLogPrint(LOG_CRITICAL,
  325. "[GUM] GumpCompleteAsyncRpcCall: Error completing async RPC call, status %1!u!\n",
  326. RpcStatus
  327. );
  328. //
  329. // This next call will cause the process to exit. We exit here,
  330. // rather than have the sender evict us, to avoid the situation
  331. // where the sender crashes and none of the other surviving nodes
  332. // know to evict us.
  333. //
  334. CL_UNEXPECTED_ERROR( RpcStatus );
  335. }
  336. return;
  337. } // GumpCompleteAsyncRpcCall
  338. error_status_t
  339. s_GumUpdateNode(
  340. IN PRPC_ASYNC_STATE AsyncState,
  341. IN handle_t IDL_handle,
  342. IN DWORD Type,
  343. IN DWORD Context,
  344. IN DWORD Sequence,
  345. IN DWORD BufferLength,
  346. IN UCHAR Buffer[]
  347. )
  348. /*++
  349. Routine Description:
  350. Server side routine for GumUpdateNode. This is the side that
  351. receives the update and dispatches it to the appropriate
  352. handlers.
  353. Arguments:
  354. IDL_handle - RPC binding handle, not used
  355. Type - Supplies the GUM_UPDATE_TYPE
  356. Context - Supplies a DWORD of context to be passed to the
  357. GUM update handlers
  358. Sequence - Supplies the GUM sequence number for the specified update type
  359. BufferLength - Supplies the length of the update data
  360. Buffer - Supplies a pointer to the update data.
  361. Return Value:
  362. ERROR_SUCCESS if the update completed successfully
  363. ERROR_CLUSTER_DATABASE_SEQMISMATCH if the GUM sequence number is invalid
  364. --*/
  365. {
  366. DWORD Status;
  367. PGUM_INFO GumInfo;
  368. //
  369. // We need to grab the GumpSendUpdateLock to serialize send/replay
  370. //
  371. EnterCriticalSection(&GumpSendUpdateLock);
  372. GumInfo = &GumTable[Type];
  373. //SS: Could s_GumUpdateNode run on a newly elected locker node after a Reupdate() is completed,
  374. //and concurrently update the GumpLastXXX variables with an s_GumQueueLockingUpdate() issued
  375. //by another client? That would update those variables non-atomically, and if the new
  376. //locker node then died, Reupdate() could pick up an incorrect combination of them.
  377. //Worse yet, if the old client continued to send its update at the same sequence number
  378. //as the new client, different nodes might reject different updates as duplicates. That could
  379. //result in a cluster inconsistency. But THAT CANNOT happen. If an s_GumUpdateNode() call is
  380. //pending at the new locker, the client is holding the GumpSendUpdateLock(). Reupdate() cannot
  381. //finish at the locker, since it will issue a dispatch via s_GumUpdateNode() to that client and
  382. //get blocked at the lock. If the client dies, Reupdate() will kill it. Hence that client cannot
  383. //pollute other members of the cluster with an update of the same sequence number.
  384. //
  385. //
  386. //SOLN -???
  387. if (Sequence != GumpSequence) {
  388. MIDL_user_free(Buffer);
  389. if (Sequence+1 == GumpSequence) {
  390. //
  391. // This is a duplicate of a previously seen update, probably due to
  392. // a node failure during GUM. Return success since we have already done
  393. // this.
  394. //
  395. ClRtlLogPrint(LOG_UNUSUAL,
  396. "[GUM] s_GumUpdateNode: Sequence %1!u! is a duplicate of last sequence for Type %2!u!\n",
  397. Sequence,
  398. Type);
  399. GumpCompleteAsyncRpcCall(AsyncState, ERROR_SUCCESS);
  400. LeaveCriticalSection(&GumpSendUpdateLock);
  401. return(ERROR_SUCCESS);
  402. } else {
  403. ClRtlLogPrint(LOG_UNUSUAL,
  404. "[GUM] s_GumUpdateNode: Sequence %1!u! does not match current %2!u! for Type %3!u!\n",
  405. Sequence,
  406. GumpSequence,
  407. Type);
  408. GumpCompleteAsyncRpcCall(AsyncState, ERROR_CLUSTER_DATABASE_SEQMISMATCH);
  409. LeaveCriticalSection(&GumpSendUpdateLock);
  410. //
  411. // [GorN] 10/07/1999. The following code will allow the test program
  412. // to recognize this situation and to restart the clustering service.
  413. //
  414. if( NmGetExtendedNodeState( NmLocalNode ) != ClusterNodeUp){
  415. CsInconsistencyHalt(ERROR_CLUSTER_DATABASE_SEQMISMATCH);
  416. }
  417. return(ERROR_CLUSTER_DATABASE_SEQMISMATCH);
  418. }
  419. }
  420. ClRtlLogPrint(LOG_NOISE,
  421. "[GUM] s_GumUpdateNode: dispatching seq %1!u!\ttype %2!u! context %3!u!\n",
  422. Sequence,
  423. Type,
  424. Context);
  425. //SS: set IsLocker to FALSE,
  426. Status = GumpDispatchUpdate(Type,
  427. Context,
  428. FALSE,
  429. FALSE,
  430. BufferLength,
  431. Buffer);
  432. if (Status != ERROR_SUCCESS) {
  433. ClRtlLogPrint(LOG_CRITICAL,
  434. "[GUM] Cluster state inconsistency check\n");
  435. ClRtlLogPrint(LOG_CRITICAL,
  436. "[GUM] s_GumUpdateNode update routine type %1!u! context %2!d! failed with error %3!d! on non-locker node\n",
  437. Type,
  438. Context,
  439. Status);
  440. //
  441. // Complete the call back to the client. This ensures that
  442. // the client gets a return value before we exit the process
  443. // due to the error in the handler.
  444. //
  445. GumpCompleteAsyncRpcCall(AsyncState, Status);
  446. //
  447. // This next call will cause the process to exit. We exit here,
  448. // rather than have the sender evict us, to avoid the situation
  449. // where the sender crashes and none of the other surviving nodes
  450. // know to evict us.
  451. //
  452. CL_UNEXPECTED_ERROR( Status );
  453. MIDL_user_free(Buffer);
  454. LeaveCriticalSection(&GumpSendUpdateLock);
  455. return(Status);
  456. }
  457. ClRtlLogPrint(LOG_NOISE,
  458. "[GUM] s_GumUpdateNode: completed update seq %1!u!\ttype %2!u! context %3!u!\n",
  459. Sequence,
  460. Type,
  461. Context);
  462. GumpCompleteAsyncRpcCall(AsyncState, Status);
  463. if (GumpLastBuffer != NULL) {
  464. MIDL_user_free(GumpLastBuffer);
  465. }
  466. GumpLastBuffer = Buffer;
  467. GumpLastContext = Context;
  468. GumpLastBufferLength = BufferLength;
  469. GumpLastUpdateType = Type;
  470. GumpLastBufferValid = TRUE;
  471. GumpIgnoreSomeUpdatesOnReupdate(GumpLastUpdateType, GumpLastContext);
  472. LeaveCriticalSection(&GumpSendUpdateLock);
  473. return(Status);
  474. }
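/*
 * Why the Sequence+1 case above returns ERROR_SUCCESS: a sketch of the
 * failure that produces it. The sender side lives elsewhere in GUM, so
 * the loop below is a paraphrase, not the real code:
 *
 *   // Sender pushes the update to each active node in turn.
 *   for (each active node N) {
 *       GumUpdateNode(N, Type, Context, Sequence, ...);
 *   }
 *   // If a node fails partway through, the update is replayed. Nodes
 *   // that already applied it have advanced to Sequence+1, so the
 *   // replayed call is recognized as a duplicate and acknowledged.
 */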
  475. error_status_t
  476. s_GumGetNodeSequence(
  477. IN handle_t IDL_handle,
  478. IN DWORD Type,
  479. OUT LPDWORD Sequence,
  480. OUT LPDWORD LockerNodeId,
  481. OUT PGUM_NODE_LIST *ReturnNodeList
  482. )
  483. /*++
  484. Routine Description:
  485. Returns the node's current GUM sequence number for the specified type
  486. Arguments:
  487. IDL_handle - Supplies the RPC binding handle, not used
  488. Type - Supplies the GUM_UPDATE_TYPE
  489. Sequence - Returns the sequence number for the specified GUM_UPDATE_TYPE
  490. LockerNodeId - Returns the current locker node
  491. ReturnNodeList - Returns the list of active nodes
  492. Return Value:
  493. ERROR_SUCCESS
  494. --*/
  495. {
  496. DWORD i;
  497. DWORD NodeCount;
  498. PGUM_INFO GumInfo;
  499. PGUM_NODE_LIST NodeList;
  500. CL_ASSERT(Type < GumUpdateMaximum);
  501. GumInfo = &GumTable[Type];
  502. NodeCount = 0;
  503. *Sequence = 0; // In case of failure set sequence to 0
  504. EnterCriticalSection(&GumpUpdateLock);
  505. //
  506. // Count up the number of nodes in the list.
  507. //
  508. for (i=ClusterMinNodeId; i <= NmMaxNodeId; i++) {
  509. if (GumInfo->ActiveNode[i] == TRUE) {
  510. ++NodeCount;
  511. }
  512. }
  513. CL_ASSERT(NodeCount > 0); // must be at least us in the list.
  514. //
  515. // Allocate node list
  516. //
  517. NodeList = MIDL_user_allocate(sizeof(GUM_NODE_LIST) + (NodeCount-1)*sizeof(DWORD));
  518. if (NodeList == NULL) {
  519. LeaveCriticalSection(&GumpUpdateLock);
  520. return(ERROR_NOT_ENOUGH_MEMORY);
  521. }
  522. NodeList->NodeCount = NodeCount;
  523. NodeCount = 0;
  524. //
  525. // Fill in the node id array to be returned.
  526. //
  527. for (i=ClusterMinNodeId; i <= NmMaxNodeId; i++) {
  528. if (GumInfo->ActiveNode[i] == TRUE) {
  529. NodeList->NodeId[NodeCount] = i;
  530. ++NodeCount;
  531. }
  532. }
  533. *ReturnNodeList = NodeList;
  534. *Sequence = GumpSequence;
  535. *LockerNodeId = GumpLockerNode;
  536. LeaveCriticalSection(&GumpUpdateLock);
  537. return(ERROR_SUCCESS);
  538. }
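/*
 * The MIDL_user_allocate call above uses the classic variable-length
 * struct idiom. A sketch of the shape it implies (the real declaration
 * lives in the GUM IDL-generated headers):
 *
 *   typedef struct _GUM_NODE_LIST {
 *       DWORD NodeCount;
 *       DWORD NodeId[1];        // actually NodeCount entries
 *   } GUM_NODE_LIST, *PGUM_NODE_LIST;
 *
 * sizeof(GUM_NODE_LIST) already covers one NodeId slot, so the allocation
 * only adds room for (NodeCount-1) more.
 */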
  539. /*
  540. // SS: Anytime the GUM lock is assigned to a node, a generation number for the locking node
  541. at the time of assignment, wrt the locker node, is returned to the locking node.
  542. The locking node must present this generation number to unlock the update.
  543. If the generation number wrt the locker at the time of the update doesn't match what is
  544. passed in, the unlock request is failed.
  545. That is, a lock is only released if the unlock occurs in the same generation in which it
  546. was acquired. This ensures that on node-down processing the GUM lock is handled correctly -
  547. only a single waiter is woken up.
  548. //The following description explains the details and how this works.
  549. Sync1, Sync2 represent GUM sync handlers processing the death of a node holding the GUM lock,
  550. occurring at different points in time.
  551. //This is where the current locker acquired its lock on the locker node
  552. s_GumQueueLockingUpdate()
  553. {
  554. }
  555. <------------------------ Sync 1 (Sync handler invoked before the lock has been released)
  556. s_UnlockUpdate()
  557. {
  558. }
  559. <------------------------ Sync 2 (Sync handler invoked after the lock has been released)
  560. // The waiters for the GUM lock may be anywhere within this routine.
  561. // The interesting case is a waiter that is declared dead as well by this sync handler.
  562. // E.g. node 1 is the locker, and Sync 1/2 are for node 3 and node 4. Node 4 was the old locking
  563. // node and s_GumQueueLockingUpdate() for node 3 is blocked at the locker.
  564. s_GumQueueLockingUpdate()
  565. {
  566. <------------------------- Sync 2 #0
  567. GetGenNum
  568. <------------------------- Sync 2 #1
  569. GumpDoLockingUpdate
  570. <------------------------- Sync 2 #2
  571. DispatchStart
  572. <------------------------- Sync 2 #3
  573. if (DeadNode)
  574. {
  575. GumpDoUnlockingUpdate()
  576. }
  577. else
  578. {
  579. DispatchUpdate
  580. }
  581. <------------------------ Sync 2 #4
  582. DispatchEnd
  583. <------------------------ Sync 2 #5
  584. }
  585. Sync 1: s_UnlockUpdate won't release the lock, since the Sync 1 handler will update its
  586. generation number and the locker will take over the responsibility for unlock after reupdates.
  587. Sync 2: s_UnlockUpdate will release a waiter. If the waiter is a dead node, the locker will also
  588. usurp the lock. If the waiter is not a dead node, the locker will not usurp the lock and the waiter
  589. must then free it.
  590. We can consider several cases of Sync 2.
  591. Sync 2 #0: This thread is not on the list. So, the lock is either free or assigned to somebody else.
  592. If free, or if assigned to some other thread and then subsequently assigned to this thread,
  593. DispatchStart() will fail and GumpDoUnlockingUpdate() will succeed.
  594. Sync 2 #1: This thread is not on the list. So, the lock is either free or assigned to somebody else.
  595. If free, or if assigned to some other thread and then subsequently assigned to this thread,
  596. DispatchStart() will fail and GumpDoUnlockingUpdate() will succeed.
  597. Sync 2 #2: This thread is woken up by the now-dead node. In this case, the locker will usurp.
  598. If this thread is woken up before the sync handler, the locker will still usurp (because the waiter
  599. is also dead) but the generation number handed out to this thread will be old =>
  600. the locker node will release the lock after reupdate.
  601. This thread cannot be woken up after the sync handler by the dead locker, since the s_UnlockUpdate from the
  602. dead node will fail with a generation mismatch.
  603. If it is woken up after the sync handler, it must be by the Reupdate, in which case the generation
  604. number handed out to this thread is new and it will proceed to unlock.
  605. DispatchStart() will fail. GumpDoUnlockingUpdate() will fail to unlock.
  606. Sync 2 #3: The locker will usurp. Reupdate will not occur if the async handler runs before DispatchEnd because
  607. UpdatePending is set to TRUE. Reupdate will not occur if the async handler runs after DispatchEnd()
  608. because the GumReplay flag will be set to FALSE.
  609. This thread will Dispatch and call Reupdate() at the end of DispatchEnd(). Unlock from there
  610. will succeed since it will pass the generation number not for this thread running on behalf of
  611. the dead node but for the local node.
  612. Sync 2 #4: The locker will usurp, but not reupdate, because UpdatePending is set to TRUE by DispatchStart.
  613. This thread will dispatch and call Reupdate() at the end of DispatchEnd(). Unlock from there
  614. will succeed since it will not pass the generation number for the dead node invoking this
  615. RPC but for the local node.
  616. Sync 2 #5: The locker will usurp. Reupdate will replay this update and then unlock this update. DispatchEnd()
  617. will not call Reupdate() or unlock.
  618. */
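/*
 * A condensed sketch of the generation check described above, using the
 * names from this file; the actual bookkeeping lives in GumpDoLockingUpdate
 * and GumpDoUnlockingUpdate, so this is illustrative only:
 *
 *   // At grant time, the lock is stamped with the waiter's generation:
 *   *GenerationNum = GumpGetNodeGenNum(GumInfo, NodeId);
 *
 *   // At unlock time, a stale generation means the node died (and the
 *   // locker usurped the lock) after the grant, so the unlock must not
 *   // wake a second waiter for the same grant:
 *   if (GenerationNum != GumpGetNodeGenNum(GumInfo, NodeId)) {
 *       return;     // fail the unlock; the locker owns cleanup now
 *   }
 */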
  619. error_status_t
  620. s_GumQueueLockingUpdate(
  621. IN handle_t IDL_handle,
  622. IN DWORD NodeId,
  623. IN DWORD Type,
  624. IN DWORD Context,
  625. OUT LPDWORD Sequence,
  626. IN DWORD BufferLength,
  627. IN UCHAR Buffer[]
  628. )
  629. /*++
  630. Routine Description:
  631. Queues a locking update. When the lock can be acquired, the update will
  632. be issued and this routine will return with the lock held.
  633. Arguments:
  634. IDL_handle - Supplies the RPC binding context, not used.
  635. NodeId - Supplies the node id of the issuing node.
  636. Type - Supplies the GUM_UPDATE_TYPE of the update
  637. Context - Supplies the GUM update context
  638. (There is no IsLocker argument; the update is dispatched with IsLocker = TRUE.)
  639. Sequence - Returns the sequence that the GUM update must be issued with
  640. BufferLength - Supplies the length of the update.
  641. Buffer - Supplies the update data.
  642. Return Value:
  643. ERROR_SUCCESS if successful
  644. Win32 error otherwise.
  645. --*/
  646. {
  647. DWORD dwLockObtainedAtGenNum;
  648. return(s_GumQueueLockingUpdate2(IDL_handle, NodeId, Type, Context,
  649. Sequence, BufferLength, Buffer, &dwLockObtainedAtGenNum));
  650. }
  651. error_status_t
  652. s_GumQueueLockingUpdate2(
  653. IN handle_t IDL_handle,
  654. IN DWORD NodeId,
  655. IN DWORD Type,
  656. IN DWORD Context,
  657. OUT LPDWORD Sequence,
  658. IN DWORD BufferLength,
  659. IN UCHAR Buffer[],
  660. OUT LPDWORD GenerationNum
  661. )
  662. /*++
  663. Routine Description:
  664. Queues a locking update. When the lock can be acquired, the update will
  665. be issued and this routine will return with the lock held.
  666. Arguments:
  667. IDL_handle - Supplies the RPC binding context, not used.
  668. NodeId - Supplies the node id of the issuing node.
  669. Type - Supplies the GUM_UPDATE_TYPE of the update
  670. Context - Supplies the GUM update context
  671. (There is no IsLocker argument; the update is dispatched with IsLocker = TRUE.)
  672. Sequence - Returns the sequence that the GUM update must be issued with
  673. BufferLength - Supplies the length of the update.
  674. Buffer - Supplies the update data.
  675. GenerationNum - Returns the generation number wrt the locker at which this node
  676. obtained the lock
  677. Return Value:
  678. ERROR_SUCCESS if successful
  679. Win32 error otherwise.
  680. --*/
  681. {
  682. DWORD Status;
  683. PGUM_INFO GumInfo;
  684. DWORD dwGennum;
  685. GumInfo = &GumTable[Type];
  686. // SS: Note we get the generation number before going on the wait
  687. // queue so that if we are woken up after the node is dead,
  688. // we will be comparing the old generation number against the new one
  689. //
  690. // Get current node generation number
  691. //
  692. dwGennum = GumpGetNodeGenNum(GumInfo, NodeId);
  693. Status = GumpDoLockingUpdate(Type, NodeId, Sequence, GenerationNum);
  694. if (Status != ERROR_SUCCESS) {
  695. ClRtlLogPrint(LOG_UNUSUAL,
  696. "[GUM] s_GumQueueLockingUpdate: GumpDoLockingUpdate failed %1!u!\n",
  697. Status);
  698. MIDL_user_free(Buffer);
  699. return(Status);
  700. }
  701. //
  702. // If the node that is granted ownership is no longer a member of the
  703. // cluster or the remote node went down and came back up again, give it up.
  704. //
  705. if (GumpDispatchStart(NodeId, dwGennum) != TRUE)
  706. {
  707. //skip the dispatch and unlock the lock
  708. ClRtlLogPrint(LOG_CRITICAL,
  709. "[GUM] s_GumQueueLockingUpdate: The new locker %1!u! no longer belongs to the cluster\n",
  710. NodeId);
  711. Status = ERROR_CLUSTER_NODE_NOT_READY;
  712. //
  713. // Note we have to use Sequence-1 for the unlock because the update was
  714. // never dispatched and the sequence number was not incremented.
  715. //
  716. GumpDoUnlockingUpdate(Type, *Sequence - 1, NodeId, *GenerationNum);
  717. MIDL_user_free(Buffer);
  718. return(Status);
  719. }
  720. ClRtlLogPrint(LOG_NOISE,
  721. "[GUM] s_GumQueueLockingUpdate: dispatching seq %1!u!\ttype %2!u! context %3!u!\n",
  722. *Sequence,
  723. Type,
  724. Context);
  725. //SS: Set IsLocker to TRUE
  726. Status = GumpDispatchUpdate(Type,
  727. Context,
  728. TRUE,
  729. FALSE,
  730. BufferLength,
  731. Buffer);
  732. if (Status != ERROR_SUCCESS) {
  733. //
  734. // Note we have to use Sequence-1 for the unlock because GumpDispatchUpdate
  735. // failed and did not increment the sequence number.
  736. //
  737. GumpDispatchAbort();
  738. GumpDoUnlockingUpdate(Type, *Sequence - 1, NodeId, *GenerationNum);
  739. if (Buffer != NULL)
  740. MIDL_user_free(Buffer);
  741. } else {
  742. if (GumpLastBuffer != NULL) {
  743. MIDL_user_free(GumpLastBuffer);
  744. }
  745. GumpLastBuffer = Buffer;
  746. GumpLastContext = Context;
  747. GumpLastBufferLength = BufferLength;
  748. GumpLastUpdateType = Type;
  749. GumpLastBufferValid = TRUE;
  750. GumpIgnoreSomeUpdatesOnReupdate(GumpLastUpdateType, GumpLastContext);
  751. //
  752. // Just in case our client dies
  753. //
  754. // SS: Note that if the client dies after GumpDispatchStart, then Reupdate()
  755. // may not be able to send the last known update until we synchronize with
  756. // this update. Hence, Reupdate() doesn't send the updates but GumpDispatchEnd()
  757. // does.
  758. GumpDispatchEnd(NodeId, dwGennum);
  759. }
  760. ClRtlLogPrint(LOG_NOISE,
  761. "[GUM] s_GumQueueLockingUpdate: completed update seq %1!u!\ttype %2!u! context %3!u! result %4!u!\n",
  762. *Sequence,
  763. Type,
  764. Context,
  765. Status);
  766. return(Status);
  767. }
  768. #ifdef GUM_POST_SUPPORT
  769. John Vert (jvert) 11/18/1996
  770. POST is disabled for now since nobody uses it.
  771. N.B. The below code does not handle locker node failures
  772. error_status_t
  773. s_GumQueueLockingPost(
  774. IN handle_t IDL_handle,
  775. IN DWORD NodeId,
  776. IN DWORD Type,
  777. IN DWORD Context,
  778. OUT LPDWORD Sequence,
  779. IN DWORD BufferLength,
  780. IN UCHAR Buffer[],
  781. IN DWORD ActualBuffer
  782. )
  783. /*++
  784. Routine Description:
  785. Queues a post update.
  786. If the GUM lock can be immediately acquired, this routine
  787. behaves exactly like GumQueueLockingUpdate and returns
  788. ERROR_SUCCESS.
  789. If the GUM lock is held, this routine queues an asynchronous
  790. wait block onto the GUM queue and returns ERROR_IO_PENDING.
  791. When the wait block is removed from the GUM queue, the unlocking
  792. thread will call GumpDeliverPostUpdate on the specified node
  793. and supply the passed in context. The calling node can then
  794. deliver the update.
  795. Arguments:
  796. IDL_handle - Supplies the RPC binding context, not used.
  797. NodeId - Supplies the node id of the issuing node.
  798. Type - Supplies the GUM_UPDATE_TYPE of the update
  799. Context - Supplies the GUM update context
  800. Sequence - Returns the sequence that the GUM update must be issued with
  801. BufferLength - Supplies the length of the update.
  802. Buffer - Supplies the update data.
  803. ActualBuffer - Supplies the value of the pointer to the GUM data on the
  804. client side. This will be returned to the callback if this update
  805. is completed asynchronously.
  806. Return Value:
  807. ERROR_SUCCESS if successful
  808. Win32 error otherwise.
  809. --*/
  810. {
  811. DWORD Status;
  812. Status = GumpDoLockingPost(Type, NodeId, Sequence, Context, BufferLength,
  813. ActualBuffer, Buffer);
  814. if (Status != ERROR_SUCCESS) {
  815. if (Status != ERROR_IO_PENDING) {
  816. ClRtlLogPrint(LOG_UNUSUAL,
  817. "[GUM] s_GumQueueLockingPost: GumpDoLockingPost failed %1!u!\n",
  818. Status);
  819. } else {
  820. ClRtlLogPrint(LOG_NOISE,
  821. "[GUM] s_GumQueueLockingPost: GumpDoLockingPost pended update type %1!u! context %2!u!\n",
  822. Type,
  823. Context);
  824. }
  825. return(Status);
  826. }
  827. ClRtlLogPrint(LOG_NOISE,
  828. "[GUM] s_GumQueueLockingPost: dispatching seq %1!u!\ttype %2!u! context %3!u!\n",
  829. *Sequence,
  830. Type,
  831. Context);
  832. //SS: setting IsLocker to FALSE
  833. Status = GumpDispatchUpdate(Type,
  834. Context,
  835. FALSE,
  836. FALSE,
  837. BufferLength,
  838. Buffer);
  839. CL_ASSERT(Status == ERROR_SUCCESS); // posts must never fail
  840. ClRtlLogPrint(LOG_NOISE,
  841. "[GUM] s_GumQueueLockingPost: completed update seq %1!u!\ttype %2!u! context %3!u! result %4!u!\n",
  842. *Sequence,
  843. Type,
  844. Context,
  845. Status);
  846. MIDL_user_free(Buffer);
  847. return(Status);
  848. }
  849. #endif
  850. error_status_t
  851. s_GumAttemptLockingUpdate(
  852. IN handle_t IDL_handle,
  853. IN DWORD NodeId,
  854. IN DWORD Type,
  855. IN DWORD Context,
  856. IN DWORD Sequence,
  857. IN DWORD BufferLength,
  858. IN UCHAR Buffer[]
  859. )
  860. /*++
  861. Routine Description:
  862. Attempts a locking update. If the supplied sequence number
  863. matches and the update lock is not already held, the update
  864. will be issued and this routine will return with the lock held.
  865. Arguments:
  866. IDL_handle - Supplies the RPC binding context, not used.
  867. NodeId - Supplies the node id of the issuing node.
  868. Type - Supplies the GUM_UPDATE_TYPE of the update
  869. Context - Supplies the GUM update context
  870. Sequence - Supplies the sequence that the GUM update must be issued with
  871. BufferLength - Supplies the length of the update.
  872. Buffer - Supplies the update data.
  873. Return Value:
  874. ERROR_SUCCESS if successful
  875. Win32 error otherwise.
  876. --*/
  877. {
  878. DWORD dwGenerationNum;
  879. return(GumpAttemptLockingUpdate(NodeId, Type, Context, Sequence, BufferLength,
  880. Buffer, &dwGenerationNum));
  881. }
  882. error_status_t
  883. s_GumAttemptLockingUpdate2(
  884. IN handle_t IDL_handle,
  885. IN DWORD NodeId,
  886. IN DWORD Type,
  887. IN DWORD Context,
  888. IN DWORD Sequence,
  889. IN DWORD BufferLength,
  890. IN UCHAR Buffer[],
  891. OUT LPDWORD pdwGenerationNum
  892. )
  893. /*++
  894. Routine Description:
  895. Attempts a locking update. If the supplied sequence number
  896. matches and the update lock is not already held, the update
  897. will be issued and this routine will return with the lock held.
  898. Arguments:
  899. IDL_handle - Supplies the RPC binding context, not used.
  900. NodeId - Supplies the node id of the issuing node.
  901. Type - Supplies the GUM_UPDATE_TYPE of the update
  902. Context - Supplies the GUM update context
  903. Sequence - Supplies the sequence that the GUM update must be issued with
  904. BufferLength - Supplies the length of the update.
  905. Buffer - Supplies the update data.
  906. pdwGenerationNum - If successful, it returns the generation number of this
  907. node wrt to the locker at the time at which the lock is acquired
  908. Return Value:
  909. ERROR_SUCCESS if successful
  910. Win32 error otherwise.
  911. --*/
  912. {
  913. return(GumpAttemptLockingUpdate(NodeId, Type, Context, Sequence, BufferLength,
  914. Buffer, pdwGenerationNum));
  915. }
  916. error_status_t
  917. GumpAttemptLockingUpdate(
  918. IN DWORD NodeId,
  919. IN DWORD Type,
  920. IN DWORD Context,
  921. IN DWORD Sequence,
  922. IN DWORD BufferLength,
  923. IN UCHAR Buffer[],
  924. OUT LPDWORD pdwGenerationNum
  925. )
  926. {
  927. DWORD Status;
  928. DWORD dwGenerationNum;
  929. if (!GumpTryLockingUpdate(Type, NodeId, Sequence, &dwGenerationNum)) {
  930. MIDL_user_free(Buffer);
  931. return(ERROR_CLUSTER_DATABASE_SEQMISMATCH);
  932. }
  933. //SS: setting IsLocker to FALSE
  934. Status = GumpDispatchUpdate(Type,
  935. Context,
  936. FALSE,
  937. FALSE,
  938. BufferLength,
  939. Buffer);
  940. if (Status != ERROR_SUCCESS) {
  941. //
  942. // The update has failed on this node, unlock here
  943. // Note we have to use Sequence-1 for the unlock because GumpDispatchUpdate
  944. // failed and did not increment the sequence number.
  945. //
  946. GumpDoUnlockingUpdate(Type, Sequence-1, NodeId, dwGenerationNum);
  947. }
  948. MIDL_user_free(Buffer);
  949. return(Status);
  950. }
  951. error_status_t
  952. s_GumUnlockUpdate(
  953. IN handle_t IDL_handle,
  954. IN DWORD Type,
  955. IN DWORD Sequence
  956. )
  957. /*++
  958. Routine Description:
  959. Unlocks a locked update.
  960. Arguments:
  961. IDL_handle - Supplies the RPC binding context, not used.
  962. Type - Supplies the GUM_UPDATE_TYPE of the update
  963. Sequence - Supplies the sequence that the GUM update was issued with
  964. Return Value:
  965. ERROR_SUCCESS if successful
  966. Win32 error code otherwise
  967. --*/
  968. {
  969. //SS: If this is executing on behalf of a node that has already been
  970. //declared dead, and on whose behalf a Replay has been queued, can
  971. //we have two simultaneous unlocks, one by this thread and the other
  972. //by the replay thread?
  973. // SOLN: We could check the generation number and not release a waiter.
  974. // This should probably be done externally.
  975. GumpDoUnlockingUpdate(Type, Sequence, ClusterInvalidNodeId, 0);
  976. return(ERROR_SUCCESS);
  977. }
  978. error_status_t
  979. s_GumUnlockUpdate2(
  980. IN handle_t IDL_handle,
  981. IN DWORD Type,
  982. IN DWORD Sequence,
  983. IN DWORD NodeId,
  984. IN DWORD GenerationNum
  985. )
  986. /*++
  987. Routine Description:
  988. Unlocks a locked update.
  989. Arguments:
  990. IDL_handle - Supplies the RPC binding context, not used.
  991. Type - Supplies the GUM_UPDATE_TYPE of the update
  992. Sequence - Supplies the sequence the update was issued with. NodeId and
  GenerationNum identify the lock grant being released.
  993. Return Value:
  994. ERROR_SUCCESS if successful
  995. Win32 error code otherwise
  996. --*/
  997. {
  998. //SS: If this is executing on behalf of a node that has already been
  999. //declared dead, and on whose behalf a Replay has been queued, we could
  1000. //have two simultaneous unlocks, one by this thread and the other
  1001. //by the replay thread.
  1002. //SOLN: We check the generation number from when the lock was granted
  1003. //against the current generation number.
  1004. GumpDoUnlockingUpdate(Type, Sequence, NodeId, GenerationNum);
  1005. return(ERROR_SUCCESS);
  1006. }
  1007. error_status_t
  1008. s_GumJoinUpdateNode(
  1009. IN handle_t IDL_handle,
  1010. IN DWORD JoiningId,
  1011. IN DWORD Type,
  1012. IN DWORD Context,
  1013. IN DWORD Sequence,
  1014. IN DWORD BufferLength,
  1015. IN UCHAR Buffer[]
  1016. )
  1017. /*++
  1018. Routine Description:
  1019. Server side routine for GumJoinUpdateNode. This is the side that
  1020. receives the update, adds the node to the update list, and dispatches
  1021. it to the appropriate handlers.
  1022. Arguments:
  1023. IDL_handle - RPC binding handle, not used
  1024. JoiningId - Supplies the nodeid of the joining node.
  1025. Type - Supplies the GUM_UPDATE_TYPE
  1026. Context - Supplies a DWORD of context to be passed to the
  1027. GUM update handlers
  1028. Sequence - Supplies the GUM sequence number for the specified update type
  1029. BufferLength - Supplies the length of the update data
  1030. Buffer - Supplies a pointer to the update data.
  1031. Return Value:
  1032. ERROR_SUCCESS if the update completed successfully
  1033. ERROR_CLUSTER_DATABASE_SEQMISMATCH if the GUM sequence number is invalid
  1034. --*/
  1035. {
  1036. DWORD Status;
  1037. PGUM_INFO GumInfo;
  1038. // Buffer is [unique].
  1039. if ( BufferLength == 0 )
  1040. Buffer = NULL;
  1041. else if ( Buffer == NULL )
  1042. BufferLength = 0;
  1043. GumInfo = &GumTable[Type];
  1044. // sync with replay/updates
  1045. EnterCriticalSection(&GumpSendUpdateLock);
  1046. // [ahm] This is an aborted endjoin; we just resync our seq. with the master.
  1047. // This should be its own GumUpdateSequence RPC, but for now it is OK to
  1048. // do this.
  1049. if (JoiningId == (DWORD) -1)
  1050. {
  1051. // we must be off by one at the most
  1052. if (Sequence+1 != GumpSequence)
  1053. {
  1054. CL_ASSERT(Sequence == GumpSequence);
  1055. GumpSequence = Sequence+1;
  1056. ClRtlLogPrint(LOG_UNUSUAL,
  1057. "[GUM] s_GumJoinUpdateNode: pretend we have seen Sequence %1!u!\n",
  1058. Sequence);
  1059. }
  1060. Status = 0;
  1061. goto done;
  1062. }
  1063. if (Sequence != GumpSequence) {
  1064. ClRtlLogPrint(LOG_UNUSUAL,
  1065. "[GUM] s_GumJoinUpdateNode: Sequence %1!u! does not match current %2!u! for Type %3!u!\n",
  1066. Sequence,
  1067. GumpSequence,
  1068. Type);
  1069. LeaveCriticalSection(&GumpSendUpdateLock);
  1070. MIDL_user_free(Buffer);
  1071. return(ERROR_CLUSTER_DATABASE_SEQMISMATCH);
  1072. }
  1073. ClRtlLogPrint(LOG_NOISE,
  1074. "[GUM] s_GumJoinUpdateNode: dispatching seq %1!u!\ttype %2!u! context %3!u!\n",
  1075. Sequence,
  1076. Type,
  1077. Context);
  1078. CL_ASSERT(NmIsValidNodeId(JoiningId));
  1079. CL_ASSERT(GumpRpcBindings[JoiningId] != NULL);
  1080. CL_ASSERT(GumpReplayRpcBindings[JoiningId] != NULL);
  1081. ClRtlLogPrint(LOG_UNUSUAL,
  1082. "[GUM] s_GumJoinUpdateNode Adding node %1!d! to update list for GUM type %2!d!\n",
  1083. JoiningId,
  1084. Type);
  1085. //SS: setting IsLocker to FALSE
  1086. Status = GumpDispatchUpdate(Type,
  1087. Context,
  1088. FALSE,
  1089. FALSE,
  1090. BufferLength,
  1091. Buffer);
  1092. // [ahm]: We need to make sure the node is still up, otherwise ignore
  1093. EnterCriticalSection(&GumpLock);
  1094. if (MMIsNodeUp(JoiningId) == TRUE) {
  1095. GumTable[Type].ActiveNode[JoiningId] = TRUE;
  1096. }
  1097. LeaveCriticalSection(&GumpLock);
  1098. ClRtlLogPrint(LOG_NOISE,
  1099. "[GUM] s_GumJoinUpdateNode: completed update seq %1!u!\ttype %2!u! context %3!u!\n",
  1100. Sequence,
  1101. Type,
  1102. Context);
  1103. done:
  1104. if (GumpLastBuffer != NULL) {
  1105. MIDL_user_free(GumpLastBuffer);
  1106. }
  1107. GumpLastBuffer = NULL;
  1108. GumpLastContext = Context;
  1109. GumpLastBufferLength = 0;
  1110. GumpLastUpdateType = Type;
  1111. GumpLastBufferValid = FALSE;
  1112. LeaveCriticalSection(&GumpSendUpdateLock);
  1113. MIDL_user_free(Buffer);
  1114. return(Status);
  1115. }
  1116. error_status_t
  1117. s_GumAttemptJoinUpdate(
  1118. IN handle_t IDL_handle,
  1119. IN DWORD JoiningId,
  1120. IN DWORD Type,
  1121. IN DWORD Context,
  1122. IN DWORD Sequence,
  1123. IN DWORD BufferLength,
  1124. IN UCHAR Buffer[]
  1125. )
  1126. /*++
  1127. Routine Description:
  1128. Attempts a locking join update. If the supplied sequence number
  1129. matches and the update lock is not already held, the join update
  1130. will be issued, the joining node will be added to the update list,
  1131. and this routine will return with the lock held.
  1132. Arguments:
  1133. IDL_handle - Supplies the RPC binding context, not used.
  1134. JoiningId - Supplies the nodeid of the joining node.
  1135. Type - Supplies the GUM_UPDATE_TYPE of the update
  1136. Context - Supplies the GUM update context
  1137. Sequence - Supplies the sequence that the GUM update must be issued with
  1138. BufferLength - Supplies the length of the update.
  1139. Buffer - Supplies the update data.
  1140. Return Value:
  1141. ERROR_SUCCESS if successful
  1142. Win32 error otherwise.
  1143. --*/
  1144. {
  1145. DWORD dwGenerationNum;
  1146. // Buffer is [unique].
  1147. if ( BufferLength == 0 )
  1148. Buffer = NULL;
  1149. else if ( Buffer == NULL )
  1150. BufferLength = 0;
  1151. return(GumpAttemptJoinUpdate(JoiningId, Type, Context, Sequence, BufferLength,
  1152. Buffer, &dwGenerationNum));
  1153. }
  1154. error_status_t
  1155. s_GumAttemptJoinUpdate2(
  1156. IN handle_t IDL_handle,
  1157. IN DWORD JoiningId,
  1158. IN DWORD Type,
  1159. IN DWORD Context,
  1160. IN DWORD Sequence,
  1161. IN DWORD BufferLength,
  1162. IN UCHAR Buffer[],
  1163. IN LPDWORD pdwGenerationNum
  1164. )
  1165. /*++
  1166. Routine Description:
  1167. Attempts a locking join update. If the supplied sequence number
  1168. matches and the update lock is not already held, the join update
  1169. will be issued, the joining node will be added to the update list,
  1170. and this routine will return with the lock held.
  1171. Arguments:
  1172. IDL_handle - Supplies the RPC binding context, not used.
  1173. JoiningId - Supplies the nodeid of the joining node.
  1174. Type - Supplies the GUM_UPDATE_TYPE of the update
  1175. Context - Supplies the GUM update context
  1176. Sequence - Supplies the sequence that the GUM update must be issued with
  1177. BufferLength - Supplies the length of the update.
  1178. Buffer - Supplies the update data.
  1179. pdwGenerationNum - If successful, then the generation number at which the lock
  1180. is acquired is returned via this parameter.
  1181. Return Value:
  1182. ERROR_SUCCESS if successful
  1183. Win32 error otherwise.
  1184. --*/
  1185. {
  1186. // Buffer is [unique].
  1187. if ( BufferLength == 0 )
  1188. Buffer = NULL;
  1189. else if ( Buffer == NULL )
  1190. BufferLength = 0;
  1191. return(GumpAttemptJoinUpdate(JoiningId, Type, Context, Sequence, BufferLength,
  1192. Buffer, pdwGenerationNum));
  1193. }
  1194. error_status_t
  1195. GumpAttemptJoinUpdate(
  1196. IN DWORD JoiningId,
  1197. IN DWORD Type,
  1198. IN DWORD Context,
  1199. IN DWORD Sequence,
  1200. IN DWORD BufferLength,
  1201. IN UCHAR Buffer[],
  1202. IN LPDWORD pdwGenerationNum
  1203. )
  1204. {
  1205. DWORD Status;
  1206. PGUM_INFO GumInfo;
  1207. GumInfo = &GumTable[Type];
  1208. if (!GumpTryLockingUpdate(Type, JoiningId, Sequence, pdwGenerationNum)) {
  1209. MIDL_user_free(Buffer);
  1210. return(ERROR_CLUSTER_DATABASE_SEQMISMATCH);
  1211. }
  1212. // sync with replay/updates
  1213. EnterCriticalSection(&GumpSendUpdateLock);
  1214. //SS: set IsLocker to TRUE
  1215. Status = GumpDispatchUpdate(Type,
  1216. Context,
  1217. TRUE,
  1218. FALSE,
  1219. BufferLength,
  1220. Buffer);
  1221. if (Status != ERROR_SUCCESS) {
  1222. //
  1223. // The update has failed on this node, unlock here
  1224. // Note we have to use Sequence-1 for the unlock because
  1225. // GumpDispatchUpdate failed and did not increment the
  1226. // sequence number.
  1227. //
  1228. // SS: The generation number should help, if the joining node is declared
  1229. // dead anytime between the joiner acquiring the lock and releasing it
  1230. GumpDoUnlockingUpdate(Type, Sequence-1, JoiningId, *pdwGenerationNum);
  1231. } else {
  1232. CL_ASSERT(NmIsValidNodeId(JoiningId));
  1233. CL_ASSERT(GumpRpcBindings[JoiningId] != NULL);
  1234. CL_ASSERT(GumpReplayRpcBindings[JoiningId] != NULL);
  1235. ClRtlLogPrint(LOG_UNUSUAL,
  1236. "[GUM] s_GumAttemptJoinUpdate Adding node %1!d! to update list for GUM type %2!d!\n",
  1237. JoiningId,
  1238. Type);
  1239. // [ahm]: We need to make sure the node is still up, otherwise ignore
  1240. EnterCriticalSection(&GumpLock);
  1241. if (MMIsNodeUp(JoiningId) == TRUE) {
  1242. GumTable[Type].ActiveNode[JoiningId] = TRUE;
  1243. }
  1244. LeaveCriticalSection(&GumpLock);
  1245. if (GumpLastBuffer != NULL) {
  1246. MIDL_user_free(GumpLastBuffer);
  1247. }
  1248. GumpLastBuffer = NULL;
  1249. GumpLastContext = Context;
  1250. GumpLastBufferLength = 0;
  1251. GumpLastUpdateType = Type;
  1252. GumpLastBufferValid = FALSE;
  1253. }
  1254. LeaveCriticalSection(&GumpSendUpdateLock);
  1255. MIDL_user_free(Buffer);
  1256. return(Status);
  1257. }
  1258. /****
  1259. @func DWORD | s_GumCollectVoteFromNode | This is the server side
  1260. routine for GumCollectVoteFromNode.
  1261. @parm IN IDL_handle | RPC binding handle, not used.
  1262. @parm IN DWORD | UpdateType | The update type for which this
  1263. vote is requested.
  1264. @parm IN DWORD | dwContext | This specifies the context related to the
  1265. UpdateType for which a vote is being sought.
  1266. @parm IN DWORD | dwInputBufLength | The length of the input buffer
  1267. passed in via pInputBuf.
  1268. @parm IN PUCHAR | pInputBuf | A pointer to the input buffer via
  1269. which the input data for the vote is supplied.
  1270. @parm IN DWORD | dwVoteLength | The length of the vote. This is
  1271. also the size of the buffer that pVoteBuf points to.
  1272. @parm OUT PUCHAR | pVoteBuf | A pointer to a buffer in which
  1273. this node may cast its vote. The length of the vote must
  1274. not exceed dwVoteLength.
  1275. @rdesc Returns a result code. ERROR_SUCCESS on success.
  1276. @comm A node collecting votes invokes this routine to collect a vote
  1277. from the remote node. This routine simply invokes GumpDispatchVote().
  1278. @xref <f GumpCollectVote> <f GumpDispatchVote>
  1279. ****/
  1280. DWORD
  1281. WINAPI
  1282. s_GumCollectVoteFromNode(
  1283. IN handle_t IDL_handle,
  1284. IN DWORD UpdateType,
  1285. IN DWORD dwContext,
  1286. IN DWORD dwInputBufLength,
  1287. IN PUCHAR pInputBuf,
  1288. IN DWORD dwVoteLength,
  1289. OUT PUCHAR pVoteBuf
  1290. )
  1291. {
  1292. DWORD dwStatus;
  1293. ClRtlLogPrint(LOG_NOISE,
  1294. "[GUM] s_GumCollectVote: collecting vote for type %1!u!\tcontext %2!u!\n",
  1295. UpdateType,
  1296. dwContext);
  1297. dwStatus = GumpDispatchVote(UpdateType,
  1298. dwContext,
  1299. dwInputBufLength,
  1300. pInputBuf,
  1301. dwVoteLength,
  1302. pVoteBuf);
  1303. ClRtlLogPrint(LOG_NOISE,
  1304. "[GUM] s_GumCollectVote: completed, VoteStatus=%1!u!\n",
  1305. dwStatus);
  1306. return(dwStatus);
  1307. }
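/*
 * A minimal sketch of a vote handler on the receiving end of this call.
 * The name FmpVoteHandler is hypothetical, and the signature is assumed
 * to mirror the arguments that GumpDispatchVote forwards:
 */
#if 0
DWORD
WINAPI
FmpVoteHandler(
    IN DWORD dwContext,
    IN DWORD dwInputBufLength,
    IN PUCHAR pInputBuf,
    IN DWORD dwVoteLength,
    OUT PUCHAR pVoteBuf
    )
{
    //
    // Cast this node's vote. The vote must fit in dwVoteLength bytes.
    //
    if (dwVoteLength < sizeof(DWORD)) {
        return(ERROR_INSUFFICIENT_BUFFER);
    }
    *(DWORD UNALIGNED *)pVoteBuf = 1;   // e.g. vote "yes"
    return(ERROR_SUCCESS);
}
#endif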
  1308. #ifdef GUM_POST_SUPPORT
  1309. John Vert (jvert) 11/18/1996
  1310. POST is disabled for now since nobody uses it.
  1311. error_status_t
  1312. s_GumDeliverPostCallback(
  1313. IN handle_t IDL_handle,
  1314. IN DWORD FirstNode,
  1315. IN DWORD Type,
  1316. IN DWORD Context,
  1317. IN DWORD Sequence,
  1318. IN DWORD BufferLength,
  1319. IN DWORD Buffer
  1320. )
  1321. /*++
  1322. Routine Description:
  1323. Callback function used to deliver a posted update that was
  1324. queued.
  1325. Arguments:
  1326. IDL_handle - Supplies the RPC binding context, not used.
  1327. FirstNode - Supplies the node ID where the posts should start.
  1328. This is generally the LockerNode+1.
  1329. Type - Supplies the GUM_UPDATE_TYPE of the update
  1330. Context - Supplies the GUM update context
  1331. Sequence - Supplies the sequence that the GUM update must be issued with
  1332. BufferLength - Supplies the length of the update.
  1333. Buffer - Supplies the update data.
  1334. Return Value:
  1335. ERROR_SUCCESS
  1336. --*/
  1337. {
  1338. GumpDeliverPosts(FirstNode,
  1339. Type,
  1340. Sequence,
  1341. Context,
  1342. BufferLength,
  1343. (PVOID)Buffer);
  1344. return(ERROR_SUCCESS);
  1345. }
  1346. #endif