Leaked source code of Windows Server 2003

  1. /*++
  2. Copyright (c) 1996 Microsoft Corporation
  3. Module Name:
  4. init.c
  5. Abstract:
  6. This module provides the main cluster initialization.
  7. Author:
  8. John Vert (jvert) 6/5/1996
  9. Revision History:
  10. --*/
  11. extern "C"
  12. {
  13. #include "initp.h"
  14. #include <objbase.h>
  15. RPC_STATUS ApipConnectCallback(
  16. IN RPC_IF_ID * Interface,
  17. IN void * Context
  18. );
  19. }
  20. #define CLUSTER_PRIORITY_CLASS HIGH_PRIORITY_CLASS
  21. #include "CVssCluster.h"
  22. //
  23. // Global Data
  24. //
  25. RPC_BINDING_VECTOR *CsRpcBindingVector = NULL;
  26. LPTOP_LEVEL_EXCEPTION_FILTER lpfnOriginalExceptionFilter = NULL;
  27. BOOLEAN bFormCluster = TRUE;
  28. //
  29. // Local Data
  30. //
  31. BOOLEAN CspIntraclusterRpcServerStarted = FALSE;
  32. HANDLE CspMutex = NULL;
  33. PCLRTL_WORK_QUEUE CspEventReportingWorkQueue = NULL;
  34. //
  35. // Prototypes
  36. //
  37. LONG
  38. CspExceptionFilter(
  39. IN PEXCEPTION_POINTERS ExceptionInfo
  40. );
  41. //
  42. // Routines.
  43. //
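/*++
Routine Description:
Logs an event marking that the cluster service started by joining or
forming a cluster. The cluster name is read from the cluster database;
if it cannot be read, "Unknown" is logged instead.
Arguments:
bJoin - TRUE if the node joined an existing cluster, FALSE if it formed one.
Return Value:
None.
--*/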
  44. VOID CspLogStartEvent(
  45. IN BOOL bJoin)
  46. {
  47. LPWSTR pszClusterName = NULL;
  48. LPWSTR pszName = NULL;
  49. DWORD dwClusterNameSize;
  50. DWORD dwSize;
  51. DWORD dwStatus;
  52. WCHAR szUnknownClusterName[]=L"Unknown";
  53. pszClusterName = NULL;
  54. dwClusterNameSize = 0;
  55. dwStatus = DmQueryString(DmClusterParametersKey,
  56. CLUSREG_NAME_CLUS_NAME,
  57. REG_SZ,
  58. &pszClusterName,
  59. &dwClusterNameSize,
  60. &dwSize);
  61. if (dwStatus != ERROR_SUCCESS)
  62. {
  63. //we don't treat this error as fatal, since
  64. //the cluster did start, but we really shouldn't get this error
  65. ClRtlLogPrint(LOG_UNUSUAL,
  66. "[INIT] Couldnt get the cluster name, status=%1!u!\n",
  67. dwStatus);
  68. pszName = szUnknownClusterName;
  69. }
  70. else
  71. pszName = pszClusterName;
  72. //log events in the cluster log to mark the start of the cluster server
  73. if (bJoin)
  74. CsLogEvent1(LOG_NOISE, SERVICE_SUCCESSFUL_JOIN, pszName);
  75. else
  76. CsLogEvent1(LOG_NOISE, SERVICE_SUCCESSFUL_FORM, pszName);
  77. if (pszClusterName)
  78. LocalFree(pszClusterName);
  79. }
  80. DWORD
  81. ClusterInitialize(
  82. VOID
  83. )
  84. /*++
  85. Routine Description:
  86. This is the main cluster initialization path. It calls the
  87. initialization routines of all the other components. It then
  88. attempts to join an existing cluster. If the existing cluster
  89. cannot be found, it forms a new cluster.
  90. Arguments:
  91. None.
  92. Return Value:
  93. ERROR_SUCCESS if successful
  94. Win32 error code otherwise.
  95. --*/
  96. {
  97. DWORD Status;
  98. DWORD JoinStatus;
  99. DWORD StringBufferSize = 0, StringSize = 0;
  100. SIZE_T minWorkingSetSize;
  101. SIZE_T maxWorkingSetSize;
  102. BOOL bJoin;
  103. BOOL bEvicted;
  104. PNM_NODE_ENUM2 pNodeEnum = NULL;
  105. HRESULT hr = S_OK;
  106. ClRtlLogPrint(LOG_NOISE, "[INIT] ClusterInitialize called to start cluster.\n");
  107. //
  108. // give us a fighting chance on loaded server
  109. //
  110. #if CLUSTER_PRIORITY_CLASS
  111. if ( !SetPriorityClass( GetCurrentProcess(), CLUSTER_PRIORITY_CLASS ) ) {
  112. ClRtlLogPrint(LOG_UNUSUAL,
  113. "[INIT] Failed to set cluster service priority class, Status %1!lx!.\n",
  114. GetLastError() );
  115. }
  116. #endif
  117. // initialize our product suite
  118. CsMyProductSuite = (SUITE_TYPE)ClRtlGetSuiteType();
  119. CL_ASSERT(CsMyProductSuite != 0);
  120. //
  121. // First check our OS to make sure it is ok to run.
  122. //
  123. if (!ClRtlIsOSValid() ||
  124. !ClRtlIsOSTypeValid()) {
  125. //
  126. // Bail out, machine is running something odd.
  127. //
  128. CsLogEvent(LOG_CRITICAL, SERVICE_FAILED_INVALID_OS);
  129. return(ERROR_REVISION_MISMATCH);
  130. }
  131. Status = ClRtlHasNodeBeenEvicted( &bEvicted );
  132. if ( Status != ERROR_SUCCESS )
  133. {
  134. ClRtlLogPrint(LOG_CRITICAL,
  135. "[CS] Unable to determine if this node was previously evicted or not, status %1!u!\n",
  136. Status);
  137. return Status;
  138. }
  139. if ( bEvicted != FALSE )
  140. {
  141. // This node has been evicted previously, but cleanup could not complete.
  142. ClRtlLogPrint(LOG_UNUSUAL,
  143. "[CS] This node has been evicted from the cluster, but cleanup was not completed. Restarting cleanup\n"
  144. );
  145. // Reinitiate cleanup
  146. hr = ClRtlCleanupNode(
  147. NULL, // Name of the node to be cleaned up (NULL means this node)
  148. 60000, // Amount of time (in milliseconds) to wait before starting cleanup
  149. 0 // timeout interval in milliseconds
  150. );
  151. if ( FAILED( hr ) && ( hr != RPC_S_CALLPENDING ) )
  152. {
  153. Status = HRESULT_CODE( hr );
  154. ClRtlLogPrint(LOG_CRITICAL,
  155. "[CS] Unable to reinitiate cleanup, status 0x%1!x!\n",
  156. hr);
  157. }
  158. else
  159. {
  160. Status = ERROR_SUCCESS;
  161. }
  162. return Status;
  163. }
  164. //
  165. // Acquire our named mutex in order to prevent multiple copies
  166. // of the cluster service from accidentally getting started.
  167. //
  168. CspMutex = CreateMutexW(
  169. NULL,
  170. FALSE,
  171. L"ClusterServer_Running"
  172. );
  173. if (CspMutex==NULL) {
  174. Status = GetLastError();
  175. ClRtlLogPrint(LOG_CRITICAL,
  176. "[CS] Unable to create cluster mutex, status %1!u!\n",
  177. Status);
  178. return Status;
  179. }
  180. if (WaitForSingleObject(CspMutex, 30000) == WAIT_TIMEOUT) {
  181. //
  182. // Somebody already has this mutex, exit immediately.
  183. //
  184. ClRtlLogPrint(LOG_CRITICAL,
  185. "[CS] The Cluster Service is already running.\n");
  186. return(ERROR_SERVICE_ALREADY_RUNNING);
  187. }
  188. //
  189. // Set our unhandled exception filter so that if anything horrible
  190. // goes wrong, we can exit immediately.
  191. //
  192. lpfnOriginalExceptionFilter = SetUnhandledExceptionFilter(CspExceptionFilter);
  193. //
  194. // Next initialize the testpoint code
  195. //
  196. TestpointInit();
  197. g_pCVssWriterCluster = new CVssWriterCluster;
  198. if ( g_pCVssWriterCluster == NULL ) {
  199. Status = ERROR_NOT_ENOUGH_MEMORY;
  200. ClRtlLogPrint(LOG_CRITICAL,
  201. "[CS] VSS: Unable to allocate VssWriter, %1!u!\n", Status);
  202. return(Status);
  203. }
  204. //
  205. // Create the global work queues.
  206. //
  207. CsDelayedWorkQueue = ClRtlCreateWorkQueue(CS_MAX_DELAYED_WORK_THREADS,
  208. THREAD_PRIORITY_NORMAL);
  209. if (CsDelayedWorkQueue == NULL) {
  210. Status = GetLastError();
  211. ClRtlLogPrint(LOG_CRITICAL,
  212. "[CS] Unable to create delayed work queue, %1!u!\n",
  213. Status);
  214. return(Status);
  215. }
  216. CsCriticalWorkQueue = ClRtlCreateWorkQueue(CS_MAX_CRITICAL_WORK_THREADS,
  217. THREAD_PRIORITY_ABOVE_NORMAL);
  218. if (CsCriticalWorkQueue == NULL) {
  219. Status = GetLastError();
  220. ClRtlLogPrint(LOG_CRITICAL,
  221. "[CS] Unable to create critical work queue, %1!u!\n",
  222. Status);
  223. return(Status);
  224. }
  225. #if 0
  226. CspEventReportingWorkQueue = ClRtlCreateWorkQueue(1, THREAD_PRIORITY_NORMAL);
  227. if (CspEventReportingWorkQueue == NULL) {
  228. Status = GetLastError();
  229. ClRtlLogPrint(LOG_CRITICAL,
  230. "[CS] Unable to create event reporting work queue, %1!u!\n",
  231. Status);
  232. return(Status);
  233. }
  234. ClRtlEventLogSetWorkQueue( CspEventReportingWorkQueue );
  235. #endif
  236. //
  237. // Init COM
  238. //
  239. Status = CoInitializeEx( NULL, COINIT_DISABLE_OLE1DDE | COINIT_MULTITHREADED );
  240. if ( !SUCCEEDED( Status )) {
  241. ClRtlLogPrint(LOG_CRITICAL, "[CS] Couldn't init COM %1!08X!\n", Status );
  242. return Status;
  243. }
  244. //
  245. // Initialize Object Manager
  246. //
  247. Status = OmInitialize();
  248. #ifdef CLUSTER_TESTPOINT
  249. TESTPT( TpFailOmInit ) {
  250. Status = 99999;
  251. }
  252. #endif
  253. if (Status != ERROR_SUCCESS) {
  254. return(Status);
  255. }
  256. //
  257. // Initialize Event Processor
  258. //
  259. Status = EpInitialize();
  260. #ifdef CLUSTER_TESTPOINT
  261. TESTPT( TpFailEpInit ) {
  262. Status = 99999;
  263. }
  264. #endif
  265. if (Status != ERROR_SUCCESS) {
  266. return(Status);
  267. }
  268. //
  269. // Chittur Subbaraman (chitturs) - 12/4/99
  270. //
  271. // Initialize the restore database manager. This function is a NOOP
  272. // if restore database is not being done. This function MUST be called
  273. // before the DM is initialized.
  274. //
  275. Status = RdbInitialize();
  276. if (Status != ERROR_SUCCESS) {
  277. return(Status);
  278. }
  279. //
  280. // Initialize Database Manager
  281. //
  282. Status = DmInitialize();
  283. #ifdef CLUSTER_TESTPOINT
  284. TESTPT( TpFailDmInit ) {
  285. Status = 99999;
  286. }
  287. #endif
  288. if (Status != ERROR_SUCCESS) {
  289. return(Status);
  290. }
  291. //
  292. // Initialize Node Manager
  293. //
  294. Status = NmInitialize();
  295. #ifdef CLUSTER_TESTPOINT
  296. TESTPT( TpFailNmInit ) {
  297. Status = 99999;
  298. }
  299. #endif
  300. if (Status != ERROR_SUCCESS) {
  301. return(Status);
  302. }
  303. //
  304. // Initialize Global Update Manager
  305. //
  306. Status = GumInitialize();
  307. #ifdef CLUSTER_TESTPOINT
  308. TESTPT( TpFailGumInit ) {
  309. Status = 99999;
  310. }
  311. #endif
  312. if (Status != ERROR_SUCCESS) {
  313. return(Status);
  314. }
  315. //
  316. // Initialize the cluster wide event logging
  317. //
  318. if (!CsNoRepEvtLogging) {
  319. Status = EvInitialize();
  320. //if this fails, we still start the cluster service
  321. if ( Status != ERROR_SUCCESS ) {
  322. ClRtlLogPrint(LOG_CRITICAL,
  323. "[INIT] Error calling EvInitialize, Status = %1!u!\n",
  324. Status
  325. );
  326. }
  327. }
  328. //
  329. // Initialize Failover Manager component
  330. //
  331. Status = FmInitialize();
  332. #ifdef CLUSTER_TESTPOINT
  333. TESTPT( TpFailFmInit ) {
  334. Status = 99999;
  335. }
  336. #endif
  337. if (Status != ERROR_SUCCESS) {
  338. return(Status);
  339. }
  340. //
  341. // Initialize API
  342. //
  343. Status = ApiInitialize();
  344. if (Status != ERROR_SUCCESS) {
  345. return(Status);
  346. }
  347. //
  348. // Initialize Log Manager component
  349. //
  350. Status = LmInitialize();
  351. #ifdef CLUSTER_TESTPOINT
  352. TESTPT( TpFailLmInit ) {
  353. Status = 99999;
  354. }
  355. #endif
  356. if (Status != ERROR_SUCCESS) {
  357. return(Status);
  358. }
  359. //
  360. // Initialize the Checkpoint Manager component
  361. //
  362. Status = CpInitialize();
  363. #ifdef CLUSTER_TESTPOINT
  364. TESTPT( TpFailCpInit ) {
  365. Status = 99999;
  366. }
  367. #endif
  368. if (Status != ERROR_SUCCESS) {
  369. return(Status);
  370. }
  371. //
  372. // find out what domain account we're running under. This is needed by
  373. // some packages
  374. //
  375. Status = ClRtlGetRunningAccountInfo( &CsServiceDomainAccount );
  376. if ( Status != ERROR_SUCCESS ) {
  377. ClRtlLogPrint(LOG_CRITICAL, "[CS] Couldn't determine Service Domain Account. status %1!u!\n",
  378. Status);
  379. return Status;
  380. }
  381. ClRtlLogPrint(LOG_NOISE, "[CS] Service Domain Account = %1!ws!\n",
  382. CsServiceDomainAccount);
  383. //
  384. // Prepare the RPC server. This does not enable us to receive any calls.
  385. //
  386. Status = ClusterInitializeRpcServer();
  387. if (Status != ERROR_SUCCESS) {
  388. return(Status);
  389. }
  390. //
  391. // Read the cluster name from the database.
  392. //
  393. Status = DmQuerySz(
  394. DmClusterParametersKey,
  395. CLUSREG_NAME_CLUS_NAME,
  396. &CsClusterName,
  397. &StringBufferSize,
  398. &StringSize
  399. );
  400. if (Status != ERROR_SUCCESS) {
  401. ClRtlLogPrint(LOG_UNUSUAL,
  402. "[CS] Unable to read cluster name from database. Service initialization failed.\n"
  403. );
  404. return(Status);
  405. }
  406. //
  407. // First, attempt to join the cluster.
  408. //
  409. ClRtlLogPrint(LOG_NOISE,
  410. "[INIT] Attempting to join cluster %1!ws!\n",
  411. CsClusterName
  412. );
  413. bFormCluster = TRUE;
  414. JoinStatus = ClusterJoin();
  415. //
  416. // If this node was evicted while it was down, this error code is returned by the
  417. // sponsor when this node tries to rejoin the cluster. In this case, initiate a cleanup
  418. // of this node and exit.
  419. //
  420. if ( (JoinStatus == ERROR_CLUSTER_NODE_NOT_MEMBER) ||
  421. (JoinStatus == ERROR_CLUSTER_INSTANCE_ID_MISMATCH))
  422. {
  423. DWORD CleanupStatus;
  424. //SS: If the instance mismatch occurs the first time the service runs after
  425. //configuration, it implies there was some sort of confusion (duplicate IP
  426. //addresses or name) during the cluster configuration process; in that case,
  427. //we would like setup to make the cleanup decision.
  428. // If this is not the first run after a clean install, the service will
  429. // initiate cleanup itself.
  430. if (!CsFirstRun || CsUpgrade)
  431. {
  432. WCHAR wStatus[32];
  433. ClRtlLogPrint(LOG_UNUSUAL,
  434. "[INIT] This node has been evicted from the cluster when it was unavailable. Initiating cleanup.\n"
  435. );
  436. // Initiate cleanup of this node.
  437. hr = ClRtlCleanupNode(
  438. NULL, // Name of the node to be cleaned up (NULL means this node)
  439. 60000, // Amount of time (in milliseconds) to wait before starting cleanup
  440. 0 // timeout interval in milliseconds
  441. );
  442. if ( FAILED( hr ) && ( hr != RPC_S_CALLPENDING ) )
  443. {
  444. CleanupStatus = HRESULT_CODE( hr );
  445. ClRtlLogPrint(LOG_CRITICAL,
  446. "[INIT] Failed to initiate cleanup of this node, status 0x%1!x!\n",
  447. hr
  448. );
  449. }
  450. else
  451. {
  452. CleanupStatus = ERROR_SUCCESS;
  453. }
  454. wsprintfW(&(wStatus[0]), L"%u", CleanupStatus);
  455. CsLogEvent1(
  456. LOG_NOISE,
  457. CS_EVENT_CLEANUP_ON_EVICTION,
  458. wStatus
  459. );
  460. }
  461. return(JoinStatus);
  462. }
  463. //
  464. // Chittur Subbaraman (chitturs) - 10/27/98
  465. //
  466. // If a database restore operation is requested, check whether
  467. // you succeeded in establishing a connection. If so, check
  468. // whether you are forced to restore the DB. If not, abort the
  469. // whole operation and return. If you are forced to restore,
  470. // you will first stop the service in other nodes and then
  471. // try to form a cluster.
  472. //
  473. if ( CsDatabaseRestore == TRUE ) {
  474. if ( JoinStatus == ERROR_CLUSTER_NODE_UP ) {
  475. if ( CsForceDatabaseRestore == FALSE ) {
  476. ClRtlLogPrint(LOG_UNUSUAL,
  477. "[INIT] Cannot restore DB while the cluster is up, service init failed\n"
  478. );
  479. ClRtlLogPrint(LOG_UNUSUAL,
  480. "[INIT] You may try to restart the service with the forcerestore option\n"
  481. );
  482. RpcBindingFree(&CsJoinSponsorBinding);
  483. return(JoinStatus);
  484. }
  485. //
  486. // At this point, a restore database operation is forced by
  487. // the user. So, enumerate the cluster nodes with the help
  488. // of the sponsor and then stop the services on all the
  489. // cluster nodes.
  490. //
  491. Status = NmRpcEnumNodeDefinitions2(
  492. CsJoinSponsorBinding,
  493. 0,
  494. L"0",
  495. &pNodeEnum
  496. );
  497. RpcBindingFree(&CsJoinSponsorBinding);
  498. if ( Status != ERROR_SUCCESS ) {
  499. ClRtlLogPrint(LOG_UNUSUAL,
  500. "[INIT] Cannot force a restore DB: Unable to enumerate cluster nodes\n"
  501. );
  502. LocalFree( pNodeEnum );
  503. return (Status);
  504. }
  505. //
  506. // Attempt to stop the clussvc on all nodes, except of course
  507. // this node
  508. //
  509. Status = RdbStopSvcOnNodes (
  510. pNodeEnum,
  511. L"clussvc"
  512. );
  513. LocalFree( pNodeEnum );
  514. if ( Status != ERROR_SUCCESS ) {
  515. ClRtlLogPrint(LOG_UNUSUAL,
  516. "[INIT] Cannot force a restore DB: Unable to stop cluster nodes\n"
  517. );
  518. return(Status);
  519. } else {
  520. CL_LOGCLUSWARNING( CS_STOPPING_SVC_ON_REMOTE_NODES );
  521. }
  522. }
  523. }
  524. if (JoinStatus != ERROR_SUCCESS) {
  525. ClRtlLogPrint(LOG_UNUSUAL,
  526. "[INIT] Failed to join cluster, status %1!u!\n",
  527. JoinStatus
  528. );
  529. //
  530. // Forming a cluster will also attempt to arbitrate the quorum
  531. // resource.
  532. //
  533. bJoin = FALSE;
  534. //
  535. // If we failed join and found a sponsor, skip clusterform
  536. //
  537. if (bFormCluster == FALSE) {
  538. return (JoinStatus);
  539. }
  540. ClRtlLogPrint(LOG_NOISE,
  541. "[INIT] Attempting to form cluster %1!ws!\n",
  542. CsClusterName
  543. );
  544. Status = ClusterForm();
  545. if (Status != ERROR_SUCCESS) {
  546. ClRtlLogPrint(LOG_CRITICAL,
  547. "[INIT] Failed to form cluster, status %1!u!.\n",
  548. Status
  549. );
  550. if (Status == ERROR_BUSY) {
  551. //
  552. // Couldn't arbitrate for the quorum disk. Return
  553. // the join status, since that is the real failure.
  554. //
  555. Status = JoinStatus;
  556. }
  557. CsLogEventData(
  558. LOG_CRITICAL,
  559. SERVICE_FAILED_JOIN_OR_FORM,
  560. sizeof(Status),
  561. &Status
  562. );
  563. return(Status);
  564. }
  565. }
  566. else {
  567. bJoin = TRUE;
  568. }
  569. //
  570. // We are now a full cluster member.
  571. //
  572. //
  573. // Register the ExtroCluster (join) RPC interface so we can sponsor a
  574. // joining node.
  575. //
  576. Status = ClusterRegisterExtroclusterRpcInterface();
  577. if (Status != RPC_S_OK) {
  578. return(Status);
  579. }
  580. //
  581. // Register the Join Version RPC interface so we can determine
  582. // the version of a joining node.
  583. //
  584. Status = ClusterRegisterJoinVersionRpcInterface();
  585. if (Status != RPC_S_OK) {
  586. return(Status);
  587. }
  588. //
  589. // Enable this node to participate in regroups.
  590. //
  591. MmSetRegroupAllowed(TRUE);
  592. //
  593. // Now enable Clussvc to Clusnet Heartbeating.
  594. //
  595. if ((Status = NmInitializeClussvcClusnetHb()) != ERROR_SUCCESS) {
  596. return Status;
  597. }
  598. //
  599. // Advertise that the node is fully up now
  600. //
  601. Status = NmSetExtendedNodeState( ClusterNodeUp );
  602. if (Status != ERROR_SUCCESS) {
  603. // NmSetExtendedNodeState logs an error //
  604. return(Status);
  605. }
  606. //
  607. // Chittur Subbaraman (chitturs) - 10/28/99
  608. //
  609. // Process FM join events that must be done AFTER this cluster
  610. // node is declared as fully UP.
  611. //
  612. if ( bJoin ) {
  613. FmJoinPhase3();
  614. }
  615. //
  616. // We are now going to attempt to increase our working set size. This,
  617. // plus the priority class boost, should allow the cluster service
  618. // to run a little better and be more responsive to cluster events.
  619. //
  620. if ( GetProcessWorkingSetSize( GetCurrentProcess(),
  621. &minWorkingSetSize,
  622. &maxWorkingSetSize ) )
  623. {
  624. if ( minWorkingSetSize < MIN_WORKING_SET_SIZE ) {
  625. minWorkingSetSize = MIN_WORKING_SET_SIZE;
  626. }
  627. if ( maxWorkingSetSize < MAX_WORKING_SET_SIZE ) {
  628. maxWorkingSetSize = MAX_WORKING_SET_SIZE;
  629. }
  630. if ( SetProcessWorkingSetSize( GetCurrentProcess(),
  631. minWorkingSetSize,
  632. maxWorkingSetSize ) )
  633. {
  634. //
  635. // now report what we set it to
  636. //
  637. if ( GetProcessWorkingSetSize( GetCurrentProcess(),
  638. &minWorkingSetSize,
  639. &maxWorkingSetSize ) )
  640. {
  641. ClRtlLogPrint(LOG_NOISE,
  642. "[INIT] Working Set changed to [%1!u!, %2!u!].\n",
  643. minWorkingSetSize,
  644. maxWorkingSetSize);
  645. } else {
  646. ClRtlLogPrint(LOG_UNUSUAL,
  647. "[INIT] Failed to re-read our working set size, Status %1!u!.\n",
  648. GetLastError());
  649. }
  650. } else {
  651. ClRtlLogPrint(LOG_UNUSUAL,
  652. "[INIT] Failed to set our Min WS to %1!u!, Max WS to %2!u!, Status %3!u!.\n",
  653. minWorkingSetSize,
  654. maxWorkingSetSize,
  655. GetLastError());
  656. }
  657. } else {
  658. ClRtlLogPrint(LOG_UNUSUAL,
  659. "[INIT] Failed to get our working set size, Status %1!u!.\n",
  660. GetLastError()
  661. );
  662. }
  663. CspLogStartEvent(bJoin);
  664. #if 0
  665. //
  666. // Chittur Subbaraman (chitturs) - 11/4/98
  667. //
  668. if ( CsForceDatabaseRestore == TRUE )
  669. {
  670. //
  671. // If you stopped the service on any nodes for database restoration
  672. // purposes, then start them now
  673. //
  674. RdbStartSvcOnNodes ( L"clussvc" );
  675. }
  676. #endif
  677. hr = ClRtlInitiatePeriodicCleanupThread();
  678. if ( FAILED( hr ) ) {
  679. ClRtlLogPrint(LOG_UNUSUAL,
  680. "[INIT] Error 0x%1!08lx! occurred trying to initiate periodic cleanup thread. This is not fatal and will not prevent the service from starting.\n",
  681. hr);
  682. }
  683. ClRtlLogPrint(LOG_NOISE, "[INIT] Cluster started.\n");
  684. return(ERROR_SUCCESS);
  685. } // ClusterInitialize (aka ClusterStartup)
  686. VOID
  687. ClusterShutdown(
  688. DWORD ExitCode
  689. )
  690. /*++
  691. Routine Description:
  692. Shuts down the cluster in the reverse order from which it was brought up.
  693. Arguments:
  694. ExitCode - Supplies the service exit code to report to the service controller;
  695. also used as the process exit code.
  696. Return Value:
  697. None. This routine exits the process.
  698. --*/
  699. {
  700. HRESULT hr = S_OK;
  701. //
  702. // Shutdown all components of the Cluster Service in approximately
  703. // the reverse order in which they were brought up.
  704. //
  705. ClRtlLogPrint(LOG_UNUSUAL,
  706. "[INIT] The cluster service is shutting down.\n");
  707. //
  708. // Enable this when we support ClusterShuttingDown state
  709. //
  710. // NmSetExtendedNodeState( ClusterNodeDown );
  711. #ifdef CLUSTER_TESTPOINT
  712. TESTPT(TpFailClusterShutdown) {
  713. return;
  714. }
  715. #endif
  716. MmSetRegroupAllowed(FALSE);
  717. // if replicated event logging was initialized, shut it down
  718. if (!CsNoRepEvtLogging)
  719. {
  720. //
  721. // Shutdown the cluster eventlog manager- this deregisters with the
  722. // eventlog server.
  723. EvShutdown();
  724. }
  725. CsServiceStatus.dwCheckPoint++;
  726. CsAnnounceServiceStatus();
  727. #if 0
  728. //
  729. // Chittur Subbaraman (chitturs) - 5/8/2000
  730. //
  731. // Don't shutdown DM updates for now so as to avoid spurious node shoot downs due to the locker
  732. // node shutting down and hence the DM update succeeding when in fact it should fail.
  733. //
  734. DmShutdownUpdates();
  735. #endif
  736. //
  737. // Move or offline all groups owned by this node. This will destroy
  738. // the resource monitors and the in-memory resource and group objects.
  739. //
  740. FmShutdownGroups();
  741. CsServiceStatus.dwCheckPoint++;
  742. CsAnnounceServiceStatus();
  743. // Shutdown the dm- this flushes the log file and releases the dm hooks.
  744. DmShutdown();
  745. CsServiceStatus.dwCheckPoint++;
  746. CsAnnounceServiceStatus();
  747. // Unsubscribe from Vss
  748. //
  749. if ( g_bCVssWriterClusterSubscribed ) {
  750. ClRtlLogPrint( LOG_NOISE, "[INIT] VSS: Unsubscribing\n" );
  751. hr = g_pCVssWriterCluster->Unsubscribe( );
  752. if ( FAILED( hr ) ) {
  753. ClRtlLogPrint( LOG_CRITICAL, "[INIT] VSS: Failed to Unsubscribe from VSS, status 0x%1!x!\n", hr );
  754. } else {
  755. g_bCVssWriterClusterSubscribed = FALSE;
  756. }
  757. }
  758. // Delete our Vss instance if we have one (and if we are no longer subscribed).
  759. //
  760. if (g_pCVssWriterCluster && (g_bCVssWriterClusterSubscribed == FALSE) ) {
  761. delete g_pCVssWriterCluster;
  762. }
  763. TestpointDeInit();
  764. CsServiceStatus.dwCheckPoint++;
  765. CsAnnounceServiceStatus();
  766. NmCloseConnectoidAdviseSink();
  767. CoUninitialize();
  768. //
  769. // Trigger a banishing regroup incident, prompting
  770. // other nodes in the cluster to regroup this node out.
  771. //
  772. MMLeave();
  773. //
  774. // Exit the process now... there are a number of circular dependencies
  775. // that have been built up during the 'life of the cluster'. There
  776. // is no easy way to unwind from here... so just exit out.
  777. //
  778. //
  779. // Announce that we are stopped only if we were successful in
  780. // initializing. The SC will not restart the service if we report that
  781. // we've stopped. Make sure the service status announcement is the last
  782. // thing done since there is a race between this thread and the main
  783. // thread that will prevent code after the announcement from being
  784. // executed.
  785. //
  786. ClRtlLogPrint(( ExitCode == ERROR_SUCCESS ) ? LOG_NOISE : LOG_CRITICAL,
  787. "[CS] Service Stopped. exit code = %1!u!\n\n", ExitCode);
  788. if ( ExitCode == ERROR_SUCCESS ) {
  789. CsLogEvent(LOG_NOISE, SERVICE_SUCCESSFUL_TERMINATION);
  790. CsServiceStatus.dwCurrentState = SERVICE_STOPPED;
  791. CsServiceStatus.dwControlsAccepted = 0;
  792. CsServiceStatus.dwCheckPoint = 0;
  793. CsServiceStatus.dwWaitHint = 0;
  794. CspSetErrorCode( ExitCode, &CsServiceStatus );
  795. CsAnnounceServiceStatus();
  796. } else {
  797. ExitCode = CspSetErrorCode( ExitCode, &CsServiceStatus );
  798. }
  799. //release the mutex so that the next instance of the service can acquire it immediately
  800. ReleaseMutex(CspMutex);
  801. ExitProcess(ExitCode);
  802. #if 0
  803. //
  804. // Everything after this point is what should happen in a clean shutdown.
  805. //
  806. // Shutdown the Failover Manager.
  807. FmShutdown();
  808. CsServiceStatus.dwCheckPoint++;
  809. CsAnnounceServiceStatus();
  810. //
  811. // Shutdown the Cluster Api.
  812. //
  813. ApiShutdown();
  814. CsServiceStatus.dwCheckPoint++;
  815. CsAnnounceServiceStatus();
  816. //
  817. // Stop the RPC server and deregister our endpoints & interfaces.
  818. //
  819. ClusterShutdownRpcServer();
  820. CsServiceStatus.dwCheckPoint++;
  821. CsAnnounceServiceStatus();
  822. //
  823. // At this point, all calls on the Intracluster and Extrocluster
  824. // RPC interfaces are complete and no more will be received.
  825. //
  826. // Note - Calls on the Clusapi interface are still possible.
  827. //
  828. //
  829. // Shutdown the Node Manager.
  830. //
  831. NmShutdown();
  832. CsServiceStatus.dwCheckPoint++;
  833. CsAnnounceServiceStatus();
  834. // Shutdown the Event Processor.
  835. EpShutdown();
  836. CsServiceStatus.dwCheckPoint++;
  837. CsAnnounceServiceStatus();
  838. LmShutdown();
  839. CsServiceStatus.dwCheckPoint++;
  840. CsAnnounceServiceStatus();
  841. CpShutdown();
  842. CsServiceStatus.dwCheckPoint++;
  843. CsAnnounceServiceStatus();
  844. //shutdown gum
  845. GumShutdown();
  846. CsServiceStatus.dwCheckPoint++;
  847. CsAnnounceServiceStatus();
  848. // Shutdown the Object Manager.
  849. OmShutdown();
  850. CsServiceStatus.dwCheckPoint++;
  851. CsAnnounceServiceStatus();
  852. //
  853. // Destroy the global work queues
  854. //
  855. if (CsDelayedWorkQueue != NULL) {
  856. IF_DEBUG(CLEANUP) {
  857. ClRtlLogPrint(LOG_NOISE,"[CS] Destroying delayed work queue...\n");
  858. }
  859. ClRtlDestroyWorkQueue(CsDelayedWorkQueue);
  860. CsDelayedWorkQueue = NULL;
  861. }
  862. CsServiceStatus.dwCheckPoint++;
  863. CsAnnounceServiceStatus();
  864. if (CsCriticalWorkQueue != NULL) {
  865. IF_DEBUG(CLEANUP) {
  866. ClRtlLogPrint(LOG_NOISE,"[CS] Destroying critical work queue...\n");
  867. }
  868. ClRtlDestroyWorkQueue(CsCriticalWorkQueue);
  869. CsCriticalWorkQueue = NULL;
  870. }
  871. ClRtlEventLogSetWorkQueue( NULL );
  872. if (CspEventReportingWorkQueue != NULL) {
  873. IF_DEBUG(CLEANUP) {
  874. ClRtlLogPrint(LOG_NOISE,"[CS] Destroying event reporting work queue...\n");
  875. }
  876. ClRtlDestroyWorkQueue(CspEventReportingWorkQueue);
  877. CspEventReportingWorkQueue = NULL;
  878. }
  879. //
  880. // Free global data
  881. //
  882. LocalFree(CsClusterName);
  883. if (CspMutex != NULL) {
  884. CloseHandle(CspMutex);
  885. CspMutex = NULL;
  886. }
  887. CsServiceStatus.dwCheckPoint++;
  888. CsAnnounceServiceStatus();
  889. CsLogEvent(LOG_NOISE, SERVICE_SUCCESSFUL_TERMINATION);
  890. #endif // 0
  891. return;
  892. }
  893. DWORD
  894. ClusterForm(
  895. VOID
  896. )
  897. /*++
  898. Routine Description:
  899. Code path for initializing a new instance of the cluster. This
  900. is taken when there are no nodes active in the cluster.
  901. Arguments:
  902. None
  903. Return Value:
  904. ERROR_SUCCESS if successful.
  905. Win32 error code otherwise.
  906. --*/
  907. {
  908. DWORD Status;
  909. PFM_GROUP pQuoGroup;
  910. DWORD dwError;
  911. DWORD dwQuorumDiskSignature = 0;
  912. //
  913. // Initialize the event handler.
  914. //
  915. Status = EpInitPhase1();
  916. if ( Status != ERROR_SUCCESS) {
  917. ClRtlLogPrint(LOG_CRITICAL,
  918. "[CS] EpInitPhase1 failed, Status = %1!u!\n",
  919. Status);
  920. return(Status);
  921. }
  922. //
  923. // The API server is required by FM, since it starts the resource monitor.
  924. //
  925. Status = ApiOnlineReadOnly();
  926. if ( Status != ERROR_SUCCESS) {
  927. ClRtlLogPrint(LOG_CRITICAL,
  928. "[CS] ApiInitPhase1 failed, Status = %1!u!\n",
  929. Status);
  930. goto partial_form_exit;
  931. }
  932. //
  933. // Arbitrate for the quorum resource.
  934. //
  935. Status = FmGetQuorumResource(&pQuoGroup, &dwQuorumDiskSignature);
  936. if ( Status != ERROR_SUCCESS ) {
  937. if ( ( Status == ERROR_FILE_NOT_FOUND ) &&
  938. ( CsForceDatabaseRestore == TRUE ) ) {
  939. //
  940. // Chittur Subbaraman (chitturs) - 10/30/98
  941. //
  942. // Try to fix up the quorum disk signature and if successful
  943. // try to get the quorum resource again. Note that the following
  944. // function will attempt a fix up only if the CsForceDatabaseRestore
  945. // flag is set.
  946. //
  947. if ( RdbFixupQuorumDiskSignature( dwQuorumDiskSignature ) ) {
  948. Status = FmGetQuorumResource( &pQuoGroup, NULL );
  949. if ( Status != ERROR_SUCCESS ) {
  950. Status = ERROR_QUORUM_DISK_NOT_FOUND;
  951. ClRtlLogPrint(LOG_CRITICAL,
  952. "[INIT] Could not get quorum resource even after fix up, Status = %1!u!\n",
  953. Status);
  954. goto partial_form_exit;
  955. }
  956. } else {
  957. Status = ERROR_QUORUM_DISK_NOT_FOUND;
  958. ClRtlLogPrint(LOG_CRITICAL,
  959. "[INIT] ClusterForm: Could not get quorum resource, Status = %1!u!\n",
  960. Status);
  961. goto partial_form_exit;
  962. }
  963. } else {
  964. Status = ERROR_QUORUM_DISK_NOT_FOUND;
  965. ClRtlLogPrint(LOG_CRITICAL,
  966. "[INIT] ClusterForm: Could not get quorum resource. No fixup attempted. Status = %1!u!\n",
  967. Status);
  968. goto partial_form_exit;
  969. }
  970. }
  971. //arbitration for some quorum resources (MNS) takes a while, and since we call
  972. //arbitrate from online as well, we should inform the SCM that we are making
  973. //progress
  974. CsServiceStatus.dwCheckPoint++;
  975. CsAnnounceServiceStatus();
  976. //
  977. // Call the Database Manager to update the cluster registry.
  978. //
  979. Status = DmFormNewCluster();
  980. if ( Status != ERROR_SUCCESS ) {
  981. ClRtlLogPrint(LOG_CRITICAL,
  982. "[CS] Error calling DmUpdateFormNewCluster, Status = %1!u!\n",
  983. Status);
  984. goto partial_form_exit;
  985. }
  986. if (FmDoesQuorumAllowLogging(CLUS_CHAR_UNKNOWN) != ERROR_SUCCESS)
  987. CsNoQuorumLogging = TRUE;
  988. if (!CsNoQuorum)
  989. {
  990. // Bring the quorum resource online
  991. dwError = FmBringQuorumOnline();
  992. if ((dwError == ERROR_IO_PENDING) || (dwError == ERROR_SUCCESS))
  993. {
  994. //checkpoint with scm once again before waiting for log recovery
  995. //if log mount takes a long time then DmWaitQuorumResOnline()
  996. //should also increment the checkpoints
  997. CsServiceStatus.dwCheckPoint++;
  998. CsAnnounceServiceStatus();
  999. //this waits on an event for the quorum resource to come online;
  1000. //when the quorum resource comes online, the log file is opened
  1001. //if the noquorumlogging flag is not specified
  1002. if ((dwError = DmWaitQuorumResOnline()) != ERROR_SUCCESS)
  1003. {
  1004. ClRtlLogPrint(LOG_NOISE,
  1005. "[CS] Wait for quorum resource to come online failed, error=%1!u!\r\n",
  1006. dwError);
  1007. Status = ERROR_QUORUM_RESOURCE_ONLINE_FAILED;
  1008. goto partial_form_exit;
  1009. }
  1010. }
  1011. else
  1012. {
  1013. ClRtlLogPrint(LOG_UNUSUAL,
  1014. "[CS] couldnt bring quorum resource online, Error =%1!u!\n",
  1015. dwError);
  1016. CL_LOGFAILURE(dwError);
  1017. Status = ERROR_QUORUM_RESOURCE_ONLINE_FAILED;
  1018. goto partial_form_exit;
  1019. }
  1020. }
  1021. //update status with scm, the quorum resource may take a while to come online
  1022. CsServiceStatus.dwCheckPoint++;
  1023. CsAnnounceServiceStatus();
  1024. if (!CsNoQuorumLogging)
  1025. {
  1026. //roll the Cluster Log File
  1027. if ((Status = DmRollChanges()) != ERROR_SUCCESS)
  1028. {
  1029. ClRtlLogPrint(LOG_CRITICAL,
  1030. "[CS] Error calling DmRollChanges, Status = %1!u!\n",
  1031. Status);
  1032. goto partial_form_exit;
  1033. }
  1034. }
  1035. //
  1036. // Close the groups/resources created by the FM except for the quorum
  1037. // resource. The in-memory database needs to be created again with
  1038. // the newly rolled changes.
  1039. //
  1040. Status = FmFormNewClusterPhase1(pQuoGroup);
  1041. if ( Status != ERROR_SUCCESS ) {
  1042. ClRtlLogPrint(LOG_CRITICAL,
  1043. "[CS] Error calling FmOnline, Status = %1!u!\n",
  1044. Status);
  1045. goto partial_form_exit;
  1046. }
  1047. #ifdef CLUSTER_TESTPOINT
  1048. TESTPT(TpFailFormNewCluster) {
  1049. Status = 999999;
  1050. goto partial_form_exit;
  1051. }
  1052. #endif
  1053. //
  1054. // Start up the Node Manager. This will form a cluster at the membership
  1055. // level.
  1056. //
  1057. Status = NmFormNewCluster();
  1058. if ( Status != ERROR_SUCCESS ) {
  1059. ClRtlLogPrint(LOG_CRITICAL,
  1060. "[CS] Error calling NmOnline, Status = %1!u!\n",
  1061. Status);
  1062. goto partial_form_exit;
  1063. }
  1064. //
  1065. //call any registry fixup callbacks, if they are registered.
  1066. //This is useful for upgrades/uninstalls if you want to clean up
  1067. //the registry
  1068. Status = NmPerformFixups(NM_FORM_FIXUP);
  1069. if ( Status != ERROR_SUCCESS ) {
  1070. ClRtlLogPrint(LOG_CRITICAL,
  1071. "[CS] Error calling NmPerformFixups, Status = %1!u!\n",
  1072. Status);
  1073. goto partial_form_exit;
  1074. }
  1075. //
  1076. // The API server can now be brought fully online. This enables us
  1077. // to receive calls.
  1078. //
  1079. Status = ApiOnline();
  1080. if ( Status != ERROR_SUCCESS) {
  1081. ClRtlLogPrint(LOG_CRITICAL,
  1082. "[CS] ApiInitPhase2 failed, Status = %1!u!\n",
  1083. Status);
  1084. goto partial_form_exit;
  1085. }
  1086. //update status for scm
  1087. CsServiceStatus.dwCheckPoint++;
  1088. CsAnnounceServiceStatus();
  1089. //
  1090. // Call the Failover Manager Phase 2 routine next.
  1091. // Create the groups and resources.
  1092. //
  1093. Status = FmFormNewClusterPhase2();
  1094. if ( Status != ERROR_SUCCESS ) {
  1095. ClRtlLogPrint(LOG_CRITICAL,
  1096. "[CS] Error calling FmOnline, Status = %1!u!\n",
  1097. Status);
  1098. goto partial_form_exit;
  1099. }
  1100. //
  1101. // Fire up the intracluster RPC server so we can receive calls.
  1102. //
  1103. Status = ClusterRegisterIntraclusterRpcInterface();
  1104. if ( Status != ERROR_SUCCESS ) {
  1105. goto partial_form_exit;
  1106. }
  1107. //
  1108. // Finish initializing the cluster wide event logging
  1109. //
  1110. // ASSUMPTION: this is called after the NM has established cluster
  1111. // membership.
  1112. //
  1113. if (!CsNoRepEvtLogging)
  1114. {
  1115. //if replicated event logging is not disabled
  1116. Status = EvOnline();
  1117. if ( Status != ERROR_SUCCESS ) {
  1118. ClRtlLogPrint(LOG_CRITICAL,
  1119. "[CS] Error calling EvOnline, Status = %1!u!\n",
  1120. Status);
  1121. }
  1122. }
  1123. if (!CsNoQuorumLogging)
  1124. {
  1125. //check if all nodes are up; if not, take a checkpoint and
  1126. //turn quorum logging on
  1127. Status = DmUpdateFormNewCluster();
  1128. if ( Status != ERROR_SUCCESS ) {
  1129. ClRtlLogPrint(LOG_CRITICAL,
  1130. "[CS] Error calling DmCompleteFormNewCluster, Status = %1!u!\n",
  1131. Status);
  1132. }
  1133. }
  1134. ClRtlLogPrint(LOG_NOISE, "[INIT] Successfully formed a cluster.\n");
  1135. return(ERROR_SUCCESS);
  1136. partial_form_exit:
  1137. ClRtlLogPrint(LOG_NOISE, "[INIT] Cleaning up failed form attempt.\n");
  1138. return(Status);
  1139. }
  1140. VOID
  1141. ClusterLeave(
  1142. VOID
  1143. )
  1144. /*++
  1145. Routine Description:
  1146. Removes the local node from an active cluster or cleans up after
  1147. a failed attempt to join or form a cluster.
  1148. Arguments:
  1149. None
  1150. Return Value:
  1151. None.
  1153. --*/
  1154. {
  1155. ClRtlLogPrint(LOG_NOISE, "[INIT] Leaving cluster\n");
  1156. //
  1157. // Turn off the cluster API
  1158. //
  1159. ApiOffline();
  1160. //
  1161. // If we are a cluster member, leave now.
  1162. //
  1163. NmLeaveCluster();
  1164. ClusterDeregisterRpcInterfaces();
  1165. return;
  1166. } // Cluster Leave
  1167. //
  1168. // RPC Server Control routines
  1169. //
  1170. RPC_STATUS
  1171. ClusterInitializeRpcServer(
  1172. VOID
  1173. )
  1174. /*++
  1175. Routine Description:
  1176. Initializes the RPC server for the cluster service.
  1177. Arguments:
  1178. None.
  1179. Return Value:
  1180. RPC_S_OK if the routine succeeds. An RPC error code if it fails.
  1181. --*/
  1182. {
  1183. RPC_STATUS Status;
  1184. DWORD i;
  1185. DWORD retry;
  1186. DWORD packagesRegistered = 0;
  1187. ClRtlLogPrint(LOG_NOISE, "[CS] Initializing RPC server.\n");
  1188. //
  1189. // Enable authentication of calls to our RPC interfaces. For NTLM,
  1190. the PrincipalName is ignored, but we'll need to supply one if we
  1191. // switch authentication services later on. Note that it is not
  1192. // necessary to specify an authentication service for each interface.
  1193. //
  1194. for ( i = 0; i < CsNumberOfRPCSecurityPackages; ++i ) {
  1195. Status = RpcServerRegisterAuthInfo(NULL,
  1196. CsRPCSecurityPackage[ i ],
  1197. NULL,
  1198. NULL);
  1199. if (Status == RPC_S_OK) {
  1200. ++packagesRegistered;
  1201. } else {
  1202. ClRtlLogPrint(LOG_CRITICAL,
  1203. "[CS] Unable to register %1!ws! authentication for RPC, status %2!u!.\n",
  1204. CsRPCSecurityPackageName[ i ],
  1205. Status);
  1206. }
  1207. }
  1208. if ( packagesRegistered == 0 ) {
  1209. return ERROR_CLUSTER_NO_RPC_PACKAGES_REGISTERED;
  1210. }
  1211. //
  1212. // Bind to UDP. This transport will be used by remote clients to
  1213. // access the clusapi interface and by cluster nodes to
  1214. // access the extrocluster (join) interface. This uses a dynamic
  1215. // endpoint.
  1216. //
  1217. Status = RpcServerUseProtseq(
  1218. TEXT("ncadg_ip_udp"),
  1219. RPC_C_PROTSEQ_MAX_REQS_DEFAULT,
  1220. NULL);
  1221. if (Status != RPC_S_OK) {
  1222. ClRtlLogPrint(LOG_CRITICAL,
  1223. "[INIT] Unable to bind RPC to UDP, status %1!u!.\n",
  1224. Status);
  1225. return(Status);
  1226. }
  1227. //
  1228. // Figure out which UDP endpoint we got so we can register it with
  1229. // the endpoint mapper later. We must do this before we register any
  1230. // other protocol sequences, or they will show up in the vector.
  1231. // Groveling the binding vector for a specific transport is no fun.
  1232. //
  1233. CL_ASSERT( CsRpcBindingVector == NULL);
  1234. Status = RpcServerInqBindings(&CsRpcBindingVector);
  1235. if (Status != RPC_S_OK) {
  1236. ClRtlLogPrint(LOG_CRITICAL,
  1237. "[INIT] Unable to obtain RPC binding vector, status %1!u!.\n",
  1238. Status);
  1239. return(Status);
  1240. }
  1241. //
  1242. // Bind to LRPC. This transport will be used by clients running on this
  1243. // system to access the clusapi interface. This also uses a dynamic endpoint.
  1244. //
  1245. Status = RpcServerUseProtseq(
  1246. TEXT("ncalrpc"),
  1247. RPC_C_PROTSEQ_MAX_REQS_DEFAULT,
  1248. NULL); // No SD. Let the object inherit from its "\RPC Control" parent object which has
  1249. // an IO ACE specifying R, W, E, for the World.
  1250. if (Status != RPC_S_OK) {
  1251. ClRtlLogPrint(LOG_CRITICAL,
  1252. "[INIT] Unable to bind RPC to LPC, status %1!u!.\n",
  1253. Status);
  1254. return(Status);
  1255. }
  1256. //
  1257. // Register the dynamic LRPC endpoint with the local endpoint mapper database
  1258. //
  1259. Status = CspRegisterDynamicLRPCEndpoint ();
  1260. if (Status != RPC_S_OK) {
  1261. ClRtlLogPrint(LOG_CRITICAL,
  1262. "[INIT] Unable to register dynamic LRPC endpoint, status %1!u!.\n",
  1263. Status);
  1264. return(Status);
  1265. }
  1266. //
  1267. // Bind to CDP (Cluster Datagram Protocol). This transport will be used
  1268. // for the intracluster interface. This uses a well-known endpoint.
  1269. //
  1270. // GN: Sometimes it takes a couple of seconds for resrcmon to go away after
  1271. // a clean shutdown. When the SCM tries to restart the service, the following call will fail.
  1272. // To overcome this, we give up only if we couldn't bind RPC to CDP after
  1273. // 10 attempts with 1 second in between the calls.
  1274. //
  1275. retry = 10;
  1276. for (;;) {
  1277. Status = RpcServerUseProtseqEp(
  1278. CLUSTER_RPC_PROTSEQ,
  1279. 1, // Max calls
  1280. CLUSTER_RPC_PORT,
  1281. NULL);
  1282. if (Status != RPC_S_DUPLICATE_ENDPOINT || retry == 0) {
  1283. break;
  1284. }
  1285. ClRtlLogPrint(LOG_UNUSUAL,
  1286. "[INIT] Unable to bind RPC to CDP, status %1!u!. Retrying...\n",
  1287. Status);
  1288. Sleep(1000);
  1289. --retry;
  1290. }
  1291. if (Status != RPC_S_OK) {
  1292. ClRtlLogPrint(LOG_CRITICAL,
  1293. "[INIT] Unable to bind RPC to CDP, status %1!u!.\n",
  1294. Status);
  1295. return(Status);
  1296. }
  1297. //
  1298. // Start our RPC server. Note that we will not get any calls until
  1299. // we register our interfaces.
  1300. //
  1301. Status = RpcServerListen(
  1302. CS_CONCURRENT_RPC_CALLS,
  1303. RPC_C_LISTEN_MAX_CALLS_DEFAULT,
  1304. TRUE);
  1305. if ((Status != RPC_S_OK) && (Status != RPC_S_ALREADY_LISTENING)) {
  1306. ClRtlLogPrint(LOG_CRITICAL,
  1307. "[CS] Unable to start RPC server, status %1!u!.\n",
  1308. Status
  1309. );
  1310. return(Status);
  1311. }
  1312. RpcSsDontSerializeContext();
  1313. return(RPC_S_OK);
  1314. }
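/*++
Routine Description:
Registers the IntraCluster RPC interface with ApipConnectCallback as the
security callback so that other cluster nodes can make intracluster calls
to this node.
Arguments:
None.
Return Value:
ERROR_SUCCESS if successful.
An RPC error code otherwise.
--*/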
  1315. DWORD
  1316. ClusterRegisterIntraclusterRpcInterface(
  1317. VOID
  1318. )
  1319. {
  1320. DWORD Status;
  1321. Status = RpcServerRegisterIfEx(
  1322. s_IntraCluster_v2_0_s_ifspec,
  1323. NULL,
  1324. NULL,
  1325. 0, // No need to set RPC_IF_ALLOW_SECURE_ONLY if security callback
  1326. // is specified. If security callback is specified, RPC
  1327. // will reject unauthenticated requests without invoking
  1328. // callback. This is the info obtained from RpcDev. See
  1329. // Windows Bug 572035.
  1330. RPC_C_PROTSEQ_MAX_REQS_DEFAULT,
  1331. reinterpret_cast<RPC_IF_CALLBACK_FN(__stdcall *)>( ApipConnectCallback )
  1332. );
  1333. if (Status != RPC_S_OK) {
  1334. ClRtlLogPrint(LOG_CRITICAL,
  1335. "[INIT] Unable to register the IntraCluster interface, Status %1!u!.\n",
  1336. Status
  1337. );
  1338. return(Status);
  1339. }
  1340. CspIntraclusterRpcServerStarted = TRUE;
  1341. return(ERROR_SUCCESS);
  1342. } // ClusterRegisterIntraclusterRpcInterface
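/*++
Routine Description:
Registers the ExtroCluster (join) RPC interface and registers its endpoint
with the endpoint mapper using the binding vector saved during RPC server
initialization.
Arguments:
None.
Return Value:
ERROR_SUCCESS if successful.
An RPC error code otherwise.
--*/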
  1343. DWORD
  1344. ClusterRegisterExtroclusterRpcInterface(
  1345. VOID
  1346. )
  1347. {
  1348. DWORD Status;
  1349. Status = RpcServerRegisterIfEx(
  1350. s_ExtroCluster_v2_0_s_ifspec,
  1351. NULL,
  1352. NULL,
  1353. 0, // No need to set RPC_IF_ALLOW_SECURE_ONLY if security callback
  1354. // is specified. If security callback is specified, RPC
  1355. // will reject unauthenticated requests without invoking
  1356. // callback. This is the info obtained from RpcDev. See
  1357. // Windows Bug 572035.
  1358. RPC_C_PROTSEQ_MAX_REQS_DEFAULT,
  1359. reinterpret_cast<RPC_IF_CALLBACK_FN( __stdcall *)>( ApipConnectCallback )
  1360. );
  1361. if (Status != RPC_S_OK) {
  1362. ClRtlLogPrint(LOG_CRITICAL,
  1363. "[INIT] Unable to register the ExtroCluster interface, status %1!u!.\n",
  1364. Status
  1365. );
  1366. return(Status);
  1367. }
  1368. CL_ASSERT( CsRpcBindingVector != NULL);
  1369. Status = RpcEpRegister(
  1370. s_ExtroCluster_v2_0_s_ifspec,
  1371. CsRpcBindingVector,
  1372. NULL,
  1373. L"Microsoft Extrocluster Interface"
  1374. );
  1375. if (Status != RPC_S_OK) {
  1376. ClRtlLogPrint(LOG_CRITICAL,
  1377. "[INIT] Unable to register the ExtroCluster interface endpoint, status %1!u!.\n",
  1378. Status
  1379. );
  1380. NmDumpRpcExtErrorInfo(Status);
  1381. return(Status);
  1382. }
  1383. return(ERROR_SUCCESS);
  1384. } // ClusterRegisterExtroclusterRpcInterface
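/*++
Routine Description:
Registers the JoinVersion RPC interface, used to determine the version of a
joining node, and registers its endpoint with the endpoint mapper.
Arguments:
None.
Return Value:
ERROR_SUCCESS if successful.
An RPC error code otherwise.
--*/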
  1385. DWORD
  1386. ClusterRegisterJoinVersionRpcInterface(
  1387. VOID
  1388. )
  1389. {
  1390. DWORD Status;
  1391. Status = RpcServerRegisterIfEx(
  1392. s_JoinVersion_v2_0_s_ifspec,
  1393. NULL,
  1394. NULL,
  1395. 0, // No need to set RPC_IF_ALLOW_SECURE_ONLY if security callback
  1396. // is specified. If security callback is specified, RPC
  1397. // will reject unauthenticated requests without invoking
  1398. // callback. This is the info obtained from RpcDev. See
  1399. // Windows Bug 572035.
  1400. RPC_C_PROTSEQ_MAX_REQS_DEFAULT,
  1401. reinterpret_cast<RPC_IF_CALLBACK_FN *>( ApipConnectCallback )
  1402. );
  1403. if (Status != RPC_S_OK) {
  1404. ClRtlLogPrint(LOG_CRITICAL,
  1405. "[INIT] Unable to register the JoinVersion interface, status %1!u!.\n",
  1406. Status
  1407. );
  1408. return(Status);
  1409. }
  1410. CL_ASSERT( CsRpcBindingVector != NULL);
  1411. Status = RpcEpRegister(
  1412. s_JoinVersion_v2_0_s_ifspec,
  1413. CsRpcBindingVector,
  1414. NULL,
  1415. L"Microsoft JoinVersion Interface"
  1416. );
  1417. if (Status != RPC_S_OK) {
  1418. ClRtlLogPrint(LOG_CRITICAL,
  1419. "[INIT] Unable to register the JoinVersion interface endpoint, status %1!u!.\n",
  1420. Status
  1421. );
  1422. NmDumpRpcExtErrorInfo(Status);
  1423. return(Status);
  1424. }
  1425. return(ERROR_SUCCESS);
  1426. } // ClusterRegisterJoinVersionRpcInterface
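/*++
Routine Description:
Deregisters the ExtroCluster and JoinVersion interface endpoints and then
unregisters the ExtroCluster, JoinVersion, and IntraCluster interfaces,
waiting for outstanding calls to complete.
Arguments:
None.
Return Value:
None.
--*/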
  1427. VOID
  1428. ClusterDeregisterRpcInterfaces(
  1429. VOID
  1430. )
  1431. {
  1432. RPC_STATUS Status;
  1433. ClRtlLogPrint(LOG_NOISE,
  1434. "[INIT] Deregistering RPC endpoints & interfaces.\n"
  1435. );
  1436. //
  1437. // Deregister the Extrocluster and JoinVersion interface endpoints.
  1438. // There is no endpoint for the Intracluster interface.
  1439. //
  1440. if (CsRpcBindingVector != NULL) {
  1441. Status = RpcEpUnregister(
  1442. s_ExtroCluster_v2_0_s_ifspec,
  1443. CsRpcBindingVector,
  1444. NULL
  1445. );
  1446. if ((Status != RPC_S_OK) && (Status != EPT_S_NOT_REGISTERED)) {
  1447. ClRtlLogPrint(LOG_UNUSUAL,
  1448. "[INIT] Failed to deregister endpoint for ExtroCluster interface, status %1!u!.\n",
  1449. Status
  1450. );
  1451. }
  1452. Status = RpcEpUnregister(
  1453. s_JoinVersion_v2_0_s_ifspec,
  1454. CsRpcBindingVector,
  1455. NULL
  1456. );
  1457. if ((Status != RPC_S_OK) && (Status != EPT_S_NOT_REGISTERED)) {
  1458. ClRtlLogPrint(LOG_UNUSUAL,
  1459. "[INIT] Failed to deregister endpoint for JoinVersion interface, status %1!u!.\n",
  1460. Status
  1461. );
  1462. }
  1463. }
  1464. //
  1465. // Deregister the interfaces
  1466. //
  1467. Status = RpcServerUnregisterIf(
  1468. s_ExtroCluster_v2_0_s_ifspec,
  1469. NULL,
  1470. 1 // Wait for outstanding calls to complete
  1471. );
  1472. if ((Status != RPC_S_OK) && (Status != RPC_S_UNKNOWN_IF)) {
  1473. ClRtlLogPrint(LOG_UNUSUAL,
  1474. "[INIT] Unable to deregister the ExtroCluster interface, Status %1!u!.\n",
  1475. Status
  1476. );
  1477. }
  1478. Status = RpcServerUnregisterIf(
  1479. s_JoinVersion_v2_0_s_ifspec,
  1480. NULL,
  1481. 1 // Wait for outstanding calls to complete
  1482. );
  1483. if ((Status != RPC_S_OK) && (Status != RPC_S_UNKNOWN_IF)) {
  1484. ClRtlLogPrint(LOG_UNUSUAL,
  1485. "[INIT] Unable to deregister the JoinVersion interface, Status %1!u!.\n",
  1486. Status
  1487. );
  1488. }
  1489. Status = RpcServerUnregisterIf(
  1490. s_IntraCluster_v2_0_s_ifspec,
  1491. NULL,
  1492. 1 // Wait for outstanding calls to complete
  1493. );
  1494. if ((Status != RPC_S_OK) && (Status != RPC_S_UNKNOWN_IF)) {
  1495. ClRtlLogPrint(LOG_UNUSUAL,
  1496. "[INIT] Unable to deregister the IntraCluster interface, Status %1!u!.\n",
  1497. Status
  1498. );
  1499. }
  1500. return;
  1501. } // ClusterDeregisterRpcInterfaces
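/*++
Routine Description:
Shuts down the RPC server. Deregisters the cluster RPC interfaces, stops the
server from listening, and frees the saved binding vector.
Arguments:
None.
Return Value:
None.
--*/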
  1502. VOID
  1503. ClusterShutdownRpcServer(
  1504. VOID
  1505. )
  1506. {
  1507. RPC_STATUS Status;
  1508. ClRtlLogPrint(LOG_NOISE, "[INIT] Shutting down RPC server.\n");
  1509. ClusterDeregisterRpcInterfaces();
  1510. Status = RpcMgmtStopServerListening(NULL);
  1511. if ((Status != RPC_S_OK) && (Status != RPC_S_NOT_LISTENING)) {
  1512. ClRtlLogPrint(LOG_UNUSUAL,
  1513. "[INIT] Failed to shutdown RPC server, status %1!u!.\n",
  1514. Status
  1515. );
  1516. }
  1517. #if 0
  1518. //
  1519. // Note - We really should wait for all outstanding calls to complete,
  1520. // but we can't because there is no way to shutdown any
  1521. // pending API GetNotify calls.
  1522. //
  1523. Status = RpcMgmtWaitServerListen();
  1524. if ((Status != RPC_S_OK) && (Status != RPC_S_NOT_LISTENING)) {
  1525. ClRtlLogPrint(LOG_UNUSUAL,
  1526. "[INIT] Failed to wait for all RPC calls to complete, status %1!u!.\n",
  1527. Status
  1528. );
  1529. }
  1530. #endif // 0
  1531. if (CsRpcBindingVector != NULL) {
  1532. RpcBindingVectorFree(&CsRpcBindingVector);
  1533. CsRpcBindingVector = NULL;
  1534. }
  1535. return;
  1536. } // ClusterShutdownRpcServer
  1537. LONG
  1538. CspExceptionFilter(
  1539. IN PEXCEPTION_POINTERS ExceptionInfo
  1540. )
  1541. /*++
  1542. Routine Description:
  1543. Top level exception handler for the cluster service process.
  1544. Currently this just exits immediately and assumes that the
  1545. cluster proxy will notice and restart us as appropriate.
  1546. Arguments:
  1547. ExceptionInfo - Supplies the exception information
  1548. Return Value:
  1549. EXCEPTION_CONTINUE_SEARCH if the routine returns (the process may be terminated first).
  1550. --*/
  1551. {
  1552. ClRtlLogPrint(LOG_CRITICAL,
  1553. "[CS] Exception. Code = 0x%1!lx!, Address = 0x%2!p!\n",
  1554. ExceptionInfo->ExceptionRecord->ExceptionCode,
  1555. ExceptionInfo->ExceptionRecord->ExceptionAddress);
  1556. ClRtlLogPrint(LOG_CRITICAL,
  1557. "[CS] Exception parameters: %1!lx!, %2!lx!, %3!lx!, %4!lx!\n",
  1558. ExceptionInfo->ExceptionRecord->ExceptionInformation[0],
  1559. ExceptionInfo->ExceptionRecord->ExceptionInformation[1],
  1560. ExceptionInfo->ExceptionRecord->ExceptionInformation[2],
  1561. ExceptionInfo->ExceptionRecord->ExceptionInformation[3]);
  1562. GenerateExceptionReport(ExceptionInfo);
  1563. if (lpfnOriginalExceptionFilter)
  1564. lpfnOriginalExceptionFilter(ExceptionInfo);
  1565. // the system level handler will be invoked if we return
  1566. // EXCEPTION_CONTINUE_SEARCH - when a debugger is present, don't terminate the process
  1567. if ( IsDebuggerPresent()) {
  1568. return(EXCEPTION_CONTINUE_SEARCH);
  1569. } else {
  1570. #if !CLUSTER_BETA
  1571. TerminateProcess( GetCurrentProcess(),
  1572. ExceptionInfo->ExceptionRecord->ExceptionCode );
  1573. #endif
  1574. return(EXCEPTION_CONTINUE_SEARCH);
  1575. }
  1576. }
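/*++
Routine Description:
Halts this node to prevent an inconsistency within the cluster. Reports
SERVICE_STOP_PENDING along with the error code to the SCM, logs a critical
event, releases the service mutex, and exits the process.
Arguments:
Status - Supplies the error code that caused the halt.
Return Value:
None. This routine does not return.
--*/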
  1577. VOID
  1578. CsInconsistencyHalt(
  1579. IN DWORD Status
  1580. )
  1581. {
  1582. WCHAR string[16];
  1583. DWORD status;
  1584. //
  1585. // Chittur Subbaraman (chitturs) - 12/17/99
  1586. //
  1587. // Announce your status to the SCM as SERVICE_STOP_PENDING so that
  1588. // it does not affect restart. Also, it could let clients learn
  1589. // of the error status.
  1590. //
  1591. CsServiceStatus.dwCurrentState = SERVICE_STOP_PENDING;
  1592. CsServiceStatus.dwControlsAccepted = 0;
  1593. CsServiceStatus.dwCheckPoint = 0;
  1594. CsServiceStatus.dwWaitHint = 0;
  1595. status = CspSetErrorCode( Status, &CsServiceStatus );
  1596. CsAnnounceServiceStatus();
  1597. wsprintfW(&(string[0]), L"%u", Status);
  1598. ClRtlLogPrint(LOG_CRITICAL,
  1599. "[CS] Halting this node to prevent an inconsistency within the cluster. Error status = %1!u!\n",
  1600. Status
  1601. );
  1602. CsLogEvent1(
  1603. LOG_CRITICAL,
  1604. CS_EVENT_INCONSISTENCY_HALT,
  1605. string
  1606. );
  1607. //release the mutex so that the service, when it starts again, can acquire it
  1608. //without a delay
  1609. ReleaseMutex(CspMutex);
  1610. ExitProcess(status); // return the fake error code
  1611. }
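/*++
Routine Description:
Allocates fixed local memory. Halts the service via CsInconsistencyHalt if
the allocation fails, so callers never see a NULL return.
Arguments:
Size - Supplies the number of bytes to allocate.
Return Value:
Pointer to the allocated block.
--*/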
  1612. PVOID
  1613. CsAlloc(
  1614. DWORD Size
  1615. )
  1616. {
  1617. PVOID p;
  1618. p = LocalAlloc(LMEM_FIXED, Size);
  1619. if (p == NULL) {
  1620. CsInconsistencyHalt( ERROR_NOT_ENOUGH_MEMORY );
  1621. }
  1622. return(p);
  1623. }
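/*++
Routine Description:
Duplicates a Unicode string into newly allocated local memory. Halts the
service via CsInconsistencyHalt if the allocation fails.
Arguments:
String - Supplies the string to duplicate.
Return Value:
Pointer to the duplicated string.
--*/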
  1624. LPWSTR
  1625. CsStrDup(
  1626. LPCWSTR String
  1627. )
  1628. {
  1629. LPWSTR p;
  1630. DWORD Len;
  1631. Len = (lstrlenW(String)+1)*sizeof(WCHAR);
  1632. p=static_cast<LPWSTR>(LocalAlloc(LMEM_FIXED, Len));
  1633. if (p==NULL) {
  1634. CsInconsistencyHalt( ERROR_NOT_ENOUGH_MEMORY );
  1635. }
  1636. CopyMemory(p,String,Len);
  1637. return(p);
  1638. }
  1639. DWORD
  1640. VssWriterInit(
  1641. VOID
  1642. )
  1643. /*++
  1644. Routine Description:
  1645. Start subscribing for volume snapshot events as a writer.
  1646. Arguments:
  1647. None.
  1648. Return Value:
  1649. ERROR_SUCCESS - Subscription succeeded.
  1650. An error status if the subscription fails.
  1651. Comments:
  1652. Should never be called from a ServiceMain() since this function could result in
  1653. starting the EventSystem service. During autostart, any call from
  1654. ServiceMain() that would demand-start a service will cause the calling service
  1655. to hang.
  1656. --*/
  1657. {
  1658. DWORD dwStatus = ERROR_SUCCESS;
  1659. HRESULT hr;
  1660. //
  1661. // When this function is called, it is possible this global is not initialized
  1662. // by ClusterInitialize since it can return with success in an evict cleanup
  1663. // case. In that case, bail.
  1664. //
  1665. if ( !g_pCVssWriterCluster ) goto FnExit;
  1666. ClRtlLogPrint( LOG_NOISE, "[INIT] VSS Initializing\n" );
  1667. hr = g_pCVssWriterCluster->Initialize( g_VssIdCluster, // VSS_ID WriterId;
  1668. L"Cluster Service Writer", // LPCWSTR WriterName;
  1669. VSS_UT_SYSTEMSERVICE, // VSS_USAGE_TYPE UsageType;
  1670. VSS_ST_OTHER // VSS_SOURCE_TYPE SourceType;
  1671. // <default> VSS_APPLICATION_LEVEL AppLevel;
  1672. // <default> DWORD dwTimeoutFreeze
  1673. );
  1674. if ( FAILED( hr )) {
  1675. ClRtlLogPrint( LOG_CRITICAL, "[INIT] VSS Failed to initialize VSS, status 0x%1!x!\n", hr );
  1676. dwStatus = HRESULT_CODE( hr );
  1677. goto FnExit;
  1678. }
  1679. // Now we need to subscribe so that we get the events for backup.
  1680. //
  1681. ClRtlLogPrint( LOG_NOISE, "[INIT] VSS Calling subscribe to register for backup events.\n" );
  1682. hr = g_pCVssWriterCluster->Subscribe( );
  1683. if ( FAILED( hr )) {
  1684. ClRtlLogPrint( LOG_CRITICAL, "[INIT] VSS Failed to subscribe to VSS, status 0x%1!x!\n", hr );
  1685. dwStatus = HRESULT_CODE( hr );
  1686. goto FnExit;
  1687. } else {
  1688. g_bCVssWriterClusterSubscribed = TRUE;
  1689. }
  1690. FnExit:
  1691. return ( dwStatus );
  1692. }// VssWriterInit
  1693. RPC_STATUS
  1694. CspRegisterDynamicLRPCEndpoint(
  1695. VOID
  1696. )
  1697. /*++
  1698. Routine Description:
  1699. Inquire the server bindings, look for the LRPC protocol and register the clusapi interface
  1700. with the dynamic endpoint obtained for the LRPC protocol.
  1701. Arguments:
  1702. None.
  1703. Return Value:
  1704. RPC_S_OK if successful.
  1705. RPC error code otherwise.
  1706. --*/
  1707. {
  1708. RPC_STATUS rpcStatus;
  1709. RPC_BINDING_VECTOR *pServerBindingVector = NULL;
  1710. DWORD i;
  1711. WCHAR *pszProtSeq = NULL, *pServerStringBinding = NULL;
  1712. //
  1713. // Get the server binding vector. This includes all the protocols and EP's registered
  1714. // so far.
  1715. //
  1716. rpcStatus = RpcServerInqBindings( &pServerBindingVector );
  1717. if ( rpcStatus != RPC_S_OK )
  1718. {
  1719. ClRtlLogPrint(LOG_CRITICAL,
  1720. "[INIT] CspRegisterDynamicLRPCEndpoint: Unable to inquire server bindings, status %1!u!.\n",
  1721. rpcStatus);
  1722. NmDumpRpcExtErrorInfo( rpcStatus );
  1723. goto FnExit;
  1724. }
  1725. //
  1726. // Grovel the binding vector looking for the LRPC protocol information.
  1727. //
  1728. for( i = 0; i < pServerBindingVector->Count; i++ )
  1729. {
  1730. rpcStatus = RpcBindingToStringBinding( pServerBindingVector->BindingH[i],
  1731. &pServerStringBinding );
  1732. if ( rpcStatus != RPC_S_OK )
  1733. {
  1734. ClRtlLogPrint(LOG_CRITICAL,
  1735. "[INIT] CspRegisterDynamicLRPCEndpoint: Unable to convert binding to string, status %1!u!.\n",
  1736. rpcStatus);
  1737. NmDumpRpcExtErrorInfo( rpcStatus );
  1738. goto FnExit;
  1739. }
  1740. rpcStatus = RpcStringBindingParse( pServerStringBinding,
  1741. NULL,
  1742. &pszProtSeq,
  1743. NULL,
  1744. NULL,
  1745. NULL );
  1746. if ( rpcStatus != RPC_S_OK )
  1747. {
  1748. ClRtlLogPrint(LOG_CRITICAL,
  1749. "[INIT] CspRegisterDynamicLRPCEndpoint: Unable to parse server string binding, status %1!u!.\n",
  1750. rpcStatus);
  1751. NmDumpRpcExtErrorInfo( rpcStatus );
  1752. goto FnExit;
  1753. }
  1754. if ( lstrcmp ( pszProtSeq, TEXT("ncalrpc") ) == 0 )
  1755. {
  1756. //
  1757. // Found the LRPC protocol information
  1758. //
  1759. RPC_BINDING_VECTOR LrpcBindingVector;
  1760. LrpcBindingVector.Count = 1;
  1761. LrpcBindingVector.BindingH[0] = pServerBindingVector->BindingH[i];
  1762. //
  1763. // Register the dynamic endpoint obtained for the clusapi interface to field
  1764. // local calls.
  1765. //
  1766. rpcStatus = RpcEpRegister( s_clusapi_v2_0_s_ifspec,
  1767. &LrpcBindingVector,
  1768. NULL,
  1769. TEXT("Microsoft Cluster Server Local API") );
  1770. if ( rpcStatus != RPC_S_OK )
  1771. {
  1772. ClRtlLogPrint(LOG_CRITICAL,
  1773. "[INIT] CspRegisterDynamicLRPCEndpoint: Unable to register the clusapi interface lrpc endpoint, status %1!u!.\n",
  1774. rpcStatus);
  1775. NmDumpRpcExtErrorInfo( rpcStatus );
  1776. }
  1777. ClRtlLogPrint(LOG_NOISE,
  1778. "[INIT] CspRegisterDynamicLRPCEndpoint: Successfully registered LRPC endpoint with EP mapper\n");
  1779. goto FnExit;
  1780. }
  1781. RpcStringFree( &pszProtSeq );
  1782. pszProtSeq = NULL;
  1783. RpcStringFree( &pServerStringBinding );
  1784. pServerStringBinding = NULL;
  1785. } // for
  1786. //
  1787. // If you didn't find the LRPC information, return an error.
  1788. //
  1789. if ( i == pServerBindingVector->Count )
  1790. {
  1791. rpcStatus = RPC_S_NO_BINDINGS;
  1792. ClRtlLogPrint(LOG_CRITICAL,
  1793. "[INIT] CspRegisterDynamicLRPCEndpoint: Unable to get info on the LRPC binding, status %1!u!.\n",
  1794. rpcStatus);
  1795. goto FnExit;
  1796. }
  1797. FnExit:
  1798. //
  1799. // Free the strings and the binding vector if they haven't already been freed
  1800. //
  1801. if ( pszProtSeq != NULL ) RpcStringFree ( &pszProtSeq );
  1802. if ( pServerStringBinding != NULL ) RpcStringFree( &pServerStringBinding );
  1803. if ( pServerBindingVector != NULL ) RpcBindingVectorFree( &pServerBindingVector );
  1804. return ( rpcStatus );
  1805. }// CspRegisterDynamicLRPCEndpoint