Leaked source code of windows server 2003

#ifndef _MMAPI_H_
#define _MMAPI_H_

/* ---------------------- MMapi.h ----------------------- */

/* This module contains cluster Membership Manager (MM) functions.
 *
 * These functions are for the sole use of the Cluster Manager (CM).
 * All are privileged and local; no user can call them. Security is
 * not checked. The module is not thread-aware; only a single thread
 * can use these functions at a time (unless otherwise noted).
 * Higher levels must ensure this. Blocking characteristics of the
 * routines are noted.
 *
 * All nodes of the cluster must know their own unique node number
 * within that cluster (a small int in the range 0..some_max-1). This
 * number is defined for the node at configuration time (either by the
 * user or by the setup code; this module doesn't care which) and is
 * essentially permanent. (The node number allows easy indexing and
 * bitmask operations, where names and non-small ints don't.)
 * There is no code in MM to detect illegal use of a node number,
 * staleness of a node number, etc.
 *
 * Clusters may also be named and/or numbered. Nodes are named. This
 * module makes no use of such facilities; it is based entirely on
 * node number.
 *
 * It is assumed that all use of the routines here is done on nodes
 * which agree to be members of the same cluster. This module does not
 * check such things.
 *
 * Cluster network connectivity must also be provided:
 *
 * - A node N must specify the various paths by which it can
 *   communicate with every other node; each other node must define
 *   its communication paths back to N. Full connectivity must be
 *   guaranteed; each node must be able to talk directly to every
 *   other node (and the reverse). For fault tolerance, communication
 *   paths must not only be replicated (minimally, duplicated) but
 *   must also use entirely independent wiring and drivers. TCP/IP
 *   LANs and async connections are suggested. Heartbeat traffic
 *   (which establishes cluster membership) may travel on any or all
 *   of the connectivity paths. [Cluster management traffic may
 *   travel on any or all of the connectivity paths, but may be
 *   restricted to high-performance paths (e.g., TCP/IP).]
 *
 * - A node must know the address of the cluster as a whole. This is
 *   an IP address which fails over (or a NetBIOS name which fails
 *   over.. TBD) such that connecting to that cluster address provides
 *   a way to talk to a valid active member of the cluster, here
 *   called the PCM.
 *
 * Note that cluster connectivity is not defined by this interface;
 * it is assumed to be in a separate module. This module deals only in
 * communication to the cluster or communication to a node number
 * within that cluster; it does not care about the details of how such
 * communication is done.
 *
 * Cluster connectivity must be known to all nodes in the cluster,
 * and to a joining node, before the join attempt is made.
 */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

#include <windows.h>
#include <bitset.h>

/* The following errors can be returned from the MM module: */
enum {
    MM_OK        = 0,  /* operation completed successfully */
    MM_TIMEOUT   = 1,  /* operation timed out */
    MM_TRANSIENT = 2,  /* transient failure; the operation should be
                          retried */
    MM_FAULT     = 3,  /* illegal parameter, impossible condition,
                          etc. NOTE: not all illegal calling
                          sequences will be detected. Correct use
                          of the MM functions is a responsibility
                          of the CM caller. */
    MM_ALREADY   = 4,  /* node is already in the desired condition */
    MM_NOTMEMBER = 5   /* node needs to be a cluster member to
                          perform this operation */
};

/* A node can be Up or Down */
typedef enum {
    NODE_UP   = 1,
    NODE_DOWN = 2
} NODESTATUS;

/* this type defines the cluster */
typedef struct tagCLUSTERINFO {
    DWORD   NumActiveNodes;  /* number of nodes currently
                                participating in this cluster */
    LPDWORD UpNodeList;      /* pointer to a <NumActiveNodes>-sized
                                array of node numbers in the
                                cluster which are up */
    DWORD   clockPeriod;     /* current setting */
    DWORD   sendHBRate;      /* current setting */
    DWORD   rcvHBRate;       /* current setting */
} CLUSTERINFO, *LPCLUSTERINFO;

/*
 * UpNodeList is the array of active cluster members, in numeric order.
 * The pointer may be null. If non-null, it is assumed that the space is
 * big enough.
 */
/* The following are the typedefs for the callback functions from MM to
   the higher-level Cluster Mgr layer. */

typedef DWORD (*MMNodeChange)(IN DWORD node, IN NODESTATUS newstatus);
/* MMNodeChange is a function which will be called in this Up node
 * whenever the MM declares another node Up or Down. This occurs after
 * changing the current cluster membership (available via ClusterInfo) and
 * in the last stage of Regroup. The CM may then
 * initiate failovers, device ownership changes, user node status
 * events, etc. This routine must be quick and must not block
 * (acceptable time TBD). Note that this will happen on all nodes
 * of the cluster; it is up to the CM design to decide whether to
 * issue events from only the PCM or from each CM node.
 *
 * A node receives a NODE_UP callback for itself.
 */

typedef DWORD (*MMNodesDown)(IN BITSET nodes);
/* MMNodesDown is a function that will be called at the end
 * of a Regroup to indicate that one or more nodes are down.
 *
 * MMNodeChange is called only to indicate that a node is up.
 */

typedef BOOL (*MMQuorumSelect)(void);
/* This is a callback to deal with the special case where only 2 members of the
 * cluster existed, and a Regroup incident occurred such that only one
 * member now survives OR there is a partition and both members survive (but cannot
 * know that). The intent of the quorum function is to determine whether the other
 * node is alive or not, using mechanisms other than the normal heartbeating over the
 * normal comm links (e.g., by using non-heartbeat communication paths, such as
 * SCSI reservations). This function is called only in the case where cluster
 * membership was previously exactly two nodes, and is called on any surviving node
 * of these two (which might mean it is called on one node or on both partitioned
 * nodes).
 *
 * If this routine returns TRUE, then the calling node stays in the cluster. If the
 * quorum algorithm determines that this node must die (because the other cluster member
 * exists), then this function should return FALSE; this will initiate an orderly
 * shutdown of the cluster services.
 *
 * In the case of a true partition, exactly one node should return TRUE.
 *
 * This routine may block and take a long time to execute (>2 secs).
 */

typedef void (*MMHoldAllIO)(void);
/* This routine is called early (prior to Stage 1) in a Regroup incident.
 * It suspends all cluster IO (to all cluster-owned devices), and any relevant
 * intra-cluster messages, until resumed (or until this node dies).
 */

typedef void (*MMResumeAllIO)(void);
/* This is called during Regroup after the new cluster membership has been
 * determined, when it is known that this node will remain a member of the cluster
 * (early in Stage 4). All IO previously suspended by MMHoldAllIO should be resumed.
 */

typedef void (*MMMsgCleanup1)(IN DWORD deadnode);
/* This is called as the first part of intra-cluster message system cleanup (in stage 4).
 * It cancels all incoming messages from a failed node. In the case where multiple nodes
 * are evicted from the cluster, this function is called repeatedly, once for each node.
 *
 * This routine is synchronous and Regroup is suspended until it returns.
 * It must execute quickly.
 */

typedef void (*MMMsgCleanup2)(IN BITSET nodes);
/* This is the second phase of message system cleanup (in stage 5). It cancels all
 * outgoing messages to dead nodes. Characteristics are as for Cleanup1.
 */

typedef void (*MMHalt)(IN DWORD haltcode);
/* This function is called whenever the MM detects that this node should immediately
 * leave the cluster (e.g., on receipt of a poison packet or at some impossible error
 * situation). The HALT function should immediately initiate Cluster Management shutdown.
 * No MM functions should be called after this, other than MMShutdown.
 *
 * haltcode is a number identifying the halt reason.
 */

typedef void (*MMJoinFailed)(void);
/* This is called on a node being joined into the cluster when the join attempt in the
 * PCM fails. Following this callback, the node may petition to
 * join again, after cleaning up via a call to MMLeave.
 */
/* The operations on clusters are defined below: */

DWORD MMInit(
    IN DWORD mynode,
    IN DWORD MaxNodes,
    IN MMNodeChange UpDownCallback,
    IN MMQuorumSelect QuorumCallback,
    IN MMHoldAllIO HoldIOCallback,
    IN MMResumeAllIO ResumeIOCallback,
    IN MMMsgCleanup1 MsgCleanup1Callback,
    IN MMMsgCleanup2 MsgCleanup2Callback,
    IN MMHalt HaltCallback,
    IN MMJoinFailed JoinFailedCallback,
    IN MMNodesDown NodesDownCallback
    );
/* This initialises various local MM data structures. It should be
 * called exactly once at CM startup time on every node. It must precede any other
 * MM call. It sends no messages; the node need not have connectivity defined yet.
 * It does not block.
 *
 * mynode is the node number of this node within the cluster. This is
 * assumed to be unique (but cannot be checked here to be so).
 *
 * The callbacks are described above.
 *
 * Error returns:
 *
 *   MM_OK     Success.
 *
 *   MM_FAULT  Something impossible happened.
 */
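/* Illustrative sketch (not part of the original header): how a CM might
 * initialize the MM at startup. The handler names (MyNodeChangeHandler,
 * MyQuorumSelect, etc.) and MY_NODE_NUMBER / MAX_CLUSTER_NODES are
 * assumptions for the example, not definitions from this module.
 */
#if 0
static DWORD
ExampleInitMembership(void)
{
    DWORD status = MMInit(MY_NODE_NUMBER,     /* this node's number */
                          MAX_CLUSTER_NODES,  /* configured maximum */
                          MyNodeChangeHandler,
                          MyQuorumSelect,
                          MyHoldAllIO,
                          MyResumeAllIO,
                          MyMsgCleanup1,
                          MyMsgCleanup2,
                          MyHalt,
                          MyJoinFailed,
                          MyNodesDown);

    if (status == MM_FAULT) {
        /* Something impossible happened; abandon CM startup. */
    }
    return status;
}
#endif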
DWORD MMJoin(
    IN DWORD joiningNode,
    IN DWORD clockPeriod,
    IN DWORD sendHBRate,
    IN DWORD rcvHBRate,
    IN DWORD joinTimeout
    );
/*
 * This causes the specified node to join the active cluster.
 *
 * This routine should be issued by only one node of the cluster (the
 * PCM); all join attempts must be single-threaded (by code outside
 * this module).
 *
 * This routine may block and take a long time to execute.
 *
 * [Prior to this being called:
 *  - joiningNode has communicated to the PCM of the cluster
 *    that it wants to join.
 *  - checks on validity of clustername, node number, etc. have been
 *    made; any security checks have been done;
 *  - connectivity paths have been established to/from the cluster
 *    and joiningNode;
 *  - the Registry etc. has been downloaded.
 * ]
 *
 * joiningNode is the node number of the node being brought into
 * the cluster.
 *
 * If joiningNode = self (i.e., the mynode value passed in via MMInit), then
 * the node will become the first member of a new cluster; if not, the node
 * will be brought into the existing cluster.
 *
 * clockPeriod, sendHBRate, and rcvHBRate can only be set by the first call (i.e.,
 * when the cluster is formed); later calls (from joining members)
 * inherit the original cluster values. The entire cluster therefore operates
 * with the same values.
 *
 * clockPeriod is the basic clock interval which drives all internal
 * MM activities, such as the various stages
 * of membership reconfiguration, and eventually user-perceived
 * recovery time. Unit = ms. This must be between the min and max
 * allowed (values TBD; current best setting = 300ms). Note that
 * clockPeriod is path independent and node independent. All
 * cluster members regroup at the same rate over any/all available
 * paths; all periods are identical in all nodes.
 * A value of 0 implies the default setting (currently 300ms).
 *
 * sendHBRate is the multiple of clockPeriod at which heartbeats are sent. This
 * must be between the min and max allowed (values TBD; current best setting = 4).
 * A value of 0 implies the default setting (currently 4).
 *
 * rcvHBRate is the multiple of sendHBRate within which a heartbeat must arrive, or
 * the node initiates a Regroup (probably resulting in some node leaving the cluster).
 * This must be between min and max (values TBD; current best setting = 2).
 * A value of 0 implies the default setting (currently 2).
 *
 * The combination of these variables controls overall node-failure detection time,
 * Regroup time, and the sensitivity of MM to transient comm errors. There are
 * important considerations to be understood when changing these values; these,
 * and the formulae for calculating recovery times etc., are given elsewhere.
 *
 *--- NOTES:
 *--- Safe and appropriate min and max values for these have yet to be chosen.
 *--- Changing the values from the defaults is currently UNSUPPORTED and can have
 *--- serious consequences.
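 *
 * Illustrative worked example (an assumption about how these rates compose,
 * not the official formula, which is documented elsewhere): with the default
 * settings, heartbeats are sent every clockPeriod * sendHBRate =
 * 300ms * 4 = 1200ms, and a heartbeat is treated as missing after roughly
 * clockPeriod * sendHBRate * rcvHBRate = 300ms * 4 * 2 = 2400ms, so about
 * 2.4 seconds of silence from a node would trigger a Regroup.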
 *
 * joinTimeout is an overall timer on the entire join attempt. If the
 * node has not achieved full cluster membership in this time, the
 * attempt is abandoned.
 *
 * Error returns:
 *
 *   MM_OK        Success; cluster joined. During or soon after the join, a
 *                node-up callback will occur on this
 *                and on all cluster member nodes (including the new member).
 *                The CM is then safe to
 *                assign ownership of cluster-owned devices to the
 *                node, and to start failover/failback processing.
 *
 *                Note: this routine establishes cluster membership.
 *                However, it is usually inadvisable to start high-level
 *                CM failbacks immediately, because other
 *                cluster members are often still joining. The CM
 *                should typically wait a while to see whether other
 *                nodes arrive in the cluster soon.
 *
 *   Failure cases:
 *
 *   In the joiningNode, a JoinFailed callback occurs if the joiningNode was
 *   in the middle of joining when the PCM's join attempt failed. (However, the
 *   callback is not guaranteed to happen; the joiningNode may not have started
 *   the join event yet.) Any failure of the joiningNode to join the cluster
 *   should be followed by a call to MMLeave() (ignoring the return code);
 *   such failures may be reported by the JoinFailed callback or just by overall
 *   timeouts on the entire join operation. Any subsequent attempt by
 *   joiningNode to re-join the cluster must be preceded by a call to MMLeave().
 *
 *   MM_ALREADY   The node is already a cluster member. This can
 *                happen if a node reboots (or a CM is restarted)
 *                and rejoins even before the cluster determines
 *                that it has disappeared. The CM should Leave and
 *                re-Join.
 *
 *   MM_FAULT     Permanent failure; something is very bad: the
 *                node number is duplicated; some parameter has some
 *                entirely illegal value. The CM is in deep weeds.
 *
 *   MM_TRANSIENT Transient failure. The cluster state changed
 *                during the operation (e.g., a node left the cluster).
 *                The operation should be retried.
 *
 *   MM_TIMEOUT   Timeout; cluster membership not achieved in time.
 */
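/* Illustrative sketch (not part of the original header): forming a new
 * cluster versus joining another node through the PCM. MY_NODE_NUMBER,
 * OTHER_NODE_NUMBER and JOIN_TIMEOUT are assumptions for the example.
 */
#if 0
static DWORD
ExampleFormThenJoin(void)
{
    /* Forming: joiningNode equals the mynode value passed to MMInit.
       Zeroes accept the default clockPeriod/sendHBRate/rcvHBRate. */
    DWORD status = MMJoin(MY_NODE_NUMBER, 0, 0, 0, JOIN_TIMEOUT);

    if (status == MM_OK) {
        /* Later, on the PCM, bringing another node in. The rate parameters
           were fixed by the forming call, so the zeroes here are ignored. */
        status = MMJoin(OTHER_NODE_NUMBER, 0, 0, 0, JOIN_TIMEOUT);
        if (status == MM_ALREADY) {
            /* Stale membership: the joining node should MMLeave() and re-join. */
        }
    }
    return status;
}
#endif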
DWORD MmSetRegroupAllowed(IN BOOL allowed);
/* This function can be used to allow/disallow regroup participation
 * for the current node.
 *
 * Originally, regroup was allowed immediately after receiving the RGP_START
 * event. Since this happens before the join is complete, the
 * joiner can arbitrate and win, leaving
 * the other side without a quorum device.
 *
 * It is required to call MmSetRegroupAllowed(TRUE) at the very end
 * of ClusterJoin. The node doesn't need to call MmSetRegroupAllowed(TRUE)
 * for ClusterForm, since MMJoin will call
 * MmSetRegroupAllowed(TRUE) for the cluster-forming node.
 *
 * MmSetRegroupAllowed(FALSE) can be used to disable regroup
 * participation during shutdown.
 *
 * Errors:
 *
 *   MM_OK        : successful completion
 *
 *   MM_TRANSIENT : disallowing regroup while a regroup is in progress
 *
 *   MM_ALREADY   : node is already in the desired condition
 */
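/* Illustrative sketch (not part of the original header): enabling regroup
 * participation only once the joining node's ClusterJoin sequence has
 * completed, and disabling it again during shutdown.
 */
#if 0
static void
ExampleRegroupParticipation(void)
{
    /* At the very end of ClusterJoin on the joining node: */
    DWORD status = MmSetRegroupAllowed(TRUE);
    if (status == MM_ALREADY) {
        /* Participation was already enabled; nothing more to do. */
    }

    /* During shutdown, before leaving the cluster: */
    status = MmSetRegroupAllowed(FALSE);
    if (status == MM_TRANSIENT) {
        /* A regroup is in progress; retry once it completes. */
    }
}
#endif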
void MMShutdown(void);
/* This shuts down the MM and Regroup services. Prior to this, the node should
 * voluntarily have left the cluster. Following this, all membership services
 * are non-functional; no further MM call may occur.
 *
 * THIS CALL MUST BE PRECEDED BY INCOMING MESSAGE CALLBACK SHUTDOWN.
 */

DWORD MMLeave(void);
/*
 * This function causes the current node to leave the active cluster (go to
 * the Down state). The node no longer sends Regroup or heartbeat traffic to other
 * cluster members. A NodeDown event will not be generated in this node. A Regroup
 * is triggered in the remaining nodes (if this node was a member of the cluster).
 * A node-down callback will occur on all remaining cluster members.
 *
 * This initiates a clean, voluntary leave operation. For safety, prior to this,
 * the calling node's CM should arrange to lose ownership of all cluster-owned
 * devices assigned to this node (and so cause failovers, etc).
 *
 * This routine returns normally. The caller (the CM) should then shut down
 * the cluster services. MMShutdown or MMHalt may occur after this call, or
 * the node may be re-joined to the cluster. All apply-to-the-PCM-to-join
 * attempts by a node must be preceded by a call to MMLeave().
 *
 * This routine may block.
 *
 * Errors:
 *
 *   MM_OK        : Elvis has left the cluster.
 *
 *   MM_NOTMEMBER : the node is not currently a cluster member.
 */
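/* Illustrative sketch (not part of the original header): an orderly voluntary
 * shutdown sequence following the comments above. The CM-side preparation and
 * the message-callback shutdown step are assumptions about the caller's code.
 */
#if 0
static void
ExampleVoluntaryShutdown(void)
{
    /* The CM has already released/failed over all cluster-owned devices. */
    DWORD status = MMLeave();
    if (status == MM_NOTMEMBER) {
        /* Node was already Down; continue with shutdown anyway. */
    }

    /* Shut down incoming message callbacks (required before MMShutdown),
       then stop membership services entirely. */
    MMShutdown();
}
#endif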
DWORD MMEject(IN DWORD node);
/*
 * This function causes the specified node to be ejected from the active cluster. The
 * targeted node will be sent a poison packet and will enter its MMHalt code. A Regroup
 * incident will be initiated. A node-down callback will occur on all remaining cluster
 * members.
 *
 * Note that the targeted node is Downed before that node has
 * a chance to call any remove-ownership or voluntary failover code. As
 * such, this is very dangerous. This call is provided only as a last
 * resort for removing an insane node from the cluster; normal removal
 * of a node from the cluster should occur by CM-CM communication,
 * followed by the node itself doing a voluntary Leave on itself.
 *
 * This routine returns when the node has been told to die. Completion of the removal
 * occurs asynchronously, and a NodeDown event will be generated when successful.
 *
 * This routine may block.
 *
 * Errors:
 *
 *   MM_OK        : The node has been told to leave the cluster.
 *
 *   MM_NOTMEMBER : the node is not currently a cluster member.
 *
 *   MM_TRANSIENT : My node state is in transition. OK to retry.
 */
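/* Illustrative sketch (not part of the original header): ejecting an
 * unresponsive node as a last resort. The deadNode value and surrounding
 * CM logic are assumptions for the example.
 */
#if 0
static void
ExampleEjectInsaneNode(DWORD deadNode)
{
    DWORD status = MMEject(deadNode);
    if (status == MM_TRANSIENT) {
        /* Local node state is in transition; retry later. */
    }
    /* Removal completes asynchronously: wait for the node-down callback
       rather than assuming the node is gone when MMEject returns. */
}
#endif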
DWORD MMNodeUnreachable(IN DWORD node);
/* This should be called by the CM's messaging module when a node
 * becomes unreachable FROM this node via all paths. This affects the connectivity
 * algorithm of the next Regroup incident. This function returns quickly
 * and without blocking.
 *
 * Errors:
 *
 *   Always MM_OK
 */
/* info about the cluster */

//[Fixed] : NmAdviseNodeFailure doesn't seem to cause a regroup
//SS: this is a workaround
DWORD MMForceRegroup(IN DWORD node);

DWORD MMClusterInfo(IN OUT LPCLUSTERINFO clinfo);
/* Returns the current cluster information.
 *
 * This can be called in nodes which are not members of the cluster;
 * such calls always return NumActiveNodes = 0, because Down nodes
 * have no knowledge of current cluster membership.
 *
 * If called during a Regroup incident, this returns the currently known
 * membership. If membership changes, the Up/Down events are delivered
 * after the information used by ClusterInfo is updated. Users should be aware of
 * the inherent race condition between these two, and (if using both) should be aware
 * that Up and Down events may be seen from nodes which were already In or Not In
 * the cluster. (Typically, these events should just be discarded.)
 *
 * This routine need not be single-threaded and does not block.
 *
 * Errors:
 *
 *   Always MM_OK
 */
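/* Illustrative sketch (not part of the original header): querying the current
 * membership. MAX_CLUSTER_NODES is an assumed upper bound used to size
 * UpNodeList, since the caller must supply sufficient space.
 */
#if 0
static void
ExampleQueryMembership(void)
{
    DWORD       upNodes[MAX_CLUSTER_NODES];  /* assumed bound; must be big enough */
    CLUSTERINFO info;

    info.UpNodeList = upNodes;    /* may be NULL if the node list is not wanted */
    (void) MMClusterInfo(&info);  /* always returns MM_OK */

    if (info.NumActiveNodes == 0) {
        /* This node is Down and has no knowledge of current membership. */
    }
}
#endif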
BOOL MMIsNodeUp(IN DWORD node);
/* Returns TRUE iff the node is a member of the current cluster.
 */

/* debugging and test only */
DWORD MMDiag(
    IN OUT LPCSTR messageBuffer,   // diagnostic message
    IN DWORD maximumLength,        // maximum size of buffer
    IN OUT LPDWORD ActualLength    // length of messageBuffer going in and coming out
    );
/* This function is called with "diagnostic" messages that are to be handled by the
 * membership manager. The result of handling these messages is returned in the
 * buffer. This is for test purposes only.
 */
DWORD MMMapStatusToDosError(IN DWORD MMStatus);
DWORD MMMapHaltCodeToDosError(IN DWORD HaltCode);

#define MM_STOP_REQUESTED 1002 // alias of RGP_SHUTDOWN_DURING_RGP in jrgpos.h
#define MM_INVALID_NODE   0

/* !!!!!!!! The following two functions return Dos error codes, not MmStatus codes */

DWORD MMSetQuorumOwner(
    IN DWORD NodeId,
    IN BOOL Block,
    OUT PDWORD pdwSelQuoOwnerId
    );
/*++

Routine Description:

    Informs the Membership engine about changes in ownership of
    the quorum resource.

Arguments:

    NodeId - Node number to be set as the quorum owner.
        The code assumes that NodeId is either equal to MyNodeId,
        in which case the current node is about to become the
        quorum owner, or has the value MM_INVALID_NODE, when
        the owner decides to relinquish quorum ownership.

    Block - If the quorum owner needs to relinquish the
        quorum immediately no matter what (RmTerminate, RmFail),
        this parameter should be set to FALSE, and to TRUE otherwise.

    pdwSelQuoOwnerId - If a regroup was in progress, this contains the
        node id of the node that was chosen for arbitrating for the
        quorum in the last regroup. If none was chosen, this contains
        MM_INVALID_NODE.

Return Value:

    ERROR_SUCCESS - QuorumOwner variable is set to the specified value.

    ERROR_RETRY - Regroup was in progress when this function
        was called and the regroup engine's decision conflicts with the
        current assignment.

Comments:

    This function needs to be called before calls to
    RmArbitrate, RmOnline, RmOffline, RmTerminate, RmFailResource.

    Depending on the result, the caller should either proceed with
    Arbitrate/Online or Offline, or return an error if ERROR_RETRY is returned.

    If Block is set to TRUE, the call will block until the end of the regroup if
    a regroup was in progress at the moment of the call.

*/
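/* Illustrative sketch (not part of the original header): informing the MM
 * before arbitrating for the quorum resource, per the comments above. The
 * MyNodeId variable and the surrounding resource-manager steps are assumptions;
 * note that this function returns Dos error codes, not MM status codes.
 */
#if 0
static DWORD
ExampleBecomeQuorumOwner(void)
{
    DWORD selectedOwner = MM_INVALID_NODE;
    DWORD status = MMSetQuorumOwner(MyNodeId,  /* assumed CM variable */
                                    TRUE,      /* block while a regroup runs */
                                    &selectedOwner);
    if (status == ERROR_SUCCESS) {
        /* Safe to proceed with RmArbitrate / RmOnline. */
    } else if (status == ERROR_RETRY) {
        /* The regroup engine chose a different arbitrator (selectedOwner);
           back off and let that node arbitrate, or retry later. */
    }
    return status;
}
#endif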
DWORD MMGetArbitrationWinner(
    OUT PDWORD NodeId
    );
/*++

Routine Description:

    Returns the node that won the arbitration during the last regroup,
    or MM_INVALID_NODE if no arbitration was performed.

Arguments:

    NodeId - a pointer to a variable that receives the node id of the
        arbitration winner.

Return Value:

    ERROR_SUCCESS - success

    ERROR_RETRY - Regroup was in progress when this function
        was called.

*/

VOID MMApproxArbitrationWinner(
    OUT PDWORD NodeId
    );
/*++

Routine Description:

    Returns the node that won the arbitration during the last regroup
    that performed arbitration.

    The call will block if a regroup is in progress.

Arguments:

    NodeId - a pointer to a variable that receives the node id of the
        arbitration winner.

Return Value:

    none

*/
VOID MMBlockIfRegroupIsInProgress(
    VOID
    );
/*++

Routine Description:

    The call will block if a regroup is in progress.

Arguments:

    None.

Return Value:

    none

*/

VOID MMStartClussvcClusnetHb(
    VOID
    );
/*++

Routine Description:

    This routine starts clussvc-to-clusnet heartbeating.

Arguments:

    None.

Return Value:

    none

*/

VOID MMStopClussvcClusnetHb(
    VOID
    );
/*++

Routine Description:

    This routine stops clussvc-to-clusnet heartbeating. It is called by other
    components, such as the FM, when they want to stop Clussvc->Clusnet heartbeating.

Arguments:

    None.

Return Value:

    none

*/
extern BOOL  MmStartClussvcToClusnetHeartbeat;
extern DWORD NmClusSvcHeartbeatTimeout;
extern DWORD MmQuorumArbitrationTimeout;
extern DWORD MmQuorumArbitrationEqualizer;
/*++

MmQuorumArbitrationTimeout (in seconds)

    How many seconds a node is allowed to spend arbitrating for the quorum
    before giving up.

MmQuorumArbitrationEqualizer (in seconds)

    If quorum arbitration took less than the specified number of seconds,
    the regroup engine will delay, so that the total arbitration time will
    equal MmQuorumArbitrationEqualizer.

*/

#ifdef __cplusplus
}
#endif /* __cplusplus */

/* -------------------------- end ------------------------------- */

#endif /* _MMAPI_H_ */