Source code of Windows XP (NT5)


/*++

Copyright (c) 1995-1996 Microsoft Corporation

Module Name:

    OrSvr.cxx

Abstract:

    Object resolver server side class implementations.  CServerOxid, CServerOid,
    CServerSet and CServerSetTable classes are implemented here.

Author:

    Mario Goertzel    [MarioGo]

Revision History:

    MarioGo     04-03-95    Combined many smaller .cxx files
    MarioGo     01-12-96    Locally unique IDs

--*/

#include <or.hxx>

//
// ScanForOrphanedPinnedOids
//
// This function is called from CServerOxid::ProcessRelease when
// the soxid is deregistered or the server dies.
//
void
ScanForOrphanedPinnedOids(CServerOxid* pDeadOxid)
{
    CListElement* pLE;
    CServerOid* pOid;
    CServerOid* pNextOid;

    ASSERT(gpServerLock->HeldExclusive());

    // Walk the list, unpinning oids which were owned by the
    // dead soxid.
    pLE = gpServerPinnedOidList->First();
    pOid = pLE ? CServerOid::ContainingRecord2(pLE) : NULL;

    while (pOid)
    {
        pLE = pLE->Next();
        pNextOid = pLE ? CServerOid::ContainingRecord2(pLE) : NULL;

        if (pOid->GetOxid() == pDeadOxid)
        {
            // This will remove it from the pinned list
            pOid->SetPinned(FALSE);
        }

        pOid = pNextOid;
    }

    ASSERT(gpServerLock->HeldExclusive());

    return;
}

//
// CServerOid methods
//

void
CServerOid::Reference()
{
    ASSERT(gpServerLock->HeldExclusive());

    BOOL fRemove = (this->References() == 0);

    // We may remove something from a PList more than once; this won't
    // hurt anything.  fRemove is used to avoid trying to remove it more
    // often than necessary without taking the lock.
    this->CIdTableElement::Reference();

    if (fRemove)
    {
        CPListElement * t = Remove();
        ASSERT(t == &this->_plist || t == 0);
    }
}

DWORD
CServerOid::Release()
{
    ASSERT(gpServerLock->HeldExclusive());

    DWORD c = this->CReferencedObject::Dereference();

    if (0 == c)
    {
        // If another thread is already running this down it
        // means we got referenced and released during the rundown
        // callback.  That thread will figure out what to do.
        if (IsRunningDown())
        {
            KdPrintEx((DPFLTR_DCOMSS_ID,
                       DPFLTR_WARNING_LEVEL,
                       "OR: Oid running down in release: %p\n",
                       this));
        }
        else if (IsFreed() || this->_pOxid->IsRunning() == FALSE)
        {
            // Server freed this OID already; no need to run it down
            KdPrintEx((DPFLTR_DCOMSS_ID,
                       DPFLTR_INFO_LEVEL,
                       "OR: OID %p freed by server so not rundown\n",
                       this));
            SetRundown(TRUE);
            delete this;
        }
        else
        {
            // All serverset (or pinned) references have been released.  Put
            // ourselves into the oid plist so we can be rundown.
            ASSERT(!IsPinned());
            Insert();
        }
    }

    // The this pointer may be invalid here.
    return(c);
}
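
// CServerOid destructor.  Runs only once the oid has been rundown (or freed
// by its server); releases the reference on the owning OXID and removes the
// oid from the global oid table.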
CServerOid::~CServerOid()
{
    ASSERT(gpServerLock->HeldExclusive());
    ASSERT(_pOxid);
    ASSERT(_fRunningDown);
    ASSERT(!IsPinned());

    _pOxid->Release();
    gpServerOidTable->Remove(this);
}

void
CServerOid::KeepAlive()
// A client has removed this oid from its set.  This keeps
// the oid alive for another timeout period.
{
    ASSERT(gpServerLock->HeldExclusive());

    if (IsRunningDown() == FALSE && References() == 0)
    {
        // It's in the rundown list, move it to the end.
        CPListElement *pT = Remove();
        ASSERT(pT == &this->_plist);
        Insert();
    }
}

void
CServerOid::SetPinned(BOOL fPinned)
{
    ASSERT(gpServerLock->HeldExclusive());

    // Assert that this is a state switch.
    ASSERT(_fPinned ? !fPinned : fPinned);

    // Set new state
    _fPinned = fPinned;

    // When we are pinned, we take an extra reference to avoid further
    // rundown attempts.  When unpinned, we remove the reference.
    if (_fPinned)
    {
        this->Reference();

        // Now we should not be in any list
        ASSERT(_list.NotInList());
        ASSERT(_plist.NotInList());

        gpServerPinnedOidList->Insert(&_list);
    }
    else
    {
        // We should be in the gpServerPinnedOidList list, but not the plist
        ASSERT(_list.InList());
        ASSERT(_plist.NotInList());

        gpServerPinnedOidList->Remove(&_list);

        // This Release call may put us in the oidplist
        this->Release();
    }

    ASSERT(gpServerLock->HeldExclusive());

    return;
}

//
// CServerOidPList method
//

CServerOid *
CServerOidPList::MaybeRemoveMatchingOxid(
    IN CTime &when,
    IN CServerOid *pOid
    )
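// Scans the plist, stopping at the first entry whose timeout is not earlier
// than 'when'; removes and returns the first scanned oid that matches pOid,
// or returns 0 if none does.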
{
    CPListElement *ple;
    CServerOid *poid;

    CMutexLock lock(&this->_lock);

    ple = (CPListElement *)CPList::First();

    while(   ple != 0
          && *ple->GetTimeout() < when)
    {
        poid = CServerOid::ContainingRecord(ple);

        if (poid->Match(pOid))
        {
            Remove(ple);
            return(poid);
        }

        ple = ple->Next();
    }

    return(0);
}

//
// CServerOxid methods
//

void
CServerOxid::ProcessRelease()
/*++

Routine Description:

    The server process owning this OXID has either died
    or deregistered this oxid.  Releases the oxid and
    nulls the pProcess pointer.

Arguments:

    n/a

Return Value:

    n/a

--*/
{
    ASSERT(gpServerLock->HeldExclusive());
    ASSERT(_pProcess);

    _fRunning = FALSE;

    ScanForOrphanedPinnedOids(this);

    Release();

    // This pointer may now be invalid; the _pProcess pointer may be invalid.
}

CServerOxid::~CServerOxid(void)
{
    ASSERT(gpServerLock->HeldExclusive());
    ASSERT(_pProcess);
    ASSERT(!IsRunning()); // implies that the oxid has been removed from the table
    ASSERT(!_fRundownInProgress);

    _pProcess->Release();
}

ORSTATUS
CServerOxid::GetInfo(
    OUT OXID_INFO *poxidInfo,
    IN BOOL fLocal
    )
// Server lock held shared.
{
    ORSTATUS status;
    DUALSTRINGARRAY *psa;

    if (!IsRunning())
    {
        // Server crashed, info is not needed.
        return(OR_NOSERVER);
    }

    if (fLocal)
    {
        psa = _pProcess->GetLocalBindings();
    }
    else
    {
        psa = _pProcess->GetRemoteBindings();
    }

    if (!psa)
    {
        return(OR_NOMEM);
    }

    // copy the data
    memcpy(poxidInfo, &_info, sizeof(_info));
    poxidInfo->psa = psa;

    return(OR_OK);
}

void
CServerOxid::RundownOids(ULONG cOids,
                         CServerOid* aOids[])
// Note: Returns without the server lock held.
{
    RPC_STATUS status = RPC_S_OK;

    ASSERT(cOids > 0);
    ASSERT(cOids <= MAX_OID_RUNDOWNS_PER_CALL);
    ASSERT(gpServerLock->HeldExclusive());

    // We only issue the rundown call if a) we're still running
    // and b) there isn't another rundown call to this oxid
    // already in progress.
    if (IsRunning() && !_fRundownInProgress)
    {
        // Note: The server lock is released during the callback.
        // Since the OID hasn't rundown yet, it will keep a reference
        // to this OXID which in turn keeps the process object alive.

        // Ask our process object to issue an async call to try
        // to rundown the specified oids.  If the call was sent ok,
        // return.  Otherwise fall thru and cleanup below.
        _fRundownInProgress = TRUE;

        status = _pProcess->RundownOids(this,
                                        cOids,
                                        aOids);

        ASSERT(!gpServerLock->HeldExclusive());

        if (status == RPC_S_OK)
        {
            return;
        }
        else
        {
            // Re-take the lock
            gpServerLock->LockExclusive();

            // No rundown in progress now
            _fRundownInProgress = FALSE;
        }
    }

    BYTE aRundownStatus[MAX_OID_RUNDOWNS_PER_CALL];

    // If the server died or the apartment was unregistered, it is okay to
    // run all oids down.  Otherwise don't run any down (this is also the
    // case for when we are still running but another rundown is already
    // in progress).
    for (ULONG i = 0; i < cOids; i++)
    {
        aRundownStatus[i] = IsRunning() ? ORS_DONTRUNDOWN : ORS_OK_TO_RUNDOWN;
    }

    // Call the notify function which will do appropriate
    // cleanup on the oids
    ProcessRundownResultsInternal(FALSE, cOids, aOids, aRundownStatus);

    gpServerLock->UnlockExclusive();
}

void
CServerOxid::ProcessRundownResults(ULONG cOids,
                                   CServerOid* aOids[],
                                   BYTE aRundownStatus[])
/*++

Routine Description:

    Takes the appropriate action based on the result of trying
    to rundown one or more oids.

Arguments:

    cOids -- # of oids in aOids
    aOids -- array of CServerOid*'s that we tried to rundown
    aRundownStatus -- array of status values from the
        OID_RUNDOWN_STATUS enumeration.

Return Value:

    void

--*/
{
    ProcessRundownResultsInternal(TRUE,
                                  cOids,
                                  aOids,
                                  aRundownStatus);
    return;
}

void
CServerOxid::ProcessRundownResultsInternal(BOOL fAsyncReturn,
                                           ULONG cOids,
                                           CServerOid* aOids[],
                                           BYTE aRundownStatus[])
/*++

Routine Description:

    Takes the appropriate action based on the result of trying
    to rundown one or more oids.

Arguments:

    fAsyncReturn -- TRUE if we are processing these results in response
        to an async rundown call returning, FALSE otherwise.
    cOids -- # of oids in aOids
    aOids -- array of CServerOid*'s that we tried to rundown
    aRundownStatus -- array of status values from the
        OID_RUNDOWN_STATUS enumeration.

Return Value:

    void

--*/
{
    ULONG i;

    ASSERT(gpServerLock->HeldExclusive());

    // Flip the "rundown in progress" flag as necessary.
    if (fAsyncReturn)
    {
        ASSERT(_fRundownInProgress);
        _fRundownInProgress = FALSE;
    }

    for(i = 0; i < cOids; i++)
    {
        CServerOid* pOid;

        pOid = aOids[i];
        ASSERT(pOid);
        ASSERT(this == pOid->GetOxid());

        if (aRundownStatus[i] == ORS_OID_PINNED)
        {
            KdPrintEx((DPFLTR_DCOMSS_ID,
                       DPFLTR_INFO_LEVEL,
                       "OR: Randown OID %p but the client says it's pinned\n",
                       pOid));

            // Server says that particular oid is "pinned", ie cannot
            // be rundown until we are told otherwise.  Note that we
            // check for this before we check # of references -- the
            // ORS_OID_PINNED status takes precedence.
            pOid->SetPinned(TRUE);
            pOid->SetRundown(FALSE);
        }
        else if (pOid->References() != 0)
        {
            // Added to a set while running down and still referenced.
            pOid->SetRundown(FALSE);
        }
        else if (aRundownStatus[i] == ORS_OK_TO_RUNDOWN)
        {
            delete pOid;
        }
        else
        {
            ASSERT(aRundownStatus[i] == ORS_DONTRUNDOWN);

            KdPrintEx((DPFLTR_DCOMSS_ID,
                       DPFLTR_INFO_LEVEL,
                       "OR: Randown OID %p but the client kept it alive\n",
                       pOid));

            // Client wants us to keep it alive and is still running.
            pOid->SetRundown(FALSE);
            pOid->Insert();
        }
    }
}

ORSTATUS
CServerOxid::GetRemoteInfo(
    OUT OXID_INFO *pInfo,
    IN USHORT cClientProtseqs,
    IN USHORT aClientProtseqs[]
    )
// Server lock held shared.
{
    ORSTATUS status;
    USHORT protseq;

    status = GetInfo(pInfo, FALSE);

    if (OR_OK == status)
    {
        protseq = FindMatchingProtseq(cClientProtseqs,
                                      aClientProtseqs,
                                      pInfo->psa->aStringArray
                                      );
        if (0 == protseq)
        {
            MIDL_user_free(pInfo->psa);
            pInfo->psa = 0;
            status = OR_I_NOPROTSEQ;
        }
    }

    return(status);
}

ORSTATUS
CServerOxid::LazyUseProtseq(
    IN USHORT cClientProtseqs,
    IN USHORT *aClientProtseqs
    )
// Server lock held shared, returns with the server lock exclusive.
// Note: It is possible that after this call the OXID has been deleted.
{
    ORSTATUS status;

    if (IsRunning())
    {
        // Keep this OXID process alive while making the callback.  If the process
        // crashes and this OXID has no OIDs it could be released by everybody
        // else.  This keeps the OXID and process alive until we finish.
        this->Reference();

        gpServerLock->UnlockShared();

        status = _pProcess->UseProtseqIfNeeded(cClientProtseqs, aClientProtseqs);

        gpServerLock->LockExclusive();

        this->Release();
    }
    else
    {
        gpServerLock->ConvertToExclusive();
        status = OR_NOSERVER;
    }

    // Note: The this pointer may be BAD now.
    return(status);
}

//
// CServerSet methods.
//

ORSTATUS
CServerSet::AddObject(OID &oid)
{
    ORSTATUS status = OR_OK;
    CServerOid *pOid;

    ASSERT(gpServerLock->HeldExclusive());

    CIdKey key(oid);
    pOid = (CServerOid *)gpServerOidTable->Lookup(key);

    if (pOid)
    {
        ASSERT(_blistOids.Member(pOid) == FALSE);

        // Don't add duplicate IDs to the set
        if (_blistOids.Member(pOid) == FALSE)
        {
            status = _blistOids.Insert(pOid);

            if (status == OR_OK)
            {
                pOid->Reference();
            }
        }
    }
    else
        status = OR_BADOID;

    VALIDATE((status, OR_BADOID, OR_NOMEM, 0));

    return(status);
}

void
CServerSet::RemoveObject(OID &oid)
{
    CServerOid *pOid;

    ASSERT(gpServerLock->HeldExclusive());

    CIdKey key(oid);
    pOid = (CServerOid *)gpServerOidTable->Lookup(key);

    if (pOid)
    {
        CServerOid *pOidTmp = (CServerOid *)_blistOids.Remove(pOid);

        if (pOid == pOidTmp)
        {
            pOid->Release();
        }
        else
        {
            // Set doesn't contain the specified oid, treat this as an
            // add and delete by keeping the oid alive for another timeout
            // period.
            ASSERT(pOidTmp == 0);
            pOid->KeepAlive();
        }
    }
}

BOOL
CServerSet::ValidateObjects(BOOL fShared)
// fShared - Indicates if the server lock is held
//           shared (TRUE) or exclusive (FALSE).
//
// Return  - TRUE the lock is still shared, false
//           the lock is held exclusive.
{
    CServerOid *pOid;
    CBListIterator oids(&_blistOids);

    // Since we own a reference on all the Oids they must still exist.
    // No need to lock exclusive until we find something to delete.
    while(pOid = (CServerOid *)oids.Next())
    {
        if (!pOid->IsRunning())
        {
            if (fShared)
            {
                KdPrintEx((DPFLTR_DCOMSS_ID,
                           DPFLTR_WARNING_LEVEL,
                           "OR: Cleanup in set (%p), removing dead oids.\n",
                           this,
                           pOid));

                gpServerLock->ConvertToExclusive();
                fShared = FALSE;
                oids.Reset(&_blistOids);
                continue;
            }

            CServerOid *pOidTmp = (CServerOid *)_blistOids.Remove(pOid);
            ASSERT(pOidTmp == pOid);
            ASSERT(pOid->IsRunning() == FALSE);
            pOid->Release();
        }
    }

    return(fShared);
}

BOOL
CServerSet::Rundown()
// Rundown the whole set.
{
    CServerOid *poid;
    CTime now;

    ASSERT(gpServerLock->HeldExclusive());

    if (_timeout > now)
    {
        // Don't rundown if we've received a late ping.
        return(FALSE);
    }

    if (_fLocal && _blistOids.Size() != 0)
    {
        KdPrintEx((DPFLTR_DCOMSS_ID,
                   DPFLTR_WARNING_LEVEL,
                   "OR: Premature rundown of local set ignored.\n"));
        return(FALSE);
    }

    KdPrintEx((DPFLTR_DCOMSS_ID,
               DPFLTR_WARNING_LEVEL,
               "OR: Set %p's client appears to have died\n",
               this));

    CBListIterator oids(&_blistOids);

    while(poid = (CServerOid *)oids.Next())
    {
        poid->Release();
    }

    return(TRUE);
}

//
// CServerSetTable implementation
//

CServerSet *
CServerSetTable::Allocate(
    IN USHORT sequence,
    IN PSID psid,
    IN BOOL fLocal,
    OUT ID &setid
    )
/*++

Routine Description:

    Allocates a new CServerSet and returns the setid for the new set.

Arguments:

    sequence - initial sequence number for the new set.
    psid - pointer to an NT SID structure for the new set.
    fLocal - TRUE : set is for the local client,
             FALSE : set is for a remote client
    setid - the setid of the set returned.  Unchanged if return value 0.

Return Value:

    0 - Unable to allocate a resource

    non-zero - A pointer to the newly created set.

--*/
{
    ASSERT(gpServerLock->HeldExclusive());

    UINT i;
    LARGE_INTEGER li;

    ASSERT(_cAllocated <= _cMax);

    if (_cAllocated == _cMax)
    {
        // Table is full, realloc
        // Do this first, if it succeeds great even if
        // a later allocation fails.  If not, fail now.
        IndexElement *pNew = new IndexElement[_cMax * 2];

        if (!pNew)
        {
            return(0);
        }

        for (i = 0; i < _cMax; i++)
        {
            pNew[i] = _pElements[i];
        }

        for(i = _cMax; i < _cMax*2; i++)
        {
            pNew[i]._sequence = GetTickCount();
            pNew[i]._pSet = 0;
        }
        delete [] _pElements;
        _pElements = pNew;
        _cMax *= 2;
    }

    CServerSet *pSet = new CServerSet(sequence, psid, fLocal);

    if (0 == pSet)
    {
        return(0);
    }

    ASSERT(_pElements);
    ASSERT(_cMax > _cAllocated);

    for(i = _iFirstFree; i < _cMax; i++)
    {
        if (0 == _pElements[i]._pSet)
        {
            _pElements[i]._sequence++;
            _pElements[i]._pSet = pSet;
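
            // The setid packs the 1-based table index into the high 32 bits
            // and the slot's sequence number into the low 32 bits; Lookup()
            // undoes this encoding.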
            li.HighPart = i + 1;
            li.LowPart = _pElements[i]._sequence;

            setid = li.QuadPart;

            _iFirstFree = i + 1;
            _cAllocated++;

            return pSet;
        }
    }

    ASSERT(0);
    return(0);
}

CServerSet *
CServerSetTable::Lookup(
    IN ID setid
    )
/*++

Routine Description:

    Looks up a set given the set's ID.

    Server lock held shared.

Arguments:

    setid - the ID of the set to lookup

Return Value:

    0 - set doesn't exist

    non-zero - the set.

--*/
{
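    // The high 32 bits of the setid carry the 1-based table index;
    // the low 32 bits carry the sequence number assigned by Allocate().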
    LARGE_INTEGER li;
    li.QuadPart = setid;

    LONG i = li.HighPart - 1;
    DWORD sequence = (DWORD)(setid & ~((ID)0));

    if (i >= 0 && (DWORD) i < _cMax)
    {
        if (_pElements[i]._sequence == sequence)
        {
            // May still be null if it is free and has not yet been reused.
            return(_pElements[i]._pSet);
        }
    }

    return(0);
}

ID
CServerSetTable::CheckForRundowns(
    )
/*++

Routine Description:

    Used by ping and worker threads to monitor for sets that should
    be rundown.  It is called with the server lock held shared.

Arguments:

    None

Return Value:

    0 - Didn't find a set to rundown

    non-zero - ID of a set which may need to be rundown.

--*/
{
    UINT i, end;
    LARGE_INTEGER id;

    id.QuadPart = 0;

    ASSERT(_iRundown < _cMax);

    if (_cAllocated == 0)
    {
        return(0);
    }
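
    // Round-robin scan: start just past the slot examined last time and
    // advance to the next allocated slot.  The _cAllocated check above
    // guarantees the loop terminates.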
    i = _iRundown;

    do
    {
        ASSERT(_cAllocated); // loop assumes one or more allocated elements.
        i = (i + 1) % _cMax;
    }
    while(0 == _pElements[i]._pSet);

    ASSERT(_pElements[i]._pSet);

    if (_pElements[i]._pSet->ShouldRundown())
    {
        id.HighPart = i + 1;
        id.LowPart = _pElements[i]._sequence;
    }

    _iRundown = i;

    return(id.QuadPart);
}

BOOL
CServerSetTable::RundownSetIfNeeded(
    IN ID setid
    )
/*++

Routine Description:

    Runs down a set (or sets) if needed.  Called by
    ping and worker threads.  Server lock held exclusive.

Arguments:

    setid - An ID previously returned from CheckForRundowns.

Return Value:

    TRUE - A set was actually rundown

    FALSE - No sets actually rundown

--*/
{
    ASSERT(gpServerLock->HeldExclusive());

    CServerSet *pSet = Lookup(setid);

    if (0 == pSet || FALSE == pSet->ShouldRundown())
    {
        // Set already randown or has been pinged in the meantime.
        return(FALSE);
    }

    // PERF REVIEW this function has the option of running down multiple sets,
    // saving the worker thread from taking and releasing the lock many times
    // when a bunch of sets all rundown.  This feature is not used.

    LARGE_INTEGER li;
    li.QuadPart = setid;
    UINT i = li.HighPart - 1;

    if (pSet->Rundown())
    {
        delete pSet;
        _cAllocated--;
        if (i < _iFirstFree) _iFirstFree = i;
        _pElements[i]._pSet = 0;
        return(TRUE);
    }

    return(FALSE);
}

void
CServerSetTable::PingAllSets()
/*++

Routine Description:

    Performs a ping of all sets currently in the table.

Arguments:

    none

Return Value:

    void

--*/
{
    ASSERT(gpServerLock->HeldExclusive());

    ULONG i;

    for(i = 0; i < _cMax; i++)
    {
        if (_pElements[i]._pSet)
        {
            _pElements[i]._pSet->Ping(FALSE);
        }
    }
}