Leaked source code of Windows Server 2003

  1. /*++
  2. Copyright (c) 1997 Microsoft Corporation
  3. Module Name:
  4. event.c
  5. Abstract:
  6. General kernel-to-user event facility. Maintains a list of events that have
  7. occurred and delivers them to Umode via the completion of an IOCTL IRP.
  8. How this works:
  9. The consumer opens a handle to clusnet and issues an
  10. IOCTL_CLUSNET_SET_EVENT_MASK IRP indicating a mask of the events in which it
  11. is interested. A kernel consumer must also supply a callback routine through
  12. which it is notified about the event, i.e., it does not need to drop an
  13. IOCTL_CLUSNET_GET_NEXT_EVENT IRP to receive notifications. The consumer is
  14. linked onto the EventFileHandles list through the Linkage field in the
  15. CN_FSCONTEXT structure. All synchronization is provided through one lock
  16. called EventLock.
  17. Umode consumers issue an IOCTL_CLUSNET_GET_NEXT_EVENT IRP to reap the next
  18. interesting event. If no events are queued, the IRP is marked pending and a
  19. pointer to it is stored in the FS context. CnEventIrpCancel is set as the
  20. cancel routine. Note that only one IRP can be pending at a time; if an IRP
  21. is already queued, this one is completed with STATUS_UNSUCCESSFUL.
  22. If an event is waiting, it is removed from the FS context's list, the data is
  23. copied to the IRP's buffer, and the IRP is completed with success.
  24. Posters call CnIssueEvent to post the event of interest. This obtains the
  25. event lock, walks the file object list, and for consumers that are
  26. interested in this event, allocates an Event context block (maintained as a
  27. nonpaged lookaside list) and queues it to that file object's list of
  28. events. It then posts a work queue item to run CnpDeliverEvents. We can't do
  29. IRP processing directly since that would violate lock ordering within
  30. clusnet.
  31. CnpDeliverEvents obtains the IO cancel and event locks, then runs the file
  32. context list to see if there are events queued for any pending IRPs. If so,
  33. the event data is copied to the IRP's system buffer and the IRP is completed.
  34. Author:
  35. Charlie Wickham (charlwi) 17-Feb-1997
  36. Environment:
  37. Kernel Mode
  38. Revision History:
  39. Charlie Wickham (charlwi) 25-Oct-1999
  40. Split CnIssueEvent into two routines: CnIssueEvent which strictly looks
  41. up the appropriate consumers of the event and CnpDeliverEvents which
  42. runs at IRQL 0 to complete any IRPs that are waiting for new
  43. events. This was done to prevent out of order event delivery; since the
  44. event lock was near the top of the lock acquisition order, the net IF down
  45. events had to be queued to a worker thread, which was bad. Now the event
  46. lock is lowest, so a worker thread is not required to post. The worker
  47. thread still runs when it detects that there is an IRP waiting for an
  48. event.
  49. David Dion (daviddio) 29-Nov-2000
  50. Disallow modification of the EventFileHandles list while event
  51. deliveries are in process. Because CnpDeliverEvents and CnIssueEvent
  52. drop their locks to deliver (via IRP completion and kmode callback,
  53. respectively), a race condition can occur where an FS context event
  54. mask is cleared and the FS context linkage fields are reset.
  55. Modification of the EventFileHandles list is prevented using a count
  56. of currently delivering threads that is protected by the EventLock.
  57. David Dion (daviddio) 13-Nov-2001
  58. In order to preserve ordering of event delivery and to avoid calling
  59. out of clusnet while holding locks, all events are queued for delivery
  60. by a system worker thread. The only exceptions are those events whose
  61. types are in the mask CN_EVENT_TYPE_KMODE_FASTTRACK. These events are
  62. delivered in-line to kernel-mode callbacks, since immediate delivery
  63. is required (e.g. to stop disk reservations) and an imminent service
  64. termination means ordering is not important.
  65. --*/
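/*
 Illustrative sketch (not part of the original source): roughly how a
 user-mode consumer could drive this facility, following the description
 above. The device name, open flags, and error handling here are assumptions;
 the IOCTL codes and the CLUSNET_SET_EVENT_MASK_REQUEST and
 CLUSNET_EVENT_RESPONSE structures are the ones referenced in this file.

     HANDLE h = CreateFileW( L"\\\\.\\SomeClusnetDevice",   // assumed device name
                             GENERIC_READ | GENERIC_WRITE,
                             0, NULL, OPEN_EXISTING, 0, NULL );

     CLUSNET_SET_EVENT_MASK_REQUEST req;
     DWORD bytes;

     ZeroMemory( &req, sizeof(req) );
     req.EventMask = ClusnetEventHalt;        // mask of events of interest
     req.KmodeEventCallback = NULL;           // user-mode consumers supply no callback

     DeviceIoControl( h, IOCTL_CLUSNET_SET_EVENT_MASK,
                      &req, sizeof(req), NULL, 0, &bytes, NULL );

     for (;;) {
         CLUSNET_EVENT_RESPONSE evt;

         //
         // Completes immediately if an event is already queued; otherwise
         // the IRP is pended until CnpDeliverEvents completes it.
         //
         if ( !DeviceIoControl( h, IOCTL_CLUSNET_GET_NEXT_EVENT,
                                NULL, 0, &evt, sizeof(evt), &bytes, NULL )) {
             break;
         }

         // ... act on evt.EventType, evt.NodeId, evt.NetworkId, evt.Epoch ...
     }
*/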
  66. #include "precomp.h"
  67. #pragma hdrstop
  68. #include "event.tmh"
  69. #define CN_EVENT_TYPE_KMODE_FASTTRACK ( ClusnetEventHalt | \
  70. ClusnetEventPoisonPacketReceived \
  71. )
  72. /* Forward */
  73. NTSTATUS
  74. CnSetEventMask(
  75. IN PCN_FSCONTEXT FsContext,
  76. IN PCLUSNET_SET_EVENT_MASK_REQUEST EventRequest
  77. );
  78. NTSTATUS
  79. CnGetNextEvent(
  80. IN PIRP Irp,
  81. IN PIO_STACK_LOCATION IrpSp
  82. );
  83. NTSTATUS
  84. CnIssueEvent(
  85. CLUSNET_EVENT_TYPE Event,
  86. CL_NODE_ID NodeId OPTIONAL,
  87. CL_NETWORK_ID NetworkId OPTIONAL
  88. );
  89. VOID
  90. CnEventIrpCancel(
  91. PDEVICE_OBJECT DeviceObject,
  92. PIRP Irp
  93. );
  94. /* End Forward */
  95. VOID
  96. CnStartEventDelivery(
  97. VOID
  98. )
  99. /*++
  100. Routine Description:
  101. Synchronizes iteration through EventFileHandles list with respect
  102. to the EventDeliveryInProgress counter and the EventDeliveryComplete
  103. KEVENT.
  104. Arguments:
  105. None.
  106. Return value:
  107. None.
  108. Notes:
  109. Called with and returns with EventLock held.
  110. --*/
  111. {
  112. CnVerifyCpuLockMask(
  113. CNP_EVENT_LOCK, // Required
  114. 0, // Forbidden
  115. CNP_EVENT_LOCK_MAX // Maximum
  116. );
  117. CnAssert(EventDeliveryInProgress >= 0);
  118. if (++EventDeliveryInProgress == 1) {
  119. #if DBG
  120. if (KeResetEvent(&EventDeliveryComplete) == 0) {
  121. CnAssert(FALSE);
  122. }
  123. #else // DBG
  124. KeClearEvent(&EventDeliveryComplete);
  125. #endif // DBG
  126. }
  127. EventRevisitRequired = FALSE;
  128. CnVerifyCpuLockMask(
  129. CNP_EVENT_LOCK, // Required
  130. 0, // Forbidden
  131. CNP_EVENT_LOCK_MAX // Maximum
  132. );
  133. } // CnStartEventDelivery
  134. BOOLEAN
  135. CnStopEventDelivery(
  136. VOID
  137. )
  138. /*++
  139. Routine Description:
  140. Synchronizes iteration through EventFileHandles list with respect
  141. to the EventDeliveryInProgress counter and the EventDeliveryComplete
  142. KEVENT. Checks the EventRevisitRequired flag to determine if an
  143. event IRP arrived during the preceding delivery.
  144. When signalling EventDeliveryComplete, IO_NETWORK_INCREMENT is used
  145. to try to avoid starvation of waiters versus other event-delivering
  146. threads.
  147. Arguments:
  148. None.
  149. Return value:
  150. TRUE if a new event or event IRP may have been added to the
  151. EventFileHandles list, and it is necessary to rescan.
  152. Notes:
  153. Called with and returns with EventLock held.
  154. --*/
  155. {
  156. BOOLEAN eventRevisitRequired = EventRevisitRequired;
  157. CnVerifyCpuLockMask(
  158. CNP_EVENT_LOCK, // Required
  159. 0, // Forbidden
  160. CNP_EVENT_LOCK_MAX // Maximum
  161. );
  162. EventRevisitRequired = FALSE;
  163. CnAssert(EventDeliveryInProgress >= 1);
  164. if (--EventDeliveryInProgress == 0) {
  165. if (KeSetEvent(
  166. &EventDeliveryComplete,
  167. IO_NETWORK_INCREMENT,
  168. FALSE
  169. ) != 0) {
  170. CnAssert(FALSE);
  171. }
  172. }
  173. if (eventRevisitRequired) {
  174. CnTrace(
  175. EVENT_DETAIL, StopDeliveryRevisitRequired,
  176. "[CN] CnStopEventDelivery: revisit required."
  177. );
  178. }
  179. CnVerifyCpuLockMask(
  180. CNP_EVENT_LOCK, // Required
  181. 0, // Forbidden
  182. CNP_EVENT_LOCK_MAX // Maximum
  183. );
  184. return(eventRevisitRequired);
  185. } // CnStopEventDelivery
  186. BOOLEAN
  187. CnIsEventDeliveryInProgress(
  188. VOID
  189. )
  190. /*++
  191. Routine Description:
  192. Checks the EventDeliveryInProgress counter to determine if
  193. an event delivery is in progress. If so, sets the
  194. EventRevisitRequired flag.
  195. Arguments:
  196. None.
  197. Return value:
  198. TRUE if event delivery in progress.
  199. Notes:
  200. Called with and returns with EventLock held.
  201. --*/
  202. {
  203. BOOLEAN deliveryInProgress = FALSE;
  204. CnVerifyCpuLockMask(
  205. CNP_EVENT_LOCK, // Required
  206. 0, // Forbidden
  207. CNP_EVENT_LOCK_MAX // Maximum
  208. );
  209. if (EventDeliveryInProgress > 0) {
  210. deliveryInProgress = EventRevisitRequired = TRUE;
  211. }
  212. CnVerifyCpuLockMask(
  213. CNP_EVENT_LOCK, // Required
  214. 0, // Forbidden
  215. CNP_EVENT_LOCK_MAX // Maximum
  216. );
  217. return(deliveryInProgress);
  218. } // CnIsEventDeliveryInProgress
  219. BOOLEAN
  220. CnWaitForEventDelivery(
  221. IN PKIRQL EventLockIrql
  222. )
  223. /*++
  224. Routine Description:
  225. Waits for EventDeliveryComplete event to be signalled as long
  226. as EventDeliveryInProgress counter is greater than zero.
  227. Maintains a starvation counter to avoid looping forever.
  228. Starvation threshold of 100 was chosen arbitrarily.
  229. Arguments:
  230. EventLockIrql - irql at which EventLock was acquired
  231. Return value:
  232. TRUE if returning with no deliveries in progress
  233. FALSE if starvation threshold is exceeded and returning with
  234. deliveries in progress
  235. Notes:
  236. Called with and returns with EventLock held; however, EventLock
  237. may be dropped and reacquired during execution.
  238. This call blocks, so no other spinlocks may be held at
  239. invocation.
  240. --*/
  241. {
  242. NTSTATUS status;
  243. ULONG starvationCounter;
  244. CnVerifyCpuLockMask(
  245. CNP_EVENT_LOCK, // Required
  246. (ULONG) ~(CNP_EVENT_LOCK), // Forbidden
  247. CNP_EVENT_LOCK_MAX // Maximum
  248. );
  249. starvationCounter = 100;
  250. while (starvationCounter-- > 0) {
  251. if (EventDeliveryInProgress == 0) {
  252. return(TRUE);
  253. }
  254. CnReleaseLock(&EventLock, *EventLockIrql);
  255. status = KeWaitForSingleObject(
  256. &EventDeliveryComplete,
  257. Executive,
  258. KernelMode,
  259. FALSE,
  260. NULL
  261. );
  262. CnAssert(status == STATUS_SUCCESS);
  263. CnAcquireLock(&EventLock, EventLockIrql);
  264. }
  265. CnTrace(
  266. EVENT_DETAIL, EventWaitStarvation,
  267. "[CN] CnWaitForEventDelivery: starvation threshold %u "
  268. "exceeded.",
  269. starvationCounter
  270. );
  271. IF_CNDBG( CN_DEBUG_EVENT ) {
  272. CNPRINT(("[CN] CnWaitForEventDelivery: starvation threshold "
  273. "expired.\n"));
  274. }
  275. CnVerifyCpuLockMask(
  276. CNP_EVENT_LOCK, // Required
  277. (ULONG) ~(CNP_EVENT_LOCK), // Forbidden
  278. CNP_EVENT_LOCK // Maximum
  279. );
  280. return(FALSE);
  281. } // CnWaitForEventDelivery
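/*
 Illustrative summary (not part of the original source) of how the four
 guard routines above are meant to be used together. A delivering thread
 brackets its walk of EventFileHandles with CnStartEventDelivery and
 CnStopEventDelivery; a thread that wants to edit the list first drains
 deliveries with CnWaitForEventDelivery. This mirrors the callers later in
 this file (CnpDeliverEvents, CnIssueEvent, CnSetEventMask).

     // Delivering side (see CnpDeliverEvents / CnIssueEvent):
     CnAcquireLock( &EventLock, &irql );
     do {
         CnStartEventDelivery();
         // ... walk EventFileHandles; EventLock may be dropped and
         //     reacquired around each individual delivery ...
     } while ( CnStopEventDelivery() );    // TRUE => rescan for new IRPs/events
     CnReleaseLock( &EventLock, irql );

     // List-modifying side (see CnSetEventMask):
     CnAcquireLock( &EventLock, &irql );
     if ( CnWaitForEventDelivery( &irql )) {
         RemoveEntryList( &FsContext->Linkage );   // safe: no deliveries in flight
     } else {
         // starvation threshold hit; caller fails with STATUS_TIMEOUT
     }
     CnReleaseLock( &EventLock, irql );
*/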
  282. NTSTATUS
  283. CnSetEventMask(
  284. IN PCN_FSCONTEXT FsContext,
  285. IN PCLUSNET_SET_EVENT_MASK_REQUEST EventRequest
  286. )
  287. /*++
  288. Routine Description:
  289. For a given file handle context, set the event mask associated
  290. with it
  291. Arguments:
  292. FsContext - pointer to the clusnet file handle context block
  293. EventRequest - set event mask request containing the event mask and optional callback
  294. Return Value:
  295. STATUS_TIMEOUT if unable to modify EventFileHandles list.
  296. STATUS_INVALID_PARAMETER_MIX if providing NULL event mask on
  297. first call
  298. STATUS_SUCCESS on success.
  299. Notes:
  300. This call may block.
  301. --*/
  302. {
  303. CN_IRQL OldIrql;
  304. NTSTATUS Status = STATUS_SUCCESS;
  305. PLIST_ENTRY NextEntry;
  306. CnVerifyCpuLockMask(
  307. 0, // Required
  308. 0xFFFFFFFF, // Forbidden
  309. 0 // Maximum
  310. );
  311. CnAcquireLock( &EventLock, &OldIrql );
  312. #if 0
  313. PCN_FSCONTEXT ListFsContext;
  314. NextEntry = EventFileHandles.Flink;
  315. while ( NextEntry != &EventFileHandles ) {
  316. ListFsContext = CONTAINING_RECORD( NextEntry, CN_FSCONTEXT, Linkage );
  317. if ( ListFsContext == FsContext ) {
  318. break;
  319. }
  320. NextEntry = ListFsContext->Linkage.Flink;
  321. }
  322. #endif
  323. if ( EventRequest->EventMask != 0 ) {
  324. //
  325. // adding or updating a handle. If it is not in the list, then add it.
  326. // Remember the events and, if appropriate, the callback func to use
  327. // when an event occurs.
  328. //
  329. if ( IsListEmpty( &FsContext->Linkage )) {
  330. //
  331. // Do not modify the EventFileHandles list if an event
  332. // delivery is in progress.
  333. //
  334. if (CnWaitForEventDelivery(&OldIrql)) {
  335. InsertHeadList( &EventFileHandles, &FsContext->Linkage );
  336. } else {
  337. Status = STATUS_TIMEOUT;
  338. }
  339. }
  340. if (NT_SUCCESS(Status)) {
  341. FsContext->EventMask = EventRequest->EventMask;
  342. FsContext->KmodeEventCallback = EventRequest->KmodeEventCallback;
  343. }
  344. } else if ( !IsListEmpty( &FsContext->Linkage )) {
  345. //
  346. // Null event mask and the fileobj on the event file obj list means
  347. remove this guy from the list. Zap any events that may have been queued
  348. // waiting for an IRP. Re-init the linkage to empty so we'll add them
  349. // back on if they re-init the mask.
  350. //
  351. FsContext->EventMask = 0;
  352. //
  353. // Do not modify the EventFileHandles list if an event
  354. // delivery is in progress. It is okay to modify this
  355. // FsContext structure since the EventLock is held.
  356. //
  357. if (CnWaitForEventDelivery(&OldIrql)) {
  358. RemoveEntryList( &FsContext->Linkage );
  359. InitializeListHead( &FsContext->Linkage );
  360. } else {
  361. Status = STATUS_TIMEOUT;
  362. }
  363. while ( !IsListEmpty( &FsContext->EventList )) {
  364. NextEntry = RemoveHeadList( &FsContext->EventList );
  365. ExFreeToNPagedLookasideList( EventLookasideList, NextEntry );
  366. }
  367. } else {
  368. //
  369. // can't provide NULL event mask first time in
  370. //
  371. Status = STATUS_INVALID_PARAMETER_MIX;
  372. }
  373. CnReleaseLock( &EventLock, OldIrql );
  374. if (Status != STATUS_SUCCESS) {
  375. CnTrace(
  376. EVENT_DETAIL, SetEventMaskFailed,
  377. "[CN] CnSetEventMask failed, status %!status!.",
  378. Status
  379. );
  380. }
  381. CnVerifyCpuLockMask(
  382. 0, // Required
  383. 0xFFFFFFFF, // Forbidden
  384. 0 // Maximum
  385. );
  386. return Status;
  387. } // CnSetEventMask
  388. VOID
  389. CnpDeliverEvents(
  390. IN PDEVICE_OBJECT DeviceObject,
  391. IN PVOID Parameter
  392. )
  393. /*++
  394. Routine Description:
  395. Deliver any queued events to those who are waiting. If an IRP is already
  396. queued, complete it with the info supplied.
  397. Arguments:
  398. DeviceObject - clusnet device object, not used
  399. Parameter - PIO_WORKITEM that must be freed
  400. Return Value:
  401. None
  402. --*/
  403. {
  404. CN_IRQL OldIrql;
  405. PCLUSNET_EVENT_ENTRY Event;
  406. PCLUSNET_EVENT_RESPONSE UserEventData;
  407. PCN_FSCONTEXT FsContext;
  408. PLIST_ENTRY NextFsHandleEntry;
  409. PIRP EventIrp;
  410. PLIST_ENTRY Entry;
  411. ULONG eventsDelivered = 0;
  412. BOOLEAN revisitRequired;
  413. CnVerifyCpuLockMask(
  414. 0, // Required
  415. 0xFFFFFFFF, // Forbidden
  416. 0 // Maximum
  417. );
  418. //
  419. // free the workitem
  420. //
  421. IoFreeWorkItem( (PIO_WORKITEM) Parameter );
  422. //
  423. // grab the cancel and event locks and loop through the file handles,
  424. // looking to see which file objs have events queued and IRPs pending.
  425. //
  426. CnAcquireCancelSpinLock ( &OldIrql );
  427. CnAcquireLockAtDpc( &EventLock );
  428. do {
  429. //
  430. // Indicate that a thread is iterating through the EventFileHandles
  431. // list to deliver events.
  432. //
  433. CnTrace(
  434. EVENT_DETAIL, DeliverEventsStartIteration,
  435. "[CN] CnpDeliverEvents: starting file handles list iteration."
  436. );
  437. CnStartEventDelivery();
  438. NextFsHandleEntry = EventFileHandles.Flink;
  439. while ( NextFsHandleEntry != &EventFileHandles ) {
  440. FsContext = CONTAINING_RECORD( NextFsHandleEntry, CN_FSCONTEXT, Linkage );
  441. if ( FsContext->EventIrp != NULL ) {
  442. //
  443. // Deliver the first event, if it exists. Any other queued events
  444. // will be delivered when subsequent Event IRPs are submitted.
  445. //
  446. if ( !IsListEmpty( &FsContext->EventList ) ) {
  447. Entry = RemoveHeadList( &FsContext->EventList );
  448. Event = CONTAINING_RECORD( Entry, CLUSNET_EVENT_ENTRY, Linkage );
  449. //
  450. // clear the pointer to the pended IRP and remove the entry from the
  451. // event list while synchronized.
  452. //
  453. EventIrp = FsContext->EventIrp;
  454. FsContext->EventIrp = NULL;
  455. CnReleaseLockFromDpc( &EventLock );
  456. IF_CNDBG( CN_DEBUG_EVENT ) {
  457. CNPRINT(("[CN] CnDeliverEvents: completing IRP %p with event %d\n",
  458. EventIrp, Event->EventData.EventType));
  459. }
  460. EventIrp->CancelIrql = OldIrql;
  461. UserEventData = (PCLUSNET_EVENT_RESPONSE)EventIrp->AssociatedIrp.SystemBuffer;
  462. UserEventData->Epoch = Event->EventData.Epoch;
  463. UserEventData->EventType = Event->EventData.EventType;
  464. UserEventData->NodeId = Event->EventData.NodeId;
  465. UserEventData->NetworkId = Event->EventData.NetworkId;
  466. ExFreeToNPagedLookasideList( EventLookasideList, Entry );
  467. CnTrace(
  468. EVENT_DETAIL, DeliverEventsCompletingIrp,
  469. "[CN] CnpDeliverEvents: Completing IRP %p "
  470. "to deliver event: "
  471. "Epoch %u, Type %x, NodeId %x, NetworkId %x.",
  472. EventIrp,
  473. UserEventData->Epoch,
  474. UserEventData->EventType,
  475. UserEventData->NodeId,
  476. UserEventData->NetworkId
  477. );
  478. //
  479. // IO Cancel lock is released in this routine
  480. //
  481. CnCompletePendingRequest(EventIrp,
  482. STATUS_SUCCESS,
  483. sizeof( CLUSNET_EVENT_RESPONSE ));
  484. CnAcquireCancelSpinLock ( &OldIrql );
  485. CnAcquireLockAtDpc( &EventLock );
  486. ++eventsDelivered;
  487. }
  488. } else if ( FsContext->KmodeEventCallback ) {
  489. //
  490. // Deliver all queued events.
  491. //
  492. while ( !IsListEmpty( &FsContext->EventList ) ) {
  493. Entry = RemoveHeadList( &FsContext->EventList );
  494. Event = CONTAINING_RECORD( Entry, CLUSNET_EVENT_ENTRY, Linkage );
  495. CnReleaseLockFromDpc( &EventLock );
  496. CnReleaseCancelSpinLock(OldIrql);
  497. CnTrace(
  498. EVENT_DETAIL, IssueEventKmodeCallback,
  499. "[CN] CnpDeliverEvents: invoking kernel-mode callback %p "
  500. "for Event Type %x NodeId %x NetworkId %x.",
  501. FsContext->KmodeEventCallback,
  502. Event->EventData.EventType,
  503. Event->EventData.NodeId,
  504. Event->EventData.NetworkId
  505. );
  506. (*FsContext->KmodeEventCallback)(
  507. Event->EventData.EventType,
  508. Event->EventData.NodeId,
  509. Event->EventData.NetworkId
  510. );
  511. ExFreeToNPagedLookasideList( EventLookasideList, Entry );
  512. CnAcquireCancelSpinLock ( &OldIrql );
  513. CnAcquireLockAtDpc( &EventLock );
  514. ++eventsDelivered;
  515. }
  516. }
  517. NextFsHandleEntry = FsContext->Linkage.Flink;
  518. }
  519. CnTrace(
  520. EVENT_DETAIL, DeliverEventsStopIteration,
  521. "[CN] CnpDeliverEvents: file handle list iteration complete."
  522. );
  523. } while ( CnStopEventDelivery() );
  524. CnReleaseLockFromDpc( &EventLock );
  525. CnReleaseCancelSpinLock( OldIrql );
  526. CnTrace(
  527. EVENT_DETAIL, DeliverEventsSummary,
  528. "[CN] CnpDeliverEvents: delivered %u events.",
  529. eventsDelivered
  530. );
  531. IF_CNDBG( CN_DEBUG_EVENT ) {
  532. CNPRINT(("[CN] CnDeliverEvents: events delivered %d\n", eventsDelivered ));
  533. }
  534. CnVerifyCpuLockMask(
  535. 0, // Required
  536. 0xFFFFFFFF, // Forbidden
  537. 0 // Maximum
  538. );
  539. } // CnpDeliverEvents
  540. NTSTATUS
  541. CnIssueEvent(
  542. CLUSNET_EVENT_TYPE EventType,
  543. CL_NODE_ID NodeId OPTIONAL,
  544. CL_NETWORK_ID NetworkId OPTIONAL
  545. )
  546. /*++
  547. Routine Description:
  548. Post an event to each file object's event queue that is interested in this
  549. type of event. Schedule a work queue item to run down the file objs to
  550. deliver the events. We can't complete the IRPs directly since we might
  551. violate the locking order inside clusnet.
  552. Arguments:
  553. EventType - type of event
  554. NodeId - optional node Id associated with event
  555. NetworkId - optional network Id associated with event
  556. Return Value:
  557. STATUS_SUCCESS
  558. STATUS_INSUFFICIENT_RESOURCES
  559. --*/
  560. {
  561. CN_IRQL OldIrql;
  562. PCLUSNET_EVENT_ENTRY Event;
  563. PCLUSNET_EVENT_RESPONSE UserData;
  564. PCN_FSCONTEXT FsContext;
  565. PLIST_ENTRY NextFsHandleEntry;
  566. PIRP EventIrp;
  567. PIO_WORKITEM EventWorkItem;
  568. BOOLEAN startupWorkerThread = FALSE;
  569. BOOLEAN eventHandled = FALSE;
  570. CnVerifyCpuLockMask(
  571. 0, // Required
  572. CNP_EVENT_LOCK, // Forbidden
  573. CNP_EVENT_LOCK_PRECEEDING // Maximum
  574. );
  575. CnTrace(
  576. EVENT_DETAIL, CnIssueEvent,
  577. "[CN] CnIssueEvent: Event Type %x, NodeId %x, NetworkId %x.",
  578. EventType, NodeId, NetworkId
  579. );
  580. IF_CNDBG( CN_DEBUG_EVENT ) {
  581. CNPRINT(( "[CN] CnIssueEvent: Event type 0x%lx Node: %d Network: %d\n",
  582. EventType, NodeId, NetworkId ));
  583. }
  584. //
  585. // grab the event lock and loop through the file handles, looking to see
  586. // which ones are interested in this event
  587. //
  588. CnAcquireLock( &EventLock, &OldIrql );
  589. //
  590. // Indicate that a thread is iterating through the EventFileHandles
  591. // list to deliver events (kernel-mode callback counts as a delivery).
  592. //
  593. CnTrace(
  594. EVENT_DETAIL, IssueEventStartIteration,
  595. "[CN] CnIssueEvent: starting file handles list iteration."
  596. );
  597. CnStartEventDelivery();
  598. NextFsHandleEntry = EventFileHandles.Flink;
  599. if ( NextFsHandleEntry == &EventFileHandles ) {
  600. IF_CNDBG( CN_DEBUG_EVENT ) {
  601. CNPRINT(( "[CN] CnIssueEvent: No file objs on event file handle list\n"));
  602. }
  603. }
  604. while ( NextFsHandleEntry != &EventFileHandles ) {
  605. FsContext = CONTAINING_RECORD( NextFsHandleEntry, CN_FSCONTEXT, Linkage );
  606. if ( FsContext->EventMask & EventType ) {
  607. //
  608. // If this FsContext has a kernel-mode callback and the event type
  609. // is one which would cause clusdisk reservations to be stopped
  610. // (PoisonPacketReceived or Halt), fast-track it rather than
  611. // queueing to a system worker thread. Ordering is not important,
  612. // since a Halt is imminent.
  613. //
  614. if (FsContext->KmodeEventCallback &&
  615. (EventType & CN_EVENT_TYPE_KMODE_FASTTRACK)) {
  616. CnReleaseLock( &EventLock, OldIrql );
  617. CnTrace(
  618. EVENT_DETAIL, FastTrackEventKmodeCallback,
  619. "[CN] CnpDeliverEvents: fast-tracking kernel-mode callback %p "
  620. "for Event Type %x NodeId %x NetworkId %x.",
  621. FsContext->KmodeEventCallback,
  622. EventType,
  623. NodeId,
  624. NetworkId
  625. );
  626. (*FsContext->KmodeEventCallback)(
  627. EventType,
  628. NodeId,
  629. NetworkId
  630. );
  631. CnAcquireLock( &EventLock, &OldIrql );
  632. } else {
  633. //
  634. // post a copy of this event on the handle's list.
  635. //
  636. Event = ExAllocateFromNPagedLookasideList( EventLookasideList );
  637. if ( Event == NULL ) {
  638. IF_CNDBG( CN_DEBUG_EVENT ) {
  639. CNPRINT(( "[CN] CnIssueEvent: No more Event buffers!\n"));
  640. }
  641. CnStopEventDelivery();
  642. CnReleaseLock( &EventLock, OldIrql );
  643. return STATUS_INSUFFICIENT_RESOURCES;
  644. }
  645. Event->EventData.Epoch = EventEpoch;
  646. Event->EventData.EventType = EventType;
  647. Event->EventData.NodeId = NodeId;
  648. Event->EventData.NetworkId = NetworkId;
  649. InsertTailList( &FsContext->EventList, &Event->Linkage );
  650. //
  651. // run the worker thread if there is an IRP already queued or
  652. // if there is a kernel-mode callback
  653. //
  654. if ( FsContext->EventIrp || FsContext->KmodeEventCallback ) {
  655. startupWorkerThread = TRUE;
  656. }
  657. CnTrace(
  658. EVENT_DETAIL, IssueEventQueued,
  659. "[CN] CnIssueEvent: queued event to FsContext %p, "
  660. "Event Type %x NodeId %x NetworkId %x, "
  661. "kernel-mode callback %p, event IRP %p ",
  662. FsContext,
  663. EventType,
  664. NodeId,
  665. NetworkId,
  666. FsContext->KmodeEventCallback,
  667. FsContext->EventIrp
  668. );
  669. }
  670. eventHandled = TRUE;
  671. }
  672. NextFsHandleEntry = FsContext->Linkage.Flink;
  673. }
  674. //
  675. // Indicate that iteration through the EventFileHandles list
  676. // is complete.
  677. //
  678. CnTrace(
  679. EVENT_DETAIL, IssueEventStopIteration,
  680. "[CN] CnIssueEvent: file handles list iteration complete."
  681. );
  682. startupWorkerThread |= CnStopEventDelivery();
  683. CnReleaseLock( &EventLock, OldIrql );
  684. if ( startupWorkerThread ) {
  685. //
  686. // schedule deliver event routine to run
  687. //
  688. CnTrace(
  689. EVENT_DETAIL, IssueEventScheduleWorker,
  690. "[CN] CnIssueEvent: scheduling worker thread."
  691. );
  692. EventWorkItem = IoAllocateWorkItem( CnDeviceObject );
  693. if ( EventWorkItem != NULL ) {
  694. IoQueueWorkItem(
  695. EventWorkItem,
  696. CnpDeliverEvents,
  697. DelayedWorkQueue,
  698. EventWorkItem
  699. );
  700. }
  701. }
  702. if ( !eventHandled ) {
  703. CnTrace(
  704. EVENT_DETAIL, IssueEventNoConsumers,
  705. "[CN] CnIssueEvent: No consumers for Event Type %x Node %u Network %u.",
  706. EventType, NodeId, NetworkId
  707. );
  708. IF_CNDBG( CN_DEBUG_EVENT ) {
  709. CNPRINT(( "[CN] CnIssueEvent: No consumers for Event type 0x%lx Node: %d Network: %d\n",
  710. EventType, NodeId, NetworkId ));
  711. }
  712. }
  713. CnVerifyCpuLockMask(
  714. 0, // Required
  715. CNP_EVENT_LOCK, // Forbidden
  716. CNP_EVENT_LOCK_PRECEEDING // Maximum
  717. );
  718. return STATUS_SUCCESS;
  719. } // CnIssueEvent
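/*
 Illustrative sketch (not part of the original source): how a poster
 elsewhere in clusnet might raise an event through this facility. The
 surrounding routine and the ClusnetEventNetInterfaceFailed value are
 hypothetical; per the entry lock checks above, the caller must not already
 hold the EventLock (or any lock that must be acquired after it).

     VOID
     CnpExampleNetworkMonitor(
         IN CL_NODE_ID NodeId,
         IN CL_NETWORK_ID NetworkId
         )
     {
         NTSTATUS status;

         //
         // Queue the event to every interested consumer; IRP completion and
         // (non-fast-track) kernel-mode callbacks run later on a system
         // worker thread via CnpDeliverEvents.
         //
         status = CnIssueEvent( ClusnetEventNetInterfaceFailed,  // hypothetical event type
                                NodeId,
                                NetworkId );

         if ( status == STATUS_INSUFFICIENT_RESOURCES ) {
             //
             // lookaside allocation failed; the event was dropped for at
             // least one consumer.
             //
         }
     }
*/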
  720. VOID
  721. CnEventIrpCancel(
  722. PDEVICE_OBJECT DeviceObject,
  723. PIRP Irp
  724. )
  725. /*++
  726. Routine Description:
  727. Cancellation handler for CnGetNextEvent requests.
  728. Return Value:
  729. None
  730. Notes:
  731. Called with cancel spinlock held.
  732. Returns with cancel spinlock released.
  733. --*/
  734. {
  735. PIO_STACK_LOCATION IrpSp = IoGetCurrentIrpStackLocation(Irp);
  736. PFILE_OBJECT fileObject;
  737. CN_IRQL cancelIrql = Irp->CancelIrql;
  738. PCN_FSCONTEXT FsContext = (PCN_FSCONTEXT) IrpSp->FileObject->FsContext;
  739. CnMarkIoCancelLockAcquired();
  740. fileObject = CnBeginCancelRoutine(Irp);
  741. CnAcquireLockAtDpc( &EventLock );
  742. CnReleaseCancelSpinLock(DISPATCH_LEVEL);
  743. CnTrace(
  744. EVENT_DETAIL, EventIrpCancel,
  745. "[CN] Cancelling event IRP %p.",
  746. Irp
  747. );
  748. IF_CNDBG( CN_DEBUG_EVENT ) {
  749. CNPRINT(("[CN] CnEventIrpCancel: canceling %p\n", Irp ));
  750. }
  751. CnAssert(DeviceObject == CnDeviceObject);
  752. //
  753. // We can only complete the irp if it really belongs to the Event code. The
  754. // IRP could have been completed before we acquired the Event lock.
  755. //
  756. if ( FsContext->EventIrp == Irp ) {
  757. FsContext->EventIrp = NULL;
  758. CnReleaseLock( &EventLock, cancelIrql );
  759. CnAcquireCancelSpinLock(&(Irp->CancelIrql));
  760. CnEndCancelRoutine(fileObject);
  761. CnCompletePendingRequest(Irp, STATUS_CANCELLED, 0);
  762. return;
  763. }
  764. CnReleaseLock( &EventLock, cancelIrql );
  765. CnAcquireCancelSpinLock( &cancelIrql );
  766. CnEndCancelRoutine(fileObject);
  767. CnReleaseCancelSpinLock(cancelIrql);
  768. CnVerifyCpuLockMask(
  769. 0, // Required
  770. 0xFFFFFFFF, // Forbidden
  771. 0 // Maximum
  772. );
  773. return;
  774. } // CnEventIrpCancel
  775. NTSTATUS
  776. CnGetNextEvent(
  777. IN PIRP Irp,
  778. IN PIO_STACK_LOCATION IrpSp
  779. )
  780. /*++
  781. Routine Description:
  782. This routine obtains the next event from the event list for
  783. this file handle. If an event is queued, it completes this IRP
  784. with the event data. Otherwise, the IRP is pended, waiting for
  785. an event to be posted.
  786. Return Value:
  787. STATUS_PENDING if IRP successfully captured; STATUS_SUCCESS if a queued event was delivered
  788. STATUS_UNSUCCESSFUL if an event IRP is already pending (no room for this one)
  789. Notes:
  790. Returns with cancel spinlock released.
  791. --*/
  792. {
  793. NTSTATUS Status;
  794. KIRQL OldIrql;
  795. PLIST_ENTRY Entry;
  796. PCLUSNET_EVENT_ENTRY Event;
  797. PCN_FSCONTEXT FsContext = IrpSp->FileObject->FsContext;
  798. PCLUSNET_EVENT_RESPONSE UserEventData = (PCLUSNET_EVENT_RESPONSE)
  799. Irp->AssociatedIrp.SystemBuffer;
  800. BOOLEAN DeliveryInProgress = FALSE;
  801. CnVerifyCpuLockMask(
  802. 0, // Required
  803. 0xFFFFFFFF, // Forbidden
  804. 0 // Maximum
  805. );
  806. //
  807. // acquire the IO cancel lock, then our event lock so we're synch'ed
  808. // with regards to the state of the IRP and the event list
  809. //
  810. CnAcquireCancelSpinLock( &OldIrql );
  811. CnAcquireLockAtDpc( &EventLock );
  812. //
  813. // check first if we have an event queued. if we have an event queued
  814. // and there is no delivery in progress we can complete the IRP now.
  815. // otherwise, we need to pend the IRP to avoid out-of-order delivery.
  816. //
  817. if ( !IsListEmpty( &FsContext->EventList )
  818. && !(DeliveryInProgress = CnIsEventDeliveryInProgress())
  819. ) {
  820. //
  821. // complete the IRP now
  822. //
  823. CnReleaseCancelSpinLock(DISPATCH_LEVEL);
  824. Entry = RemoveHeadList( &FsContext->EventList );
  825. CnReleaseLock( &EventLock, OldIrql );
  826. Event = CONTAINING_RECORD( Entry, CLUSNET_EVENT_ENTRY, Linkage );
  827. *UserEventData = Event->EventData;
  828. CnTrace(
  829. EVENT_DETAIL, GetNextEventCompletingIrp,
  830. "[CN] Completing IRP %p to deliver event: "
  831. "Epoch %u, Type %x, NodeId %x, NetworkId %x.",
  832. Irp,
  833. UserEventData->Epoch,
  834. UserEventData->EventType,
  835. UserEventData->NodeId,
  836. UserEventData->NetworkId
  837. );
  838. IF_CNDBG( CN_DEBUG_EVENT ) {
  839. CNPRINT(("[CN] CnGetNextEvent: completing IRP %p with event %d\n",
  840. Irp, Event->EventData.EventType));
  841. }
  842. ExFreeToNPagedLookasideList( EventLookasideList, Entry );
  843. Irp->IoStatus.Information = sizeof(CLUSNET_EVENT_RESPONSE);
  844. Status = STATUS_SUCCESS;
  845. } else {
  846. //
  847. // make sure we have room for the new IRP
  848. //
  849. if ( FsContext->EventIrp ) {
  850. CnReleaseCancelSpinLock( DISPATCH_LEVEL );
  851. CnTrace(
  852. EVENT_DETAIL, GetNextIrpAlreadyPending,
  853. "[CN] CnGetNextEvent: IRP %p is already pending.",
  854. FsContext->EventIrp
  855. );
  856. IF_CNDBG( CN_DEBUG_EVENT ) {
  857. CNPRINT(("[CN] CnGetNextEvent: IRP %p is already pending\n",
  858. FsContext->EventIrp));
  859. }
  860. Status = STATUS_UNSUCCESSFUL;
  861. } else {
  862. Status = CnMarkRequestPending( Irp, IrpSp, CnEventIrpCancel );
  863. CnAssert( NT_SUCCESS( Status ));
  864. CnReleaseCancelSpinLock( DISPATCH_LEVEL );
  865. if ( NT_SUCCESS( Status )) {
  866. //
  867. // remember this IRP in our open file context block
  868. //
  869. FsContext->EventIrp = Irp;
  870. CnTrace(
  871. EVENT_DETAIL, GetNextEventDeliveryInProgress,
  872. "[CN] CnGetNextEvent: pending IRP %p, "
  873. "delivery in progress: %!bool!",
  874. Irp, DeliveryInProgress
  875. );
  876. IF_CNDBG( CN_DEBUG_EVENT ) {
  877. CNPRINT(("[CN] CnGetNextEvent: pending IRP %p\n", Irp));
  878. }
  879. Status = STATUS_PENDING;
  880. }
  881. }
  882. CnReleaseLock(&EventLock, OldIrql);
  883. }
  884. CnVerifyCpuLockMask(
  885. 0, // Required
  886. 0xFFFFFFFF, // Forbidden
  887. 0 // Maximum
  888. );
  889. return Status;
  890. } // CnGetNextEvent
  891. /* end event.c */