Source code of Windows XP (NT5)
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1036 lines
25 KiB

  1. /*++
  2. Copyright (c) Microsoft Corporation
  3. Module Name:
  4. logger.c
  5. Abstract:
  6. Most of the code to manage logs. The interlocking is fairly simple,
  7. but worth a note. Each log structure has a kernel resource. This
  8. is used to protect the volatile structures. In addition, some
  9. of the things in a log structure are examined by the
  10. DPC match code. Any such values should be modified only
  11. with the log lock held. Note that the DPC code only
  12. modifies the UseCount of a log structure, but it relies
  13. on some of the flags and the log size values to know whether
  14. the log is valid and whether there is room for more data.
  15. There is also a paged counterpart for each log, but it
  16. is very simple and exists only to reference the real log
  17. structure.
  18. Author:
  19. Revision History:
  20. --*/
  21. /*----------------------------------------------------------------------------
  22. A note on the interlocking, as of 24-Feb-1997.
  23. There are three important locks: the FilterListResourceLock which is
  24. a resource, the g_filter.ifListLock, which is a spin lock but acts
  25. like a resource, and the log lock of each log each of which is a spin
  26. lock. As noted in ioctl.c, the first two locks are used to serialize
  27. operations among APIs and DPCs respectively. The log lock is also used
28. to serialize DPC operations and is used as a finer-grained interlock. Aside
29. from granularity it is required to serialize on an MP system since there can be a DPC
30. callout on each processor!
  31. The correct order is always to lock the FilterListResourceLock first, then
  32. the g_filter.ifListLock and finally the appropriate log lock. It is never
  33. correct to lock more than one log lock since no ordering among logs exists (if
  34. you need this you will have to invent it). The log lock is always an exclusive
  35. lock -- that is it does not act like a resource.
  36. The log also has a RESOURCE. This is used to protect the mapping. Note that
  37. if Apc is enabled, this does not prevent a conflict between the Apc routine
  38. and the base thread code. There is a unique test in the Apc routine to detect
  39. and recover from this.
  40. ----------------------------------------------------------------------------*/
  41. #include "globals.h"
  42. #include <align.h>
  43. LIST_ENTRY g_pLogs;
  44. DWORD g_dwLogClump;
  45. extern POBJECT_TYPE *ExEventObjectType;
  46. VOID
  47. PfLogApc(
  48. IN PKAPC Apc,
  49. IN PKNORMAL_ROUTINE *NormalRoutine,
  50. IN PVOID *NormalContext,
  51. IN PVOID *SystemArgument1,
  52. IN PVOID *SystemArgument2
  53. );
  54. VOID
  55. RemoveLogFromInterfaces(PPFLOGINTERFACE pLog);
  56. NTSTATUS
  57. DoAMapping(
  58. PBYTE pbVA,
  59. DWORD dwSize,
  60. PMDL * pMdl,
  61. PBYTE * pbKernelVA);
  62. VOID
  63. PfCancelIrp(
  64. IN PDEVICE_OBJECT DeviceObject,
  65. IN PIRP Irp
  66. );
  67. PPFPAGEDLOG
  68. FindLogById(PPFFCB Fcb, PFLOGGER LogId);
  69. #pragma alloc_text(PAGED, FindLogById)
  70. #pragma alloc_text(PAGED, ReferenceLogByHandleId)
  71. VOID
  72. SetCancelOnIrp(PIRP Irp,
  73. PPFLOGINTERFACE pLog)
  74. {
  75. IoAcquireCancelSpinLock(&Irp->CancelIrql);
  76. #if DOLOGAPC //according to arnold miller it is broken
  77. Irp->IoStatus.Status = pLog;
  78. #endif
  79. IoSetCancelRoutine(Irp, PfCancelIrp);
  80. IoReleaseCancelSpinLock(Irp->CancelIrql);
  81. }
  82. VOID
  83. InitLogs()
  84. {
  85. InitializeListHead(&g_pLogs);
  86. //
  87. // It's possible to get this from the registry
  88. //
  89. g_dwLogClump = MAX_NOMINAL_LOG_MAP;
  90. }
  91. VOID
  92. AddRefToLog(PPFLOGINTERFACE pLog)
  93. {
  94. InterlockedIncrement(&pLog->UseCount);
  95. }
  96. PPFPAGEDLOG
  97. FindLogById(PPFFCB Fcb, PFLOGGER LogId)
  98. {
  99. PPFPAGEDLOG pPage;
  100. PAGED_CODE();
  101. for(pPage = (PPFPAGEDLOG)Fcb->leLogs.Flink;
  102. (PLIST_ENTRY)pPage != &Fcb->leLogs;
  103. pPage = (PPFPAGEDLOG)pPage->Next.Flink)
  104. {
  105. if((PFLOGGER)pPage == LogId)
  106. {
  107. return(pPage);
  108. }
  109. }
  110. return(NULL);
  111. }
  112. NTSTATUS
  113. ReferenceLogByHandleId(PFLOGGER LogId,
  114. PPFFCB Fcb,
  115. PPFLOGINTERFACE * ppLog)
  116. /*++
  117. Routine Description:
  118. Given a log ID, find the log entry, reference it, and return
  119. a pointer to the underlying log structure.
  120. --*/
  121. {
  122. PPFPAGEDLOG pPage;
  123. NTSTATUS Status = STATUS_INVALID_PARAMETER;
  124. PAGED_CODE();
  125. pPage = FindLogById(Fcb, LogId);
  126. if(pPage)
  127. {
  128. *ppLog = pPage->pLog;
  129. //
  130. // don't need the write lock since the reference
  131. // from the FCB is good enough
  132. //
  133. InterlockedIncrement(&pPage->pLog->UseCount);
  134. Status = STATUS_SUCCESS;
  135. }
  136. return(Status);
  137. }
  138. NTSTATUS
  139. PfDeleteLog(PPFDELETELOG pfDel,
  140. PPFFCB Fcb)
  141. /*++
  142. Routine Description:
  143. Called when the log is deleted by the process either
  144. explicity or by closing the handle. The paged log
  145. structure is taken care of by the caller.
  146. --*/
  147. {
  148. KIRQL kIrql;
  149. PPFPAGEDLOG pPage;
  150. PPFLOGINTERFACE pLog;
  151. pPage = FindLogById(Fcb, pfDel->pfLogId);
  152. if(!pPage)
  153. {
  154. return(STATUS_INVALID_PARAMETER);
  155. }
  156. pLog = pPage->pLog;
  157. //
  158. // grab the interlocks
  159. //
  160. KeEnterCriticalRegion();
  161. ExAcquireResourceExclusiveLite(&pLog->Resource, TRUE);
  162. kIrql = LockLog(pLog);
  163. pLog->dwFlags |= LOG_BADMEM; // shut off logging
  164. UnLockLog(pLog, kIrql);
  165. #if DOLOGAPC
  166. if(pLog->Irp)
  167. {
  168. pLog->Irp->IoStatus.Status = STATUS_SUCCESS;
  169. IoCompleteRequest(pLog->Irp, IO_NO_INCREMENT);
  170. DereferenceLog(pLog);
  171. pLog->Irp = NULL;
  172. }
  173. #endif
  174. //
  175. // if a current mapping, unmap it
  176. //
  177. if(pLog->Mdl)
  178. {
  179. MmUnlockPages(pLog->Mdl);
  180. IoFreeMdl(pLog->Mdl);
  181. pLog->Mdl = 0;
  182. }
  183. //
  184. // Need to remove it from the interfaces. Do this with
  185. // the resource unlocked. Since the FCB still has the log referenced,
  186. // and the FCB is locked, the log should not go away. The only
  187. // compelling reason for the resource is to interlock against APCs and
  188. // setting BADMEM should have taken care of that.
  189. //
  190. ExReleaseResourceLite(&pLog->Resource);
  191. KeLeaveCriticalRegion();
  192. RemoveLogFromInterfaces(pLog);
  193. //
  194. // free the paged log structure
  195. //
  196. RemoveEntryList(&pPage->Next);
  197. ExFreePool(pPage);
  198. //
  199. // Dereference the log structure. It might or might not
  200. // go away.
  201. //
  202. DereferenceLog(pLog);
  203. return(STATUS_SUCCESS);
  204. }
  205. VOID
  206. DereferenceLog(PPFLOGINTERFACE pLog)
  207. /*++
  208. Routine Description:
  209. Derefence the log and if the reference count goes to zero,
  210. free the log.
  211. --*/
  212. {
  213. BOOL fFreed;
  214. LOCK_STATE LockState;
  215. //
  216. // grab the resource to prevent confusion with cancelled
  217. // Irps.
  218. //
  219. fFreed = InterlockedDecrement(&pLog->UseCount) == 0;
  220. if(fFreed)
  221. {
  222. AcquireWriteLock(&g_filters.ifListLock,&LockState);
  223. RemoveEntryList(&pLog->NextLog);
  224. ReleaseWriteLock(&g_filters.ifListLock,&LockState);
  225. #if DOLOGAPC
  226. ASSERT(!pLog->Irp && !pLog->Mdl);
  227. #endif
  228. if(pLog->Event)
  229. {
  230. ObDereferenceObject(pLog->Event);
  231. }
  232. ExDeleteResourceLite( &pLog->Resource);
  233. ExFreePool(pLog);
  234. }
  235. }
  236. NTSTATUS
  237. PfLogCreateLog(PPFLOG pLog,
  238. PPFFCB Fcb,
  239. PIRP Irp)
  240. /*++
  241. Routine Description:
  242. Create a new log entry.
  243. --*/
  244. {
  245. PPFLOGINTERFACE pfLog;
  246. KPROCESSOR_MODE Mode;
  247. NTSTATUS Status;
  248. LOCK_STATE LockState;
  249. DWORD dwBytesMapped;
  250. PBYTE pbKernelAddress;
  251. PPFPAGEDLOG pPage;
  252. PAGED_CODE();
  253. pPage = (PPFPAGEDLOG)ExAllocatePoolWithTag(
  254. PagedPool,
  255. sizeof(*pPage),
  256. 'pflg');
  257. if(!pPage)
  258. {
  259. return(STATUS_NO_MEMORY);
  260. }
  261. pfLog = (PPFLOGINTERFACE)ExAllocatePoolWithTag(
  262. NonPagedPool,
  263. sizeof(*pfLog),
  264. 'pflg');
  265. if(!pfLog)
  266. {
  267. ExFreePool(pPage);
  268. return(STATUS_NO_MEMORY);
  269. }
  270. RtlZeroMemory(pfLog, sizeof(*pfLog));
  271. ExInitializeResourceLite(&pfLog->Resource);
  272. if(pLog->hEvent)
  273. {
  274. Mode = ExGetPreviousMode();
  275. Status = ObReferenceObjectByHandle(
  276. pLog->hEvent,
  277. EVENT_MODIFY_STATE,
  278. *ExEventObjectType,
  279. Mode,
  280. (PVOID *)&pfLog->Event,
  281. NULL);
  282. if(!NT_SUCCESS(Status))
  283. {
  284. goto Bad;
  285. }
  286. }
  287. pLog->pfLogId = pfLog->pfLogId = (PFLOGGER)pPage;
  288. //
  289. // Copy the user addresses. Note we don't probe it because this is
  290. // too expensive. The probing is done when we remap the buffer,
  291. // either now or in the APC.
  292. //
  293. pfLog->pUserAddress = 0;
  294. pfLog->dwTotalSize = 0;
  295. pfLog->dwPastMapped = 0;
  296. pfLog->dwMapOffset = 0;
  297. pfLog->dwMapCount = 0;
  298. if(pLog->dwFlags & LOG_LOG_ABSORB)
  299. {
  300. pfLog->dwMapWindowSize = MAX_ABSORB_LOG_MAP;
  301. }
  302. else
  303. {
  304. pfLog->dwMapWindowSize = g_dwLogClump;
  305. }
  306. pfLog->dwMapWindowSize2 = pfLog->dwMapWindowSize * 2;
  307. pfLog->dwMapWindowSizeFloor = pfLog->dwMapWindowSize / 2;
  308. pfLog->dwMapCount = 0;
  309. //
  310. // Mapped. Note we don't save room for the header since
  311. // that will be returned when the caller calls to release
  312. // the buffer.
  313. //
  314. pfLog->UseCount = 1;
  315. //
  316. // Add it to the list of Logs.
  317. //
  318. KeInitializeSpinLock(&pfLog->LogLock);
  319. AcquireWriteLock(&g_filters.ifListLock,&LockState);
  320. InsertTailList(&g_pLogs, &pfLog->NextLog);
  321. ReleaseWriteLock(&g_filters.ifListLock,&LockState);
  322. pPage->pLog = pfLog;
  323. InsertTailList(&Fcb->leLogs, &pPage->Next);
  324. return(STATUS_SUCCESS);
  325. //
  326. // if here, something went awry. Clean up and return the status
  327. //
  328. Bad:
  329. ExDeleteResourceLite(&pfLog->Resource);
  330. if(pfLog->Event)
  331. {
  332. ObDereferenceObject(pfLog->Event);
  333. }
  334. ExFreePool(pPage);
  335. ExFreePool(pfLog);
  336. return(Status);
  337. }
  338. NTSTATUS
  339. PfLogSetBuffer( PPFSETBUFFER pSet, PPFFCB Fcb, PIRP Irp )
  340. /*++
  341. Routine Description:
  342. Set a new buffer for the log. Return use count of the old buffer
  343. as well.
  344. --*/
  345. {
  346. PMDL Mdl;
  347. PBYTE pbKernelAddress;
  348. DWORD dwBytesMapped, dwOldUsed, dwOldLost;
  349. NTSTATUS Status;
  350. PBYTE pbVA = pSet->pbBaseOfLog;
  351. DWORD dwSize = pSet->dwSize;
  352. DWORD dwSize1 = pSet->dwEntriesThreshold;
  353. DWORD dwThreshold = pSet->dwSizeThreshold;
  354. DWORD dwLoggedEntries;
  355. KIRQL kIrql;
  356. PPFLOGINTERFACE pLog;
  357. PPFPAGEDLOG pPage;
  358. PPFSETBUFFER pLogOut = Irp->UserBuffer;
  359. PBYTE pbUserAdd;
  360. if(!COUNT_IS_ALIGNED(dwSize, ALIGN_WORST))
  361. {
  362. //
  363. // not quadword aligned. tsk tsk.
  364. //
  365. return(STATUS_MAPPED_ALIGNMENT);
  366. }
  367. if(!(pPage = FindLogById(Fcb, pSet->pfLogId)))
  368. {
  369. return(STATUS_INVALID_PARAMETER);
  370. }
  371. pLog = pPage->pLog;
  372. //
  373. // Acquire the resource that protects the mapping.
  374. //
  375. KeEnterCriticalRegion();
  376. ExAcquireResourceExclusiveLite(&pLog->Resource, TRUE);
  377. //
  378. // Now map the first segment.
  379. //
  380. #if DOLOGAPC
  381. if(dwSize < pLog->dwMapWindowSize2)
  382. {
  383. dwBytesMapped = dwSize;
  384. }
  385. else
  386. {
  387. dwBytesMapped = pLog->dwMapWindowSize;
  388. }
  389. #else
  390. dwBytesMapped = dwSize;
  391. #endif
  392. if(dwBytesMapped)
  393. {
  394. Status = DoAMapping(
  395. pbVA,
  396. dwBytesMapped,
  397. &Mdl,
  398. &pbKernelAddress);
  399. }
  400. else
  401. {
  402. Status = STATUS_SUCCESS;
  403. pbKernelAddress = 0;
  404. pbVA = NULL;
  405. Mdl = NULL;
  406. }
  407. if(NT_SUCCESS(Status))
  408. {
  409. PMDL OldMdl;
  410. //
  411. // Made the mapping. Now swap it in.
  412. //
  413. #if DOLOGAPC
  414. //
  415. // init the APC routine.
  416. //
  417. KeInitializeApc(
  418. &pLog->Apc,
  419. &(PsGetCurrentThread()->Tcb),
  420. CurrentApcEnvironment,
  421. PfLogApc,
  422. NULL,
  423. NULL,
  424. 0,
  425. NULL);
  426. pLog->ApcInited = 1;
  427. if(pLog->Irp)
  428. {
  429. pLog->Irp->IoStatus.Status = STATUS_SUCCESS;
  430. IoCompleteRequest(pLog->Irp, IO_NO_INCREMENT);
  431. DereferenceLog(pLog);
  432. }
  433. if(dwBytesMapped)
  434. {
  435. //
  436. // This appears to be a bug as we have the log
  437. // resource and will now get the Irp cancel lock. Our
  438. // cancel routine does this in the other order, giving the
  439. // appearance of a race to a deadlock, but the cancel routine
  440. // won't get called until we own all of the locks so
  441. // we will not be blocked.
  442. //
  443. AddRefToLog(pLog);
  444. SetCancelOnIrp(Irp, pLog);
  445. pLog->Irp = Irp;
  446. }
  447. else
  448. {
  449. pLog->Irp = 0;
  450. }
  451. #endif
  452. pbUserAdd = pLog->pUserAddress;
  453. //
  454. // interlock against the stack's DPC callout
  455. // and "swap" the logs
  456. //
  457. kIrql = LockLog(pLog);
  458. dwOldUsed = pLog->dwPastMapped + pLog->dwMapOffset;
  459. dwOldLost = pLog->dwLostEntries;
  460. dwLoggedEntries = pLog->dwLoggedEntries;
  461. pLog->dwLoggedEntries = 0;
  462. pLog->dwLostEntries = 0;
  463. pLog->dwMapCount = dwBytesMapped;
  464. pLog->dwPastMapped = 0;
  465. pLog->dwFlags &= ~(LOG_BADMEM | LOG_OUTMEM | LOG_CANTMAP);
  466. pLog->pUserAddress = pbVA;
  467. pLog->dwTotalSize = dwSize;
  468. OldMdl = pLog->Mdl;
  469. pLog->pCurrentMapPointer = pbKernelAddress;
  470. pLog->dwMapOffset = 0;
  471. pLog->Mdl = Mdl;
  472. pLog->dwSignalThreshold = dwThreshold;
  473. pLog->dwEntriesThreshold = dwSize1;
  474. UnLockLog(pLog, kIrql);
  475. if(OldMdl)
  476. {
  477. MmUnlockPages(OldMdl);
  478. IoFreeMdl(OldMdl);
  479. }
  480. pSet->dwSize = pLogOut->dwSize = dwOldUsed;
  481. pSet->pbPreviousAddress = pLogOut->pbPreviousAddress = pbUserAdd;
  482. pSet->dwLostEntries = pLogOut->dwLostEntries = dwOldLost;
  483. pSet->dwLoggedEntries = pLogOut ->dwLoggedEntries = dwLoggedEntries;
  484. }
  485. ExReleaseResourceLite(&pLog->Resource);
  486. KeLeaveCriticalRegion();
  487. if(dwBytesMapped && NT_SUCCESS(Status))
  488. {
  489. #if LOGAPC
  490. Status = STATUS_PENDING;
  491. #endif
  492. }
  493. return(Status);
  494. }
  495. NTSTATUS
  496. DoAMapping(
  497. PBYTE pbVA,
  498. DWORD dwSize,
  499. PMDL * ppMdl,
  500. PBYTE * pbKernelVA)
  501. /*++
  502. Routine Description:
  503. Map a user buffer into kernel space and lock it.
  504. This is called when a log is created as well as
  505. when the mapped portion of a log needs to be moved.
  506. The log has a sliding mapped windows so that the
  507. actual buffer can be large but the system resoures
  508. committed to it modest. The added cost is in sliding
  509. the windows as needed.
  510. The log structure, not known to this routine, should be
  511. appropriately protected.
  512. Returns: various status conditions
  513. --*/
  514. {
  515. NTSTATUS Status = STATUS_SUCCESS;
  516. *ppMdl = 0;
  517. try
  518. {
  519. *ppMdl = IoAllocateMdl(
  520. (PVOID)pbVA,
  521. dwSize,
  522. FALSE,
  523. TRUE,
  524. NULL);
  525. if(*ppMdl)
  526. {
  527. //
  528. // Got a Mdl. Now lock the pages. If this fails, the exception
  529. // takes us out of this block.
  530. //
  531. MmProbeAndLockPages(*ppMdl,
  532. UserMode,
  533. IoWriteAccess);
  534. //
  535. // all locked. Now map the locked pages to a kernel
  536. // address. If it fails, unlock the pages.
  537. //
  538. *pbKernelVA = MmGetSystemAddressForMdlSafe(*ppMdl, HighPagePriority);
  539. if (*pbKernelVA == NULL) {
  540. Status = STATUS_NO_MEMORY;
  541. MmUnlockPages(*ppMdl);
  542. }
  543. }
  544. }
  545. except (EXCEPTION_EXECUTE_HANDLER)
  546. {
  547. //
  548. // This covers IoAllocateMdl and MmProbeAndLockPages
  549. // failing.
  550. //
  551. Status = GetExceptionCode();
  552. }
  553. if(!NT_SUCCESS(Status))
  554. {
  555. if(*ppMdl)
  556. {
  557. IoFreeMdl(*ppMdl);
  558. }
  559. return(Status);
  560. }
  561. if(!*ppMdl)
  562. {
  563. return(STATUS_NO_MEMORY);
  564. }
  565. return(STATUS_SUCCESS);
  566. }
  567. VOID
  568. PfLogApc(
  569. IN PKAPC Apc,
  570. IN PKNORMAL_ROUTINE *NormalRoutine,
  571. IN PVOID *NormalContext,
  572. IN PVOID *SystemArgument1,
  573. IN PVOID *SystemArgument2
  574. )
  575. /*++
  576. Routine Description:
  577. This is the special APC routine that runs to map or remap a log
  578. It returns its status via SystemArgument1 which is a pointer
  579. to the log structure. Note that the log structure was referenced
  580. when the Apc was enqueued, so the pointer is guaranteed to be
  581. valid. However, the log itself may not be valid, so the first
  582. order of business is to lock the log and verify it.
  583. --*/
  584. {
  585. #if DOLOGAPC
  586. PPFLOGINTERFACE pLog = (PPFLOGINTERFACE)*SystemArgument1;
  587. PMDL Mdl;
  588. PBYTE pbVA;
  589. NTSTATUS Status = STATUS_SUCCESS;
  590. KIRQL kIrql;
  591. //
  592. // Need to extend the mapping of this Log. Lock the log.
  593. //
  594. KeEnterCriticalRegion();
  595. ExAcquireResourceExclusiveLite(&pLog->Resource, TRUE);
  596. //
  597. // slide the mapping as long as the resource has not nested and
  598. // the log is valid. Note the nesting test is made to prevent
  599. // the APC routine from interfering with the base thread code
  600. // that might also be trying to do a log operation.
  601. //
  602. if((pLog->Resource.OwnerThreads[0].OwnerCount == 1)
  603. &&
  604. pLog->Irp
  605. &&
  606. !(pLog->dwFlags & LOG_BADMEM))
  607. {
  608. DWORD dwSpaceRemaining, dwSpaceToMap, dwOffset;
  609. //
  610. // the log is still valid. Slide the mapping down. Because
  611. // logging may still be going on, the new mapping needs to
  612. // overlap slightly. Once the new mapping exists, we can
  613. // fix up the pointers under the spin lock.
  614. //
  615. dwSpaceRemaining = pLog->dwTotalSize -
  616. (pLog->dwPastMapped + pLog->dwMapCount);
  617. if(pLog->Event
  618. &&
  619. (dwSpaceRemaining < pLog->dwSignalThreshold))
  620. {
  621. KeSetEvent(pLog->Event, LOG_PRIO_BOOST, FALSE);
  622. }
  623. if(!dwSpaceRemaining)
  624. {
  625. //
  626. // Nothing left to map. Just go away
  627. //
  628. pLog->dwFlags |= LOG_CANTMAP;
  629. }
  630. else
  631. {
  632. //
  633. // Still space. Grab it. Don't leave anything dangling
  634. // though. That is, there should always be at least
  635. // MAX_NOMINAL_LOG_MAP bytes left for the next time.
  636. //
  637. if(dwSpaceRemaining < pLog->dwMapWindowSize2 )
  638. {
  639. dwSpaceToMap = dwSpaceRemaining;
  640. }
  641. else
  642. {
  643. dwSpaceToMap = pLog->dwMapWindowSize;
  644. }
  645. //
  646. // Now compute the extra space to map. No need for
  647. // the lock since the resource prevents remapping
  648. //
  649. dwOffset = (volatile DWORD)pLog->dwMapOffset;
  650. dwSpaceToMap += pLog->dwMapCount - dwOffset;
  651. //
  652. // Now the address of the new mapping.
  653. //
  654. pbVA = pLog->pUserAddress + dwOffset + pLog->dwPastMapped;
  655. Status = DoAMapping(
  656. pbVA,
  657. dwSpaceToMap,
  658. &Mdl,
  659. &pbVA);
  660. if(NT_SUCCESS(Status))
  661. {
  662. PMDL OldMdl;
  663. //
  664. // get the spin lock and slide things down. Also
  665. // capture the old Mdl so it can be freed.
  666. //
  667. kIrql = LockLog(pLog);
  668. OldMdl = pLog->Mdl;
  669. pLog->Mdl = Mdl;
  670. pLog->pCurrentMapPointer = pbVA;
  671. pLog->dwMapCount = dwSpaceToMap;
  672. pLog->dwMapOffset -= dwOffset;
  673. pLog->dwPastMapped += dwOffset;
  674. UnLockLog(pLog, kIrql);
  675. if(OldMdl)
  676. {
  677. MmUnlockPages(OldMdl);
  678. IoFreeMdl(OldMdl);
  679. }
  680. }
  681. else
  682. {
  683. //
  684. // In principle, this should take the filter spin lock,
  685. // but whatever race it creates with the match code
  686. // is harmless, so don't bother.
  687. //
  688. pLog->dwFlags |= LOG_OUTMEM;
  689. pLog->MapStatus = Status;
  690. }
  691. }
  692. }
  693. //
  694. // small race here in that the APC is still in progress. However,
  695. // it is most likely that we advanced the log and therefore
  696. // an APC won't be needed any time soon. If it is, then it may
  697. // run needlessly.
  698. pLog->lApcInProgress = 0;
  699. ExReleaseResourceLite(&pLog->Resource);
  700. KeLeaveCriticalRegion();
  701. DereferenceLog(pLog);
  702. #endif
  703. }
  704. VOID
  705. PfCancelIrp(
  706. IN PDEVICE_OBJECT DeviceObject,
  707. IN PIRP Irp
  708. )
  709. /*++
  710. Routine Description:
  711. Called when an IRP is cancelled. This is used to catch
  712. when the thread owning the log terminates.
  713. --*/
  714. {
  715. #if DOLOGAPC
  716. PPFLOGINTERFACE pLog = (PPFLOGINTERFACE) Irp->IoStatus.Status;
  717. //
  718. // Invalidate the log. Unmap the memory. The cancel spin
  719. // lock prevents the log from going away.
  720. //
  721. if(pLog->Irp == Irp)
  722. {
  723. KIRQL kIrql;
  724. PMDL Mdl;
  725. //
  726. // Same Irp.
  727. //
  728. kIrql = LockLog(pLog);
  729. //
  730. // reference it so it won't go away
  731. //
  732. AddRefToLog(pLog);
  733. //
  734. // if this is still the correct IRP, mark the log invalid. This
  735. // closes a race with AdvanceLog since LOG_BADMEM will prevent
  736. // an APC insertion.
  737. //
  738. if(pLog->Irp == Irp)
  739. {
  740. pLog->dwFlags |= LOG_BADMEM;
  741. pLog->ApcInited = FALSE;
  742. }
  743. UnLockLog(pLog, kIrql);
  744. IoReleaseCancelSpinLock(Irp->CancelIrql);
  745. //
  746. // Now get the resource to prevent others from
  747. // tampering. Assume this will never nest.
  748. //
  749. KeEnterCriticalRegion();
  750. ExAcquireResourceExclusiveLite(&pLog->Resource, TRUE);
  751. //
  752. // Make sure it's the same IRP. This could have changed
  753. // while we were not interlocked. If the Irp changed, keep
  754. // hands off.
  755. //
  756. if(pLog->Irp == Irp)
  757. {
  758. //
  759. // if a current mapping, unmap it
  760. //
  761. if(pLog->Mdl)
  762. {
  763. MmUnlockPages(pLog->Mdl);
  764. IoFreeMdl(pLog->Mdl);
  765. pLog->Mdl = 0;
  766. }
  767. pLog->Irp->IoStatus.Status = STATUS_CANCELLED;
  768. IoCompleteRequest(pLog->Irp, IO_NO_INCREMENT);
  769. DereferenceLog(pLog);
  770. pLog->Irp = 0;
  771. }
  772. ExReleaseResourceLite(&pLog->Resource);
  773. KeLeaveCriticalRegion();
  774. DereferenceLog(pLog);
  775. }
  776. else
  777. {
  778. IoReleaseCancelSpinLock(Irp->CancelIrql);
  779. }
  780. #endif // DOLOGAPC
  781. }
  782. VOID
  783. AdvanceLog(PPFLOGINTERFACE pLog)
  784. /*++
  785. Routine Description:
  786. Called to schedule the APC to move the log mapping.
  787. If the APC can't be inserted, just forget it.
  788. --*/
  789. {
  790. #if DOLOGAPC
  791. //
  792. // can't use the routines in logger.c 'cause the spin
  793. // lock is in force
  794. //
  795. if(pLog->ApcInited
  796. &&
  797. pLog->Irp
  798. &&
  799. !(pLog->dwFlags & (LOG_BADMEM | LOG_OUTMEM | LOG_CANTMAP))
  800. &&
  801. InterlockedExchange(&pLog->lApcInProgress, 1) == 0)
  802. {
  803. InterlockedIncrement(&pLog->UseCount);
  804. if(!KeInsertQueueApc(
  805. &pLog->Apc,
  806. (PVOID)pLog,
  807. NULL,
  808. LOG_PRIO_BOOST))
  809. {
  810. //
  811. // failed to insert
  812. //
  813. InterlockedDecrement(&pLog->UseCount);
  814. pLog->lApcInProgress = 0;
  815. }
  816. }
  817. #endif
  818. }
  819. KIRQL
  820. LockLog(PPFLOGINTERFACE pLog)
  821. /*++
  822. Routine Description:
  823. Acquire the log spin lock. This is called by the match code
  824. at DPC only
  825. --*/
  826. {
  827. KIRQL kIrql;
  828. KeAcquireSpinLock(&pLog->LogLock, &kIrql);
  829. return(kIrql);
  830. }
  831. VOID
  832. RemoveLogFromInterfaces(PPFLOGINTERFACE pLog)
  833. {
  834. PLIST_ENTRY pList;
  835. PFILTER_INTERFACE pf;
  836. //
  837. // protect the interface list. The assumption is that no
  838. // resources, aside from an FCB lock are held.
  839. //
  840. KeEnterCriticalRegion();
  841. ExAcquireResourceExclusiveLite(&FilterListResourceLock, TRUE);
  842. for(pList = g_filters.leIfListHead.Flink;
  843. pList != &g_filters.leIfListHead;
  844. pList = pList->Flink)
  845. {
  846. pf = CONTAINING_RECORD(pList, FILTER_INTERFACE, leIfLink);
  847. if(pLog == pf->pLog)
  848. {
  849. LOCK_STATE LockState;
  850. AcquireWriteLock(&g_filters.ifListLock,&LockState);
  851. pf->pLog = NULL;
  852. ReleaseWriteLock(&g_filters.ifListLock,&LockState);
  853. DereferenceLog(pLog);
  854. }
  855. }
  856. ExReleaseResourceLite(&FilterListResourceLock);
  857. KeLeaveCriticalRegion();
  858. }