Source code of Windows XP (NT5)

/*++

Copyright (c) 1996  Microsoft Corporation

Module Name:

    tckrnl.c

Abstract:

    This module contains routines that talk to the kernel

Author:

    Jim Stewart (jstew)    August 14, 1996

Revision History:

    Ofer Bar (oferbar)     Oct 1, 1997

--*/

#include "precomp.h"
#pragma hdrstop

//
// we use this mutex to synchronize startup with other traffic.dll's
//
const UCHAR TrafficSyncMutex[] = "_TRAFFIC_CTL_MUTEX";

HANDLE hGpcNotifyThread = INVALID_HANDLE_VALUE;
HANDLE hGpcNotifyStopEvent = INVALID_HANDLE_VALUE;

DWORD
IoAddFlow(
    IN PFLOW_STRUC  pFlow,
    IN BOOLEAN      Async
    )

/*++

Routine Description:

    This procedure builds up the structure necessary to add a flow.

Arguments:

Return Value:

    status

--*/

{
    DWORD                   Status = NO_ERROR;
    PCLIENT_STRUC           pClient = pFlow->pInterface->pClient;
    PTC_IFC                 pTcIfc = pFlow->pInterface->pTcIfc;
    PCF_INFO_QOS            Kflow;
    PGPC_ADD_CF_INFO_REQ    GpcReq;
    PGPC_ADD_CF_INFO_RES    GpcRes;
    ULONG                   InBuffSize;
    ULONG                   OutBuffSize;
    ULONG                   CfInfoSize;
    PIO_APC_ROUTINE         pCbRoutine = NULL;
    ULONG                   l;
    HANDLE                  hEvent = NULL;

    //
    // allocate memory for a CF_INFO struct to be passed to the GPC.
    //

    ASSERT(pFlow->pGenFlow);

    pFlow->GpcHandle = NULL;
    l = pFlow->GenFlowLen;
    ASSERT(l > 0);

    CfInfoSize = l + FIELD_OFFSET(CF_INFO_QOS, GenFlow);
    InBuffSize = sizeof(GPC_ADD_CF_INFO_REQ) + CfInfoSize;
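    //
    // The input buffer holds the fixed GPC_ADD_CF_INFO_REQ request plus a
    // variable-length CF_INFO_QOS built in place at GpcReq->CfInfo; the
    // CF_INFO_QOS in turn carries the caller's generic flow parameters
    // in its GenFlow field.
    //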
    //
    // And for the return info...
    //

    OutBuffSize = sizeof(GPC_ADD_CF_INFO_RES);

    AllocMem(&GpcRes, OutBuffSize);
    pFlow->CompletionBuffer = (PVOID)GpcRes;

    AllocMem(&GpcReq, InBuffSize);

    if (GpcRes && GpcReq) {

        RtlZeroMemory(GpcRes, OutBuffSize);
        RtlZeroMemory(GpcReq, InBuffSize);

        //
        // fill in the flow information
        //

        GpcReq->ClientHandle = pFlow->pGpcClient->GpcHandle;
        GpcReq->ClientCfInfoContext = pFlow;
        GpcReq->CfInfoSize = CfInfoSize;

        Kflow = (PCF_INFO_QOS)&GpcReq->CfInfo;

        //
        // fill the instance name
        //

        Kflow->InstanceNameLength = (USHORT)pTcIfc->InstanceNameLength;
        RtlCopyMemory(Kflow->InstanceName,
                      pTcIfc->InstanceName,
                      pTcIfc->InstanceNameLength * sizeof(WCHAR));

        //
        // set the flow flags
        //

        Kflow->Flags = pFlow->UserFlags;

        //
        // copy the generic flow parameter
        //

        RtlCopyMemory(&Kflow->GenFlow,
                      pFlow->pGenFlow,
                      l);

        if (pClient->ClHandlers.ClAddFlowCompleteHandler && Async) {

            pCbRoutine = CbAddFlowComplete;

        } else {

            hEvent = pFlow->PendingEvent;
        }
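        //
        // If the client registered an add-flow-complete handler and the
        // caller asked for an asynchronous add, the request completes
        // through the CbAddFlowComplete APC routine; otherwise the flow's
        // pending event is passed down and waited on below.
        //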
        Status = DeviceControl(pGlobals->GpcFileHandle,
                               hEvent,
                               pCbRoutine,
                               (PVOID)pFlow,
                               &pFlow->IoStatBlock,
                               IOCTL_GPC_ADD_CF_INFO,
                               GpcReq,
                               InBuffSize,
                               GpcRes,
                               OutBuffSize);

        if (!ERROR_FAILED(Status)) {

            if (hEvent && Status == ERROR_SIGNAL_PENDING) {

                //
                // wait for the event to signal
                //

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoAddFlow: Waiting for event 0x%X...\n",
                             PtrToUlong(hEvent)));
                }

                Status = WaitForSingleObject(hEvent,
                                             INFINITE);

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoAddFlow: ... Event 0x%X signaled, Status=0x%X\n",
                             PtrToUlong(hEvent), Status));
                }
            }

            if (Status == NO_ERROR) {

                Status = MapNtStatus2WinError(GpcRes->Status);

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoAddFlow: GpcRes returned=0x%X mapped to =0x%X\n",
                             GpcRes->Status, Status));
                }
            }

            if (ERROR_SUCCESS == Status) {

                ASSERT(GpcRes->GpcCfInfoHandle);

                pFlow->GpcHandle = GpcRes->GpcCfInfoHandle;
                pFlow->InstanceNameLength = GpcRes->InstanceNameLength;
                RtlCopyMemory(pFlow->InstanceName,
                              GpcRes->InstanceName,
                              GpcRes->InstanceNameLength);
                pFlow->InstanceName[pFlow->InstanceNameLength/sizeof(WCHAR)] = L'\0';

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoAddFlow: Flow Handle=%d Name=%S\n",
                             pFlow->GpcHandle,
                             pFlow->InstanceName));
                }
            }
        }

    } else {

        Status = ERROR_NOT_ENOUGH_MEMORY;
    }

    //
    // No, it's not a bug:
    // GpcRes will be released in CompleteAddFlow
    //

    if (GpcReq)
        FreeMem(GpcReq);

    IF_DEBUG(IOCTLS) {
        WSPRINT(("<==IoAddFlow: Status=0x%X\n",
                 Status));
    }

    return Status;
}

DWORD
IoAddClassMapFlow(
    IN PFLOW_STRUC  pFlow,
    IN BOOLEAN      Async
    )

/*++

Routine Description:

    This procedure builds up the structure necessary to add a flow.

Arguments:

Return Value:

    status

--*/

{
    DWORD                   Status = NO_ERROR;
    PCLIENT_STRUC           pClient = pFlow->pInterface->pClient;
    PTC_IFC                 pTcIfc = pFlow->pInterface->pTcIfc;
    PCF_INFO_CLASS_MAP      Kflow;
    PGPC_ADD_CF_INFO_REQ    GpcReq;
    PGPC_ADD_CF_INFO_RES    GpcRes;
    ULONG                   InBuffSize;
    ULONG                   OutBuffSize;
    ULONG                   CfInfoSize;
    PIO_APC_ROUTINE         pCbRoutine = NULL;
    ULONG                   l;
    HANDLE                  hEvent = NULL;

    return ERROR_CALL_NOT_IMPLEMENTED;
#if NEVER
// The rest of this routine is compiled out: class-map flows are not
// published in MSDN and are not implemented in PSCHED either.

    //
    // allocate memory for a CF_INFO struct to be passed to the GPC.
    //

    ASSERT(pFlow->pClassMapFlow);

    pFlow->GpcHandle = NULL;
    l = sizeof(TC_CLASS_MAP_FLOW) + pFlow->pClassMapFlow->ObjectsLength;

    CfInfoSize = l + FIELD_OFFSET(CF_INFO_CLASS_MAP, ClassMapInfo);
    InBuffSize = sizeof(GPC_ADD_CF_INFO_REQ) + CfInfoSize;

    //
    // And for the return info...
    //

    OutBuffSize = sizeof(GPC_ADD_CF_INFO_RES);

    AllocMem(&GpcRes, OutBuffSize);
    pFlow->CompletionBuffer = (PVOID)GpcRes;

    AllocMem(&GpcReq, InBuffSize);

    if (GpcRes && GpcReq) {

        RtlZeroMemory(GpcRes, OutBuffSize);
        RtlZeroMemory(GpcReq, InBuffSize);

        //
        // fill in the flow information
        //

        GpcReq->ClientHandle = pFlow->pGpcClient->GpcHandle;
        GpcReq->ClientCfInfoContext = pFlow;
        GpcReq->CfInfoSize = CfInfoSize;

        Kflow = (PCF_INFO_CLASS_MAP)&GpcReq->CfInfo;

        //
        // fill the instance name
        //

        Kflow->InstanceNameLength = (USHORT)pTcIfc->InstanceNameLength;
        RtlCopyMemory(Kflow->InstanceName,
                      pTcIfc->InstanceName,
                      pTcIfc->InstanceNameLength * sizeof(WCHAR));

        //
        // copy the class map flow parameter
        //

        RtlCopyMemory(&Kflow->ClassMapInfo,
                      pFlow->pClassMapFlow,
                      l);

        if (pClient->ClHandlers.ClAddFlowCompleteHandler && Async) {

            pCbRoutine = CbAddFlowComplete;

        } else {

            hEvent = pFlow->PendingEvent;
        }

        Status = DeviceControl(pGlobals->GpcFileHandle,
                               hEvent,
                               pCbRoutine,
                               (PVOID)pFlow,
                               &pFlow->IoStatBlock,
                               IOCTL_GPC_ADD_CF_INFO,
                               GpcReq,
                               InBuffSize,
                               GpcRes,
                               OutBuffSize);

        if (!ERROR_FAILED(Status)) {

            if (hEvent && Status == ERROR_SIGNAL_PENDING) {

                //
                // wait for the event to signal
                //

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoAddClassMapFlow: Waiting for event 0x%X...\n",
                             PtrToUlong(hEvent)));
                }

                Status = WaitForSingleObject(hEvent,
                                             INFINITE);

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoAddClassMapFlow: ... Event 0x%X signaled, Status=0x%X\n",
                             PtrToUlong(hEvent), Status));
                }
            }

            if (Status == NO_ERROR) {

                Status = MapNtStatus2WinError(GpcRes->Status);

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoAddClassMapFlow: GpcRes returned=0x%X mapped to =0x%X\n",
                             GpcRes->Status, Status));
                }
            }

            if (!ERROR_FAILED(Status)) {

                ASSERT(GpcRes->GpcCfInfoHandle);

                pFlow->GpcHandle = GpcRes->GpcCfInfoHandle;
                pFlow->InstanceNameLength = GpcRes->InstanceNameLength;
                RtlCopyMemory(pFlow->InstanceName,
                              GpcRes->InstanceName,
                              GpcRes->InstanceNameLength);
                pFlow->InstanceName[pFlow->InstanceNameLength/sizeof(WCHAR)] = L'\0';

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoAddClassMapFlow: Flow Handle=%d Name=%S\n",
                             pFlow->GpcHandle,
                             pFlow->InstanceName));
                }
            }
        }

    } else {

        Status = ERROR_NOT_ENOUGH_MEMORY;
    }

    //
    // No, it's not a bug:
    // GpcRes will be released in CompleteAddFlow
    //

    if (GpcReq)
        FreeMem(GpcReq);

    IF_DEBUG(IOCTLS) {
        WSPRINT(("<==IoAddClassMapFlow: Status=0x%X\n",
                 Status));
    }

    return Status;
#endif
}

DWORD
IoModifyFlow(
    IN PFLOW_STRUC  pFlow,
    IN BOOLEAN      Async
    )

/*++

Routine Description:

    This procedure builds up the structure necessary to modify a flow.

Arguments:

    pFlow

Return Value:

    status

--*/

{
    DWORD                       Status = NO_ERROR;
    PCLIENT_STRUC               pClient = pFlow->pInterface->pClient;
    PTC_IFC                     pTcIfc = pFlow->pInterface->pTcIfc;
    PCF_INFO_QOS                Kflow;
    PGPC_MODIFY_CF_INFO_REQ     GpcReq;
    PGPC_MODIFY_CF_INFO_RES     GpcRes;
    ULONG                       InBuffSize;
    ULONG                       OutBuffSize;
    ULONG                       CfInfoSize;
    PIO_APC_ROUTINE             pCbRoutine = NULL;
    ULONG                       l;
    HANDLE                      hEvent = NULL;

    //
    // allocate memory for a CF_INFO struct to be passed to the GPC.
    //

    ASSERT(pFlow->pGenFlow1);

    l = pFlow->GenFlowLen1;
    ASSERT(l > 0);

    CfInfoSize = l + FIELD_OFFSET(CF_INFO_QOS, GenFlow);
    InBuffSize = sizeof(GPC_MODIFY_CF_INFO_REQ) + CfInfoSize;

    //
    // And for the return info...
    //

    OutBuffSize = sizeof(GPC_MODIFY_CF_INFO_RES);

    AllocMem(&GpcRes, OutBuffSize);
    pFlow->CompletionBuffer = (PVOID)GpcRes;

    AllocMem(&GpcReq, InBuffSize);

    if (GpcRes && GpcReq) {

        RtlZeroMemory(GpcRes, OutBuffSize);
        RtlZeroMemory(GpcReq, InBuffSize);

        //
        // fill in the flow information
        //

        GpcReq->ClientHandle = pFlow->pGpcClient->GpcHandle;
        GpcReq->GpcCfInfoHandle = pFlow->GpcHandle;
        GpcReq->CfInfoSize = CfInfoSize;

        Kflow = (PCF_INFO_QOS)&GpcReq->CfInfo;

        //
        // fill the instance name
        //

        Kflow->InstanceNameLength = (USHORT)pTcIfc->InstanceNameLength;
        RtlCopyMemory(Kflow->InstanceName,
                      pTcIfc->InstanceName,
                      pTcIfc->InstanceNameLength * sizeof(WCHAR));

        //
        // copy the generic flow parameter
        //

        RtlCopyMemory(&Kflow->GenFlow,
                      pFlow->pGenFlow1,
                      l);

        if (pClient->ClHandlers.ClModifyFlowCompleteHandler && Async) {

            pCbRoutine = CbModifyFlowComplete;

        } else {

            hEvent = pFlow->PendingEvent;
        }

        Status = DeviceControl(pGlobals->GpcFileHandle,
                               hEvent,
                               pCbRoutine,
                               (PVOID)pFlow,
                               &pFlow->IoStatBlock,
                               IOCTL_GPC_MODIFY_CF_INFO,
                               GpcReq,
                               InBuffSize,
                               GpcRes,
                               OutBuffSize);

        if (!ERROR_FAILED(Status)) {

            if (hEvent && Status == ERROR_SIGNAL_PENDING) {

                //
                // wait for the event to signal
                //

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoModifyFlow: Waiting for event 0x%X\n",
                             PtrToUlong(hEvent)));
                }

                Status = WaitForSingleObject(hEvent,
                                             INFINITE);

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoModifyFlow: ... Event 0x%X signaled, Status=0x%X\n",
                             PtrToUlong(hEvent), Status));
                }
            }

            if (Status == NO_ERROR) {

                Status = MapNtStatus2WinError(GpcRes->Status);

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoModifyFlow: GpcRes returned=0x%X mapped to =0x%X\n",
                             GpcRes->Status, Status));
                }
            }

        } else {

            Status = MapNtStatus2WinError(GpcRes->Status);

            IF_DEBUG(IOCTLS) {
                WSPRINT(("IoModifyFlow: GpcRes returned=0x%X mapped to =0x%X\n",
                         GpcRes->Status, Status));
            }
        }

    } else {

        Status = ERROR_NOT_ENOUGH_MEMORY;
    }

    //
    // No, it's not a bug:
    // GpcRes will be released in CompleteModifyFlow
    //

    if (GpcReq)
        FreeMem(GpcReq);

    IF_DEBUG(IOCTLS) {
        WSPRINT(("IoModifyFlow: Status=0x%X\n",
                 Status));
    }

    return Status;
}

DWORD
IoDeleteFlow(
    IN PFLOW_STRUC  pFlow,
    IN BOOLEAN      Async
    )

/*++

Routine Description:

    This procedure builds up the structure necessary to delete a flow.
    It then calls a routine to pass this info to the GPC.

Arguments:

    pFlow

Return Value:

    status

--*/

{
    DWORD                       Status;
    ULONG                       InBuffSize;
    ULONG                       OutBuffSize;
    PGPC_REMOVE_CF_INFO_REQ     GpcReq;
    PGPC_REMOVE_CF_INFO_RES     GpcRes;
    PIO_APC_ROUTINE             pCbRoutine = NULL;
    PCLIENT_STRUC               pClient = pFlow->pInterface->pClient;
    HANDLE                      hEvent = NULL;

    if (IS_REMOVED(pFlow->Flags)) {

        //
        // this flow has already been deleted in the kernel
        // due to a flow close notification.
        // no need to send an IOCTL to the GPC, just return OK
        //

        IF_DEBUG(IOCTLS) {
            WSPRINT(("IoDeleteFlow: Flow has already been deleted=0x%X\n",
                     PtrToUlong(pFlow)));
        }

        return NO_ERROR;
    }

    //
    // If we add this over here, then if WMI deletes the flow,
    // the user mode call will just return above.
    //

    GetLock(pFlow->Lock);
    pFlow->Flags |= TC_FLAGS_REMOVED;
    FreeLock(pFlow->Lock);
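    //
    // The flow is marked as removed before the IOCTL is issued, so any
    // later call to IoDeleteFlow for this flow takes the early return
    // above; the flag is cleared again below if the remove request fails.
    //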
    //
    // allocate memory for in and out buffers
    //

    InBuffSize = sizeof(GPC_REMOVE_CF_INFO_REQ);
    OutBuffSize = sizeof(GPC_REMOVE_CF_INFO_RES);

    AllocMem(&GpcRes, OutBuffSize);
    pFlow->CompletionBuffer = (PVOID)GpcRes;

    AllocMem(&GpcReq, InBuffSize);

    if (GpcReq && GpcRes) {

        IF_DEBUG(IOCTLS) {
            WSPRINT(("IoDeleteFlow: preparing to delete the flow=0x%X\n",
                     PtrToUlong(pFlow)));
        }

        GpcReq->ClientHandle = pFlow->pGpcClient->GpcHandle;
        GpcReq->GpcCfInfoHandle = pFlow->GpcHandle;

        if (pClient->ClHandlers.ClDeleteFlowCompleteHandler && Async) {

            pCbRoutine = CbDeleteFlowComplete;

        } else {

            hEvent = pFlow->PendingEvent;
        }

        Status = DeviceControl(pGlobals->GpcFileHandle,
                               hEvent,
                               pCbRoutine,
                               (PVOID)pFlow,
                               &pFlow->IoStatBlock,
                               IOCTL_GPC_REMOVE_CF_INFO,
                               GpcReq,
                               InBuffSize,
                               GpcRes,
                               OutBuffSize);

        if (!ERROR_FAILED(Status)) {

            if (hEvent && Status == ERROR_SIGNAL_PENDING) {

                //
                // wait for the event to signal
                //

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoDeleteFlow: Waiting for event 0x%X\n",
                             PtrToUlong(hEvent)));
                }

                Status = WaitForSingleObject(hEvent,
                                             INFINITE);

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoDeleteFlow: ... Event 0x%X signaled, Status=0x%X\n",
                             PtrToUlong(hEvent), Status));
                }
            }

            if (Status == NO_ERROR) {

                Status = MapNtStatus2WinError(GpcRes->Status);

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoDeleteFlow: Gpc returned=0x%X mapped to 0x%X\n",
                             GpcRes->Status, Status));
                }

                //
                // If the deletion was unsuccessful, un-mark the REMOVED flag.
                //

                if (ERROR_FAILED(Status)) {
                    GetLock(pFlow->Lock);
                    pFlow->Flags &= ~TC_FLAGS_REMOVED;
                    FreeLock(pFlow->Lock);
                }
            }
        }

    } else {

        Status = ERROR_NOT_ENOUGH_MEMORY;
    }

    //
    // No, it's not a bug:
    // GpcRes will be released in CompleteDeleteFlow
    //

    if (GpcReq)
        FreeMem(GpcReq);

    IF_DEBUG(IOCTLS) {
        WSPRINT(("<==IoDeleteFlow: Status=0x%X\n",
                 Status));
    }

    return Status;
}

DWORD
IoAddFilter(
    IN PFILTER_STRUC    pFilter
    )

/*++

Routine Description:

    This procedure builds up the structure necessary to add a filter.
    It then calls a routine to pass this info to the GPC.

Arguments:

    pFilter

Return Value:

    status

--*/

{
    DWORD                   Status;
    PGPC_ADD_PATTERN_REQ    GpcReq;
    PGPC_ADD_PATTERN_RES    GpcRes;
    ULONG                   InBuffSize;
    ULONG                   OutBuffSize;
    PFLOW_STRUC             pFlow = pFilter->pFlow;
    PTC_GEN_FILTER          pGpcFilter = pFilter->pGpcFilter;
    PUCHAR                  p;
    ULONG                   PatternSize;
    IO_STATUS_BLOCK         IoStatBlock;

    pFilter->GpcHandle = NULL;

    ASSERT(pGpcFilter);
    ASSERT(pFlow);

    PatternSize = pGpcFilter->PatternSize;

    InBuffSize = sizeof(GPC_ADD_PATTERN_REQ) + 2*PatternSize;
    OutBuffSize = sizeof(GPC_ADD_PATTERN_RES);
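    //
    // The input buffer is sized for the pattern and the mask back to back:
    // both are PatternSize bytes and are copied one after the other into
    // GpcReq->PatternAndMask below, hence the 2*PatternSize.
    //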
    AllocMem(&GpcReq, InBuffSize);
    AllocMem(&GpcRes, OutBuffSize);

    if (GpcReq && GpcRes) {

        IF_DEBUG(IOCTLS) {
            WSPRINT(("IoAddFilter: Filling request: size: in=%d, out=%d\n",
                     InBuffSize, OutBuffSize));
        }

        GpcReq->ClientHandle = pFlow->pGpcClient->GpcHandle;
        GpcReq->GpcCfInfoHandle = pFlow->GpcHandle;
        GpcReq->ClientPatternContext = (GPC_CLIENT_HANDLE)pFilter;
        GpcReq->Priority = 0;
        GpcReq->PatternSize = PatternSize;
        GpcReq->ProtocolTemplate = pFilter->GpcProtocolTemplate;

        //
        // fill in the pattern
        //

        p = (PUCHAR)&GpcReq->PatternAndMask;

        RtlCopyMemory(p, pGpcFilter->Pattern, PatternSize);

        //
        // fill in the mask
        //

        p += PatternSize;

        RtlCopyMemory(p, pGpcFilter->Mask, PatternSize);

        Status = DeviceControl(pGlobals->GpcFileHandle,
                               NULL,
                               NULL,
                               NULL,
                               &IoStatBlock,
                               IOCTL_GPC_ADD_PATTERN,
                               GpcReq,
                               InBuffSize,
                               GpcRes,
                               OutBuffSize);

        if (!ERROR_FAILED(Status)) {

            Status = MapNtStatus2WinError(GpcRes->Status);

            IF_DEBUG(IOCTLS) {
                WSPRINT(("IoAddFilter: GpcRes returned=0x%X mapped to =0x%X\n",
                         GpcRes->Status, Status));
            }

            //
            // save the filter handle
            //

            if (!ERROR_FAILED(Status)) {

                pFilter->GpcHandle = GpcRes->GpcPatternHandle;

            } else {

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("IoAddFilter: GpcRes returned=0x%X mapped to =0x%X\n",
                             GpcRes->Status, Status));
                }

                IF_DEBUG(IOCTLS) {
                    WSPRINT(("Error - failed the addfilter call\n"));
                }

                //ASSERT(Status == ERROR_DUPLICATE_FILTER); removed for WAN - interface up down situation
            }
        }

    } else {

        Status = ERROR_NOT_ENOUGH_MEMORY;

        IF_DEBUG(ERRORS) {
            WSPRINT(("IoAddFilter: Error =0x%X\n",
                     Status));
        }
    }

    if (GpcReq)
        FreeMem(GpcReq);

    if (GpcRes)
        FreeMem(GpcRes);

    IF_DEBUG(IOCTLS) {
        WSPRINT(("<==IoAddFilter: Returned =0x%X\n",
                 Status));
    }

    return Status;
}

DWORD
IoDeleteFilter(
    IN PFILTER_STRUC    pFilter
    )

/*++

Routine Description:

    This procedure builds up the structure necessary to delete a filter.
    It then calls a routine to pass this info to the GPC.

Arguments:

    pFilter

Return Value:

    status

--*/

{
    DWORD                       Status;
    ULONG                       InBuffSize;
    ULONG                       OutBuffSize;
    GPC_REMOVE_PATTERN_REQ      GpcReq;
    GPC_REMOVE_PATTERN_RES      GpcRes;
    IO_STATUS_BLOCK             IoStatBlock;

    if (IS_REMOVED(pFilter->Flags)) {

        //
        // this filter has already been deleted in the kernel
        // due to a flow close notification.
        // no need to send an IOCTL to the GPC, just return OK
        //

        IF_DEBUG(IOCTLS) {
            WSPRINT(("IoDeleteFilter: Filter has already been deleted=0x%X\n",
                     PtrToUlong(pFilter)));
        }

        return NO_ERROR;
    }

    //
    // If we add this over here, then if WMI deletes the Interface (and the
    // flows/filters) the user mode call will just return above.
    //

    GetLock(pFilter->Lock);
    pFilter->Flags |= TC_FLAGS_REMOVED;
    FreeLock(pFilter->Lock);

    //
    // the request and response structures are fixed size, so no
    // allocation is needed; they live on the stack
    //

    InBuffSize = sizeof(GPC_REMOVE_PATTERN_REQ);
    OutBuffSize = sizeof(GPC_REMOVE_PATTERN_RES);

    GpcReq.ClientHandle = pFilter->pFlow->pGpcClient->GpcHandle;
    GpcReq.GpcPatternHandle = pFilter->GpcHandle;

    ASSERT(GpcReq.ClientHandle);
    ASSERT(GpcReq.GpcPatternHandle);

    Status = DeviceControl(pGlobals->GpcFileHandle,
                           NULL,
                           NULL,
                           NULL,
                           &IoStatBlock,
                           IOCTL_GPC_REMOVE_PATTERN,
                           &GpcReq,
                           InBuffSize,
                           &GpcRes,
                           OutBuffSize);

    if (!ERROR_FAILED(Status)) {

        Status = MapNtStatus2WinError(GpcRes.Status);

        IF_DEBUG(IOCTLS) {
            WSPRINT(("IoDeleteFilter: GpcRes returned=0x%X mapped to =0x%X\n",
                     GpcRes.Status, Status));
        }

        //
        // If the deletion was unsuccessful, un-mark the REMOVED flag.
        //

        if (ERROR_FAILED(Status)) {
            GetLock(pFilter->Lock);
            pFilter->Flags &= ~TC_FLAGS_REMOVED;
            FreeLock(pFilter->Lock);
        }
    }

    IF_DEBUG(IOCTLS) {
        WSPRINT(("<==IoDeleteFilter: Status=0x%X\n",
                 Status));
    }

    return Status;
}

DWORD
IoRegisterClient(
    IN PGPC_CLIENT  pGpcClient
    )
{
    DWORD                       Status;
    GPC_REGISTER_CLIENT_REQ     GpcReq;
    GPC_REGISTER_CLIENT_RES     GpcRes;
    ULONG                       InBuffSize;
    ULONG                       OutBuffSize;
    IO_STATUS_BLOCK             IoStatBlock;

    InBuffSize = sizeof(GPC_REGISTER_CLIENT_REQ);
    OutBuffSize = sizeof(GPC_REGISTER_CLIENT_RES);

    GpcReq.CfId = pGpcClient->CfInfoType;
    GpcReq.Flags = GPC_FLAGS_FRAGMENT;
    GpcReq.MaxPriorities = 1;
    GpcReq.ClientContext =
        (GPC_CLIENT_HANDLE)UlongToPtr(GetCurrentProcessId());  // process id

    Status = DeviceControl(pGlobals->GpcFileHandle,
                           NULL,
                           NULL,
                           NULL,
                           &IoStatBlock,
                           IOCTL_GPC_REGISTER_CLIENT,
                           &GpcReq,
                           InBuffSize,
                           &GpcRes,
                           OutBuffSize);

    if (!ERROR_FAILED(Status)) {

        Status = MapNtStatus2WinError(GpcRes.Status);
        pGpcClient->GpcHandle = GpcRes.ClientHandle;

        IF_DEBUG(IOCTLS) {
            WSPRINT(("IoRegisterClient: GpcRes returned=0x%X mapped to =0x%X\n",
                     GpcRes.Status, Status));
        }
    }

    IF_DEBUG(IOCTLS) {
        WSPRINT(("<==IoRegisterClient: Status=0x%X\n",
                 Status));
    }

    return Status;
}

DWORD
IoDeregisterClient(
    IN PGPC_CLIENT  pGpcClient
    )
{
    DWORD                           Status;
    GPC_DEREGISTER_CLIENT_REQ       GpcReq;
    GPC_DEREGISTER_CLIENT_RES       GpcRes;
    ULONG                           InBuffSize;
    ULONG                           OutBuffSize;
    IO_STATUS_BLOCK                 IoStatBlock;

    InBuffSize = sizeof(GPC_DEREGISTER_CLIENT_REQ);
    OutBuffSize = sizeof(GPC_DEREGISTER_CLIENT_RES);

    GpcReq.ClientHandle = pGpcClient->GpcHandle;

    Status = DeviceControl(pGlobals->GpcFileHandle,
                           NULL,
                           NULL,
                           NULL,
                           &IoStatBlock,
                           IOCTL_GPC_DEREGISTER_CLIENT,
                           &GpcReq,
                           InBuffSize,
                           &GpcRes,
                           OutBuffSize);

    if (!ERROR_FAILED(Status)) {

        Status = MapNtStatus2WinError(GpcRes.Status);

        IF_DEBUG(IOCTLS) {
            WSPRINT(("IoDeregisterClient: GpcRes returned=0x%X mapped to =0x%X\n",
                     GpcRes.Status, Status));
        }
    }

    IF_DEBUG(IOCTLS) {
        WSPRINT(("<==IoDeregisterClient: Status=0x%X\n",
                 Status));
    }

    return Status;
}

PGPC_NOTIFY_REQUEST_RES GpcResCb;

DWORD
IoRequestNotify(
    VOID
    //IN PGPC_CLIENT    pGpcClient
    )

/*
 Description:
    This routine sends a notification request buffer to the GPC.
    The request will pend until the GPC notifies about a flow
    being deleted.  This will cause a callback to CbGpcNotifyRoutine.
*/

{
    DWORD       Status;
    ULONG       OutBuffSize;

    //
    // allocate memory for the out buffer (no input buffer is needed)
    //

    OutBuffSize = sizeof(GPC_NOTIFY_REQUEST_RES);

    AllocMem(&GpcResCb, OutBuffSize);

    if (GpcResCb) {

        Status = DeviceControl(pGlobals->GpcFileHandle,
                               NULL,
                               CbGpcNotifyRoutine,
                               (PVOID)GpcResCb,
                               &GpcResCb->IoStatBlock,
                               IOCTL_GPC_NOTIFY_REQUEST,
                               NULL,        // GpcReq
                               0,           // InBuffSize
                               GpcResCb,
                               OutBuffSize);
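        //
        // No input buffer is passed down; the request is expected to pend
        // in the GPC, and CbGpcNotifyRoutine runs as the completion routine
        // when the GPC eventually completes the IRP.
        //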
        if (ERROR_FAILED(Status)) {

            FreeMem(GpcResCb);
            GpcResCb = NULL;

        } else if ( ERROR_PENDING(Status) ) {

            Status = NO_ERROR;
        }

    } else {

        Status = ERROR_NOT_ENOUGH_MEMORY;
    }

    IF_DEBUG(IOCTLS) {
        WSPRINT(("<==IoRequestNotify: Buffer=%p Status=0x%X\n",
                 GpcResCb, Status));
    }

    return Status;
}

VOID
CancelIoRequestNotify()

/*
 Description:
    This routine cancels the IRP in the GPC and waits for the pending
    IO to be cancelled.  The callback routine sets an event when the
    IO request is cancelled, and this routine waits for that event
    before returning.
*/

{
    // Non-zero value of GpcResCb indicates a pending IRP
    if (GpcResCb)
    {
        GpcCancelEvent = CreateEvent(
                            NULL,
                            FALSE,
                            FALSE,
                            NULL );
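        //
        // CancelIo requests cancellation of the pending notify IRP on the
        // GPC handle; the wait below is alertable so the completion
        // callback can run and signal GpcCancelEvent once the IRP has
        // actually been cancelled.
        //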
        if ( CancelIo( pGlobals->GpcFileHandle ) )
        {
            if ( GpcCancelEvent )
            {
                WaitForSingleObjectEx(
                    GpcCancelEvent,
                    INFINITE,
                    TRUE );

                CloseHandle( GpcCancelEvent );
                GpcCancelEvent = NULL;
            }
            else
            {
                IF_DEBUG(IOCTLS) {
                    WSPRINT((
                        "CancelIo: Status=0x%X\n",
                        GetLastError() ));
                }
            }
        }

        FreeMem(GpcResCb);

        IF_DEBUG(IOCTLS)
        {
            WSPRINT(("<==CancelIoRequestNotify: Freed %p\n",
                     GpcResCb ));
        }
    }

    return;
}

void
IncrementLibraryUsageCount(
    HINSTANCE   hinst,
    int         nCount)

/*
    Utility routine to increment the ref count on
    the TRAFFIC.DLL so that it will not get unloaded
    before the GPCNotify thread gets a chance to run.
*/

{
    TCHAR szModuleName[_MAX_PATH];

    GetModuleFileName(hinst, szModuleName, _MAX_PATH);

    while (nCount--)
        LoadLibrary(szModuleName);

    return;
}

DWORD
GpcNotifyThreadFunction()

/*
    This routine registers an IRP with the GPC to listen for
    FLOW close notifications and waits for the stop event.
    When the event is signalled the IRP is canceled and this
    thread exits.

    Since the wait is done in an alertable state, GPC callbacks
    are executed on this thread itself.
*/

{
    DWORD dwError;

    dwError = IoRequestNotify();

    WaitForSingleObjectEx(
        hGpcNotifyStopEvent,
        INFINITE,
        TRUE );

    CancelIoRequestNotify();

    FreeLibraryAndExitThread(
        hinstTrafficDll,
        0 );

    return 0;
}

DWORD
StartGpcNotifyThread()

/*
 Description:
    This routine starts a thread which queues an IRP for
    GPC notifications.
*/

{
    DWORD dwError = 0;
    DWORD dwThreadId = 0;

    // Increment the ref count on this DLL so it will not be unloaded
    // before the GpcNotifyThreadFunction gets to run
    IncrementLibraryUsageCount(
        hinstTrafficDll,
        1);

    // Create the stop event for the thread to receive
    // GPC flow close notifications
    hGpcNotifyStopEvent = CreateEvent(
                            NULL,
                            FALSE,
                            FALSE,
                            NULL );

    if ( !hGpcNotifyStopEvent )
    {
        dwError = GetLastError();
        goto Error;
    }

    // Start the thread.
    hGpcNotifyThread = CreateThread(
                            NULL,
                            0,
                            (LPTHREAD_START_ROUTINE)GpcNotifyThreadFunction,
                            NULL,
                            0,
                            &dwThreadId );

    if ( !hGpcNotifyThread )
    {
        dwError = GetLastError();
        goto Error;
    }

    // Not closing the thread handle as StopGpcNotifyThread
    // routine will use this handle to wait for thread to
    // terminate.
    return 0;

Error:

    if ( hGpcNotifyStopEvent )
    {
        CloseHandle( hGpcNotifyStopEvent );
        hGpcNotifyStopEvent = NULL;
    }

    if ( hGpcNotifyThread )
    {
        CloseHandle( hGpcNotifyThread );
        hGpcNotifyThread = NULL;
    }

    return dwError;
}

DWORD
StopGpcNotifyThread()

/*
 Description:
    Signal the GPC notification thread to stop
    and wait for it to stop.
*/

{
    // If there was no thread created, there is nothing more to do.
    if ( hGpcNotifyThread )
    {
        // Tell the GPC Notify thread to stop
        SetEvent( hGpcNotifyStopEvent );

        // Wait for it to stop
        WaitForSingleObject(
            hGpcNotifyThread,
            INFINITE );

        CloseHandle( hGpcNotifyThread );
        hGpcNotifyThread = NULL;

        CloseHandle( hGpcNotifyStopEvent );
        hGpcNotifyStopEvent = NULL;
    }

    return 0;
}

DWORD
IoEnumerateFlows(
    IN      PGPC_CLIENT             pGpcClient,
    IN OUT  PHANDLE                 pEnumHandle,
    IN OUT  PULONG                  pFlowCount,
    IN OUT  PULONG                  pBufSize,
    OUT     PGPC_ENUM_CFINFO_RES    *ppBuffer
    )

/*
 Description:
    This routine sends an enumeration request to the GPC and returns a
    buffer describing the installed flows, along with an updated
    enumeration handle and total flow count so the caller can continue
    the enumeration.
*/

{
    DWORD                       Status;
    ULONG                       InBuffSize;
    ULONG                       OutBuffSize;
    PGPC_ENUM_CFINFO_REQ        GpcReq;
    PGPC_ENUM_CFINFO_RES        GpcRes;
    IO_STATUS_BLOCK             IoStatBlock;

    //
    // allocate memory for in and out buffers
    //

    InBuffSize = sizeof(GPC_ENUM_CFINFO_REQ);
    OutBuffSize = *pBufSize + FIELD_OFFSET(GPC_ENUM_CFINFO_RES, EnumBuffer);
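    //
    // The caller supplies in *pBufSize the amount of enumeration data it
    // can accept; the output buffer adds room for the fixed
    // GPC_ENUM_CFINFO_RES header that precedes that data.
    //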
    *ppBuffer = NULL;

    AllocMem(&GpcRes, OutBuffSize);
    AllocMem(&GpcReq, InBuffSize);

    if (GpcReq && GpcRes) {

        GpcReq->ClientHandle = pGpcClient->GpcHandle;
        GpcReq->EnumHandle = *pEnumHandle;
        GpcReq->CfInfoCount = *pFlowCount;

        Status = DeviceControl(pGlobals->GpcFileHandle,
                               NULL,
                               NULL,
                               NULL,
                               &IoStatBlock,
                               IOCTL_GPC_ENUM_CFINFO,
                               GpcReq,
                               InBuffSize,
                               GpcRes,
                               OutBuffSize);

        if (!ERROR_FAILED(Status)) {

            Status = MapNtStatus2WinError(GpcRes->Status);

            IF_DEBUG(IOCTLS) {
                WSPRINT(("IoEnumerateFlows: GpcRes returned=0x%X mapped to =0x%X\n",
                         GpcRes->Status, Status));
            }

            if (!ERROR_FAILED(Status)) {

                *pEnumHandle = GpcRes->EnumHandle;
                *pFlowCount = GpcRes->TotalCfInfo;
                *pBufSize = (ULONG)IoStatBlock.Information -
                            FIELD_OFFSET(GPC_ENUM_CFINFO_RES, EnumBuffer);
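                //
                // IoStatBlock.Information holds the number of bytes the
                // GPC wrote into GpcRes; subtracting the fixed header
                // leaves the size of the enumeration data returned in
                // *ppBuffer.
                //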
                *ppBuffer = GpcRes;
            }
        }

    } else {

        Status = ERROR_NOT_ENOUGH_MEMORY;
    }

    if (GpcReq)
        FreeMem(GpcReq);

    if (ERROR_FAILED(Status)) {

        //
        // free GpcRes only if there was an error
        //

        if (GpcRes)
            FreeMem(GpcRes);
    }

    IF_DEBUG(IOCTLS) {
        WSPRINT(("<==IoEnumerateFlows: Status=0x%X\n",
                 Status));
    }

    return Status;
}