Leaked source code of windows server 2003
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1527 lines
39 KiB

  1. /*++
  2. Copyright (c) 1996 Microsoft Corporation
  3. Module Name:
  4. tckrnl.c
  5. Abstract:
  6. This module contains routines that talk to the kernel
  7. Author:
  8. Jim Stewart (jstew) August 14, 1996
  9. Revision History:
  10. Ofer Bar (oferbar) Oct 1, 1997
  11. --*/
  12. #include "precomp.h"
  13. #pragma hdrstop
//
// we use this mutex to synchronize start up with other traffic.dll's
//
const UCHAR TrafficSyncMutex[] = "_TRAFFIC_CTL_MUTEX";

//
// GPC flow-close notification thread state (see StartGpcNotifyThread /
// StopGpcNotifyThread).  hGpcNotifyThread is kept non-NULL after a
// successful start purely as a "thread was started" flag.
//
HANDLE hGpcNotifyThread = NULL;
HANDLE hGpcNotifyStopEvent = NULL;
HANDLE hGpcNotifyThreadStoppedEvent = NULL;
  21. DWORD
  22. IoAddFlow(
  23. IN PFLOW_STRUC pFlow,
  24. IN BOOLEAN Async
  25. )
  26. /*++
  27. Routine Description:
  28. This procedure builds up the structure necessary to add a flow.
  29. Arguments:
  30. Return Value:
  31. status
  32. --*/
  33. {
  34. DWORD Status = NO_ERROR;
  35. PCLIENT_STRUC pClient = pFlow->pInterface->pClient;
  36. PTC_IFC pTcIfc = pFlow->pInterface->pTcIfc;
  37. PCF_INFO_QOS Kflow;
  38. PGPC_ADD_CF_INFO_REQ GpcReq;
  39. PGPC_ADD_CF_INFO_RES GpcRes;
  40. ULONG InBuffSize;
  41. ULONG OutBuffSize;
  42. ULONG CfInfoSize;
  43. PIO_APC_ROUTINE pCbRoutine = NULL;
  44. ULONG l;
  45. HANDLE hEvent = NULL;
  46. //
  47. // allocate memory for a CF_INFO struct to be passed to the GPC.
  48. //
  49. ASSERT(pFlow->pGenFlow);
  50. pFlow->GpcHandle = NULL;
  51. l = pFlow->GenFlowLen;
  52. ASSERT(l > 0);
  53. CfInfoSize = l + FIELD_OFFSET(CF_INFO_QOS, GenFlow);
  54. InBuffSize = sizeof(GPC_ADD_CF_INFO_REQ) + CfInfoSize;
  55. //
  56. // And for the return info...
  57. //
  58. OutBuffSize = sizeof(GPC_ADD_CF_INFO_RES);
  59. AllocMem(&GpcRes, OutBuffSize);
  60. pFlow->CompletionBuffer = (PVOID)GpcRes;
  61. AllocMem(&GpcReq, InBuffSize);
  62. if (GpcRes && GpcReq) {
  63. RtlZeroMemory(GpcRes, OutBuffSize);
  64. RtlZeroMemory(GpcReq, InBuffSize);
  65. //
  66. // fill in the flow information
  67. //
  68. GpcReq->ClientHandle = pFlow->pGpcClient->GpcHandle;
  69. GpcReq->ClientCfInfoContext = pFlow;
  70. GpcReq->CfInfoSize = CfInfoSize;
  71. Kflow = (PCF_INFO_QOS)&GpcReq->CfInfo;
  72. //
  73. // fill the instance name
  74. //
  75. Kflow->InstanceNameLength = (USHORT) pTcIfc->InstanceNameLength;
  76. RtlCopyMemory(Kflow->InstanceName,
  77. pTcIfc->InstanceName,
  78. pTcIfc->InstanceNameLength * sizeof(WCHAR));
  79. //
  80. // set the flow flags
  81. //
  82. Kflow->Flags = pFlow->UserFlags;
  83. //
  84. // copy the generic flow parameter
  85. //
  86. RtlCopyMemory(&Kflow->GenFlow,
  87. pFlow->pGenFlow,
  88. l);
  89. if (pClient->ClHandlers.ClAddFlowCompleteHandler && Async) {
  90. pCbRoutine = CbAddFlowComplete;
  91. } else {
  92. hEvent = pFlow->PendingEvent;
  93. }
  94. Status = DeviceControl( pGlobals->GpcFileHandle,
  95. hEvent,
  96. pCbRoutine,
  97. (PVOID)pFlow,
  98. &pFlow->IoStatBlock,
  99. IOCTL_GPC_ADD_CF_INFO,
  100. GpcReq,
  101. InBuffSize,
  102. GpcRes,
  103. OutBuffSize);
  104. if (!ERROR_FAILED(Status)) {
  105. if (hEvent && Status == ERROR_SIGNAL_PENDING) {
  106. //
  107. // wait for the event to signal
  108. //
  109. IF_DEBUG(IOCTLS) {
  110. WSPRINT(("IoAddFlow: Waiting for event 0x%X...\n",
  111. PtrToUlong(hEvent)));
  112. }
  113. Status = WaitForSingleObject(hEvent,
  114. INFINITE
  115. );
  116. IF_DEBUG(IOCTLS) {
  117. WSPRINT(("IoAddFlow: ... Event 0x%X signaled, Status=0x%X\n",
  118. PtrToUlong(hEvent), Status));
  119. }
  120. }
  121. if (Status == NO_ERROR) {
  122. Status = MapNtStatus2WinError(GpcRes->Status);
  123. IF_DEBUG(IOCTLS) {
  124. WSPRINT(("IoAddFlow: GpcRes returned=0x%X mapped to =0x%X\n",
  125. GpcRes->Status, Status));
  126. }
  127. }
  128. if (ERROR_SUCCESS == Status) {
  129. ASSERT(GpcRes->GpcCfInfoHandle);
  130. pFlow->GpcHandle = GpcRes->GpcCfInfoHandle;
  131. pFlow->InstanceNameLength = GpcRes->InstanceNameLength;
  132. RtlCopyMemory(pFlow->InstanceName,
  133. GpcRes->InstanceName,
  134. GpcRes->InstanceNameLength
  135. );
  136. pFlow->InstanceName[pFlow->InstanceNameLength/sizeof(WCHAR)] = L'\0';
  137. IF_DEBUG(IOCTLS) {
  138. WSPRINT(("IoAddFlow: Flow Handle=%d Name=%S\n",
  139. pFlow->GpcHandle,
  140. pFlow->InstanceName));
  141. }
  142. }
  143. }
  144. } else {
  145. Status = ERROR_NOT_ENOUGH_MEMORY;
  146. }
  147. //
  148. // No, it's not a bug
  149. // GpcRes will be release in CompleteAddFlow
  150. //
  151. if (GpcReq)
  152. FreeMem(GpcReq);
  153. IF_DEBUG(IOCTLS) {
  154. WSPRINT(("<==IoAddFlow: Status=0x%X\n",
  155. Status));
  156. }
  157. return Status;
  158. }
  159. DWORD
  160. IoAddClassMapFlow(
  161. IN PFLOW_STRUC pFlow,
  162. IN BOOLEAN Async
  163. )
  164. /*++
  165. Routine Description:
  166. This procedure builds up the structure necessary to add a flow.
  167. Arguments:
  168. Return Value:
  169. status
  170. --*/
  171. {
  172. DWORD Status = NO_ERROR;
  173. PCLIENT_STRUC pClient = pFlow->pInterface->pClient;
  174. PTC_IFC pTcIfc = pFlow->pInterface->pTcIfc;
  175. PCF_INFO_CLASS_MAP Kflow;
  176. PGPC_ADD_CF_INFO_REQ GpcReq;
  177. PGPC_ADD_CF_INFO_RES GpcRes;
  178. ULONG InBuffSize;
  179. ULONG OutBuffSize;
  180. ULONG CfInfoSize;
  181. PIO_APC_ROUTINE pCbRoutine = NULL;
  182. ULONG l;
  183. HANDLE hEvent = NULL;
  184. return ERROR_CALL_NOT_IMPLEMENTED;
  185. #if NEVER
  186. // As this is not published in MSDN and not implemented in PSCHED also
  187. //
  188. // allocate memory for a CF_INFO struct to be passed to the GPC.
  189. //
  190. ASSERT(pFlow->pClassMapFlow);
  191. pFlow->GpcHandle = NULL;
  192. l = sizeof(TC_CLASS_MAP_FLOW) + pFlow->pClassMapFlow->ObjectsLength;
  193. CfInfoSize = l + FIELD_OFFSET(CF_INFO_CLASS_MAP, ClassMapInfo);
  194. InBuffSize = sizeof(GPC_ADD_CF_INFO_REQ) + CfInfoSize;
  195. //
  196. // And for the return info...
  197. //
  198. OutBuffSize = sizeof(GPC_ADD_CF_INFO_RES);
  199. AllocMem(&GpcRes, OutBuffSize);
  200. pFlow->CompletionBuffer = (PVOID)GpcRes;
  201. AllocMem(&GpcReq, InBuffSize);
  202. if (GpcRes && GpcReq) {
  203. RtlZeroMemory(GpcRes, OutBuffSize);
  204. RtlZeroMemory(GpcReq, InBuffSize);
  205. //
  206. // fill in the flow information
  207. //
  208. GpcReq->ClientHandle = pFlow->pGpcClient->GpcHandle;
  209. GpcReq->ClientCfInfoContext = pFlow;
  210. GpcReq->CfInfoSize = CfInfoSize;
  211. Kflow = (PCF_INFO_CLASS_MAP)&GpcReq->CfInfo;
  212. //
  213. // fill the instance name
  214. //
  215. Kflow->InstanceNameLength = (USHORT) pTcIfc->InstanceNameLength;
  216. RtlCopyMemory(Kflow->InstanceName,
  217. pTcIfc->InstanceName,
  218. pTcIfc->InstanceNameLength * sizeof(WCHAR));
  219. //
  220. // copy the generic flow parameter
  221. //
  222. RtlCopyMemory(&Kflow->ClassMapInfo,
  223. pFlow->pClassMapFlow,
  224. l);
  225. if (pClient->ClHandlers.ClAddFlowCompleteHandler && Async) {
  226. pCbRoutine = CbAddFlowComplete;
  227. } else {
  228. hEvent = pFlow->PendingEvent;
  229. }
  230. Status = DeviceControl( pGlobals->GpcFileHandle,
  231. hEvent,
  232. pCbRoutine,
  233. (PVOID)pFlow,
  234. &pFlow->IoStatBlock,
  235. IOCTL_GPC_ADD_CF_INFO,
  236. GpcReq,
  237. InBuffSize,
  238. GpcRes,
  239. OutBuffSize);
  240. if (!ERROR_FAILED(Status)) {
  241. if (hEvent && Status == ERROR_SIGNAL_PENDING) {
  242. //
  243. // wait for the event to signal
  244. //
  245. IF_DEBUG(IOCTLS) {
  246. WSPRINT(("IoAddClassMapFlow: Waiting for event 0x%X...\n",
  247. PtrToUlong(hEvent)));
  248. }
  249. Status = WaitForSingleObject(hEvent,
  250. INFINITE
  251. );
  252. IF_DEBUG(IOCTLS) {
  253. WSPRINT(("IoAddClassMapFlow: ... Event 0x%X signaled, Status=0x%X\n",
  254. PtrToUlong(hEvent), Status));
  255. }
  256. }
  257. if (Status == NO_ERROR) {
  258. Status = MapNtStatus2WinError(GpcRes->Status);
  259. IF_DEBUG(IOCTLS) {
  260. WSPRINT(("IoAddFlow: GpcRes returned=0x%X mapped to =0x%X\n",
  261. GpcRes->Status, Status));
  262. }
  263. }
  264. if (!ERROR_FAILED(Status)) {
  265. ASSERT(GpcRes->GpcCfInfoHandle);
  266. pFlow->GpcHandle = GpcRes->GpcCfInfoHandle;
  267. pFlow->InstanceNameLength = GpcRes->InstanceNameLength;
  268. RtlCopyMemory(pFlow->InstanceName,
  269. GpcRes->InstanceName,
  270. GpcRes->InstanceNameLength
  271. );
  272. pFlow->InstanceName[pFlow->InstanceNameLength/sizeof(WCHAR)] = L'\0';
  273. IF_DEBUG(IOCTLS) {
  274. WSPRINT(("IoAddClassMapFlow: Flow Handle=%d Name=%S\n",
  275. pFlow->GpcHandle,
  276. pFlow->InstanceName));
  277. }
  278. }
  279. }
  280. } else {
  281. Status = ERROR_NOT_ENOUGH_MEMORY;
  282. }
  283. //
  284. // No, it's not a bug
  285. // GpcRes will be release in CompleteAddFlow
  286. //
  287. if (GpcReq)
  288. FreeMem(GpcReq);
  289. IF_DEBUG(IOCTLS) {
  290. WSPRINT(("<==IoAddClassMapFlow: Status=0x%X\n",
  291. Status));
  292. }
  293. return Status;
  294. #endif
  295. }
  296. DWORD
  297. IoModifyFlow(
  298. IN PFLOW_STRUC pFlow,
  299. IN BOOLEAN Async
  300. )
  301. /*++
  302. Routine Description:
  303. This procedure builds up the structure necessary to modify a flow.
  304. Arguments:
  305. pFlow
  306. Return Value:
  307. status
  308. --*/
  309. {
  310. DWORD Status = NO_ERROR;
  311. PCLIENT_STRUC pClient = pFlow->pInterface->pClient;
  312. PTC_IFC pTcIfc = pFlow->pInterface->pTcIfc;
  313. PCF_INFO_QOS Kflow;
  314. PGPC_MODIFY_CF_INFO_REQ GpcReq;
  315. PGPC_MODIFY_CF_INFO_RES GpcRes;
  316. ULONG InBuffSize;
  317. ULONG OutBuffSize;
  318. ULONG CfInfoSize;
  319. PIO_APC_ROUTINE pCbRoutine = NULL;
  320. ULONG l;
  321. HANDLE hEvent = NULL;
  322. //
  323. // allocate memory for a CF_INFO struct to be passed to the GPC.
  324. //
  325. ASSERT(pFlow->pGenFlow1);
  326. l = pFlow->GenFlowLen1;
  327. ASSERT(l > 0);
  328. CfInfoSize = l + FIELD_OFFSET(CF_INFO_QOS, GenFlow);
  329. InBuffSize = sizeof(GPC_MODIFY_CF_INFO_REQ) + CfInfoSize;
  330. //
  331. // And for the return info...
  332. //
  333. OutBuffSize = sizeof(GPC_MODIFY_CF_INFO_RES);
  334. AllocMem(&GpcRes, OutBuffSize);
  335. pFlow->CompletionBuffer = (PVOID)GpcRes;
  336. AllocMem(&GpcReq, InBuffSize);
  337. if (GpcRes && GpcReq) {
  338. RtlZeroMemory(GpcRes, OutBuffSize);
  339. RtlZeroMemory(GpcReq, InBuffSize);
  340. //
  341. // fill in the flow information
  342. //
  343. GpcReq->ClientHandle = pFlow->pGpcClient->GpcHandle;
  344. GpcReq->GpcCfInfoHandle = pFlow->GpcHandle;
  345. GpcReq->CfInfoSize = CfInfoSize;
  346. Kflow = (PCF_INFO_QOS)&GpcReq->CfInfo;
  347. //
  348. // fill the instance name
  349. //
  350. Kflow->InstanceNameLength = (USHORT) pTcIfc->InstanceNameLength;
  351. RtlCopyMemory(Kflow->InstanceName,
  352. pTcIfc->InstanceName,
  353. pTcIfc->InstanceNameLength * sizeof(WCHAR));
  354. //
  355. // copy the generic flow parameter
  356. //
  357. RtlCopyMemory(&Kflow->GenFlow,
  358. pFlow->pGenFlow1,
  359. l);
  360. if (pClient->ClHandlers.ClModifyFlowCompleteHandler && Async) {
  361. pCbRoutine = CbModifyFlowComplete;
  362. } else {
  363. hEvent = pFlow->PendingEvent;
  364. }
  365. Status = DeviceControl( pGlobals->GpcFileHandle,
  366. hEvent,
  367. pCbRoutine,
  368. (PVOID)pFlow,
  369. &pFlow->IoStatBlock,
  370. IOCTL_GPC_MODIFY_CF_INFO,
  371. GpcReq,
  372. InBuffSize,
  373. GpcRes,
  374. OutBuffSize);
  375. if (!ERROR_FAILED(Status)) {
  376. if (hEvent && Status == ERROR_SIGNAL_PENDING) {
  377. //
  378. // wait for the event to signal
  379. //
  380. IF_DEBUG(IOCTLS) {
  381. WSPRINT(("IoModifyFlow: Waiting for event 0x%X\n",
  382. PtrToUlong(hEvent)));
  383. }
  384. Status = WaitForSingleObject(hEvent,
  385. INFINITE
  386. );
  387. IF_DEBUG(IOCTLS) {
  388. WSPRINT(("IoModifyFlow: ... Event 0x%X signaled, Status=0x%X\n",
  389. PtrToUlong(hEvent), Status));
  390. }
  391. }
  392. if (Status == NO_ERROR) {
  393. Status = MapNtStatus2WinError(GpcRes->Status);
  394. IF_DEBUG(IOCTLS) {
  395. WSPRINT(("IoModifyFlow: GpcRes returned=0x%X mapped to =0x%X\n",
  396. GpcRes->Status, Status));
  397. }
  398. }
  399. } else{
  400. Status = MapNtStatus2WinError(GpcRes->Status);
  401. IF_DEBUG(IOCTLS) {
  402. WSPRINT(("IoModifyFlow: GpcRes returned=0x%X mapped to =0x%X\n",
  403. GpcRes->Status, Status));
  404. }
  405. }
  406. } else {
  407. Status = ERROR_NOT_ENOUGH_MEMORY;
  408. }
  409. //
  410. // No, it's not a bug
  411. // GpcRes will be release in CompleteModifyFlow
  412. //
  413. if (GpcReq)
  414. FreeMem(GpcReq);
  415. IF_DEBUG(IOCTLS) {
  416. WSPRINT(("IoModifyFlow: Status=0x%X\n",
  417. Status));
  418. }
  419. return Status;
  420. }
  421. DWORD
  422. IoDeleteFlow(
  423. IN PFLOW_STRUC pFlow,
  424. IN BOOLEAN Async
  425. )
  426. /*++
  427. Routine Description:
  428. This procedure builds up the structure necessary to delete a flow.
  429. It then calls a routine to pass this info to the GPC.
  430. Arguments:
  431. pFlow
  432. Return Value:
  433. status
  434. --*/
  435. {
  436. DWORD Status;
  437. ULONG InBuffSize;
  438. ULONG OutBuffSize;
  439. PGPC_REMOVE_CF_INFO_REQ GpcReq;
  440. PGPC_REMOVE_CF_INFO_RES GpcRes;
  441. PIO_APC_ROUTINE pCbRoutine = NULL;
  442. PCLIENT_STRUC pClient = pFlow->pInterface->pClient;
  443. HANDLE hEvent = NULL;
  444. if (IS_REMOVED(pFlow->Flags)) {
  445. //
  446. // this flow has been already deleted in the kernel
  447. // due to a flow close notification.
  448. // no need to send IOTCL to GPC, just return OK
  449. //
  450. IF_DEBUG(IOCTLS) {
  451. WSPRINT(("IoDeleteFlow: Flow has already been deleted=0x%X\n",
  452. PtrToUlong(pFlow)));
  453. }
  454. return NO_ERROR;
  455. }
  456. //
  457. // If we add this over here, then if WMI deletes the flow,
  458. // the user mode call will just return above.
  459. //
  460. GetLock(pFlow->Lock);
  461. pFlow->Flags |= TC_FLAGS_REMOVED;
  462. FreeLock(pFlow->Lock);
  463. //
  464. // allocate memory for in and out buffers
  465. //
  466. InBuffSize = sizeof(GPC_REMOVE_CF_INFO_REQ);
  467. OutBuffSize = sizeof(GPC_REMOVE_CF_INFO_RES);
  468. AllocMem(&GpcRes, OutBuffSize);
  469. pFlow->CompletionBuffer = (PVOID)GpcRes;
  470. AllocMem(&GpcReq, InBuffSize);
  471. if (GpcReq && GpcRes){
  472. IF_DEBUG(IOCTLS) {
  473. WSPRINT(("IoDeleteFlow: preparing to delete the flow=0x%X\n",
  474. PtrToUlong(pFlow)));
  475. }
  476. GpcReq->ClientHandle = pFlow->pGpcClient->GpcHandle;
  477. GpcReq->GpcCfInfoHandle = pFlow->GpcHandle;
  478. if (pClient->ClHandlers.ClDeleteFlowCompleteHandler && Async) {
  479. pCbRoutine = CbDeleteFlowComplete;
  480. } else {
  481. hEvent = pFlow->PendingEvent;
  482. }
  483. Status = DeviceControl( pGlobals->GpcFileHandle,
  484. hEvent,
  485. pCbRoutine,
  486. (PVOID)pFlow,
  487. &pFlow->IoStatBlock,
  488. IOCTL_GPC_REMOVE_CF_INFO,
  489. GpcReq,
  490. InBuffSize,
  491. GpcRes,
  492. OutBuffSize);
  493. if (!ERROR_FAILED(Status)) {
  494. if (hEvent && Status == ERROR_SIGNAL_PENDING) {
  495. //
  496. // wait for the event to signal
  497. //
  498. IF_DEBUG(IOCTLS) {
  499. WSPRINT(("IoDeleteFlow: Waiting for event 0x%X\n",
  500. PtrToUlong(hEvent)));
  501. }
  502. Status = WaitForSingleObject(hEvent,
  503. INFINITE
  504. );
  505. IF_DEBUG(IOCTLS) {
  506. WSPRINT(("IoDeleteFlow: ... Event 0x%X signaled, Status=0x%X\n",
  507. PtrToUlong(hEvent), Status));
  508. }
  509. }
  510. if (Status == NO_ERROR) {
  511. Status = MapNtStatus2WinError(GpcRes->Status);
  512. IF_DEBUG(IOCTLS) {
  513. WSPRINT(("IoDeleteFlow: Gpc returned=0x%X mapped to 0x%X\n",
  514. GpcRes->Status, Status));
  515. }
  516. //
  517. // If the deletion was unsuccessful, let's un-mark the REMOVED flag.
  518. //
  519. if (ERROR_FAILED(Status)) {
  520. GetLock(pFlow->Lock);
  521. pFlow->Flags &= ~TC_FLAGS_REMOVED;
  522. FreeLock(pFlow->Lock);
  523. }
  524. }
  525. }
  526. } else {
  527. Status = ERROR_NOT_ENOUGH_MEMORY;
  528. }
  529. //
  530. // No, it's not a bug
  531. // GpcRes will be release in CompleteDeleteFlow
  532. //
  533. if (GpcReq)
  534. FreeMem(GpcReq);
  535. IF_DEBUG(IOCTLS) {
  536. WSPRINT(("<==IoDeleteFlow: Status=0x%X\n",
  537. Status));
  538. }
  539. return Status;
  540. }
  541. DWORD
  542. IoAddFilter(
  543. IN PFILTER_STRUC pFilter
  544. )
  545. /*++
  546. Routine Description:
  547. This procedure builds up the structure necessary to add a filter.
  548. It then calls a routine to pass this info to the GPC.
  549. Arguments:
  550. pFilter
  551. Return Value:
  552. status
  553. --*/
  554. {
  555. DWORD Status;
  556. PGPC_ADD_PATTERN_REQ GpcReq;
  557. PGPC_ADD_PATTERN_RES GpcRes;
  558. ULONG InBuffSize;
  559. ULONG OutBuffSize;
  560. PFLOW_STRUC pFlow = pFilter->pFlow;
  561. PTC_GEN_FILTER pGpcFilter = pFilter->pGpcFilter;
  562. PUCHAR p;
  563. ULONG PatternSize;
  564. IO_STATUS_BLOCK IoStatBlock;
  565. pFilter->GpcHandle = NULL;
  566. ASSERT(pGpcFilter);
  567. ASSERT(pFlow);
  568. PatternSize = pGpcFilter->PatternSize;
  569. InBuffSize = sizeof(GPC_ADD_PATTERN_REQ) + 2*PatternSize;
  570. OutBuffSize = sizeof(GPC_ADD_PATTERN_RES);
  571. AllocMem(&GpcReq, InBuffSize);
  572. AllocMem(&GpcRes, OutBuffSize);
  573. if (GpcReq && GpcRes){
  574. IF_DEBUG(IOCTLS) {
  575. WSPRINT(("IoAddFilter: Filling request: size: in=%d, out=%d\n",
  576. InBuffSize, OutBuffSize));
  577. }
  578. GpcReq->ClientHandle = pFlow->pGpcClient->GpcHandle;
  579. GpcReq->GpcCfInfoHandle = pFlow->GpcHandle;
  580. GpcReq->ClientPatternContext = (GPC_CLIENT_HANDLE)pFilter;
  581. GpcReq->Priority = 0;
  582. GpcReq->PatternSize = PatternSize;
  583. GpcReq->ProtocolTemplate = pFilter->GpcProtocolTemplate;
  584. //
  585. // fill in the pattern
  586. //
  587. p = (PUCHAR)&GpcReq->PatternAndMask;
  588. RtlCopyMemory(p, pGpcFilter->Pattern, PatternSize);
  589. //
  590. // fill in the mask
  591. //
  592. p += PatternSize;
  593. RtlCopyMemory(p, pGpcFilter->Mask, PatternSize);
  594. Status = DeviceControl( pGlobals->GpcFileHandle,
  595. NULL,
  596. NULL,
  597. NULL,
  598. &IoStatBlock,
  599. IOCTL_GPC_ADD_PATTERN,
  600. GpcReq,
  601. InBuffSize,
  602. GpcRes,
  603. OutBuffSize);
  604. if (!ERROR_FAILED(Status)) {
  605. Status = MapNtStatus2WinError(GpcRes->Status);
  606. IF_DEBUG(IOCTLS) {
  607. WSPRINT(("IoAddFilter: GpcRes returned=0x%X mapped to =0x%X\n",
  608. GpcRes->Status, Status));
  609. }
  610. //
  611. // save the filter handle
  612. //
  613. if (!ERROR_FAILED(Status)) {
  614. pFilter->GpcHandle = GpcRes->GpcPatternHandle;
  615. } else {
  616. IF_DEBUG(IOCTLS) {
  617. WSPRINT(("IoAddFilter: GpcRes returned=0x%X mapped to =0x%X\n",
  618. GpcRes->Status, Status));
  619. }
  620. IF_DEBUG(IOCTLS) {
  621. WSPRINT(("Error - failed the addfilter call\n"));
  622. }
  623. //ASSERT(Status == ERROR_DUPLICATE_FILTER); removed for WAN - interface up down situation
  624. }
  625. }
  626. } else {
  627. Status = ERROR_NOT_ENOUGH_MEMORY;
  628. IF_DEBUG(ERRORS) {
  629. WSPRINT(("IoAddFilter: Error =0x%X\n",
  630. Status));
  631. }
  632. }
  633. if (GpcReq)
  634. FreeMem(GpcReq);
  635. if (GpcRes)
  636. FreeMem(GpcRes);
  637. IF_DEBUG(IOCTLS) {
  638. WSPRINT(("<==IoAddFilter: Returned =0x%X\n",
  639. Status));
  640. }
  641. return Status;
  642. }
  643. DWORD
  644. IoDeleteFilter(
  645. IN PFILTER_STRUC pFilter
  646. )
  647. /*++
  648. Routine Description:
  649. This procedure builds up the structure necessary to delete a filter.
  650. It then calls a routine to pass this info to the GPC.
  651. Arguments:
  652. pFilter
  653. Return Value:
  654. status
  655. --*/
  656. {
  657. DWORD Status;
  658. ULONG InBuffSize;
  659. ULONG OutBuffSize;
  660. GPC_REMOVE_PATTERN_REQ GpcReq;
  661. GPC_REMOVE_PATTERN_RES GpcRes;
  662. IO_STATUS_BLOCK IoStatBlock;
  663. //
  664. // allocate memory for in and out buffers
  665. //
  666. if (IS_REMOVED(pFilter->Flags)) {
  667. //
  668. // this filter has been already deleted in the kernel
  669. // due to a flow close notification.
  670. // no need to send IOTCL to GPC, just return OK
  671. //
  672. IF_DEBUG(IOCTLS) {
  673. WSPRINT(("IoDeleteFilter: Filter has already been deleted=0x%X\n",
  674. PtrToUlong(pFilter)));
  675. }
  676. return NO_ERROR;
  677. }
  678. //
  679. // If we add this over here, then if WMI deletes the Interface (and the
  680. // flows/filters) the user mode call will just return above.
  681. //
  682. GetLock(pFilter->Lock);
  683. pFilter->Flags |= TC_FLAGS_REMOVED;
  684. FreeLock(pFilter->Lock);
  685. InBuffSize = sizeof(GPC_REMOVE_PATTERN_REQ);
  686. OutBuffSize = sizeof(GPC_REMOVE_PATTERN_RES);
  687. GpcReq.ClientHandle = pFilter->pFlow->pGpcClient->GpcHandle;
  688. GpcReq.GpcPatternHandle = pFilter->GpcHandle;
  689. ASSERT(GpcReq.ClientHandle);
  690. ASSERT(GpcReq.GpcPatternHandle);
  691. Status = DeviceControl( pGlobals->GpcFileHandle,
  692. NULL,
  693. NULL,
  694. NULL,
  695. &IoStatBlock,
  696. IOCTL_GPC_REMOVE_PATTERN,
  697. &GpcReq,
  698. InBuffSize,
  699. &GpcRes,
  700. OutBuffSize
  701. );
  702. if (!ERROR_FAILED(Status)) {
  703. Status = MapNtStatus2WinError(GpcRes.Status);
  704. IF_DEBUG(IOCTLS) {
  705. WSPRINT(("IoDeleteFilter: GpcRes returned=0x%X mapped to =0x%X\n",
  706. GpcRes.Status, Status));
  707. }
  708. //
  709. // If the deletion was unsuccessful, let's un-mark the REMOVED flag.
  710. //
  711. if (ERROR_FAILED(Status)) {
  712. GetLock(pFilter->Lock);
  713. pFilter->Flags &= ~TC_FLAGS_REMOVED;
  714. FreeLock(pFilter->Lock);
  715. }
  716. }
  717. IF_DEBUG(IOCTLS) {
  718. WSPRINT(("<==IoDeleteFilter: Status=0x%X\n",
  719. Status));
  720. }
  721. return Status;
  722. }
  723. DWORD
  724. IoRegisterClient(
  725. IN PGPC_CLIENT pGpcClient
  726. )
  727. {
  728. DWORD Status;
  729. GPC_REGISTER_CLIENT_REQ GpcReq;
  730. GPC_REGISTER_CLIENT_RES GpcRes;
  731. ULONG InBuffSize;
  732. ULONG OutBuffSize;
  733. IO_STATUS_BLOCK IoStatBlock;
  734. InBuffSize = sizeof(GPC_REGISTER_CLIENT_REQ);
  735. OutBuffSize = sizeof(GPC_REGISTER_CLIENT_RES);
  736. GpcReq.CfId = pGpcClient->CfInfoType;
  737. GpcReq.Flags = GPC_FLAGS_FRAGMENT;
  738. GpcReq.MaxPriorities = 1;
  739. GpcReq.ClientContext =
  740. (GPC_CLIENT_HANDLE)UlongToPtr(GetCurrentProcessId()); // process id
  741. Status = DeviceControl( pGlobals->GpcFileHandle,
  742. NULL,
  743. NULL,
  744. NULL,
  745. &IoStatBlock,
  746. IOCTL_GPC_REGISTER_CLIENT,
  747. &GpcReq,
  748. InBuffSize,
  749. &GpcRes,
  750. OutBuffSize );
  751. if (!ERROR_FAILED(Status)) {
  752. Status = MapNtStatus2WinError(GpcRes.Status);
  753. pGpcClient->GpcHandle = GpcRes.ClientHandle;
  754. IF_DEBUG(IOCTLS) {
  755. WSPRINT(("IoRegisterClient: GpcRes returned=0x%X mapped to =0x%X\n",
  756. GpcRes.Status, Status));
  757. }
  758. }
  759. IF_DEBUG(IOCTLS) {
  760. WSPRINT(("<==IoRegisterClient: Status=0x%X\n",
  761. Status));
  762. }
  763. return Status;
  764. }
  765. DWORD
  766. IoDeregisterClient(
  767. IN PGPC_CLIENT pGpcClient
  768. )
  769. {
  770. DWORD Status;
  771. GPC_DEREGISTER_CLIENT_REQ GpcReq;
  772. GPC_DEREGISTER_CLIENT_RES GpcRes;
  773. ULONG InBuffSize;
  774. ULONG OutBuffSize;
  775. IO_STATUS_BLOCK IoStatBlock;
  776. InBuffSize = sizeof(GPC_DEREGISTER_CLIENT_REQ);
  777. OutBuffSize = sizeof(GPC_DEREGISTER_CLIENT_RES);
  778. GpcReq.ClientHandle = pGpcClient->GpcHandle;
  779. Status = DeviceControl( pGlobals->GpcFileHandle,
  780. NULL,
  781. NULL,
  782. NULL,
  783. &IoStatBlock,
  784. IOCTL_GPC_DEREGISTER_CLIENT,
  785. &GpcReq,
  786. InBuffSize,
  787. &GpcRes,
  788. OutBuffSize
  789. );
  790. if (!ERROR_FAILED(Status)) {
  791. Status = MapNtStatus2WinError(GpcRes.Status);
  792. IF_DEBUG(IOCTLS) {
  793. WSPRINT(("IoDeegisterClient: GpcRes returned=0x%X mapped to =0x%X\n",
  794. GpcRes.Status, Status));
  795. }
  796. }
  797. IF_DEBUG(IOCTLS) {
  798. WSPRINT(("<==IoDeregisterClient: Status=0x%X\n",
  799. Status));
  800. }
  801. return Status;
  802. }
// Completion buffer for the outstanding GPC notify request; non-NULL
// while an IOCTL_GPC_NOTIFY_REQUEST IRP is pending (set in
// IoRequestNotify, released in CancelIoRequestNotify).
PGPC_NOTIFY_REQUEST_RES GpcResCb;
  804. DWORD
  805. IoRequestNotify(
  806. VOID
  807. //IN PGPC_CLIENT pGpcClient
  808. )
  809. /*
  810. Description:
  811. This routine sends a notification request buffer to the GPC.
  812. The request will pend until the GPC notifies about a flow
  813. being deleted. This will cause a callback to CbGpcNotifyRoutine.
  814. */
  815. {
  816. DWORD Status;
  817. ULONG OutBuffSize;
  818. //
  819. // allocate memory for in and out buffers
  820. //
  821. OutBuffSize = sizeof(GPC_NOTIFY_REQUEST_RES);
  822. AllocMem(&GpcResCb, OutBuffSize);
  823. if (GpcResCb){
  824. Status = DeviceControl( pGlobals->GpcFileHandle,
  825. NULL,
  826. CbGpcNotifyRoutine,
  827. (PVOID)GpcResCb,
  828. &GpcResCb->IoStatBlock,
  829. IOCTL_GPC_NOTIFY_REQUEST,
  830. NULL, //GpcReq,
  831. 0, //InBuffSize,
  832. GpcResCb,
  833. OutBuffSize);
  834. if (ERROR_FAILED(Status)) {
  835. FreeMem(GpcResCb);
  836. GpcResCb = NULL;
  837. }
  838. else if ( ERROR_PENDING(Status) )
  839. {
  840. Status = NO_ERROR;
  841. }
  842. } else {
  843. Status = ERROR_NOT_ENOUGH_MEMORY;
  844. }
  845. IF_DEBUG(IOCTLS) {
  846. WSPRINT(("<==IoRequestNotify: Buffer=%p Status=0x%X\n",
  847. GpcResCb, Status));
  848. }
  849. return Status;
  850. }
  851. VOID
  852. CancelIoRequestNotify()
  853. /*
  854. Description:
  855. This routine cancels the IRP in GPC and waits for the pending
  856. IO to be cancelled. The callback routine set an event when
  857. IO request is canclled and this routine waits for that event
  858. before returning.
  859. */
  860. {
  861. // Non-zero value of GpcResCb indicates a pending IRP
  862. if (GpcResCb)
  863. {
  864. GpcCancelEvent = CreateEvent (
  865. NULL,
  866. FALSE,
  867. FALSE,
  868. NULL );
  869. if ( CancelIo ( pGlobals->GpcFileHandle ) )
  870. {
  871. if ( GpcCancelEvent )
  872. {
  873. WaitForSingleObjectEx(
  874. GpcCancelEvent,
  875. INFINITE,
  876. TRUE );
  877. CloseHandle ( GpcCancelEvent );
  878. GpcCancelEvent = NULL;
  879. }
  880. else
  881. {
  882. IF_DEBUG(IOCTLS) {
  883. WSPRINT((
  884. "CancelIo: Status=0x%X\n",
  885. GetLastError() ));
  886. }
  887. }
  888. }
  889. FreeMem(GpcResCb);
  890. IF_DEBUG(IOCTLS)
  891. {
  892. WSPRINT(("<==CancelIoRequestNotify: Freed %p\n",
  893. GpcResCb ));
  894. }
  895. }
  896. return;
  897. }
  898. void
  899. IncrementLibraryUsageCount(
  900. HINSTANCE hinst,
  901. int nCount)
  902. /*
  903. Utility routine to increment the ref count on
  904. the TRAFFIC.DLL so that it will not get unloaded
  905. before the GPCNotify thread gets a chance to run.
  906. */
  907. {
  908. TCHAR szModuleName[_MAX_PATH];
  909. memset(szModuleName, 0, sizeof(TCHAR) * _MAX_PATH);
  910. GetModuleFileName(hinst, szModuleName, _MAX_PATH);
  911. szModuleName[_MAX_PATH - 1] = 0;
  912. while (nCount--)
  913. LoadLibrary(szModuleName);
  914. return;
  915. }
DWORD
GpcNotifyThreadFunction ()
/*
    This routine registers an IRP with GPC to listen for
    FLOW close notifications and waits for the stop event.
    When the event is signalled the IRP is canceled and this
    thread exits.
    Since the wait is done in an alertable state GPC callbacks
    are executed in this thread itself.

    NOTE(review): the signature does not match LPTHREAD_START_ROUTINE
    (DWORD WINAPI fn(LPVOID)); CreateThread callers cast it.  The
    thread never returns through the system stub (it exits via
    FreeLibraryAndExitThread below) -- confirm before changing.
*/
{
    DWORD dwError;
    // Queue the notify IRP; its APC completions run on this thread.
    dwError = IoRequestNotify();
    // Alertable wait: GPC notification callbacks are delivered here
    // until the stop event fires.
    WaitForSingleObjectEx(
        hGpcNotifyStopEvent,
        INFINITE,
        TRUE );
    CancelIoRequestNotify();
    SetEvent( hGpcNotifyThreadStoppedEvent );
    // Releases the DLL reference taken in StartGpcNotifyThread and
    // terminates this thread; this call does not return.
    FreeLibraryAndExitThread(
        hinstTrafficDll,
        0 );
    return 0;
}
  940. DWORD
  941. StartGpcNotifyThread()
  942. /*
  943. Description:
  944. This routine starts a thread which queues an IRP for
  945. GPC notifications.
  946. */
  947. {
  948. DWORD dwError = 0;
  949. DWORD dwThreadId = 0;
  950. // Increment the ref count on this DLL so it will not be unloaded
  951. // before the GpcNotifyThreadFunction gets to run
  952. IncrementLibraryUsageCount(
  953. hinstTrafficDll,
  954. 1);
  955. // Create the stop event for the thread to receive
  956. // GPC flow close notifications
  957. hGpcNotifyStopEvent = CreateEvent (
  958. NULL,
  959. FALSE,
  960. FALSE,
  961. NULL );
  962. if ( !hGpcNotifyStopEvent )
  963. {
  964. dwError = GetLastError();
  965. goto Error;
  966. }
  967. hGpcNotifyThreadStoppedEvent = CreateEvent (NULL,
  968. FALSE,
  969. FALSE,
  970. NULL );
  971. if ( !hGpcNotifyThreadStoppedEvent )
  972. {
  973. dwError = GetLastError();
  974. goto Error;
  975. }
  976. // Start the thread.
  977. hGpcNotifyThread = CreateThread(
  978. NULL,
  979. 0,
  980. (LPTHREAD_START_ROUTINE )GpcNotifyThreadFunction,
  981. NULL,
  982. 0,
  983. &dwThreadId );
  984. if ( !hGpcNotifyThread )
  985. {
  986. dwError = GetLastError();
  987. goto Error;
  988. }
  989. // Close the thread handle as we don't need it in any case. But, don't set
  990. // it to NULL because it is used as a check to figure out whether this
  991. // thread was started or not.
  992. CloseHandle ( hGpcNotifyThread );
  993. ASSERT(hGpcNotifyThread != NULL);
  994. // Not closing the thread handle as StopGpcNotifyThread
  995. // routine will use this handle to wait for thread to
  996. // terminate.
  997. return 0;
  998. Error:
  999. if ( hGpcNotifyStopEvent )
  1000. {
  1001. CloseHandle ( hGpcNotifyStopEvent );
  1002. hGpcNotifyStopEvent = NULL;
  1003. }
  1004. if ( hGpcNotifyThreadStoppedEvent ) {
  1005. CloseHandle ( hGpcNotifyThreadStoppedEvent );
  1006. hGpcNotifyThreadStoppedEvent = NULL;
  1007. }
  1008. if ( hGpcNotifyThread )
  1009. {
  1010. CloseHandle ( hGpcNotifyThread );
  1011. hGpcNotifyThread = NULL;
  1012. }
  1013. return dwError;
  1014. }
  1015. DWORD
  1016. StopGpcNotifyThread()
  1017. /*
  1018. Description:
  1019. Signal the GPC notification thread to stop
  1020. and wait it to stop.
  1021. */
  1022. {
  1023. // If there was no thread created nothing more to do.
  1024. if ( hGpcNotifyThread )
  1025. {
  1026. // Tell GPC Notify thread to stop
  1027. SetEvent ( hGpcNotifyStopEvent );
  1028. // Wait for it to stop
  1029. WaitForSingleObject (
  1030. hGpcNotifyThreadStoppedEvent,
  1031. INFINITE );
  1032. CloseHandle( hGpcNotifyThreadStoppedEvent );
  1033. hGpcNotifyThread = NULL;
  1034. CloseHandle ( hGpcNotifyStopEvent );
  1035. hGpcNotifyStopEvent = NULL;
  1036. }
  1037. return 0;
  1038. }
DWORD
IoEnumerateFlows(
    IN PGPC_CLIENT pGpcClient,
    IN OUT PHANDLE pEnumHandle,
    IN OUT PULONG pFlowCount,
    IN OUT PULONG pBufSize,
    OUT PGPC_ENUM_CFINFO_RES *ppBuffer
    )
/*
    Description:
    This routine enumerates installed flows through
    IOCTL_GPC_ENUM_CFINFO.  On input *pEnumHandle/*pFlowCount select
    where to resume and how many flows to fetch; on success they are
    updated for the next call, *pBufSize receives the size of the
    returned enumeration data, and *ppBuffer receives the result
    buffer, which the CALLER must free.
*/
{
    DWORD Status;
    ULONG InBuffSize;
    ULONG OutBuffSize;
    PGPC_ENUM_CFINFO_REQ GpcReq;
    PGPC_ENUM_CFINFO_RES GpcRes;
    IO_STATUS_BLOCK IoStatBlock;
    //
    // allocate memory for in and out buffers
    //
    InBuffSize = sizeof(GPC_ENUM_CFINFO_REQ);
    OutBuffSize = *pBufSize + FIELD_OFFSET(GPC_ENUM_CFINFO_RES,EnumBuffer);
    *ppBuffer = NULL;
    AllocMem(&GpcRes, OutBuffSize);
    AllocMem(&GpcReq, InBuffSize);
    if (GpcReq && GpcRes) {
        GpcReq->ClientHandle = pGpcClient->GpcHandle;
        GpcReq->EnumHandle = *pEnumHandle;
        GpcReq->CfInfoCount = *pFlowCount;
        Status = DeviceControl( pGlobals->GpcFileHandle,
                                NULL,
                                NULL,
                                NULL,
                                &IoStatBlock,
                                IOCTL_GPC_ENUM_CFINFO,
                                GpcReq,
                                InBuffSize,
                                GpcRes,
                                OutBuffSize);
        if (!ERROR_FAILED(Status)) {
            Status = MapNtStatus2WinError(GpcRes->Status);
            IF_DEBUG(IOCTLS) {
                WSPRINT(("IoEnumerateFlows: GpcRes returned=0x%X mapped to =0x%X\n",
                         GpcRes->Status, Status));
            }
            if (!ERROR_FAILED(Status)) {
                // hand the enumeration state and result buffer back
                // to the caller
                *pEnumHandle = GpcRes->EnumHandle;
                *pFlowCount = GpcRes->TotalCfInfo;
                *pBufSize = (ULONG)IoStatBlock.Information -
                            FIELD_OFFSET(GPC_ENUM_CFINFO_RES,EnumBuffer);
                *ppBuffer = GpcRes;
            }
        }
    } else {
        Status = ERROR_NOT_ENOUGH_MEMORY;
    }
    if (GpcReq)
        FreeMem(GpcReq);
    if (ERROR_FAILED(Status)) {
        //
        // free GpcRes only if there was an error; on success it is
        // returned through *ppBuffer and freed by the caller
        //
        if (GpcRes)
            FreeMem(GpcRes);
    }
    IF_DEBUG(IOCTLS) {
        WSPRINT(("<==IoEnumerateFlows: Status=0x%X\n",
                 Status));
    }
    return Status;
}