Leaked source code of Windows Server 2003

/*++

Copyright (c) 1998 Microsoft Corporation

Module Name:

    blockmgr.cpp

Abstract:

    This module contains the implementation of the block memory manager

Author:

    Keith Lau (keithlau@microsoft.com)

Revision History:

    keithlau    02/27/98    created

--*/

#include "windows.h"
#include "dbgtrace.h"
#include "filehc.h"
#include "signatur.h"
#include "blockmgr.h"

//
// I really wanted to keep the memory manager completely independent from
// the rest of the stuff, but I realized it makes more sense to have the
// memory manager be aware of the IMailMsgPropertyStream, so here goes ...
//
// If you remove CommitDirtyBlocks, you can get rid of the include below.
//
#include "mailmsg.h"

//
// A commit writes the entire stream, but possibly using several iterations.
// This specifies how many blocks to write in each iteration.
//
#define CMAILMSG_COMMIT_PAGE_BLOCK_SIZE     256

//
// Global (set by registry key) indicating that property pages should be
// filled with a byte pattern after allocation
//
extern DWORD g_fFillPropertyPages;

/***************************************************************************/
// Debug stuff
//
#ifndef _ASSERT
#define _ASSERT(x) if (!(x)) DebugBreak()
#endif

#ifdef DEBUG_TRACK_ALLOCATION_BOUNDARIES

HRESULT SetAllocationBoundary(
            FLAT_ADDRESS        faOffset,
            LPBLOCK_HEAP_NODE   pNode
            )
{
    DWORD dwBit;

    faOffset &= BLOCK_HEAP_PAYLOAD_MASK;
    faOffset >>= 2;
    dwBit = (DWORD)(faOffset & 7);
    faOffset >>= 3;
    pNode->stAttributes.rgbBoundaries[faOffset] |= (0x80 >> dwBit);
    return(S_OK);
}
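
//
// Worked example (illustrative): each bit in rgbBoundaries covers one
// payload DWORD, most-significant bit first. An allocation starting at
// payload offset 40 maps to DWORD index 40 >> 2 = 10, which lands in
// byte 10 >> 3 = 1 at bit 10 & 7 = 2, so the code above sets
// rgbBoundaries[1] |= (0x80 >> 2). VerifyAllocationBoundary below then
// treats any such start bit strictly inside a read/write range as a
// boundary violation.
//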

HRESULT VerifyAllocationBoundary(
            FLAT_ADDRESS        faOffset,
            DWORD               dwLength,
            LPBLOCK_HEAP_NODE   pNode
            )
{
    DWORD   dwStartingBit;
    DWORD   dwStartingByte;
    DWORD   dwBitsToScan;

    // 7f because we can start on a boundary and be perfectly cool
    BYTE    bStartingMask = 0x7f;
    BYTE    bEndingMask = 0xff;

    faOffset &= BLOCK_HEAP_PAYLOAD_MASK;
    faOffset >>= 2; // DWORD per bit

    // Determine the start.
    // Note: these casts are safe because the value in faOffset is
    // only 10 bits (BLOCK_HEAP_PAYLOAD_MASK) at this point.
    dwStartingBit = (DWORD)(faOffset & 7);
    dwStartingByte = (DWORD)(faOffset >> 3);
    bStartingMask >>= dwStartingBit;

    // Determine the number of bits to scan; each bit corresponds
    // to a DWORD, rounded up to the next DWORD
    dwBitsToScan = dwLength + 3;
    dwBitsToScan >>= 2;

    // Scan it
    // Case 1: Start and end bits within the same byte
    if ((dwStartingBit + dwBitsToScan) <= 8)
    {
        DWORD dwBitsFromRight = 8 - (dwStartingBit + dwBitsToScan);
        bEndingMask <<= dwBitsFromRight;
        bStartingMask = bStartingMask & bEndingMask;
        if (pNode->stAttributes.rgbBoundaries[dwStartingByte] & bStartingMask)
            return(TYPE_E_OUTOFBOUNDS);
    }
    else
    // Case 2: Multiple bytes
    {
        if (pNode->stAttributes.rgbBoundaries[dwStartingByte++] & bStartingMask)
            return(TYPE_E_OUTOFBOUNDS);
        dwBitsToScan -= (8 - dwStartingBit);
        while (dwBitsToScan >= 8)
        {
            // See if we cross any boundaries
            if (dwBitsToScan >= 32)
            {
                if (*(UNALIGNED DWORD *)(pNode->stAttributes.rgbBoundaries + dwStartingByte) != 0)
                    return(TYPE_E_OUTOFBOUNDS);
                dwStartingByte += 4;
                dwBitsToScan -= 32;
            }
            else if (dwBitsToScan >= 16)
            {
                if (*(UNALIGNED WORD *)(pNode->stAttributes.rgbBoundaries + dwStartingByte) != 0)
                    return(TYPE_E_OUTOFBOUNDS);
                dwStartingByte += 2;
                dwBitsToScan -= 16;
            }
            else
            {
                if (pNode->stAttributes.rgbBoundaries[dwStartingByte++] != 0)
                    return(TYPE_E_OUTOFBOUNDS);
                dwBitsToScan -= 8;
            }
        }

        // Final byte
        if (dwBitsToScan)
        {
            bEndingMask <<= (8 - dwBitsToScan);
            if (pNode->stAttributes.rgbBoundaries[dwStartingByte] & bEndingMask)
                return(TYPE_E_OUTOFBOUNDS);
        }
    }
    return(S_OK);
}

#endif

/***************************************************************************/
// Memory accessor class
//

CPool CBlockMemoryAccess::m_Pool((DWORD)'pBMv');

HRESULT CBlockMemoryAccess::AllocBlock(
            LPVOID  *ppvBlock,
            DWORD   dwBlockSize
            )
{
    TraceFunctEnterEx((LPARAM)this, "CBlockMemoryAccess::AllocBlock");

    _ASSERT(dwBlockSize == BLOCK_HEAP_NODE_SIZE);

    LPVOID pvBlock = m_Pool.Alloc();
    if (pvBlock) {
        ((LPBLOCK_HEAP_NODE)pvBlock)->stAttributes.fFlags = 0;
    } else if (SUCCEEDED(CMemoryAccess::AllocBlock(ppvBlock, BLOCK_HEAP_NODE_SIZE)))
    {
        pvBlock = *ppvBlock;
        ((LPBLOCK_HEAP_NODE)pvBlock)->stAttributes.fFlags = BLOCK_NOT_CPOOLED;
    }

    if (pvBlock)
    {
        ZeroMemory(((LPBLOCK_HEAP_NODE)pvBlock)->rgpChildren,
            sizeof(LPBLOCK_HEAP_NODE) * BLOCK_HEAP_ORDER);

#ifdef DEBUG_TRACK_ALLOCATION_BOUNDARIES
        ZeroMemory(((LPBLOCK_HEAP_NODE)pvBlock)->stAttributes.rgbBoundaries,
            BLOCK_HEAP_PAYLOAD >> 5);
#endif

        // If the debugging registry key is set, initialize the payload
        // to a byte pattern of '????'
        if (g_fFillPropertyPages)
        {
            FillMemory(((LPBLOCK_HEAP_NODE)pvBlock)->rgbData,
                sizeof(((LPBLOCK_HEAP_NODE)pvBlock)->rgbData), 0x3F);
        }

        *ppvBlock = pvBlock;
        TraceFunctLeaveEx((LPARAM)this);
        return(S_OK);
    }

    *ppvBlock = NULL;
    ErrorTrace((LPARAM)this, "CBlockMemoryAccess::AllocBlock failed");
    TraceFunctLeaveEx((LPARAM)this);
    return(E_OUTOFMEMORY);
}
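
//
// Note: blocks normally come from the static CPool above. When the pool is
// exhausted, AllocBlock falls back to the plain heap allocator and tags the
// block BLOCK_NOT_CPOOLED, so FreeBlock below can route each block back to
// the allocator it actually came from.
//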

HRESULT CBlockMemoryAccess::FreeBlock(
            LPVOID  pvBlock
            )
{
    TraceFunctEnterEx((LPARAM)this, "CBlockMemoryAccess::FreeBlock");

    if ((((LPBLOCK_HEAP_NODE)pvBlock)->stAttributes.fFlags) & BLOCK_NOT_CPOOLED)
    {
        CMemoryAccess::FreeBlock(pvBlock);
    } else {
        m_Pool.Free(pvBlock);
    }
    TraceFunctLeaveEx((LPARAM)this);
    return(S_OK);
}

HRESULT CMemoryAccess::AllocBlock(
            LPVOID  *ppvBlock,
            DWORD   dwBlockSize
            )
{
    TraceFunctEnterEx(0, "CMemoryAccess::AllocBlock");

    LPVOID pvBlock = (LPVOID) new BYTE[dwBlockSize];
    if (pvBlock)
    {
        ZeroMemory(pvBlock, dwBlockSize);
        *ppvBlock = pvBlock;
        TraceFunctLeave();
        return(S_OK);
    }
    *ppvBlock = NULL;
    TraceFunctLeave();
    return(E_OUTOFMEMORY);
}

HRESULT CMemoryAccess::FreeBlock(
            LPVOID  pvBlock
            )
{
    TraceFunctEnterEx(0, "CMemoryAccess::FreeBlock");

    delete [] (LPBYTE)pvBlock;
    TraceFunctLeave();
    return(S_OK);
}

/***************************************************************************/
// CBlockContext implementation
//

BOOL CBlockContext::IsValid()
{
    return(m_dwSignature == BLOCK_CONTEXT_SIGNATURE_VALID);
}

void CBlockContext::Set(
            LPBLOCK_HEAP_NODE   pLastAccessedNode,
            FLAT_ADDRESS        faLastAccessedNodeOffset
            )
{
    m_pLastAccessedNode = pLastAccessedNode;
    m_faLastAccessedNodeOffset = faLastAccessedNodeOffset;
    m_dwSignature = BLOCK_CONTEXT_SIGNATURE_VALID;
}

void CBlockContext::Invalidate()
{
    m_dwSignature = BLOCK_CONTEXT_SIGNATURE_INVALID;
}
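
//
// Usage sketch (illustrative; the caller code here is hypothetical): the
// context caches the last node a caller touched, so a run of sequential
// operations can hop forward from block to block instead of walking the
// tree from the root every time.
//
//      CBlockContext ctx;
//      ctx.Invalidate();                       // no cached node yet
//      pBlockMgr->WriteMemory(pbData, faOffset, cbData, &cbDone, &ctx);
//      pBlockMgr->ReadMemory(pbData, faOffset, cbData, &cbDone, &ctx);
//
// OperateOnMemory only honors the context when the target offset is at or
// past the cached node and no more than BLOCK_MAX_ALLOWED_LINEAR_HOPS
// blocks away; otherwise it falls back to GetNodeFromNodeId.
//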

/***************************************************************************/
// CBlockManager implementation
//

CBlockManager::CBlockManager(
            IMailMsgProperties      *pMsg,
            CBlockManagerGetStream  *pParent
            )
{
    TraceFunctEnterEx((LPARAM)this, "CBlockManager::CBlockManager");

    // Initialize
    m_dwSignature = BLOCK_HEAP_SIGNATURE_VALID;
    m_pRootNode = NULL;
    m_faEndOfData = 0;
    m_idNodeCount = 0;
    m_pParent = pParent;
    m_pMsg = pMsg;
    SetDirty(FALSE);

#ifdef DEBUG
    m_fCommitting = FALSE;
#endif

    TraceFunctLeaveEx((LPARAM)this);
}

CBlockManager::~CBlockManager()
{
    TraceFunctEnterEx((LPARAM)this, "CBlockManager::~CBlockManager");

    // Release all blocks
    Release();

    // Finally, invalidate the signature
    m_dwSignature = BLOCK_HEAP_SIGNATURE_INVALID;

    TraceFunctLeaveEx((LPARAM)this);
}

HRESULT CBlockManager::SetStreamSize(
            DWORD   dwStreamSize
            )
{
    // Initialize the stream size; this is only used when binding a
    // fresh MailMsg object to an existing stream.
    m_faEndOfData = (FLAT_ADDRESS)dwStreamSize;
    m_idNodeCount = ((dwStreamSize + BLOCK_HEAP_PAYLOAD_MASK) >> BLOCK_HEAP_PAYLOAD_BITS);
    return(S_OK);
}
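
//
// Worked example (illustrative; assumes the 4KB payload implied by the
// "only 10 bits" comment in VerifyAllocationBoundary): binding to a
// 10,000-byte stream gives m_idNodeCount = (10000 + 4095) >> 12 = 3, so
// three heap nodes cover the stream. The same round-up reappears in
// AllocateMemoryEx when it computes idLastNodeToCreate.
//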

BOOL CBlockManager::IsValid()
{
    return(m_dwSignature == BLOCK_HEAP_SIGNATURE_VALID);
}

HRESULT CBlockManager::GetStream(
            IMailMsgPropertyStream  **ppStream,
            BOOL                    fLockAcquired
            )
{
    _ASSERT(ppStream);
    if (!ppStream || !m_pParent)
        return(E_POINTER);

    HRESULT hrRes = m_pParent->GetStream(ppStream, fLockAcquired);
    return(hrRes);
}

HRESULT CBlockManager::MoveToNode(
            LPBLOCK_HEAP_NODE   *ppNode,
            HEAP_NODE_ID        idTargetNode,
            BOOL                fLockAcquired
            )
{
    HRESULT             hrRes = S_OK;
    LPBLOCK_HEAP_NODE   pNode;
    HEAP_NODE_ID        idNode;

    if (!ppNode || !*ppNode)
        return(E_POINTER);
    if (idTargetNode >= m_idNodeCount)
        return(STG_E_INVALIDPARAMETER);

    pNode = *ppNode;
    idNode = pNode->stAttributes.idNode;

    // Jump directly if the target lives under the same parent node
    if (idNode && idTargetNode)
    {
        if (((idNode - 1) >> BLOCK_HEAP_ORDER_BITS) ==
            ((idTargetNode - 1) >> BLOCK_HEAP_ORDER_BITS))
        {
            HEAP_NODE_ID        idChildNode = (idTargetNode - 1) & BLOCK_HEAP_ORDER_MASK;
            LPBLOCK_HEAP_NODE   pParent = pNode->stAttributes.pParentNode;

            *ppNode = pParent->rgpChildren[idChildNode];
            if (!*ppNode)
                hrRes = LoadBlockIfUnavailable(
                            idTargetNode,
                            pParent,
                            idChildNode,
                            ppNode,
                            fLockAcquired);
            return(hrRes);
        }
    }

    hrRes = GetNodeFromNodeId(
                idTargetNode,
                ppNode,
                fLockAcquired);
    return(hrRes);
}

HRESULT CBlockManager::GetNextNode(
            LPBLOCK_HEAP_NODE   *ppNode,
            BOOL                fLockAcquired
            )
{
    if (!ppNode || !*ppNode)
        return(E_POINTER);

    HRESULT hrRes = MoveToNode(
                ppNode,
                (*ppNode)->stAttributes.idNode + 1,
                fLockAcquired);
    if (FAILED(hrRes))
        *ppNode = NULL;
    return(hrRes);
}

HRESULT CBlockManager::LoadBlockIfUnavailable(
            HEAP_NODE_ID        idNode,
            LPBLOCK_HEAP_NODE   pParent,
            HEAP_NODE_ID        idChildNode,
            LPBLOCK_HEAP_NODE   *ppNode,
            BOOL                fLockAcquired
            )
{
    _ASSERT(ppNode);
    if (*ppNode)
        return(S_OK);

    HRESULT                 hrRes = S_OK;
    IMailMsgPropertyStream  *pStream;

    hrRes = GetStream(&pStream, fLockAcquired);
    if (!SUCCEEDED(hrRes))
        return(E_UNEXPECTED);

    // Calculate the stream offset and load the block.
    // idNode shifted really contains an offset, not a full pointer, so we
    // can (and must) cast it for the call to ReadBlocks to be OK.
    DWORD dwOffset = (DWORD)(idNode << BLOCK_HEAP_PAYLOAD_BITS);

    if (!fLockAcquired)
        WriteLock();

    if (!*ppNode)
    {
        LPBLOCK_HEAP_NODE   pNode = NULL;
        DWORD               dwLength = BLOCK_HEAP_PAYLOAD;

        hrRes = m_bma.AllocBlock(
                    (LPVOID *)&pNode,
                    BLOCK_HEAP_NODE_SIZE);
        if (SUCCEEDED(hrRes))
        {
            LPBYTE pTemp = pNode->rgbData;

            hrRes = pStream->ReadBlocks(
                        m_pMsg,
                        1,
                        &dwOffset,
                        &dwLength,
                        &pTemp,
                        NULL);
            if (FAILED(hrRes) &&
                (hrRes != HRESULT_FROM_WIN32(ERROR_HANDLE_EOF)))
            {
                HRESULT myRes = m_bma.FreeBlock(pNode);
                _ASSERT(SUCCEEDED(myRes));
            }
            else
            {
                if (pParent)
                    pParent->rgpChildren[idChildNode] = pNode;
                pNode->stAttributes.pParentNode = pParent;
                RESET_BLOCK_FLAGS(pNode->stAttributes.fFlags);
                pNode->stAttributes.idChildNode = idChildNode;
                pNode->stAttributes.idNode = idNode;
                pNode->stAttributes.faOffset = dwOffset;
                *ppNode = pNode;
                hrRes = S_OK;
            }
        }
    }

    if (!fLockAcquired)
        WriteUnlock();
    return(hrRes);
}

inline HRESULT CBlockManager::GetEdgeListFromNodeId(
            HEAP_NODE_ID    idNode,
            HEAP_NODE_ID    *rgEdgeList,
            DWORD           *pdwEdgeCount
            )
{
    DWORD           dwCurrentLevel;
    HEAP_NODE_ID    *pEdge = rgEdgeList;

    // This is a strictly internal call; we assume the caller is optimized
    // and handles the cases for idNode <= BLOCK_HEAP_ORDER itself.
    // Processing only starts at 2 layers or more.

    // Debug: make sure we are within range
    _ASSERT(idNode > BLOCK_HEAP_ORDER);
    _ASSERT(idNode <= NODE_ID_ABSOLUTE_MAX);

    // Strip off the root node
    idNode--;

    // We need to do depth minus 1 loops since the top edge will be
    // the remainder of the final loop
    for (dwCurrentLevel = 0;
         dwCurrentLevel < (MAX_HEAP_DEPTH - 1);
         )
    {
        // The quotient is the parent node in the upper level;
        // the remainder is the edge from the parent to the
        // current node.
        *pEdge++ = idNode & BLOCK_HEAP_ORDER_MASK;
        idNode >>= BLOCK_HEAP_ORDER_BITS;
        idNode--;
        dwCurrentLevel++;

        // If the node is less than the number of children per node,
        // we are done.
        if (idNode < BLOCK_HEAP_ORDER)
            break;
    }
    *pEdge++ = idNode;
    *pdwEdgeCount = dwCurrentLevel + 1;
    return(S_OK);
}
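
//
// Worked example (illustrative; assumes BLOCK_HEAP_ORDER = 32, i.e.
// BLOCK_HEAP_ORDER_BITS = 5 -- the real values live in blockmgr.h):
// for idNode = 33, stripping the root leaves 32; the first pass emits
// edge 32 & 31 = 0 and leaves idNode = (32 >> 5) - 1 = 0, which is below
// BLOCK_HEAP_ORDER, so the loop stops and the top edge 0 is appended,
// giving rgEdgeList = { 0, 0 }. Walking that list backwards from the
// root in GetNodeFromNodeId visits child 0 of the root (node 1), then
// child 0 of node 1, whose id is (1 << 5) + 1 = 33, as expected.
//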

//
// Inner loop optimized for O(1) cost.
//
HRESULT CBlockManager::GetNodeFromNodeId(
            HEAP_NODE_ID        idNode,
            LPBLOCK_HEAP_NODE   *ppNode,
            BOOL                fLockAcquired
            )
{
    HRESULT hrRes = S_OK;

    _ASSERT(IsValid());
    _ASSERT(ppNode);

    // If it is the top-level node, we return immediately. Note this is
    // supposed to be the case 90% of the time.
    hrRes = LoadBlockIfUnavailable(0, NULL, 0, &m_pRootNode, fLockAcquired);
    if (!idNode || FAILED(hrRes))
    {
        *ppNode = m_pRootNode;
        return(hrRes);
    }

    LPBLOCK_HEAP_NODE   pNode = m_pRootNode;
    LPBLOCK_HEAP_NODE   *ppMyNode = &m_pRootNode;

    // Now, see if the referenced node exists
    if (idNode >= m_idNodeCount)
        return(STG_E_INVALIDPARAMETER);

    // Optimize for 1 hop; we should scarcely have to go into
    // the else case ...
    if (idNode <= BLOCK_HEAP_ORDER)
    {
        ppMyNode = &(m_pRootNode->rgpChildren[idNode - 1]);
        hrRes = LoadBlockIfUnavailable(idNode, m_pRootNode, idNode - 1, ppMyNode, fLockAcquired);
        if (SUCCEEDED(hrRes))
            *ppNode = *ppMyNode;
    }
    else
    {
        HEAP_NODE_ID    rgEdgeList[MAX_HEAP_DEPTH];
        DWORD           dwEdgeCount;
        HEAP_NODE_ID    CurrentEdge;
        HEAP_NODE_ID    idFactor = 0;

        // Debug: make sure we are within range
        _ASSERT(idNode <= NODE_ID_ABSOLUTE_MAX);

        // Get the edge list, backwards
        GetEdgeListFromNodeId(idNode, rgEdgeList, &dwEdgeCount);
        _ASSERT(dwEdgeCount >= 2);

        // Walk the list backwards
        while (dwEdgeCount--)
        {
            // Find the next bucket and calculate the node ID
            CurrentEdge = rgEdgeList[dwEdgeCount];
            ppMyNode = &(pNode->rgpChildren[CurrentEdge]);
            idFactor <<= BLOCK_HEAP_ORDER_BITS;
            idFactor += (CurrentEdge + 1);
            hrRes = LoadBlockIfUnavailable(idFactor, pNode, CurrentEdge, ppMyNode, fLockAcquired);
            if (FAILED(hrRes))
                break;

            // Set the current node to the bucket in the layer below
            pNode = *ppMyNode;
        }

        // Fill in the results ...
        *ppNode = pNode;
    }
    return(hrRes);
}

//
// Identical optimizations as GetNodeFromNodeId, O(1) cost.
//
HRESULT CBlockManager::GetParentNodeFromNodeId(
            HEAP_NODE_ID        idNode,
            LPBLOCK_HEAP_NODE   *ppNode
            )
{
    HRESULT hrRes = S_OK;

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::GetParentNodeFromNodeId");

    _ASSERT(IsValid());
    _ASSERT(ppNode);

    // The root node has no parent. This case should be screened out
    // before calling this function, but we will fail gracefully.
    if (!idNode)
    {
        _ASSERT(idNode != 0);
        *ppNode = NULL;
        return(STG_E_INVALIDPARAMETER);
    }

    // Note m_pRootNode can only be NULL if idNode is zero, which we
    // have already handled!
    _ASSERT(m_pRootNode);

    LPBLOCK_HEAP_NODE   pNode = m_pRootNode;
    LPBLOCK_HEAP_NODE   *ppMyNode = &m_pRootNode;

    // Optimize for 1 hop; we should scarcely have to take the
    // multi-level path ...
    if (idNode > BLOCK_HEAP_ORDER)
    {
        HEAP_NODE_ID    rgEdgeList[MAX_HEAP_DEPTH];
        DWORD           dwEdgeCount;
        HEAP_NODE_ID    CurrentEdge;
        HEAP_NODE_ID    idFactor = 0;

        // Debug: make sure we are within range
        _ASSERT(idNode <= NODE_ID_ABSOLUTE_MAX);

        // Get the edge list, backwards
        GetEdgeListFromNodeId(idNode, rgEdgeList, &dwEdgeCount);
        _ASSERT(dwEdgeCount >= 2);

        // Walk the list backwards, skipping the bottom-most edge so
        // the walk stops at the parent of the target node
        --dwEdgeCount;
        while (dwEdgeCount)
        {
            // Find the next bucket and calculate the node ID
            CurrentEdge = rgEdgeList[dwEdgeCount];
            ppMyNode = &(pNode->rgpChildren[CurrentEdge]);
            idFactor <<= BLOCK_HEAP_ORDER_BITS;
            idFactor += (CurrentEdge + 1);
            hrRes = LoadBlockIfUnavailable(idFactor, pNode, CurrentEdge, ppMyNode, TRUE);
            if (FAILED(hrRes))
                break;

            // Set the current node to the bucket in the layer below
            pNode = *ppMyNode;
            dwEdgeCount--;
        }
    }

    // Fill in the results ...
    *ppNode = *ppMyNode;

    TraceFunctLeaveEx((LPARAM)this);
    return(hrRes);
}

#define GetNodeIdFromOffset(faOffset)   ((faOffset) >> BLOCK_HEAP_PAYLOAD_BITS)

HRESULT CBlockManager::InsertNodeGivenPreviousNode(
            LPBLOCK_HEAP_NODE   pNodeToInsert,
            LPBLOCK_HEAP_NODE   pPreviousNode
            )
{
    HRESULT hrRes = S_OK;

    _ASSERT(IsValid());
    _ASSERT(pNodeToInsert);

    LPBLOCK_HEAP_NODE_ATTRIBUTES pAttrib = &pNodeToInsert->stAttributes;

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::InsertNodeGivenPreviousNode");

    if (!pPreviousNode)
    {
        // This is the root node ...
        DebugTrace((LPARAM)this, "Inserting the root node");
        pAttrib->pParentNode = NULL;
        pAttrib->idChildNode = 0;
        pAttrib->idNode = 0;
        pAttrib->faOffset = 0;
        DEFAULT_BLOCK_FLAGS(pAttrib->fFlags);
        m_pRootNode = pNodeToInsert;
        TraceFunctLeaveEx((LPARAM)this);
        return(S_OK);
    }
    else
    {
        LPBLOCK_HEAP_NODE_ATTRIBUTES pOldAttrib = &pPreviousNode->stAttributes;

        // Fill out the attributes for the new node. We have a special case
        // for the first node after the root, where we need to explicitly
        // point its parent to the root node.
        if (pOldAttrib->idNode == 0)
        {
            pAttrib->pParentNode = m_pRootNode;

            // We are child id 0 again
            pAttrib->idChildNode = 0;
        }
        else
        {
            pAttrib->pParentNode = pOldAttrib->pParentNode;
            pAttrib->idChildNode = pOldAttrib->idChildNode + 1;
        }
        pAttrib->idNode = pOldAttrib->idNode + 1;
        pAttrib->faOffset = pOldAttrib->faOffset + BLOCK_HEAP_PAYLOAD;
        DEFAULT_BLOCK_FLAGS(pAttrib->fFlags);

        if (pOldAttrib->idChildNode < BLOCK_HEAP_ORDER_MASK)
        {
            // We are in the same parent node, so it's simple
            DebugTrace((LPARAM)this, "Inserting node at slot %u",
                pAttrib->idChildNode);
            pAttrib->pParentNode->rgpChildren[pAttrib->idChildNode] = pNodeToInsert;
            TraceFunctLeaveEx((LPARAM)this);
            return(S_OK);
        }
    }

    // The previous node and the new node have different parents,
    // so we have to work from scratch ...
    LPBLOCK_HEAP_NODE pNode = NULL;

    // We might as well search from the top ...
    hrRes = GetParentNodeFromNodeId(pAttrib->idNode, &pNode);
    if (SUCCEEDED(hrRes))
    {
        // Update the affected attributes
        DebugTrace((LPARAM)this, "Inserting node at slot 0");
        pAttrib->pParentNode = pNode;
        pAttrib->idChildNode = 0;

        // Hook up to our parent
        pNode->rgpChildren[0] = pNodeToInsert;
    }
    else
    {
        // The only reason for failure is that the parent
        // of the requested parent is not allocated
        _ASSERT(hrRes == STG_E_INVALIDPARAMETER);
    }

    TraceFunctLeaveEx((LPARAM)this);
    return(hrRes);
}

HRESULT CBlockManager::GetAllocatedSize(
            FLAT_ADDRESS    *pfaSizeAllocated
            )
{
    HRESULT hrRes = S_OK;

    _ASSERT(IsValid());
    _ASSERT(pfaSizeAllocated);

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::GetAllocatedSize");

    if (!pfaSizeAllocated)
        hrRes = STG_E_INVALIDPARAMETER;
    else
        *pfaSizeAllocated = AtomicAdd(&m_faEndOfData, 0);

    TraceFunctLeaveEx((LPARAM)this);
    return(hrRes);
}

HRESULT CBlockManager::AllocateMemory(
            DWORD           dwSizeDesired,
            FLAT_ADDRESS    *pfaOffsetToAllocatedMemory,
            DWORD           *pdwSizeAllocated,
            CBlockContext   *pContext       // Optional
            )
{
    HRESULT hrRes = S_OK;

    _ASSERT(IsValid());
    _ASSERT(pfaOffsetToAllocatedMemory);
    _ASSERT(pdwSizeAllocated);

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::AllocateMemory");

    hrRes = AllocateMemoryEx(
                TRUE,
                dwSizeDesired,
                pfaOffsetToAllocatedMemory,
                pdwSizeAllocated,
                pContext);

    TraceFunctLeaveEx((LPARAM)this);
    return(hrRes);
}

HRESULT CBlockManager::AllocateMemoryEx(
            BOOL            fAcquireLock,
            DWORD           dwSizeDesired,
            FLAT_ADDRESS    *pfaOffsetToAllocatedMemory,
            DWORD           *pdwSizeAllocated,
            CBlockContext   *pContext       // Optional
            )
{
    DWORD               dwSize;
    FLAT_ADDRESS        faOffset;
    FLAT_ADDRESS        faStartOfBlock;
    HEAP_NODE_ID        idNode;
    HEAP_NODE_ID        idCurrentNode = 0;
    HEAP_NODE_ID        idLastNodeToCreate = 0;
    HRESULT             hrRes = S_OK;
    BOOL                fMarkStart = FALSE;
    LPBLOCK_HEAP_NODE   pNode = NULL;

    _ASSERT(IsValid());
    _ASSERT(pfaOffsetToAllocatedMemory);
    _ASSERT(pdwSizeAllocated);

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::AllocateMemoryEx");

    // First of all, we do an atomic reservation of the memory,
    // which allows multiple threads to call AllocateMemory
    // concurrently.

    // DWORD-align the allocation
    dwSizeDesired += BLOCK_DWORD_ALIGN_MASK;
    dwSizeDesired &= ~(BLOCK_DWORD_ALIGN_MASK);

    faStartOfBlock = AtomicAdd(&m_faEndOfData, dwSizeDesired);
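
    // Worked example (illustrative): a request for 13 bytes is rounded up
    // to (13 + 3) & ~3 = 16 bytes, and AtomicAdd bumps m_faEndOfData by 16
    // while returning its *old* value. faStartOfBlock therefore names a
    // 16-byte range that no other thread can also reserve; the rest of
    // this function only ensures that backing nodes exist for that range.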

    // Fill this in first so that if we succeed, we won't have to fill
    // it in everywhere; if this fails, it's no big deal.
    *pdwSizeAllocated = dwSizeDesired;
    DebugTrace((LPARAM)this, "Allocating %u bytes", dwSizeDesired);

    // OK, we have two scenarios:
    // 1) The current block is large enough to honor the request.
    // 2) We need one or more extra blocks to accommodate the
    //    request.
    idNode = GetNodeIdFromOffset(faStartOfBlock);

    // Calculate all the required parameters
    faOffset = faStartOfBlock & BLOCK_HEAP_PAYLOAD_MASK;
    dwSize = BLOCK_HEAP_PAYLOAD - (DWORD)faOffset;

    // Invalidate the context
    if (pContext)
        pContext->Invalidate();

    if (idNode < m_idNodeCount)
    {
        // The starting node exists
        hrRes = GetNodeFromNodeId(idNode, &pNode);
        if (FAILED(hrRes)) {
            TraceFunctLeave();
            return hrRes;
        }
        _ASSERT(pNode);

#ifdef DEBUG_TRACK_ALLOCATION_BOUNDARIES
        // Set the beginning of the allocation
        SetAllocationBoundary(faStartOfBlock, pNode);
#endif

        // Set the context here; most likely a write will follow immediately
        if (pContext)
            pContext->Set(pNode, pNode->stAttributes.faOffset);

        if (dwSize >= dwSizeDesired)
        {
            // Scenario 1: enough space left
            DebugTrace((LPARAM)this, "Allocated from existing node");

            // Just fill in the output parameters
            *pfaOffsetToAllocatedMemory = faStartOfBlock;
            TraceFunctLeaveEx((LPARAM)this);
            return(S_OK);
        }

        // Scenario 2a: More blocks needed. Starting from the
        // next block, see how many more we need.
        dwSizeDesired -= dwSize;
    }
    else
    {
        // Scenario 2b: More blocks needed.
        // NOTE: This should be a rare code path except under
        // high contention ...

        // Now we again have 2 cases:
        // 1) If our offset is in the middle of a block, then
        //    we know another thread is creating the current block,
        //    and all we have to do is wait for that block to be
        //    created, but create any subsequent blocks.
        // 2) If we are exactly at the start of the block, then
        //    it is the responsibility of the current thread to
        //    create the block.
        if (faOffset != 0)
        {
            // Scenario 1: We don't have to create the current block,
            // so skip it
            dwSizeDesired -= dwSize;
        }
    }

    DebugTrace((LPARAM)this, "Creating new node");

    // We must grab an exclusive lock before we go ahead and
    // create any blocks
#ifndef BLOCKMGR_DISABLE_CONTENTION_CONTROL
    if (fAcquireLock) WriteLock();
#endif

    // At this point, we can do whatever we want with the node
    // list and nodes. We will try to create all the missing
    // nodes, whether or not they lie in our desired region.
    //
    // We need to do this because if an allocation failed before,
    // we have missing nodes between the end of the allocated nodes
    // and the current node we are allocating. Since these nodes
    // contain links to the deeper nodes, we will break if we have
    // missing nodes.
    //
    // This is necessary since this function is not serialized
    // elsewhere. So a thread entering later than another can
    // grab the lock before the earlier thread. If we don't
    // fill in the bubbles, the current thread will still have
    // to wait for the earlier blocks to be created by the
    // earlier thread. We would also have chaos if our allocations
    // worked and the ones in front of us failed. This may be
    // a bottleneck for all threads on this message, but once
    // we're done with this lock, they'll all unblock. Moreover, if
    // we fail, they will all have to fail!

    // Figure out how many blocks to create, up to the known limit
    idLastNodeToCreate =
        (m_faEndOfData + BLOCK_HEAP_PAYLOAD_MASK) >> BLOCK_HEAP_PAYLOAD_BITS;

    // We know where the block starts; the question is whether we're
    // successful or not.
    *pfaOffsetToAllocatedMemory = faStartOfBlock;

    // The node count could have changed while we were waiting
    // for the lock, so we have to refresh our records.
    // Better yet, if another thread already created our blocks
    // for us, we can just leave ...
    idCurrentNode = m_idNodeCount;
    if (idCurrentNode < idLastNodeToCreate)
    {
        LPBLOCK_HEAP_NODE   pNewNode = NULL;
        BOOL                fSetContext = TRUE;

        // No such luck, gotta go in and do the hard work ...
        if (!pContext)
            fSetContext = FALSE;

        // Now, we have a function that inserts a node given
        // the previous node (not the parent), so we have to
        // go find the previous node. It has got to be
        // there unless our node list is messed up.
        pNode = NULL;
        if (idCurrentNode > 0)
        {
            // This is not the root, so we can find its predecessor
            hrRes = GetNodeFromNodeId(idCurrentNode - 1, &pNode, TRUE);
            if (FAILED(hrRes)) {
#ifndef BLOCKMGR_DISABLE_CONTENTION_CONTROL
                if (fAcquireLock) WriteUnlock();
#endif
                TraceFunctLeave();
                return hrRes;
            }
            _ASSERT(pNode);
            _ASSERT(pNode->stAttributes.idNode == (idCurrentNode - 1));
        }

        while (idCurrentNode < idLastNodeToCreate)
        {
            hrRes = m_bma.AllocBlock((LPVOID *)&pNewNode, sizeof(BLOCK_HEAP_NODE));
            if (!SUCCEEDED(hrRes))
            {
                // We can't proceed, but what we've got is cool
                DebugTrace((LPARAM)this,
                    "Failed to allocate node %u", idCurrentNode);
                break;
            }
            DebugTrace((LPARAM)this, "Allocated node %u", idCurrentNode);

#ifdef DEBUG_TRACK_ALLOCATION_BOUNDARIES
            // Need to do some work here
            ZeroMemory(pNewNode->stAttributes.rgbBoundaries,
                sizeof(pNewNode->stAttributes.rgbBoundaries));

            // See if we have to mark the start of the allocation
#endif

            // Got the block; fill in the info and insert the block.
            // Again, we shouldn't fail if we get this far.
            hrRes = InsertNodeGivenPreviousNode(pNewNode, pNode);
            _ASSERT(SUCCEEDED(hrRes));

            // Set the context value here if we need to. Note: if the
            // following condition is TRUE, we were in scenario 2b above.
            if (idCurrentNode == idNode)
            {
                if (fSetContext)
                {
                    // The context is actually the node that marks the
                    // start of the reserved block.
                    // Note we only need to do this if we were in scenario
                    // 2b above.
                    pContext->Set(pNewNode, pNewNode->stAttributes.faOffset);
                    fSetContext = FALSE;
                }

#ifdef DEBUG_TRACK_ALLOCATION_BOUNDARIES
                // Set the beginning of the allocation
                SetAllocationBoundary(faStartOfBlock, pNewNode);
#endif
            }

            // Next
            pNode = pNewNode;
            idCurrentNode++;
        }

        // Now update the counter to reflect what we've created.
        m_idNodeCount = idCurrentNode;
    }

#ifndef BLOCKMGR_DISABLE_CONTENTION_CONTROL
    if (fAcquireLock) WriteUnlock();
#endif

    TraceFunctLeaveEx((LPARAM)this);
    return(hrRes);
}

BOOL CBlockManager::IsMemoryAllocated(
            FLAT_ADDRESS    faOffset,
            DWORD           dwLength
            )
{
    // Note we check for actually allocated memory against
    // m_idNodeCount, whereas m_faEndOfData includes data that is
    // reserved but not yet allocated.
    HEAP_NODE_ID idNode = GetNodeIdFromOffset(faOffset);
    if (idNode < m_idNodeCount)
    {
        idNode = GetNodeIdFromOffset(faOffset + dwLength - 1);
        if (idNode < m_idNodeCount)
            return(TRUE);
        _ASSERT(FALSE);
    }
    _ASSERT(FALSE);
    return(FALSE);
}

HRESULT CBlockManager::OperateOnMemory(
            DWORD           dwOperation,
            LPBYTE          pbBuffer,
            FLAT_ADDRESS    faTargetOffset,
            DWORD           dwBytesToDo,
            DWORD           *pdwBytesDone,
            CBlockContext   *pContext       // Optional
            )
{
    BOOL    fUseContext = (pContext != NULL);
    BOOL    fBoundaryCheck = !(dwOperation & BOP_NO_BOUNDARY_CHECK);
    BOOL    fLockAcquired = (dwOperation & BOP_LOCK_ACQUIRED);
    DWORD   dwHopsAway = 0;
    HRESULT hrRes = S_OK;

    LPBLOCK_HEAP_NODE pNode = NULL;

    _ASSERT(IsValid());
    _ASSERT(pbBuffer);
    _ASSERT(pdwBytesDone);

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::OperateOnMemory");

    // Mask out the operation
    dwOperation &= BOP_OPERATION_MASK;

    if (fUseContext)
    {
        FLAT_ADDRESS faOffset = pContext->m_faLastAccessedNodeOffset;

        // We will not continue if a bad context is passed in
        if (!pContext->IsValid())
            fUseContext = FALSE;
        else
        {
            // More debug sanity checks
            _ASSERT(pContext->m_pLastAccessedNode->stAttributes.faOffset
                    == faOffset);

            // We will see if the context really helps
            if (faOffset <= faTargetOffset)
            {
                // Let's see how many hops away
                dwHopsAway = (DWORD)
                    ((faTargetOffset - faOffset) >> BLOCK_HEAP_PAYLOAD_BITS);

                // Not worth it if more than a number of hops away
                if (dwHopsAway > BLOCK_MAX_ALLOWED_LINEAR_HOPS)
                    fUseContext = FALSE;
            }
            else
                fUseContext = FALSE;
        }
    }

    if (fUseContext)
    {
        DebugTrace((LPARAM)this, "using context");

        // Quickly access the starting target node ...
        pNode = pContext->m_pLastAccessedNode;
        while (dwHopsAway--)
        {
            hrRes = GetNextNode(&pNode, fLockAcquired);
            if (FAILED(hrRes))
            {
                fUseContext = FALSE;
                break;
            }
        }
    }

    if (!fUseContext)
    {
        DebugTrace((LPARAM)this, "ignoring context");

        // Okay, gotta find the desired node from scratch ...
        hrRes = GetNodeFromNodeId(
                    GetNodeIdFromOffset(faTargetOffset),
                    &pNode,
                    fLockAcquired);
        if (!SUCCEEDED(hrRes))
        {
            ErrorTrace((LPARAM)this, "GetNodeFromNodeId failed");
            TraceFunctLeaveEx((LPARAM)this);
            return(STG_E_INVALIDPARAMETER);
        }
        _ASSERT(pNode);
    }

    DebugTrace((LPARAM)this, "pNode = 0x%x", pNode);
    _ASSERT(pNode != NULL);

    if (!IsMemoryAllocated(faTargetOffset, dwBytesToDo))
    {
        ErrorTrace((LPARAM)this, "Specified range is unallocated");
        TraceFunctLeaveEx((LPARAM)this);
        return(STG_E_INVALIDPARAMETER);
    }

    // Clear the counter ...
    *pdwBytesDone = 0;

    // Do the actual processing
    switch (dwOperation)
    {
    case BOP_READ:
    case BOP_WRITE:
        {
            DWORD dwChunkSize;
            DWORD dwBytesDone = 0;

            faTargetOffset &= BLOCK_HEAP_PAYLOAD_MASK;
            dwChunkSize = (DWORD)(BLOCK_HEAP_PAYLOAD - faTargetOffset);
            while (dwBytesToDo)
            {
                if (dwBytesToDo < dwChunkSize)
                    dwChunkSize = dwBytesToDo;

#ifdef DEBUG_TRACK_ALLOCATION_BOUNDARIES
                if (fBoundaryCheck)
                {
                    // Make sure we are not stepping over boundaries
                    hrRes = VerifyAllocationBoundary(faTargetOffset,
                                dwChunkSize, pNode);
                    if (!SUCCEEDED(hrRes))
                        break;
                }
#endif
                if (dwOperation == BOP_READ)
                {
                    DebugTrace((LPARAM)this, "Reading %u bytes", dwChunkSize);
                    MoveMemory((LPVOID)pbBuffer,
                        (LPVOID)&(pNode->rgbData[faTargetOffset]),
                        dwChunkSize);
                }
                else
                {
                    DebugTrace((LPARAM)this, "Writing %u bytes", dwChunkSize);
                    MoveMemory((LPVOID)&(pNode->rgbData[faTargetOffset]),
                        (LPVOID)pbBuffer,
                        dwChunkSize);

                    // Set the block to dirty
                    pNode->stAttributes.fFlags |= BLOCK_IS_DIRTY;
                    SetDirty(TRUE);
                }

                // Adjust the buffer for the next read/write
                pbBuffer += dwChunkSize;

                // Adjust the counters
                dwBytesToDo -= dwChunkSize;
                dwBytesDone += dwChunkSize;

                // After the first operation, the offset will always
                // be zero, and the default chunk size is a full payload
                faTargetOffset = 0;
                dwChunkSize = BLOCK_HEAP_PAYLOAD;

                // Read the next chunk
                if (dwBytesToDo)
                {
                    // See if we have to load this ...
                    hrRes = GetNextNode(&pNode, fLockAcquired);
                    if (FAILED(hrRes))
                        break;
                }
            }

            // Fill out how much we've done
            *pdwBytesDone = dwBytesDone;
        }
        break;

    default:
        ErrorTrace((LPARAM)this, "Invalid operation %u", dwOperation);
        hrRes = STG_E_INVALIDFUNCTION;
    }

    // Update the context if we succeeded
    if (SUCCEEDED(hrRes) && pContext)
    {
        pContext->Set(pNode, pNode->stAttributes.faOffset);
    }

    TraceFunctLeaveEx((LPARAM)this);
    return(hrRes);
}

HRESULT CBlockManager::ReadMemory(
            LPBYTE          pbBuffer,
            FLAT_ADDRESS    faTargetOffset,
            DWORD           dwBytesToRead,
            DWORD           *pdwBytesRead,
            CBlockContext   *pContext       // Optional
            )
{
    return(OperateOnMemory(
                BOP_READ,
                pbBuffer,
                faTargetOffset,
                dwBytesToRead,
                pdwBytesRead,
                pContext));
}

HRESULT CBlockManager::WriteMemory(
            LPBYTE          pbBuffer,
            FLAT_ADDRESS    faTargetOffset,
            DWORD           dwBytesToWrite,
            DWORD           *pdwBytesWritten,
            CBlockContext   *pContext       // Optional
            )
{
    return(OperateOnMemory(
                BOP_WRITE,
                pbBuffer,
                faTargetOffset,
                dwBytesToWrite,
                pdwBytesWritten,
                pContext));
}
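
//
// Example (illustrative; assumes the 4KB payload derived earlier): both
// wrappers delegate to OperateOnMemory, which splits a transfer at payload
// boundaries. Copying 6,000 bytes starting 100 bytes into a block becomes
// two MoveMemory calls: 3,996 bytes from the first node and 2,004 bytes
// from the next, with GetNextNode loading the second node from the stream
// if it is not yet resident.
//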

HRESULT CBlockManager::ReleaseNode(
            LPBLOCK_HEAP_NODE   pNode
            )
{
    HRESULT hrRes = S_OK;
    HRESULT tempRes;

    // Release all children recursively
    for (DWORD i = 0; i < BLOCK_HEAP_ORDER; i++)
        if (pNode->rgpChildren[i])
        {
            tempRes = ReleaseNode(pNode->rgpChildren[i]);
            if (FAILED(tempRes))
                hrRes = tempRes;
            pNode->rgpChildren[i] = NULL;
        }

    // Release self
    m_bma.FreeBlock(pNode);
    return(hrRes);
}

HRESULT CBlockManager::Release()
{
    HRESULT hrRes = S_OK;

    _ASSERT(IsValid());

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::Release");

    // This function assumes that no more threads are using this
    // class and no new threads are inside trying to reserve
    // memory. Still, for good measure, it grabs the write lock
    // so that at least the list does not get corrupted when
    // stray threads are lingering around.

    // Grab the lock before we go in and destroy the node list
#ifndef BLOCKMGR_DISABLE_CONTENTION_CONTROL
    WriteLock();
#endif

    if (m_pRootNode)
    {
        hrRes = ReleaseNode(m_pRootNode);
        if (SUCCEEDED(hrRes))
            m_pRootNode = NULL;
    }

#ifndef BLOCKMGR_DISABLE_CONTENTION_CONTROL
    WriteUnlock();
#endif

    TraceFunctLeaveEx((LPARAM)this);
    return(hrRes);
}

HRESULT CBlockManager::AtomicDereferenceAndRead(
            LPBYTE          pbBuffer,
            DWORD           *pdwBufferSize,
            LPBYTE          pbInfoStruct,
            FLAT_ADDRESS    faOffsetToInfoStruct,
            DWORD           dwSizeOfInfoStruct,
            DWORD           dwOffsetInInfoStructToOffset,
            DWORD           dwOffsetInInfoStructToSize,
            CBlockContext   *pContext       // Optional
            )
{
    HRESULT         hrRes = S_OK;
    FLAT_ADDRESS    faOffset;
    DWORD           dwSizeToRead;
    DWORD           dwSizeRead;

    _ASSERT(IsValid());
    _ASSERT(pbBuffer);
    _ASSERT(pdwBufferSize);
    _ASSERT(pbInfoStruct);
    // pContext can be NULL

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::AtomicDereferenceAndRead");

#ifndef BLOCKMGR_DISABLE_ATOMIC_FUNCS
    // Acquire the synchronization object
    WriteLock();
#endif

    do
    {
        BOOL    fInsufficient = FALSE;
        DWORD   dwBufferSize = *pdwBufferSize;

        // Read the info struct
        DebugTrace((LPARAM)this, "Reading information structure");
        hrRes = OperateOnMemory(
                    BOP_READ | BOP_LOCK_ACQUIRED,
                    pbInfoStruct,
                    faOffsetToInfoStruct,
                    dwSizeOfInfoStruct,
                    &dwSizeRead,
                    pContext);
        if (!SUCCEEDED(hrRes))
            break;

        // Fill out the parameters
        faOffset = *(UNALIGNED FLAT_ADDRESS *)(pbInfoStruct + dwOffsetInInfoStructToOffset);
        dwSizeToRead = *(UNALIGNED DWORD *)(pbInfoStruct + dwOffsetInInfoStructToSize);
        DebugTrace((LPARAM)this, "Reading %u bytes from offset %u",
            dwSizeToRead, (DWORD)faOffset);

        // See if we have a large enough buffer
        if (dwBufferSize < dwSizeToRead)
        {
            fInsufficient = TRUE;
            DebugTrace((LPARAM)this,
                "Insufficient buffer, only reading %u bytes",
                dwBufferSize);
        }
        else
            dwBufferSize = dwSizeToRead;

        // Do the read
        hrRes = OperateOnMemory(
                    BOP_READ | BOP_LOCK_ACQUIRED,
                    pbBuffer,
                    faOffset,
                    dwBufferSize,
                    &dwSizeRead,
                    pContext);
        if (!SUCCEEDED(hrRes))
            break;

        *pdwBufferSize = dwSizeToRead;

        // If we had an insufficient buffer, we must return the
        // correct HRESULT
        if (fInsufficient)
            hrRes = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);

    } while (0);

#ifndef BLOCKMGR_DISABLE_ATOMIC_FUNCS
    WriteUnlock();
#endif

    TraceFunctLeaveEx((LPARAM)this);
    return(hrRes);
}
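
//
// Layout sketch (field names hypothetical, for illustration only): the
// "info struct" is any fixed-size record that embeds the location and size
// of a variable-size blob, e.g.
//
//      struct PROP_RECORD {
//          FLAT_ADDRESS    faBlob;     // at dwOffsetInInfoStructToOffset
//          DWORD           cbBlob;     // at dwOffsetInInfoStructToSize
//          // ...
//      };
//
// Holding the write lock across both reads is what makes the dereference
// atomic: no writer can move or resize the blob between the read of the
// record and the read of the bytes it points to.
//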

inline HRESULT CBlockManager::WriteAndIncrement(
            LPBYTE          pbBuffer,
            FLAT_ADDRESS    faOffset,
            DWORD           dwBytesToWrite,
            DWORD           *pdwValueToIncrement,
            DWORD           dwIncrementValue,
            CBlockContext   *pContext       // Optional
            )
{
    HRESULT hrRes = S_OK;
    DWORD   dwSize;

    _ASSERT(IsValid());
    _ASSERT(pbBuffer);
    // pdwValueToIncrement and pContext can be NULL

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::WriteAndIncrement");

    // Very simple: this function assumes no contention, since the caller
    // is already supposed to be inside some sort of atomic operation
    hrRes = OperateOnMemory(
                BOP_WRITE | BOP_LOCK_ACQUIRED,
                pbBuffer,
                faOffset,
                dwBytesToWrite,
                &dwSize,
                pContext);
    if (SUCCEEDED(hrRes))
    {
        // This must be true if the write succeeded, but then ...
        _ASSERT(dwBytesToWrite == dwSize);

        // The write succeeded, so increment the value in an
        // interlocked fashion. We do that so that simultaneous
        // reads are locked out properly. Simultaneous writes
        // should be serialized by design, but we do this for good
        // measure in case the caller is not aware of this requirement.
        if (pdwValueToIncrement)
            AtomicAdd(pdwValueToIncrement, dwIncrementValue);
    }

    TraceFunctLeaveEx((LPARAM)this);
    return(hrRes);
}

HRESULT CBlockManager::AtomicWriteAndIncrement(
            LPBYTE          pbBuffer,
            FLAT_ADDRESS    faOffset,
            DWORD           dwBytesToWrite,
            DWORD           *pdwValueToIncrement,
            DWORD           dwReferenceValue,
            DWORD           dwIncrementValue,
            CBlockContext   *pContext       // Optional
            )
{
    HRESULT hrRes = S_OK;

    _ASSERT(IsValid());
    _ASSERT(pbBuffer);
    // pdwValueToIncrement and pContext can be NULL

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::AtomicWriteAndIncrement");

    // Since acquiring the synchronization object is potentially costly,
    // we do a quick sanity check first to make sure no thread has
    // beaten us to this slot
    if (pdwValueToIncrement &&
        *pdwValueToIncrement != dwReferenceValue)
    {
        DebugTrace((LPARAM)this, "Aborting due to change in property count");
        TraceFunctLeaveEx((LPARAM)this);
        return(HRESULT_FROM_WIN32(ERROR_RETRY));
    }

#ifndef BLOCKMGR_DISABLE_ATOMIC_FUNCS
    // This is a pass-through call to WriteAndIncrement, made only
    // after acquiring the synchronization object
    WriteLock();
#endif

    // The wait for the lock could have been long, so we do a second
    // check to see if we're out of luck after all this ...
    if (pdwValueToIncrement &&
        *pdwValueToIncrement != dwReferenceValue)
    {
#ifndef BLOCKMGR_DISABLE_ATOMIC_FUNCS
        // Gotta release it!
        WriteUnlock();
#endif
        DebugTrace((LPARAM)this, "Aborting after acquiring lock");
        TraceFunctLeaveEx((LPARAM)this);
        return(HRESULT_FROM_WIN32(ERROR_RETRY));
    }

    hrRes = WriteAndIncrement(
                pbBuffer,
                faOffset,
                dwBytesToWrite,
                pdwValueToIncrement,
                dwIncrementValue,
                pContext);

#ifndef BLOCKMGR_DISABLE_ATOMIC_FUNCS
    WriteUnlock();
#endif

    TraceFunctLeaveEx((LPARAM)this);
    return(hrRes);
}
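
//
// Note: the check / lock / re-check sequence above is a double-checked
// optimistic scheme. The cheap unlocked comparison against
// dwReferenceValue rejects most losing threads without paying for the
// lock; the second comparison under the lock makes the decision
// authoritative. A caller that receives ERROR_RETRY can re-read the
// counter and try again.
//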

HRESULT CBlockManager::AtomicAllocWriteAndIncrement(
            DWORD           dwDesiredSize,
            FLAT_ADDRESS    *pfaOffsetToAllocatedMemory,
            FLAT_ADDRESS    faOffsetToWriteOffsetToAllocatedMemory,
            FLAT_ADDRESS    faOffsetToWriteSizeOfAllocatedMemory,
            LPBYTE          pbInitialValueForAllocatedMemory,
            DWORD           dwSizeOfInitialValue,
            LPBYTE          pbBufferToWriteFrom,
            DWORD           dwOffsetInAllocatedMemoryToWriteTo,
            DWORD           dwSizeofBuffer,
            DWORD           *pdwValueToIncrement,
            DWORD           dwReferenceValue,
            DWORD           dwIncrementValue,
            CBlockContext   *pContext       // Optional
            )
{
    HRESULT hrRes = S_OK;
    DWORD   dwAllocatedSize;
    DWORD   dwSize;

    _ASSERT(IsValid());
    _ASSERT(pfaOffsetToAllocatedMemory);
    _ASSERT(pbBufferToWriteFrom);
    _ASSERT(pdwValueToIncrement);
    // pContext can be NULL

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::AtomicAllocWriteAndIncrement");

    // Since acquiring the synchronization object is potentially costly,
    // we do a quick sanity check first to make sure no thread has
    // beaten us to this slot
    if (*pdwValueToIncrement != dwReferenceValue)
    {
        DebugTrace((LPARAM)this, "Aborting due to change in property count");
        TraceFunctLeaveEx((LPARAM)this);
        return(HRESULT_FROM_WIN32(ERROR_RETRY));
    }

#ifndef BLOCKMGR_DISABLE_ATOMIC_FUNCS
    // This is a pass-through call to AllocateMemoryEx and
    // WriteAndIncrement, made after acquiring the synchronization object
    WriteLock();
#endif

    // The wait for the lock could have been long, so we do a second
    // check to see if we're out of luck after all this ...
    if (*pdwValueToIncrement != dwReferenceValue)
    {
#ifndef BLOCKMGR_DISABLE_ATOMIC_FUNCS
        // Gotta release it!
        WriteUnlock();
#endif
        DebugTrace((LPARAM)this, "Aborting after acquiring lock");
        TraceFunctLeaveEx((LPARAM)this);
        return(HRESULT_FROM_WIN32(ERROR_RETRY));
    }

    // Try to allocate the requested block
    hrRes = AllocateMemoryEx(
                FALSE,
                dwDesiredSize,
                pfaOffsetToAllocatedMemory,
                &dwAllocatedSize,
                pContext);
    if (SUCCEEDED(hrRes))
    {
        // Okay, initialize the allocated memory
        if (pbInitialValueForAllocatedMemory)
        {
            hrRes = WriteMemory(
                        pbInitialValueForAllocatedMemory,
                        *pfaOffsetToAllocatedMemory,
                        dwSizeOfInitialValue,
                        &dwSize,
                        pContext);

            // See if we need to write the size and offset info
            if (SUCCEEDED(hrRes))
            {
                if (faOffsetToWriteOffsetToAllocatedMemory !=
                    INVALID_FLAT_ADDRESS)
                    hrRes = WriteMemory(
                                (LPBYTE)pfaOffsetToAllocatedMemory,
                                faOffsetToWriteOffsetToAllocatedMemory,
                                sizeof(FLAT_ADDRESS),
                                &dwSize,
                                pContext);
                if (SUCCEEDED(hrRes) &&
                    faOffsetToWriteSizeOfAllocatedMemory !=
                    INVALID_FLAT_ADDRESS)
                    hrRes = WriteMemory(
                                (LPBYTE)&dwAllocatedSize,
                                faOffsetToWriteSizeOfAllocatedMemory,
                                sizeof(DWORD),
                                &dwSize,
                                pContext);
            }
        }

        if (SUCCEEDED(hrRes))
        {
            // OK, since we got the memory, the write should not
            // fail, but we check the result anyway.
            hrRes = WriteAndIncrement(
                        pbBufferToWriteFrom,
                        *pfaOffsetToAllocatedMemory +
                            dwOffsetInAllocatedMemoryToWriteTo,
                        dwSizeofBuffer,
                        pdwValueToIncrement,
                        dwIncrementValue,
                        pContext);
        }
    }

#ifndef BLOCKMGR_DISABLE_ATOMIC_FUNCS
    WriteUnlock();
#endif

    TraceFunctLeaveEx((LPARAM)this);
    return(hrRes);
}

HRESULT CBlockManager::MarkBlockAs(
            LPBYTE  pbData,
            BOOL    fClean
            )
{
    LPBLOCK_HEAP_NODE pNode;

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::MarkBlockAs");

    // Find the attributes record from the data pointer
    pNode = CONTAINING_RECORD(pbData, BLOCK_HEAP_NODE, rgbData);
    _ASSERT(pNode);
    _ASSERT(pNode->stAttributes.fFlags & BLOCK_PENDING_COMMIT);

    // Cannot be pending and dirty at the same time
    _ASSERT(!(pNode->stAttributes.fFlags & BLOCK_IS_DIRTY));

    // Clear the pending bit, and re-mark the block as dirty if the
    // commit did not leave it clean
    pNode->stAttributes.fFlags &= ~(BLOCK_PENDING_COMMIT);
    if (!fClean) {
        pNode->stAttributes.fFlags |= BLOCK_IS_DIRTY;
        SetDirty(TRUE);
    }

    TraceFunctLeaveEx((LPARAM)this);
    return(S_OK);
}

HRESULT CBlockManager::CommitDirtyBlocks(
            FLAT_ADDRESS            faStartingOffset,
            FLAT_ADDRESS            faLengthToScan,
            DWORD                   dwFlags,
            IMailMsgPropertyStream  *pStream,
            BOOL                    fDontMarkAsCommit,
            BOOL                    fComputeBlockCountsOnly,
            DWORD                   *pcBlocksToWrite,
            DWORD                   *pcTotalBytesToWrite,
            IMailMsgNotify          *pNotify
            )
{
    HRESULT             hrRes = S_OK;
    HEAP_NODE_ID        idNode;
    LPBLOCK_HEAP_NODE   pNode;
    DWORD               dwBlocksToScan;
    BOOL                fLimitedLength;
    DWORD               dwCount = 0;
    DWORD               rgdwOffset[CMAILMSG_COMMIT_PAGE_BLOCK_SIZE];
    DWORD               rgdwSize[CMAILMSG_COMMIT_PAGE_BLOCK_SIZE];
    LPBYTE              rgpData[CMAILMSG_COMMIT_PAGE_BLOCK_SIZE];
    DWORD               *pdwOffset;
    DWORD               *pdwSize;
    LPBYTE              *ppbData;

    _ASSERT(pStream);

    TraceFunctEnterEx((LPARAM)this, "CBlockManager::CommitDirtyBlocks");

    fLimitedLength = FALSE;
    pNode = NULL;
    if (faStartingOffset != INVALID_FLAT_ADDRESS)
    {
        idNode = GetNodeIdFromOffset(faStartingOffset);
        if (idNode >= m_idNodeCount)
        {
            hrRes = STG_E_INVALIDPARAMETER;
            goto Cleanup;
        }
        hrRes = GetNodeFromNodeId(idNode, &pNode);
        if (!SUCCEEDED(hrRes))
            goto Cleanup;

        if (faLengthToScan != INVALID_FLAT_ADDRESS)
        {
            // See how many blocks to scan, rounding up
            faLengthToScan += (faStartingOffset & BLOCK_HEAP_PAYLOAD_MASK);
            faLengthToScan += BLOCK_HEAP_PAYLOAD_MASK;
            dwBlocksToScan = (DWORD)(faLengthToScan >> BLOCK_HEAP_PAYLOAD_BITS);
            fLimitedLength = TRUE;
        }
        else
            dwBlocksToScan = 0;
    }
    else
    {
        hrRes = STG_E_INVALIDPARAMETER;
        goto Cleanup;
    }

    // Loop until we fill up the array or have no more blocks
    dwCount = 0;
    pdwOffset = rgdwOffset;
    pdwSize = rgdwSize;
    ppbData = rgpData;
    while (pNode)
    {
        if (fLimitedLength && !dwBlocksToScan--)
            break;

        if ((dwFlags & MAILMSG_GETPROPS_COMPLETE) ||
            (pNode->stAttributes.fFlags & BLOCK_IS_DIRTY))
        {
            // Make sure we are not full ...
            if (dwCount == CMAILMSG_COMMIT_PAGE_BLOCK_SIZE)
            {
                *pcBlocksToWrite += dwCount;
                if (!fComputeBlockCountsOnly) {

                    // We are full, so write out the blocks
                    hrRes = pStream->WriteBlocks(
                                m_pMsg,
                                dwCount,
                                rgdwOffset,
                                rgdwSize,
                                rgpData,
                                pNotify);
                    if (!SUCCEEDED(hrRes))
                        break;

                    if (!fDontMarkAsCommit) {
                        // Go back and mark all blocks as clean
                        ppbData = rgpData;
                        while (dwCount--)
                            MarkBlockAs(*ppbData++, TRUE);
                    }
                }
                dwCount = 0;

                // Reset our pointers and go on
                pdwOffset = rgdwOffset;
                pdwSize = rgdwSize;
                ppbData = rgpData;
            }

            if (!fComputeBlockCountsOnly && !fDontMarkAsCommit) {
                // Clear the dirty bit and mark the block as pending commit
                pNode->stAttributes.fFlags &= BLOCK_CLEAN_MASK;
                pNode->stAttributes.fFlags |= BLOCK_PENDING_COMMIT;
            }

            // Fill in the array elements.
            // faOffset really contains an offset, not a full pointer, so we
            // can (and must) cast it for the calls to WriteBlocks to be OK.
            *pdwOffset++ = (DWORD)pNode->stAttributes.faOffset;
            *pdwSize++ = BLOCK_HEAP_PAYLOAD;
            *ppbData++ = pNode->rgbData;
            *pcTotalBytesToWrite += BLOCK_HEAP_PAYLOAD;
            dwCount++;
        }

        // Next node; pNode == NULL if there are no more nodes
        hrRes = GetNextNode(&pNode, FALSE);
        if (hrRes == STG_E_INVALIDPARAMETER) hrRes = S_OK;
        DebugTrace((LPARAM)this, "hrRes = %x", hrRes);
    }

    if (SUCCEEDED(hrRes) && dwCount)
    {
        *pcBlocksToWrite += dwCount;
        if (!fComputeBlockCountsOnly) {
            // Write out the remaining blocks
            hrRes = pStream->WriteBlocks(
                        m_pMsg,
                        dwCount,
                        rgdwOffset,
                        rgdwSize,
                        rgpData,
                        pNotify);
        }
    }
    if (FAILED(hrRes)) SetCommitMode(FALSE);
    if (!fComputeBlockCountsOnly && !fDontMarkAsCommit && dwCount) {
        // Go back and mark all blocks to reflect the correct state
        ppbData = rgpData;
        while (dwCount--)
            MarkBlockAs(*ppbData++, SUCCEEDED(hrRes));
    }

Cleanup:
    TraceFunctLeaveEx((LPARAM)this);
    return(hrRes);
}
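
//
// Sizing note (illustrative; again assumes a 4KB payload): each WriteBlocks
// call above flushes at most CMAILMSG_COMMIT_PAGE_BLOCK_SIZE = 256 blocks,
// i.e. roughly 1MB of property data per iteration, so committing a larger
// stream simply issues multiple WriteBlocks batches.
//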